commit 698d75593433702996667aada53fb280ff283f6a Author: Jörg Prante Date: Mon Apr 1 22:48:07 2024 +0200 initial commit of 42.7.4.0 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d5b5511 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +/.settings +/.classpath +/.project +/.gradle +**/data +**/work +**/logs +**/.idea +**/target +**/out +**/build +.DS_Store +*.iml +*~ diff --git a/build.gradle b/build.gradle new file mode 100644 index 0000000..5bd6b7e --- /dev/null +++ b/build.gradle @@ -0,0 +1,34 @@ +plugins { + id 'maven-publish' + id 'signing' + id "io.github.gradle-nexus.publish-plugin" version "2.0.0-rc-1" +} + +wrapper { + gradleVersion = libs.versions.gradle.get() + distributionType = Wrapper.DistributionType.BIN +} + +ext { + user = 'joerg' + name = 'pgjdbc' + description = 'Fork of pgjdbc for simplified build and Java 21+' + inceptionYear = '2024' + url = 'https://xbib.org/' + user + '/' + name + scmUrl = 'https://xbib.org/' + user + '/' + name + scmConnection = 'scm:git:git://xbib.org/' + user + '/' + name + '.git' + scmDeveloperConnection = 'scm:git:ssh://forgejo@xbib.org:' + user + '/' + name + '.git' + issueManagementSystem = 'Forgejo' + issueManagementUrl = ext.scmUrl + '/issues' + licenseName = 'The Apache License, Version 2.0' + licenseUrl = 'http://www.apache.org/licenses/LICENSE-2.0.txt' +} + +subprojects { + apply from: rootProject.file('gradle/compile/java.gradle') + apply from: rootProject.file('gradle/test/junit5.gradle') + apply from: rootProject.file('gradle/repositories/maven.gradle') + apply from: rootProject.file('gradle/publish/maven.gradle') +} +apply from: rootProject.file('gradle/publish/sonatype.gradle') +apply from: rootProject.file('gradle/publish/forgejo.gradle') diff --git a/gradle.properties b/gradle.properties new file mode 100644 index 0000000..dbb89c5 --- /dev/null +++ b/gradle.properties @@ -0,0 +1,4 @@ +group = org.xbib.jdbc +name = pgjdbc +version = 42.7.4.0 + diff --git a/gradle/compile/java.gradle 
b/gradle/compile/java.gradle new file mode 100644 index 0000000..9547876 --- /dev/null +++ b/gradle/compile/java.gradle @@ -0,0 +1,30 @@ + +apply plugin: 'java-library' + +java { + toolchain { + languageVersion = JavaLanguageVersion.of(21) + } + modularity.inferModulePath.set(true) + withSourcesJar() + withJavadocJar() +} + +jar { + manifest { + attributes('Implementation-Version': project.version) + } + duplicatesStrategy = DuplicatesStrategy.INCLUDE +} + +tasks.withType(JavaCompile) { + options.fork = true + options.forkOptions.jvmArgs += ['-Duser.language=en','-Duser.country=US'] + options.compilerArgs.add('-Xlint:all') + options.encoding = 'UTF-8' +} + +tasks.withType(Javadoc) { + options.addStringOption('Xdoclint:none', '-quiet') + options.encoding = 'UTF-8' +} diff --git a/gradle/documentation/asciidoc.gradle b/gradle/documentation/asciidoc.gradle new file mode 100644 index 0000000..87ba22e --- /dev/null +++ b/gradle/documentation/asciidoc.gradle @@ -0,0 +1,55 @@ +apply plugin: 'org.xbib.gradle.plugin.asciidoctor' + +configurations { + asciidoclet +} + +dependencies { + asciidoclet "org.asciidoctor:asciidoclet:${project.property('asciidoclet.version')}" +} + + +asciidoctor { + backends 'html5' + outputDir = file("${rootProject.projectDir}/docs") + separateOutputDirs = false + attributes 'source-highlighter': 'coderay', + idprefix: '', + idseparator: '-', + toc: 'left', + doctype: 'book', + icons: 'font', + encoding: 'utf-8', + sectlink: true, + sectanchors: true, + linkattrs: true, + imagesdir: 'img', + stylesheet: "${projectDir}/src/docs/asciidoc/css/foundation.css" +} + + +/*javadoc { +options.docletpath = configurations.asciidoclet.files.asType(List) +options.doclet = 'org.asciidoctor.Asciidoclet' +//options.overview = "src/docs/asciidoclet/overview.adoc" +options.addStringOption "-base-dir", "${projectDir}" +options.addStringOption "-attribute", + "name=${project.name},version=${project.version},title-link=https://github.com/xbib/${project.name}" 
+configure(options) { + noTimestamp = true +} +}*/ + + +/*javadoc { + options.docletpath = configurations.asciidoclet.files.asType(List) + options.doclet = 'org.asciidoctor.Asciidoclet' + options.overview = "${rootProject.projectDir}/src/docs/asciidoclet/overview.adoc" + options.addStringOption "-base-dir", "${projectDir}" + options.addStringOption "-attribute", + "name=${project.name},version=${project.version},title-link=https://github.com/xbib/${project.name}" + options.destinationDirectory(file("${projectDir}/docs/javadoc")) + configure(options) { + noTimestamp = true + } +}*/ diff --git a/gradle/ide/idea.gradle b/gradle/ide/idea.gradle new file mode 100644 index 0000000..a4ee4a5 --- /dev/null +++ b/gradle/ide/idea.gradle @@ -0,0 +1,13 @@ +apply plugin: 'idea' + +idea { + module { + outputDir file('build/classes/java/main') + testOutputDir file('build/classes/java/test') + } + project { + jdkName = '17' + languageLevel = '17' + vcs = 'Git' + } +} diff --git a/gradle/publish/forgejo.gradle b/gradle/publish/forgejo.gradle new file mode 100644 index 0000000..b99b2fb --- /dev/null +++ b/gradle/publish/forgejo.gradle @@ -0,0 +1,16 @@ +if (project.hasProperty('forgeJoToken')) { + publishing { + repositories { + maven { + url 'https://xbib.org/api/packages/joerg/maven' + credentials(HttpHeaderCredentials) { + name = "Authorization" + value = "token ${project.property('forgeJoToken')}" + } + authentication { + header(HttpHeaderAuthentication) + } + } + } + } +} diff --git a/gradle/publish/ivy.gradle b/gradle/publish/ivy.gradle new file mode 100644 index 0000000..fe0a848 --- /dev/null +++ b/gradle/publish/ivy.gradle @@ -0,0 +1,27 @@ +apply plugin: 'ivy-publish' + +publishing { + repositories { + ivy { + url = "https://xbib.org/repo" + } + } + publications { + ivy(IvyPublication) { + from components.java + descriptor { + license { + name = 'The Apache License, Version 2.0' + url = 'http://www.apache.org/licenses/LICENSE-2.0.txt' + } + author { + name = 'Jörg Prante' + 
url = 'http://example.com/users/jane' + } + descriptor.description { + text = rootProject.ext.description + } + } + } + } +} \ No newline at end of file diff --git a/gradle/publish/maven.gradle b/gradle/publish/maven.gradle new file mode 100644 index 0000000..02d909e --- /dev/null +++ b/gradle/publish/maven.gradle @@ -0,0 +1,51 @@ + +publishing { + publications { + "${project.name}"(MavenPublication) { + from components.java + pom { + artifactId = project.name + name = project.name + description = rootProject.ext.description + url = rootProject.ext.url + inceptionYear = rootProject.ext.inceptionYear + packaging = 'jar' + organization { + name = 'xbib' + url = 'https://xbib.org' + } + developers { + developer { + id = 'jprante' + name = 'Jörg Prante' + email = 'joergprante@gmail.com' + url = 'https://xbib.org/joerg' + } + } + scm { + url = rootProject.ext.scmUrl + connection = rootProject.ext.scmConnection + developerConnection = rootProject.ext.scmDeveloperConnection + } + issueManagement { + system = rootProject.ext.issueManagementSystem + url = rootProject.ext.issueManagementUrl + } + licenses { + license { + name = rootProject.ext.licenseName + url = rootProject.ext.licenseUrl + distribution = 'repo' + } + } + } + } + } +} + +if (project.hasProperty("signing.keyId")) { + apply plugin: 'signing' + signing { + sign publishing.publications."${project.name}" + } +} diff --git a/gradle/publish/sonatype.gradle b/gradle/publish/sonatype.gradle new file mode 100644 index 0000000..02744cd --- /dev/null +++ b/gradle/publish/sonatype.gradle @@ -0,0 +1,12 @@ + +if (project.hasProperty('ossrhUsername') && project.hasProperty('ossrhPassword')) { + nexusPublishing { + repositories { + sonatype { + username = project.property('ossrhUsername') + password = project.property('ossrhPassword') + packageGroup = "org.xbib" + } + } + } +} diff --git a/gradle/quality/checkstyle.gradle b/gradle/quality/checkstyle.gradle new file mode 100644 index 0000000..85b8bd8 --- /dev/null +++ 
b/gradle/quality/checkstyle.gradle @@ -0,0 +1,19 @@ + +apply plugin: 'checkstyle' + +tasks.withType(Checkstyle) { + ignoreFailures = true + reports { + xml.getRequired().set(true) + html.getRequired().set(true) + } +} + +checkstyle { + configFile = rootProject.file('gradle/quality/checkstyle.xml') + ignoreFailures = true + showViolations = true + checkstyleMain { + source = sourceSets.main.allSource + } +} diff --git a/gradle/quality/checkstyle.xml b/gradle/quality/checkstyle.xml new file mode 100644 index 0000000..66a9aae --- /dev/null +++ b/gradle/quality/checkstyle.xml @@ -0,0 +1,333 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/gradle/quality/cyclonedx.gradle b/gradle/quality/cyclonedx.gradle new file mode 100644 index 0000000..d94a87c --- /dev/null +++ b/gradle/quality/cyclonedx.gradle @@ -0,0 +1,11 @@ +cyclonedxBom { + includeConfigs = [ 'runtimeClasspath' ] + skipConfigs = [ 'compileClasspath', 'testCompileClasspath' ] + projectType = "library" + schemaVersion = "1.4" + destination = file("build/reports") + outputName = "bom" + outputFormat = "json" + includeBomSerialNumber = true + componentVersion = "2.0.0" +} \ No newline at end of file diff --git a/gradle/quality/pmd.gradle b/gradle/quality/pmd.gradle new file mode 100644 index 0000000..55fcfda --- /dev/null +++ b/gradle/quality/pmd.gradle @@ -0,0 +1,17 @@ + +apply plugin: 'pmd' + +tasks.withType(Pmd) { + ignoreFailures = true + reports { + xml.getRequired().set(true) + html.getRequired().set(true) + } +} + +pmd { + ignoreFailures = true + consoleOutput = 
false + toolVersion = "6.51.0" + ruleSetFiles = rootProject.files('gradle/quality/pmd/category/java/bestpractices.xml') +} diff --git a/gradle/quality/sonarqube.gradle b/gradle/quality/sonarqube.gradle new file mode 100644 index 0000000..d8eddd0 --- /dev/null +++ b/gradle/quality/sonarqube.gradle @@ -0,0 +1,10 @@ + +sonarqube { + properties { + property "sonar.projectName", "${project.group} ${project.name}" + property "sonar.sourceEncoding", "UTF-8" + property "sonar.tests", "src/test/java" + property "sonar.scm.provider", "git" + property "sonar.junit.reportsPath", "build/test-results/test/" + } +} diff --git a/gradle/quality/spotbugs.gradle b/gradle/quality/spotbugs.gradle new file mode 100644 index 0000000..0b577c9 --- /dev/null +++ b/gradle/quality/spotbugs.gradle @@ -0,0 +1,15 @@ + +apply plugin: 'com.github.spotbugs' + +spotbugs { + //effort = "max" + //reportLevel = "low" + ignoreFailures = true +} + +spotbugsMain { + reports { + xml.getRequired().set(false) + html.getRequired().set(true) + } +} diff --git a/gradle/repositories/maven.gradle b/gradle/repositories/maven.gradle new file mode 100644 index 0000000..ec58acb --- /dev/null +++ b/gradle/repositories/maven.gradle @@ -0,0 +1,4 @@ +repositories { + mavenLocal() + mavenCentral() +} diff --git a/gradle/test/junit5.gradle b/gradle/test/junit5.gradle new file mode 100644 index 0000000..6cace6f --- /dev/null +++ b/gradle/test/junit5.gradle @@ -0,0 +1,26 @@ +dependencies { + testImplementation testLibs.junit.jupiter.api + testImplementation testLibs.junit.jupiter.params + testImplementation testLibs.hamcrest + testRuntimeOnly testLibs.junit.jupiter.engine + testRuntimeOnly testLibs.junit.jupiter.platform.launcher +} + +test { + useJUnitPlatform() + failFast = true + systemProperty 'java.util.logging.config.file', 'src/test/resources/logging.properties' + testLogging { + events 'STARTED', 'PASSED', 'FAILED', 'SKIPPED' + showStandardStreams = true + } + afterSuite { desc, result -> + if (!desc.parent) { + 
println "\nTest result: ${result.resultType}" + println "Test summary: ${result.testCount} tests, " + + "${result.successfulTestCount} succeeded, " + + "${result.failedTestCount} failed, " + + "${result.skippedTestCount} skipped" + } + } +} diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000..d64cd49 Binary files /dev/null and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..e6aba25 --- /dev/null +++ b/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,7 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-all.zip +networkTimeout=10000 +validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew new file mode 100755 index 0000000..1aa94a4 --- /dev/null +++ b/gradlew @@ -0,0 +1,249 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. 
If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. 
+while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. 
+ # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 0000000..6689b85 --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,92 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. 
+@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. 
+ +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/pgjdbc/build.gradle b/pgjdbc/build.gradle new file mode 100644 index 0000000..a6e49ca --- /dev/null +++ b/pgjdbc/build.gradle @@ -0,0 +1,3 @@ +dependencies { + api project(':scram-client') +} diff --git a/pgjdbc/src/main/java/module-info.java b/pgjdbc/src/main/java/module-info.java new file mode 100644 index 0000000..f74f2cc --- /dev/null +++ b/pgjdbc/src/main/java/module-info.java @@ -0,0 +1,25 @@ +import java.sql.Driver; + +module org.xbib.jdbc.pgjdbc { + requires java.logging; + requires java.management; + requires java.naming; + requires transitive java.sql; + requires transitive java.security.jgss; + requires org.xbib.scram.client; + exports org.postgresql; + exports org.postgresql.copy; + exports org.postgresql.core; + exports org.postgresql.core.v3; + exports org.postgresql.fastpath; + exports org.postgresql.jdbc; + exports org.postgresql.jdbc2; + exports org.postgresql.largeobject; + exports org.postgresql.replication; + exports org.postgresql.replication.fluent; + exports org.postgresql.replication.fluent.logical; + exports org.postgresql.replication.fluent.physical; + exports org.postgresql.util; + exports org.postgresql.xml; + provides Driver with org.postgresql.Driver; +} diff --git 
a/pgjdbc/src/main/java/org/postgresql/Driver.java b/pgjdbc/src/main/java/org/postgresql/Driver.java new file mode 100644 index 0000000..2fac15d --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/Driver.java @@ -0,0 +1,796 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql; + +import org.postgresql.jdbc.PgConnection; +import org.postgresql.jdbc.ResourceLock; +import org.postgresql.jdbcurlresolver.PgPassParser; +import org.postgresql.jdbcurlresolver.PgServiceConfParser; +import org.postgresql.util.DriverInfo; +import org.postgresql.util.GT; +import org.postgresql.util.HostSpec; +import org.postgresql.util.PGPropertyUtil; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.util.SharedTimer; +import org.postgresql.util.URLCoder; + +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.URL; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.DriverPropertyInfo; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + *

The Java SQL framework allows for multiple database drivers. Each driver should supply a class + * that implements the Driver interface

+ * + *

The DriverManager will try to load as many drivers as it can find and then for any given + * connection request, it will ask each driver in turn to try to connect to the target URL.

+ * + *

It is strongly recommended that each Driver class should be small and standalone so that the + * Driver class can be loaded and queried without bringing in vast quantities of supporting code.

+ * + *

When a Driver class is loaded, it should create an instance of itself and register it with the + * DriverManager. This means that a user can load and register a driver by doing + * Class.forName("foo.bah.Driver")

+ * + * @see org.postgresql.PGConnection + * @see java.sql.Driver + */ +@SuppressWarnings("try") +public class Driver implements java.sql.Driver { + + private static Driver registeredDriver; + private static final Logger PARENT_LOGGER = Logger.getLogger("org.postgresql"); + private static final Logger LOGGER = Logger.getLogger("org.postgresql.Driver"); + private static final SharedTimer SHARED_TIMER = new SharedTimer(); + + static { + try { + // moved the registerDriver from the constructor to here + // because some clients call the driver themselves (I know, as + // my early jdbc work did - and that was based on other examples). + // Placing it here, means that the driver is registered once only. + register(); + } catch (SQLException e) { + throw new ExceptionInInitializerError(e); + } + } + + // Helper to retrieve default properties from classloader resource + // properties files. + private Properties defaultProperties; + + private final ResourceLock lock = new ResourceLock(); + + public Driver() { + } + + private Properties getDefaultProperties() throws IOException { + try (ResourceLock ignore = lock.obtain()) { + if (defaultProperties != null) { + return defaultProperties; + } + + // Make sure we load properties with the maximum possible privileges. 
+ try { + defaultProperties = + doPrivileged(new PrivilegedExceptionAction() { + @Override + public Properties run() throws IOException { + return loadDefaultProperties(); + } + }); + } catch (PrivilegedActionException e) { + Exception ex = e.getException(); + if (ex instanceof IOException) { + throw (IOException) ex; + } + throw new RuntimeException(e); + } catch (Throwable e) { + if (e instanceof IOException) { + throw (IOException) e; + } + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } + if (e instanceof Error) { + throw (Error) e; + } + throw new RuntimeException(e); + } + + return defaultProperties; + } + } + + @SuppressWarnings("unchecked") + private static T doPrivileged(PrivilegedExceptionAction action) throws Throwable { + try { + Class accessControllerClass = Class.forName("java.security.AccessController"); + Method doPrivileged = accessControllerClass.getMethod("doPrivileged", + PrivilegedExceptionAction.class); + return (T) doPrivileged.invoke(null, action); + } catch (ClassNotFoundException e) { + return action.run(); + } catch (InvocationTargetException e) { + throw e.getCause(); + } + } + + private Properties loadDefaultProperties() throws IOException { + Properties merged = new Properties(); + + try { + PGProperty.USER.set(merged, System.getProperty("user.name")); + } catch (SecurityException se) { + // We're just trying to set a default, so if we can't + // it's not a big deal. + } + + // If we are loaded by the bootstrap classloader, getClassLoader() + // may return null. In that case, try to fall back to the system + // classloader. + // + // We should not need to catch SecurityException here as we are + // accessing either our own classloader, or the system classloader + // when our classloader is null. The ClassLoader javadoc claims + // neither case can throw SecurityException. 
+ ClassLoader cl = getClass().getClassLoader(); + if (cl == null) { + LOGGER.log(Level.FINE, "Can't find our classloader for the Driver; " + + "attempt to use the system class loader"); + cl = ClassLoader.getSystemClassLoader(); + } + + if (cl == null) { + LOGGER.log(Level.WARNING, "Can't find a classloader for the Driver; not loading driver " + + "configuration from org/postgresql/driverconfig.properties"); + return merged; // Give up on finding defaults. + } + + LOGGER.log(Level.FINE, "Loading driver configuration via classloader {0}", cl); + + // When loading the driver config files we don't want settings found + // in later files in the classpath to override settings specified in + // earlier files. To do this we've got to read the returned + // Enumeration into temporary storage. + ArrayList urls = new ArrayList<>(); + Enumeration urlEnum = cl.getResources("org/postgresql/driverconfig.properties"); + while (urlEnum.hasMoreElements()) { + urls.add(urlEnum.nextElement()); + } + + for (int i = urls.size() - 1; i >= 0; i--) { + URL url = urls.get(i); + LOGGER.log(Level.FINE, "Loading driver configuration from: {0}", url); + InputStream is = url.openStream(); + merged.load(is); + is.close(); + } + + return merged; + } + + /** + *

Try to make a database connection to the given URL. The driver should return "null" if it
   * realizes it is the wrong kind of driver to connect to the given URL. This will be common, as
   * when the JDBC driverManager is asked to connect to a given URL, it passes the URL to each
   * loaded driver in turn.
   *
   * <p>The driver should raise an SQLException if it is the right driver to connect to the given
   * URL, but has trouble connecting to the database.</p>
   *
   * <p>The java.util.Properties argument can be used to pass arbitrary string tag/value pairs as
   * connection arguments.</p>
   *
   * <ul>
   * <li>user - (required) The user to connect as</li>
   * <li>password - (optional) The password for the user</li>
   * <li>ssl - (optional) Use SSL when connecting to the server</li>
   * <li>readOnly - (optional) Set connection to read-only by default</li>
   * <li>charSet - (optional) The character set to be used for converting to/from
   * the database to unicode. If multibyte is enabled on the server then the character set of the
   * database is used as the default, otherwise the jvm character encoding is used as the default.
   * This value is only used when connecting to a 7.2 or older server.</li>
   * <li>loglevel - (optional) Enable logging of messages from the driver. The value is an integer
   * from 0 to 2 where: OFF = 0, INFO = 1, DEBUG = 2. The output is sent to
   * DriverManager.getPrintWriter() if set, otherwise it is sent to System.out.</li>
   * <li>compatible - (optional) This is used to toggle between different functionality
   * as it changes across different releases of the jdbc driver code. The values here are versions
   * of the jdbc client and not server versions. For example in 7.1 get/setBytes worked on
   * LargeObject values, in 7.2 these methods were changed to work on bytea values. This change in
   * functionality could be disabled by setting the compatible level to be "7.1", in which case the
   * driver will revert to the 7.1 functionality.</li>
   * </ul>
   *
   * <p>Normally, at least "user" and "password" properties should be included in the properties.
   * For a list of supported character encodings, see
   * http://java.sun.com/products/jdk/1.2/docs/guide/internat/encoding.doc.html Note that you will
   * probably want to have set up the Postgres database itself to use the same encoding, with the
   * {@code -E <encoding>} argument to createdb.</p>
   *
   * <p>Our protocol takes the forms:</p>
   *
   * <pre>
   *  jdbc:postgresql://host:port/database?param1=val1&amp;...
   * </pre>
+ * + * @param url the URL of the database to connect to + * @param info a list of arbitrary tag/value pairs as connection arguments + * @return a connection to the URL or null if it isnt us + * @exception SQLException if a database access error occurs or the url is + * {@code null} + * @see java.sql.Driver#connect + */ + @Override + public Connection connect(String url, Properties info) throws SQLException { + if (url == null) { + throw new SQLException("url is null"); + } + // get defaults + Properties defaults; + + if (!url.startsWith("jdbc:postgresql:")) { + return null; + } + try { + defaults = getDefaultProperties(); + } catch (IOException ioe) { + throw new PSQLException(GT.tr("Error loading default settings from driverconfig.properties"), + PSQLState.UNEXPECTED_ERROR, ioe); + } + + // override defaults with provided properties + Properties props = new Properties(defaults); + if (info != null) { + Set e = info.stringPropertyNames(); + for (String propName : e) { + String propValue = info.getProperty(propName); + if (propValue == null) { + throw new PSQLException( + GT.tr("Properties for the driver contains a non-string value for the key ") + + propName, + PSQLState.UNEXPECTED_ERROR); + } + props.setProperty(propName, propValue); + } + } + // parse URL and add more properties + if ((props = parseURL(url, props)) == null) { + throw new PSQLException( + GT.tr("Unable to parse URL {0}", url), + PSQLState.UNEXPECTED_ERROR); + } + try { + + LOGGER.log(Level.FINE, "Connecting with URL: {0}", url); + + // Enforce login timeout, if specified, by running the connection + // attempt in a separate thread. If we hit the timeout without the + // connection completing, we abandon the connection attempt in + // the calling thread, but the separate thread will keep trying. + // Eventually, the separate thread will either fail or complete + // the connection; at that point we clean up the connection if + // we managed to establish one after all. 
See ConnectThread for + // more details. + long timeout = timeout(props); + if (timeout <= 0) { + return makeConnection(url, props); + } + + ConnectThread ct = new ConnectThread(url, props); + Thread thread = new Thread(ct, "PostgreSQL JDBC driver connection thread"); + thread.setDaemon(true); // Don't prevent the VM from shutting down + thread.start(); + return ct.getResult(timeout); + } catch (PSQLException ex1) { + LOGGER.log(Level.FINE, "Connection error: ", ex1); + // re-throw the exception, otherwise it will be caught next, and a + // org.postgresql.unusual error will be returned instead. + throw ex1; + } catch (Exception ex2) { + if ("java.security.AccessControlException".equals(ex2.getClass().getName())) { + // java.security.AccessControlException has been deprecated for removal, so compare the class name + throw new PSQLException( + GT.tr( + "Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."), + PSQLState.UNEXPECTED_ERROR, ex2); + } + LOGGER.log(Level.FINE, "Unexpected connection error: ", ex2); + throw new PSQLException( + GT.tr( + "Something unusual has occurred to cause the driver to fail. Please report this exception."), + PSQLState.UNEXPECTED_ERROR, ex2); + } + } + + /** + * this is an empty method left here for graalvm + * we removed the ability to setup the logger from properties + * due to a security issue + * @param props Connection Properties + */ + private void setupLoggerFromProperties(final Properties props) { + } + + /** + * Perform a connect in a separate thread; supports getting the results from the original thread + * while enforcing a login timeout. 
+ */ + private static class ConnectThread implements Runnable { + private final ResourceLock lock = new ResourceLock(); + private final Condition lockCondition = lock.newCondition(); + + ConnectThread(String url, Properties props) { + this.url = url; + this.props = props; + } + + @Override + public void run() { + Connection conn; + Throwable error; + + try { + conn = makeConnection(url, props); + error = null; + } catch (Throwable t) { + conn = null; + error = t; + } + + try (ResourceLock ignore = lock.obtain()) { + if (abandoned) { + if (conn != null) { + try { + conn.close(); + } catch (SQLException e) { + } + } + } else { + result = conn; + resultException = error; + lockCondition.signal(); + } + } + } + + /** + * Get the connection result from this (assumed running) thread. If the timeout is reached + * without a result being available, a SQLException is thrown. + * + * @param timeout timeout in milliseconds + * @return the new connection, if successful + * @throws SQLException if a connection error occurs or the timeout is reached + */ + public Connection getResult(long timeout) throws SQLException { + long expiry = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) + timeout; + try (ResourceLock ignore = lock.obtain()) { + while (true) { + if (result != null) { + return result; + } + + Throwable resultException = this.resultException; + if (resultException != null) { + if (resultException instanceof SQLException) { + resultException.fillInStackTrace(); + throw (SQLException) resultException; + } else { + throw new PSQLException( + GT.tr( + "Something unusual has occurred to cause the driver to fail. 
Please report this exception."), + PSQLState.UNEXPECTED_ERROR, resultException); + } + } + + long delay = expiry - TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); + if (delay <= 0) { + abandoned = true; + throw new PSQLException(GT.tr("Connection attempt timed out."), + PSQLState.CONNECTION_UNABLE_TO_CONNECT); + } + + try { + lockCondition.await(delay, TimeUnit.MILLISECONDS); + } catch (InterruptedException ie) { + + // reset the interrupt flag + Thread.currentThread().interrupt(); + abandoned = true; + + // throw an unchecked exception which will hopefully not be ignored by the calling code + throw new RuntimeException(GT.tr("Interrupted while attempting to connect.")); + } + } + } + } + + private final String url; + private final Properties props; + private Connection result; + private Throwable resultException; + private boolean abandoned; + } + + /** + * Create a connection from URL and properties. Always does the connection work in the current + * thread without enforcing a timeout, regardless of any timeout specified in the properties. + * + * @param url the original URL + * @param props the parsed/defaulted connection properties + * @return a new connection + * @throws SQLException if the connection could not be made + */ + private static Connection makeConnection(String url, Properties props) throws SQLException { + return new PgConnection(hostSpecs(props), props, url); + } + + /** + * Returns true if the driver thinks it can open a connection to the given URL. Typically, drivers + * will return true if they understand the subprotocol specified in the URL and false if they + * don't. Our protocols start with jdbc:postgresql: + * + * @param url the URL of the driver + * @return true if this driver accepts the given URL + * @see java.sql.Driver#acceptsURL + */ + @Override + public boolean acceptsURL(String url) { + return parseURL(url, null) != null; + } + + /** + *

The getPropertyInfo method is intended to allow a generic GUI tool to discover what properties
   * it should prompt a human for in order to get enough information to connect to a database.
   *
   * <p>Note that depending on the values the human has supplied so far, additional values may
   * become necessary, so it may be necessary to iterate through several calls to
   * getPropertyInfo.</p>
+ * + * @param url the Url of the database to connect to + * @param info a proposed list of tag/value pairs that will be sent on connect open. + * @return An array of DriverPropertyInfo objects describing possible properties. This array may + * be an empty array if no properties are required + * @see java.sql.Driver#getPropertyInfo + */ + @Override + public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) { + Properties copy = new Properties(info); + Properties parse = parseURL(url, copy); + if (parse != null) { + copy = parse; + } + + PGProperty[] knownProperties = PGProperty.values(); + DriverPropertyInfo[] props = new DriverPropertyInfo[knownProperties.length]; + for (int i = 0; i < props.length; i++) { + props[i] = knownProperties[i].toDriverPropertyInfo(copy); + } + + return props; + } + + @Override + public int getMajorVersion() { + return DriverInfo.MAJOR_VERSION; + } + + @Override + public int getMinorVersion() { + return DriverInfo.MINOR_VERSION; + } + + /** + * Returns the server version series of this driver and the specific build number. + * + * @return JDBC driver version + * @deprecated use {@link #getMajorVersion()} and {@link #getMinorVersion()} instead + */ + @Deprecated + public static String getVersion() { + return DriverInfo.DRIVER_FULL_NAME; + } + + /** + *

Report whether the driver is a genuine JDBC compliant driver. A driver may only report "true"
   * here if it passes the JDBC compliance tests, otherwise it is required to return false. JDBC
   * compliance requires full support for the JDBC API and full support for SQL 92 Entry Level.
   *
   * <p>For PostgreSQL, this is not yet possible, as we are not SQL92 compliant (yet).</p>
+ */ + @Override + public boolean jdbcCompliant() { + return false; + } + + /** + * Constructs a new DriverURL, splitting the specified URL into its component parts. + * + * @param url JDBC URL to parse + * @param defaults Default properties + * @return Properties with elements added from the url + */ + public static Properties parseURL(String url, Properties defaults) { + // priority 1 - URL values + Properties priority1Url = new Properties(); + // priority 2 - Properties given as argument to DriverManager.getConnection() + // argument "defaults" EXCLUDING defaults + // priority 3 - Values retrieved by "service" + Properties priority3Service = new Properties(); + // priority 4 - Properties loaded by Driver.loadDefaultProperties() (user, org/postgresql/driverconfig.properties) + // argument "defaults" INCLUDING defaults + // priority 5 - PGProperty defaults for PGHOST, PGPORT, PGDBNAME + + String urlServer = url; + String urlArgs = ""; + + int qPos = url.indexOf('?'); + if (qPos != -1) { + urlServer = url.substring(0, qPos); + urlArgs = url.substring(qPos + 1); + } + + if (!urlServer.startsWith("jdbc:postgresql:")) { + LOGGER.log(Level.FINE, "JDBC URL must start with \"jdbc:postgresql:\" but was: {0}", url); + return null; + } + urlServer = urlServer.substring("jdbc:postgresql:".length()); + + if ("//".equals(urlServer) || "///".equals(urlServer)) { + urlServer = ""; + } else if (urlServer.startsWith("//")) { + urlServer = urlServer.substring(2); + long slashCount = urlServer.chars().filter(ch -> ch == '/').count(); + if (slashCount > 1) { + LOGGER.log(Level.WARNING, "JDBC URL contains too many / characters: {0}", url); + return null; + } + int slash = urlServer.indexOf('/'); + if (slash == -1) { + LOGGER.log(Level.WARNING, "JDBC URL must contain a / at the end of the host or port: {0}", url); + return null; + } + if (!urlServer.endsWith("/")) { + String value = urlDecode(urlServer.substring(slash + 1)); + if (value == null) { + return null; + } + 
PGProperty.PG_DBNAME.set(priority1Url, value); + } + urlServer = urlServer.substring(0, slash); + + String[] addresses = urlServer.split(","); + StringBuilder hosts = new StringBuilder(); + StringBuilder ports = new StringBuilder(); + for (String address : addresses) { + int portIdx = address.lastIndexOf(':'); + if (portIdx != -1 && address.lastIndexOf(']') < portIdx) { + String portStr = address.substring(portIdx + 1); + ports.append(portStr); + CharSequence hostStr = address.subSequence(0, portIdx); + if (hostStr.length() == 0) { + hosts.append(PGProperty.PG_HOST.getDefaultValue()); + } else { + hosts.append(hostStr); + } + } else { + ports.append(PGProperty.PG_PORT.getDefaultValue()); + hosts.append(address); + } + ports.append(','); + hosts.append(','); + } + ports.setLength(ports.length() - 1); + hosts.setLength(hosts.length() - 1); + PGProperty.PG_HOST.set(priority1Url, hosts.toString()); + PGProperty.PG_PORT.set(priority1Url, ports.toString()); + } else if (urlServer.startsWith("/")) { + return null; + } else { + String value = urlDecode(urlServer); + if (value == null) { + return null; + } + priority1Url.setProperty(PGProperty.PG_DBNAME.getName(), value); + } + + // parse the args part of the url + String[] args = urlArgs.split("&"); + String serviceName = null; + for (String token : args) { + if (token.isEmpty()) { + continue; + } + int pos = token.indexOf('='); + if (pos == -1) { + priority1Url.setProperty(token, ""); + } else { + String pName = PGPropertyUtil.translatePGServiceToPGProperty(token.substring(0, pos)); + String pValue = urlDecode(token.substring(pos + 1)); + if (pValue == null) { + return null; + } + if (PGProperty.SERVICE.getName().equals(pName)) { + serviceName = pValue; + } else { + priority1Url.setProperty(pName, pValue); + } + } + } + + // load pg_service.conf + if (serviceName != null) { + LOGGER.log(Level.FINE, "Processing option [?service={0}]", serviceName); + Properties result = 
PgServiceConfParser.getServiceProperties(serviceName); + if (result == null) { + LOGGER.log(Level.WARNING, "Definition of service [{0}] not found", serviceName); + return null; + } + priority3Service.putAll(result); + } + + // combine result based on order of priority + Properties result = new Properties(); + result.putAll(priority1Url); + if (defaults != null) { + // priority 2 - forEach() returns all entries EXCEPT defaults + defaults.forEach(result::putIfAbsent); + } + priority3Service.forEach(result::putIfAbsent); + if (defaults != null) { + // priority 4 - stringPropertyNames() returns all entries INCLUDING defaults + defaults.stringPropertyNames().forEach(s -> result.putIfAbsent(s, defaults.getProperty(s))); + } + // priority 5 - PGProperty defaults for PGHOST, PGPORT, PGDBNAME + result.putIfAbsent(PGProperty.PG_PORT.getName(), PGProperty.PG_PORT.getDefaultValue()); + result.putIfAbsent(PGProperty.PG_HOST.getName(), PGProperty.PG_HOST.getDefaultValue()); + if (PGProperty.USER.getOrDefault(result) != null) { + result.putIfAbsent(PGProperty.PG_DBNAME.getName(), PGProperty.USER.getOrDefault(result)); + } + + // consistency check + if (!PGPropertyUtil.propertiesConsistencyCheck(result)) { + return null; + } + + // try to load .pgpass if password is missing + if (PGProperty.PASSWORD.getOrDefault(result) == null) { + String password = PgPassParser.getPassword( + PGProperty.PG_HOST.getOrDefault(result), PGProperty.PG_PORT.getOrDefault(result), PGProperty.PG_DBNAME.getOrDefault(result), PGProperty.USER.getOrDefault(result) + ); + if (password != null && !password.isEmpty()) { + PGProperty.PASSWORD.set(result, password); + } + } + // + return result; + } + + // decode url, on failure log and return null + private static String urlDecode(String url) { + try { + return URLCoder.decode(url); + } catch (IllegalArgumentException e) { + LOGGER.log(Level.FINE, "Url [{0}] parsing failed with error [{1}]", new Object[]{url, e.getMessage()}); + } + return null; + } + + /** + * 
@return the address portion of the URL + */ + private static HostSpec[] hostSpecs(Properties props) { + String[] hosts = PGProperty.PG_HOST.getOrDefault(props).split(","); + String[] ports = PGProperty.PG_PORT.getOrDefault(props).split(","); + String localSocketAddress = PGProperty.LOCAL_SOCKET_ADDRESS.getOrDefault(props); + HostSpec[] hostSpecs = new HostSpec[hosts.length]; + for (int i = 0; i < hostSpecs.length; i++) { + hostSpecs[i] = new HostSpec(hosts[i], Integer.parseInt(ports[i]), localSocketAddress); + } + return hostSpecs; + } + + /** + * @return the timeout from the URL, in milliseconds + */ + private static long timeout(Properties props) { + String timeout = PGProperty.LOGIN_TIMEOUT.getOrDefault(props); + if (timeout != null) { + try { + return (long) (Float.parseFloat(timeout) * 1000); + } catch (NumberFormatException e) { + LOGGER.log(Level.WARNING, "Couldn't parse loginTimeout value: {0}", timeout); + } + } + return (long) DriverManager.getLoginTimeout() * 1000; + } + + /** + * This method was added in v6.5, and simply throws an SQLException for an unimplemented method. I + * decided to do it this way while implementing the JDBC2 extensions to JDBC, as it should help + * keep the overall driver size down. It now requires the call Class and the function name to help + * when the driver is used with closed software that don't report the stack trace + * + * @param callClass the call Class + * @param functionName the name of the unimplemented function with the type of its arguments + * @return PSQLException with a localized message giving the complete description of the + * unimplemented function + */ + public static SQLFeatureNotSupportedException notImplemented(Class callClass, + String functionName) { + return new SQLFeatureNotSupportedException( + GT.tr("Method {0} is not yet implemented.", callClass.getName() + "." 
+ functionName), + PSQLState.NOT_IMPLEMENTED.getState()); + } + + @Override + public Logger getParentLogger() { + return PARENT_LOGGER; + } + + public static SharedTimer getSharedTimer() { + return SHARED_TIMER; + } + + /** + * Register the driver against {@link DriverManager}. This is done automatically when the class is + * loaded. Dropping the driver from DriverManager's list is possible using {@link #deregister()} + * method. + * + * @throws IllegalStateException if the driver is already registered + * @throws SQLException if registering the driver fails + */ + public static void register() throws SQLException { + if (isRegistered()) { + throw new IllegalStateException( + "Driver is already registered. It can only be registered once."); + } + Driver registeredDriver = new Driver(); + DriverManager.registerDriver(registeredDriver); + Driver.registeredDriver = registeredDriver; + } + + /** + * According to JDBC specification, this driver is registered against {@link DriverManager} when + * the class is loaded. To avoid leaks, this method allow unregistering the driver so that the + * class can be gc'ed if necessary. 
+ * + * @throws IllegalStateException if the driver is not registered + * @throws SQLException if deregistering the driver fails + */ + public static void deregister() throws SQLException { + if (registeredDriver == null) { + throw new IllegalStateException( + "Driver is not registered (or it has not been registered using Driver.register() method)"); + } + DriverManager.deregisterDriver(registeredDriver); + registeredDriver = null; + } + + /** + * @return {@code true} if the driver is registered against {@link DriverManager} + */ + public static boolean isRegistered() { + return registeredDriver != null; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/PGConnection.java b/pgjdbc/src/main/java/org/postgresql/PGConnection.java new file mode 100644 index 0000000..b0b438c --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/PGConnection.java @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql; + +import java.util.TimeZone; +import org.postgresql.copy.CopyManager; +import org.postgresql.fastpath.Fastpath; +import org.postgresql.jdbc.AutoSave; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.largeobject.LargeObjectManager; +import org.postgresql.replication.PGReplicationConnection; +import org.postgresql.util.GT; +import org.postgresql.util.PGobject; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.util.PasswordUtil; + +import java.sql.Array; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; +import java.util.Map; + +/** + * This interface defines the public PostgreSQL extensions to java.sql.Connection. All Connections + * returned by the PostgreSQL driver implement PGConnection. + */ +public interface PGConnection { + + /** + * Creates an {@link Array} wrapping elements. 
This is similar to + * {@link java.sql.Connection#createArrayOf(String, Object[])}, but also + * provides support for primitive arrays. + * + * @param typeName + * The SQL name of the type to map the elements to. + * Must not be {@code null}. + * @param elements + * The array of objects to map. A {@code null} value will result in + * an {@link Array} representing {@code null}. + * @return An {@link Array} wrapping elements. + * @throws SQLException + * If for some reason the array cannot be created. + * @see java.sql.Connection#createArrayOf(String, Object[]) + */ + Array createArrayOf(String typeName, Object elements) throws SQLException; + + /** + * This method returns any notifications that have been received since the last call to this + * method. Returns null if there have been no notifications. + * + * @return notifications that have been received + * @throws SQLException if something wrong happens + * @since 7.3 + */ + PGNotification[] getNotifications() throws SQLException; + + /** + * This method returns any notifications that have been received since the last call to this + * method. Returns null if there have been no notifications. A timeout can be specified so the + * driver waits for notifications. + * + * @param timeoutMillis when 0, blocks forever. when > 0, blocks up to the specified number of millis + * or until at least one notification has been received. If more than one notification is + * about to be received, these will be returned in one batch. + * @return notifications that have been received + * @throws SQLException if something wrong happens + * @since 43 + */ + PGNotification[] getNotifications(int timeoutMillis) throws SQLException; + + /** + * This returns the COPY API for the current connection. + * + * @return COPY API for the current connection + * @throws SQLException if something wrong happens + * @since 8.4 + */ + CopyManager getCopyAPI() throws SQLException; + + /** + * This returns the LargeObject API for the current connection. 
+ * + * @return LargeObject API for the current connection + * @throws SQLException if something wrong happens + * @since 7.3 + */ + LargeObjectManager getLargeObjectAPI() throws SQLException; + + /** + * This returns the Fastpath API for the current connection. + * + * @return Fastpath API for the current connection + * @throws SQLException if something wrong happens + * @since 7.3 + * @deprecated This API is somewhat obsolete, as one may achieve similar performance + * and greater functionality by setting up a prepared statement to define + * the function call. Then, executing the statement with binary transmission of parameters + * and results substitutes for a fast-path function call. + */ + @Deprecated + Fastpath getFastpathAPI() throws SQLException; + + /** + * This allows client code to add a handler for one of org.postgresql's more unique data types. It + * is approximately equivalent to addDataType(type, Class.forName(name)). + * + * @param type JDBC type name + * @param className class name + * @throws RuntimeException if the type cannot be registered (class not found, etc). + * @deprecated As of 8.0, replaced by {@link #addDataType(String, Class)}. This deprecated method + * does not work correctly for registering classes that cannot be directly loaded by + * the JDBC driver's classloader. + */ + @Deprecated + void addDataType(String type, String className); + + /** + *

This allows client code to add a handler for one of org.postgresql's more unique data types.
   *
   * <p>NOTE: This is not part of JDBC, but an extension.</p>
   *
   * <p>The best way to use this is as follows:</p>
   *
   * <pre>
   * ...
   * ((org.postgresql.PGConnection)myconn).addDataType("mytype", my.class.name.class);
   * ...
   * </pre>
   *
   * <p>where myconn is an open Connection to org.postgresql.</p>
   *
   * <p>The handling class must extend org.postgresql.util.PGobject.</p>
+ * + * @param type the PostgreSQL type to register + * @param klass the class implementing the Java representation of the type; this class must + * implement {@link org.postgresql.util.PGobject}). + * @throws SQLException if klass does not implement + * {@link org.postgresql.util.PGobject}). + * @see org.postgresql.util.PGobject + * @since 8.0 + */ + void addDataType(String type, Class klass) throws SQLException; + + /** + * Set the default statement reuse threshold before enabling server-side prepare. See + * {@link org.postgresql.PGStatement#setPrepareThreshold(int)} for details. + * + * @param threshold the new threshold + * @since build 302 + */ + void setPrepareThreshold(int threshold); + + /** + * Get the default server-side prepare reuse threshold for statements created from this + * connection. + * + * @return the current threshold + * @since build 302 + */ + int getPrepareThreshold(); + + /** + * Set the default fetch size for statements created from this connection. + * + * @param fetchSize new default fetch size + * @throws SQLException if specified negative fetchSize parameter + * @see Statement#setFetchSize(int) + */ + void setDefaultFetchSize(int fetchSize) throws SQLException; + + /** + * Get the default fetch size for statements created from this connection. + * + * @return current state for default fetch size + * @see PGProperty#DEFAULT_ROW_FETCH_SIZE + * @see Statement#getFetchSize() + */ + int getDefaultFetchSize(); + + /** + * Return the process ID (PID) of the backend server process handling this connection. + * + * @return PID of backend server process. + */ + int getBackendPID(); + + /** + * Sends a query cancellation for this connection. + * @throws SQLException if there are problems cancelling the query + */ + void cancelQuery() throws SQLException; + + /** + * Return the given string suitably quoted to be used as an identifier in an SQL statement string. 
+ * Quotes are added only if necessary (i.e., if the string contains non-identifier characters or + * would be case-folded). Embedded quotes are properly doubled. + * + * @param identifier input identifier + * @return the escaped identifier + * @throws SQLException if something goes wrong + */ + String escapeIdentifier(String identifier) throws SQLException; + + /** + * Return the given string suitably quoted to be used as a string literal in an SQL statement + * string. Embedded single-quotes and backslashes are properly doubled. Note that quote_literal + * returns null on null input. + * + * @param literal input literal + * @return the quoted literal + * @throws SQLException if something goes wrong + */ + String escapeLiteral(String literal) throws SQLException; + + /** + *

Returns the query mode for this connection.
   *
   * <p>When running in simple query mode, certain features are not available: callable
   * statements, partial result set fetch, bytea type, etc.</p>
   *
   * <p>The list of supported features is subject to change.</p>

+ * + * @return the preferred query mode + * @see PreferQueryMode + */ + PreferQueryMode getPreferQueryMode(); + + /** + * Connection configuration regarding automatic per-query savepoints. + * + * @return connection configuration regarding automatic per-query savepoints + * @see PGProperty#AUTOSAVE + */ + AutoSave getAutosave(); + + /** + * Configures if connection should use automatic savepoints. + * @param autoSave connection configuration regarding automatic per-query savepoints + * @see PGProperty#AUTOSAVE + */ + void setAutosave(AutoSave autoSave); + + /** + * @return replication API for the current connection + */ + PGReplicationConnection getReplicationAPI(); + + /** + * Change a user's password to the specified new password. + * + *

   * <p>If the specific encryption type is not specified, this method defaults to querying the
   * database server for the server's default password_encryption. This method does not send the
   * new password in plain text to the server. Instead, it encrypts the password locally and sends
   * the encoded hash so that the plain text password is never sent on the wire.</p>
   *
   * <p>Acceptable values for encryptionType are null, "md5", or "scram-sha-256". Users should
   * avoid "md5" unless they are explicitly targeting an older server that does not support the
   * more secure SCRAM.</p>
   *
   * @param user The username of the database user
   * @param newPassword The new password for the database user. The implementation will zero
   *        out the array after use
   * @param encryptionType The type of password encryption to use or null if the database server
   *        default should be used.
   * @throws SQLException If the password could not be altered
   */
  default void alterUserPassword(String user, char[] newPassword, String encryptionType)
      throws SQLException {
    try (Statement stmt = ((Connection) this).createStatement()) {
      if (encryptionType == null) {
        // No explicit encryption requested: ask the server for its configured default.
        try (ResultSet rs = stmt.executeQuery("SHOW password_encryption")) {
          if (!rs.next()) {
            throw new PSQLException(GT.tr("Expected a row when reading password_encryption but none was found"),
                PSQLState.NO_DATA);
          }
          encryptionType = rs.getString(1);
          if (encryptionType == null) {
            throw new PSQLException(GT.tr("SHOW password_encryption returned null value"),
                PSQLState.NO_DATA);
          }
        }
      }
      // The hash is computed locally; the plain-text password never crosses the wire.
      String sql = PasswordUtil.genAlterUserPasswordSQL(user, newPassword, encryptionType);
      stmt.execute(sql);
    } finally {
      // Always scrub the caller's password array, even when the statement fails.
      Arrays.fill(newPassword, (char) 0);
    }
  }

  /**
   *

Returns the current values of all parameters reported by the server.

+ * + *

PostgreSQL reports values for a subset of parameters (GUCs) to the client
 + * at connect-time, then sends update messages whenever the values change
 + * during a session. PgJDBC records the latest values and exposes them to client
 + * applications via getParameterStatuses().

+ * + *

PgJDBC exposes individual accessors for some of these parameters as + * listed below. They are more backwards-compatible and should be preferred + * where possible.

+ * + *

Not all parameters are reported, only those marked + * GUC_REPORT in the source code. The pg_settings + * view does not expose information about which parameters are reportable. + * PgJDBC's map will only contain the parameters the server reports values + * for, so you cannot use this method as a substitute for running a + * SHOW paramname; or SELECT + * current_setting('paramname'); query for arbitrary parameters.

+ * + *

Parameter names are case-insensitive and case-preserving + * in this map, like in PostgreSQL itself. So DateStyle and + * datestyle are the same key.

+ * + *

+ * As of PostgreSQL 11 the reportable parameter list, and related PgJDBC
 + * interfaces or accessors, are:
 + *

+ * + *
    + *
  • + * application_name - + * {@link java.sql.Connection#getClientInfo()}, + * {@link java.sql.Connection#setClientInfo(java.util.Properties)} + * and ApplicationName connection property. + *
  • + *
  • + * client_encoding - PgJDBC always sets this to UTF8. + * See allowEncodingChanges connection property. + *
  • + *
  • DateStyle - PgJDBC requires this to always be set to ISO
  • + *
  • standard_conforming_strings - indirectly via {@link #escapeLiteral(String)}
  • + *
  • + * TimeZone - set from JDK timezone see {@link java.util.TimeZone#getDefault()} + * and {@link java.util.TimeZone#setDefault(TimeZone)} + *
  • + *
  • integer_datetimes
  • + *
  • IntervalStyle
  • + *
  • server_encoding
  • + *
  • server_version
  • + *
  • is_superuser
  • + *
  • session_authorization
  • + *
+ * + *

Note that some PgJDBC operations will change server parameters + * automatically.

+ * + * @return unmodifiable map of case-insensitive parameter names to parameter values + * @since 42.2.6 + */ + Map getParameterStatuses(); + + /** + * Shorthand for getParameterStatuses().get(...) . + * + * @param parameterName case-insensitive parameter name + * @return parameter value if defined, or null if no parameter known + * @see #getParameterStatuses + * @since 42.2.6 + */ + String getParameterStatus(String parameterName); + + /** + * Turn on/off adaptive fetch for connection. Existing statements and resultSets won't be affected + * by change here. + * + * @param adaptiveFetch desired state of adaptive fetch. + */ + void setAdaptiveFetch(boolean adaptiveFetch); + + /** + * Get state of adaptive fetch for connection. + * + * @return state of adaptive fetch (turned on or off) + */ + boolean getAdaptiveFetch(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java b/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java new file mode 100644 index 0000000..ac4e611 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql; + +import java.util.HashMap; +import java.util.Map; + +/** + * Some environment variables are intended to have same meaning as libpq describes here: + * https://www.postgresql.org/docs/current/libpq-envars.html + */ +public enum PGEnvironment { + + /** + * Specified location of password file. + */ + ORG_POSTGRESQL_PGPASSFILE( + "org.postgresql.pgpassfile", + null, + "Specified location of password file."), + + /** + * Specified location of password file. + */ + PGPASSFILE( + "PGPASSFILE", + "pgpass", + "Specified location of password file."), + + /** + * The connection service resource (file, url) allows connection parameters to be associated + * with a single service name. 
+ */ + ORG_POSTGRESQL_PGSERVICEFILE( + "org.postgresql.pgservicefile", + null, + "Specifies the service resource to resolve connection properties."), + + /** + * The connection service resource (file, url) allows connection parameters to be associated + * with a single service name. + */ + PGSERVICEFILE( + "PGSERVICEFILE", + "pg_service.conf", + "Specifies the service resource to resolve connection properties."), + + /** + * sets the directory containing the PGSERVICEFILE file and possibly other system-wide + * configuration files. + */ + PGSYSCONFDIR( + "PGSYSCONFDIR", + null, + "Specifies the directory containing the PGSERVICEFILE file"), + ; + + private final String name; + private final String defaultValue; + private final String description; + + PGEnvironment(String name, String defaultValue, String description) { + this.name = name; + this.defaultValue = defaultValue; + this.description = description; + } + + private static final Map PROPS_BY_NAME = new HashMap<>(); + + static { + for (PGEnvironment prop : PGEnvironment.values()) { + if (PROPS_BY_NAME.put(prop.getName(), prop) != null) { + throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName()); + } + } + } + + /** + * Returns the name of the parameter. + * + * @return the name of the parameter + */ + public String getName() { + return name; + } + + /** + * Returns the default value for this parameter. + * + * @return the default value for this parameter or null + */ + public String getDefaultValue() { + return defaultValue; + } + + /** + * Returns the description for this parameter. 
+ * + * @return the description for this parameter + */ + public String getDescription() { + return description; + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/PGNotification.java b/pgjdbc/src/main/java/org/postgresql/PGNotification.java new file mode 100644 index 0000000..03c8bb8 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/PGNotification.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql; + +/** + * This interface defines the public PostgreSQL extension for Notifications. + */ +public interface PGNotification { + /** + * Returns name of this notification. + * + * @return name of this notification + * @since 7.3 + */ + String getName(); + + /** + * Returns the process id of the backend process making this notification. + * + * @return process id of the backend process making this notification + * @since 7.3 + */ + int getPID(); + + /** + * Returns additional information from the notifying process. This feature has only been + * implemented in server versions 9.0 and later, so previous versions will always return an empty + * String. + * + * @return additional information from the notifying process + * @since 8.0 + */ + String getParameter(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/PGProperty.java b/pgjdbc/src/main/java/org/postgresql/PGProperty.java new file mode 100644 index 0000000..571ed36 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/PGProperty.java @@ -0,0 +1,1031 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql; + +import org.postgresql.util.DriverInfo; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.Connection; +import java.sql.DriverPropertyInfo; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +/** + * All connection parameters that can be either set in JDBC URL, in Driver properties or in + * datasource setters. + */ +public enum PGProperty { + + /** + * Specifies if number of rows, used during fetching rows of a result set, should be computed + * dynamically. Number of rows will be calculated by dividing maxResultBuffer size by max row size + * observed so far, rounded down. First fetch will have number of rows declared in + * defaultRowFetchSize. Number of rows can be limited by adaptiveFetchMinimum and + * adaptiveFetchMaximum. Requires declaring of maxResultBuffer and defaultRowFetchSize to work. + * Default value is false. + */ + ADAPTIVE_FETCH( + "adaptiveFetch", + "false", + "Specifies if number of rows fetched in ResultSet should be adaptive to maxResultBuffer and max row size."), + + /** + * Specifies the highest number of rows which can be calculated by adaptiveFetch. Requires + * adaptiveFetch set to true to work. Default value is -1 (used as infinity). + */ + ADAPTIVE_FETCH_MAXIMUM( + "adaptiveFetchMaximum", + "-1", + "Specifies maximum number of rows used by adaptive fetch."), + + /** + * Specifies the lowest number of rows which can be calculated by adaptiveFetch. Requires + * adaptiveFetch set to true to work. Default value is 0. + */ + ADAPTIVE_FETCH_MINIMUM( + "adaptiveFetchMinimum", + "0", + "Specifies minimum number of rows used by adaptive fetch."), + + /** + * When using the V3 protocol the driver monitors changes in certain server configuration + * parameters that should not be touched by end users. The {@code client_encoding} setting is set + * by the driver and should not be altered. 
If the driver detects a change it will abort the + * connection. + */ + ALLOW_ENCODING_CHANGES( + "allowEncodingChanges", + "false", + "Allow for changes in client_encoding"), + + /** + * The application name (require server version >= 9.0). + */ + APPLICATION_NAME( + "ApplicationName", + DriverInfo.DRIVER_NAME, + "Name of the Application (backend >= 9.0)"), + + /** + * Assume the server is at least that version. + */ + ASSUME_MIN_SERVER_VERSION( + "assumeMinServerVersion", + null, + "Assume the server is at least that version"), + + /** + * AuthenticationPluginClass + */ + + AUTHENTICATION_PLUGIN_CLASS_NAME( + "authenticationPluginClassName", + null, + "Name of class which implements AuthenticationPlugin" + ), + + /** + * Specifies what the driver should do if a query fails. In {@code autosave=always} mode, JDBC driver sets a savepoint before each query, + * and rolls back to that savepoint in case of failure. In {@code autosave=never} mode (default), no savepoint dance is made ever. + * In {@code autosave=conservative} mode, savepoint is set for each query, however the rollback is done only for rare cases + * like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries + */ + AUTOSAVE( + "autosave", + "never", + "Specifies what the driver should do if a query fails. In autosave=always mode, JDBC driver sets a savepoint before each query, " + + "and rolls back to that savepoint in case of failure. In autosave=never mode (default), no savepoint dance is made ever. " + + "In autosave=conservative mode, safepoint is set for each query, however the rollback is done only for rare cases" + + " like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries", + false, + new String[]{"always", "never", "conservative"}), + + /** + * Use binary format for sending and receiving data if possible. 
+ */ + BINARY_TRANSFER( + "binaryTransfer", + "true", + "Use binary format for sending and receiving data if possible"), + + /** + * Comma separated list of types to disable binary transfer. Either OID numbers or names. + * Overrides values in the driver default set and values set with binaryTransferEnable. + */ + BINARY_TRANSFER_DISABLE( + "binaryTransferDisable", + "", + "Comma separated list of types to disable binary transfer. Either OID numbers or names. Overrides values in the driver default set and values set with binaryTransferEnable."), + + /** + * Comma separated list of types to enable binary transfer. Either OID numbers or names + */ + BINARY_TRANSFER_ENABLE( + "binaryTransferEnable", + "", + "Comma separated list of types to enable binary transfer. Either OID numbers or names"), + + /** + * Cancel command is sent out of band over its own connection, so cancel message can itself get + * stuck. + * This property controls "connect timeout" and "socket timeout" used for cancel commands. + * The timeout is specified in seconds. Default value is 10 seconds. + */ + CANCEL_SIGNAL_TIMEOUT( + "cancelSignalTimeout", + "10", + "The timeout that is used for sending cancel command."), + + /** + * Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not + */ + CLEANUP_SAVEPOINTS( + "cleanupSavepoints", + "false", + "Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not", + false, + new String[]{"true", "false"}), + + /** + *

The timeout value used for socket connect operations. If connecting to the server takes longer + * than this value, the connection is broken.

+ * + *

The timeout is specified in seconds and a value of zero means that it is disabled.

+ */ + CONNECT_TIMEOUT( + "connectTimeout", + "10", + "The timeout value in seconds used for socket connect operations."), + + /** + * Specify the schema (or several schema separated by commas) to be set in the search-path. This schema will be used to resolve + * unqualified object names used in statements over this connection. + */ + CURRENT_SCHEMA( + "currentSchema", + null, + "Specify the schema (or several schema separated by commas) to be set in the search-path"), + + /** + * Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache. + */ + DATABASE_METADATA_CACHE_FIELDS( + "databaseMetadataCacheFields", + "65536", + "Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache."), + + /** + * Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache. + */ + DATABASE_METADATA_CACHE_FIELDS_MIB( + "databaseMetadataCacheFieldsMiB", + "5", + "Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache."), + + /** + * Default parameter for {@link java.sql.Statement#getFetchSize()}. A value of {@code 0} means + * that need fetch all rows at once + */ + DEFAULT_ROW_FETCH_SIZE( + "defaultRowFetchSize", + "0", + "Positive number of rows that should be fetched from the database when more rows are needed for ResultSet by each fetch iteration"), + + /** + * Enable optimization that disables column name sanitiser. + */ + DISABLE_COLUMN_SANITISER( + "disableColumnSanitiser", + "false", + "Enable optimization that disables column name sanitiser"), + + /** + * Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend >= 11) + * In {@code escapeSyntaxCallMode=select} mode (the default), the driver always uses a SELECT statement (allowing function invocation only). 
+ * In {@code escapeSyntaxCallMode=callIfNoReturn} mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement. + * In {@code escapeSyntaxCallMode=call} mode, the driver always uses a CALL statement (allowing procedure invocation only). + */ + ESCAPE_SYNTAX_CALL_MODE( + "escapeSyntaxCallMode", + "select", + "Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend >= 11)" + + "In escapeSyntaxCallMode=select mode (the default), the driver always uses a SELECT statement (allowing function invocation only)." + + "In escapeSyntaxCallMode=callIfNoReturn mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement." + + "In escapeSyntaxCallMode=call mode, the driver always uses a CALL statement (allowing procedure invocation only).", + false, + new String[]{"select", "callIfNoReturn", "call"}), + + /** + * Group startup parameters in a transaction + * This is important in pool-by-transaction scenarios in order to make sure that all the statements + * reaches the same connection that is being initialized. All of the startup parameters will be wrapped + * in a transaction + * Note this is off by default as pgbouncer in statement mode + */ + GROUP_STARTUP_PARAMETERS( + "groupStartupParameters", + "false", + "This is important in pool-by-transaction scenarios in order to make sure that all " + + "the statements reaches the same connection that is being initialized." + ), + + GSS_ENC_MODE( + "gssEncMode", + "allow", + "Force Encoded GSS Mode", + false, + new String[]{"disable", "allow", "prefer", "require"} + ), + + /** + * Force one of + *
    + *
  • SSPI (Windows transparent single-sign-on)
  • + *
  • GSSAPI (Kerberos, via JSSE)
  • + *
+ * to be used when the server requests Kerberos or SSPI authentication. + */ + GSS_LIB( + "gsslib", + "auto", + "Force SSSPI or GSSAPI", + false, + new String[]{"auto", "sspi", "gssapi"}), + + /** + *

After requesting an upgrade to SSL from the server there are reports of the server not responding due to a failover;
 + * without a timeout here, the client can wait forever. The pattern for requesting a GSS encrypted connection is the same so we provide the same
 + * timeout mechanism. This timeout will be set before the request and reset after

+ */ + GSS_RESPONSE_TIMEOUT( + "gssResponseTimeout", + "5000", + "Time in milliseconds we wait for a response from the server after requesting a GSS upgrade"), + + + /** + * Enable mode to filter out the names of database objects for which the current user has no privileges + * granted from appearing in the DatabaseMetaData returned by the driver. + */ + HIDE_UNPRIVILEGED_OBJECTS( + "hideUnprivilegedObjects", + "false", + "Enable hiding of database objects for which the current user has no privileges granted from the DatabaseMetaData"), + + HOST_RECHECK_SECONDS( + "hostRecheckSeconds", + "10", + "Specifies period (seconds) after which the host status is checked again in case it has changed"), + + /** + * Specifies the name of the JAAS system or application login configuration. + */ + JAAS_APPLICATION_NAME( + "jaasApplicationName", + "pgjdbc", + "Specifies the name of the JAAS system or application login configuration."), + + /** + * Flag to enable/disable obtaining a GSS credential via JAAS login before authenticating. + * Useful if setting system property javax.security.auth.useSubjectCredsOnly=false + * or using native GSS with system property sun.security.jgss.native=true + */ + JAAS_LOGIN( + "jaasLogin", + "true", + "Login with JAAS before doing GSSAPI authentication"), + + /** + * The Kerberos service name to use when authenticating with GSSAPI. This is equivalent to libpq's + * PGKRBSRVNAME environment variable. + */ + KERBEROS_SERVER_NAME( + "kerberosServerName", + null, + "The Kerberos service name to use when authenticating with GSSAPI."), + + LOAD_BALANCE_HOSTS( + "loadBalanceHosts", + "false", + "If disabled hosts are connected in the given order. If enabled hosts are chosen randomly from the set of suitable candidates"), + + /** + *

If this is set then the client side will bind to this address. This is useful if you need + * to choose which interface to connect to.

+ */ + LOCAL_SOCKET_ADDRESS( + "localSocketAddress", + null, + "Local Socket address, if set bind the client side of the socket to this address"), + + /** + * This property is no longer used by the driver and will be ignored. + * @deprecated Logging is configured via java.util.logging. + */ + @Deprecated + LOGGER_FILE( + "loggerFile", + null, + "File name output of the Logger"), + + /** + * This property is no longer used by the driver and will be ignored. + * @deprecated Logging is configured via java.util.logging. + */ + @Deprecated + LOGGER_LEVEL( + "loggerLevel", + null, + "Logger level of the driver", + false, + new String[]{"OFF", "DEBUG", "TRACE"}), + + /** + * Specify how long to wait for establishment of a database connection. The timeout is specified + * in seconds. + */ + LOGIN_TIMEOUT( + "loginTimeout", + "0", + "Specify how long in seconds to wait for establishment of a database connection."), + + /** + * Whether to include full server error detail in exception messages. + */ + LOG_SERVER_ERROR_DETAIL( + "logServerErrorDetail", + "true", + "Include full server error detail in exception messages. If disabled then only the error itself will be included."), + + /** + * When connections that are not explicitly closed are garbage collected, log the stacktrace from + * the opening of the connection to trace the leak source. + */ + LOG_UNCLOSED_CONNECTIONS( + "logUnclosedConnections", + "false", + "When connections that are not explicitly closed are garbage collected, log the stacktrace from the opening of the connection to trace the leak source"), + + /** + * Specifies size of buffer during fetching result set. Can be specified as specified size or + * percent of heap memory. + */ + MAX_RESULT_BUFFER( + "maxResultBuffer", + null, + "Specifies size of buffer during fetching result set. Can be specified as specified size or percent of heap memory."), + + /** + * Specify 'options' connection initialization parameter. 
+ * The value of this parameter may contain spaces and other special characters or their URL representation. + */ + OPTIONS( + "options", + null, + "Specify 'options' connection initialization parameter."), + + /** + * Password to use when authenticating. + */ + PASSWORD( + "password", + null, + "Password to use when authenticating.", + false), + + /** + * Database name to connect to (may be specified directly in the JDBC URL). + */ + PG_DBNAME( + "PGDBNAME", + null, + "Database name to connect to (may be specified directly in the JDBC URL)", + true), + + /** + * Hostname of the PostgreSQL server (may be specified directly in the JDBC URL). + */ + PG_HOST( + "PGHOST", + "localhost", + "Hostname of the PostgreSQL server (may be specified directly in the JDBC URL)", + false), + + /** + * Port of the PostgreSQL server (may be specified directly in the JDBC URL). + */ + PG_PORT( + "PGPORT", + "5432", + "Port of the PostgreSQL server (may be specified directly in the JDBC URL)"), + + /** + *

Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only), + * extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only, + * extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.

+ * + *

This mode is meant for debugging purposes and/or for cases when extended protocol cannot be used (e.g. logical replication protocol)

+ */ + PREFER_QUERY_MODE( + "preferQueryMode", + "extended", + "Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only), " + + "extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only, " + + "extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.", false, + new String[]{"extended", "extendedForPrepared", "extendedCacheEverything", "simple"}), + + /** + * Specifies the maximum number of entries in cache of prepared statements. A value of {@code 0} + * disables the cache. + */ + PREPARED_STATEMENT_CACHE_QUERIES( + "preparedStatementCacheQueries", + "256", + "Specifies the maximum number of entries in per-connection cache of prepared statements. A value of {@code 0} disables the cache."), + + /** + * Specifies the maximum size (in megabytes) of the prepared statement cache. A value of {@code 0} + * disables the cache. + */ + PREPARED_STATEMENT_CACHE_SIZE_MIB( + "preparedStatementCacheSizeMiB", + "5", + "Specifies the maximum size (in megabytes) of a per-connection prepared statement cache. A value of {@code 0} disables the cache."), + + /** + * Sets the default threshold for enabling server-side prepare. A value of {@code -1} stands for + * forceBinary + */ + PREPARE_THRESHOLD( + "prepareThreshold", + "5", + "Statement prepare threshold. A value of {@code -1} stands for forceBinary"), + + /** + * Force use of a particular protocol version when connecting, if set, disables protocol version + * fallback. + */ + PROTOCOL_VERSION( + "protocolVersion", + null, + "Force use of a particular protocol version when connecting, currently only version 3 is supported.", + false, + new String[]{"3"}), + + /** + * Quote returning columns. 
+ * There are some ORM's that quote everything, including returning columns + * If we quote them, then we end up sending ""colname"" to the backend + * which will not be found + */ + QUOTE_RETURNING_IDENTIFIERS( + "quoteReturningIdentifiers", + "true", + "Quote identifiers provided in returning array", + false), + /** + * Puts this connection in read-only mode. + */ + READ_ONLY( + "readOnly", + "false", + "Puts this connection in read-only mode"), + + /** + * Connection parameter to control behavior when + * {@link Connection#setReadOnly(boolean)} is set to {@code true}. + */ + READ_ONLY_MODE( + "readOnlyMode", + "transaction", + "Controls the behavior when a connection is set to be read only, one of 'ignore', 'transaction', or 'always' " + + "When 'ignore', setting readOnly has no effect. " + + "When 'transaction' setting readOnly to 'true' will cause transactions to BEGIN READ ONLY if autocommit is 'false'. " + + "When 'always' setting readOnly to 'true' will set the session to READ ONLY if autoCommit is 'true' " + + "and the transaction to BEGIN READ ONLY if autocommit is 'false'.", + false, + new String[]{"ignore", "transaction", "always"}), + + /** + * Socket read buffer size (SO_RECVBUF). A value of {@code -1}, which is the default, means system + * default. + */ + RECEIVE_BUFFER_SIZE( + "receiveBufferSize", + "-1", + "Socket read buffer size"), + + /** + *

Connection parameter passed in the startup message. This parameter accepts two values; "true" + * and "database". Passing "true" tells the backend to go into walsender mode, wherein a small set + * of replication commands can be issued instead of SQL statements. Only the simple query protocol + * can be used in walsender mode. Passing "database" as the value instructs walsender to connect + * to the database specified in the dbname parameter, which will allow the connection to be used + * for logical replication from that database.

+ *

Parameter should be used together with {@link PGProperty#ASSUME_MIN_SERVER_VERSION} with
 + * parameter >= 9.4 (backend >= 9.4)

+ */ + REPLICATION( + "replication", + null, + "Connection parameter passed in startup message, one of 'true' or 'database' " + + "Passing 'true' tells the backend to go into walsender mode, " + + "wherein a small set of replication commands can be issued instead of SQL statements. " + + "Only the simple query protocol can be used in walsender mode. " + + "Passing 'database' as the value instructs walsender to connect " + + "to the database specified in the dbname parameter, " + + "which will allow the connection to be used for logical replication " + + "from that database. " + + "(backend >= 9.4)"), + + /** + * Configure optimization to enable batch insert re-writing. + */ + REWRITE_BATCHED_INSERTS( + "reWriteBatchedInserts", + "false", + "Enable optimization to rewrite and collapse compatible INSERT statements that are batched."), + + /** + * Socket write buffer size (SO_SNDBUF). A value of {@code -1}, which is the default, means system + * default. + */ + SEND_BUFFER_SIZE( + "sendBufferSize", + "-1", + "Socket write buffer size"), + + /** + * Service name to use for additional parameters. It specifies a service name in "pg_service + * .conf" that holds additional connection parameters. This allows applications to specify only + * a service name so connection parameters can be centrally maintained. + */ + SERVICE( + "service", + null, + "Service name to be searched in pg_service.conf resource"), + + /** + * Socket factory used to create socket. A null value, which is the default, means system default. + */ + SOCKET_FACTORY( + "socketFactory", + null, + "Specify a socket factory for socket creation"), + + /** + * The String argument to give to the constructor of the Socket Factory. + */ + SOCKET_FACTORY_ARG( + "socketFactoryArg", + null, + "Argument forwarded to constructor of SocketFactory class."), + + /** + * The timeout value used for socket read operations. If reading from the server takes longer than + * this value, the connection is closed. 
This can be used as both a brute force global query + * timeout and a method of detecting network problems. The timeout is specified in seconds and a + * value of zero means that it is disabled. + */ + SOCKET_TIMEOUT( + "socketTimeout", + "0", + "The timeout value in seconds max(2147484) used for socket read operations."), + + /** + * Control use of SSL: empty or {@code true} values imply {@code sslmode==verify-full} + */ + SSL( + "ssl", + null, + "Control use of SSL (any non-null value causes SSL to be required)"), + + /** + * File containing the SSL Certificate. Default will be the file {@code postgresql.crt} in {@code + * $HOME/.postgresql} (*nix) or {@code %APPDATA%\postgresql} (windows). + */ + SSL_CERT( + "sslcert", + null, + "The location of the client's SSL certificate"), + + /** + * Classname of the SSL Factory to use (instance of {@link javax.net.ssl.SSLSocketFactory}). + */ + SSL_FACTORY( + "sslfactory", + "org.postgresql.ssl.LibPQFactory", + "Provide a SSLSocketFactory class when using SSL."), + + /** + * The String argument to give to the constructor of the SSL Factory. + * @deprecated use {@code ..Factory(Properties)} constructor. + */ + @Deprecated + SSL_FACTORY_ARG( + "sslfactoryarg", + null, + "Argument forwarded to constructor of SSLSocketFactory class."), + + /** + * Classname of the SSL HostnameVerifier to use (instance of {@link javax.net.ssl.HostnameVerifier}). + */ + SSL_HOSTNAME_VERIFIER( + "sslhostnameverifier", + null, + "A class, implementing javax.net.ssl.HostnameVerifier that can verify the server"), + + /** + * File containing the SSL Key. Default will be the file {@code postgresql.pk8} in {@code $HOME/.postgresql} (*nix) + * or {@code %APPDATA%\postgresql} (windows). + */ + SSL_KEY( + "sslkey", + null, + "The location of the client's PKCS#8 SSL key"), + + /** + * Parameter governing the use of SSL. The allowed values are {@code disable}, {@code allow}, + * {@code prefer}, {@code require}, {@code verify-ca}, {@code verify-full}. 
+ * If {@code ssl} property is empty or set to {@code true} it implies {@code verify-full}. + * Default mode is "require" + */ + SSL_MODE( + "sslmode", + null, + "Parameter governing the use of SSL", + false, + new String[]{"disable", "allow", "prefer", "require", "verify-ca", "verify-full"}), + + /** + * The SSL password to use in the default CallbackHandler. + */ + SSL_PASSWORD( + "sslpassword", + null, + "The password for the client's ssl key (ignored if sslpasswordcallback is set)"), + + + /** + * The classname instantiating {@link javax.security.auth.callback.CallbackHandler} to use. + */ + SSL_PASSWORD_CALLBACK( + "sslpasswordcallback", + null, + "A class, implementing javax.security.auth.callback.CallbackHandler that can handle PasswordCallback for the ssl password."), + + /** + *

After requesting an upgrade to SSL from the server there are reports of the server not responding due to a failover;
 + * without a timeout here, the client can wait forever. This timeout will be set before the request and reset after

+ */ + SSL_RESPONSE_TIMEOUT( + "sslResponseTimeout", + "5000", + "Time in milliseconds we wait for a response from the server after requesting SSL upgrade"), + + /** + * File containing the root certificate when validating server ({@code sslmode} = {@code + * verify-ca} or {@code verify-full}). Default will be the file {@code root.crt} in {@code + * $HOME/.postgresql} (*nix) or {@code %APPDATA%\postgresql} (windows). + */ + SSL_ROOT_CERT( + "sslrootcert", + null, + "The location of the root certificate for authenticating the server."), + + /** + * Specifies the name of the SSPI service class that forms the service class part of the SPN. The + * default, {@code POSTGRES}, is almost always correct. + */ + SSPI_SERVICE_CLASS( + "sspiServiceClass", + "POSTGRES", + "The Windows SSPI service class for SPN"), + + /** + * Bind String to either {@code unspecified} or {@code varchar}. Default is {@code varchar} for + * 8.0+ backends. + */ + STRING_TYPE( + "stringtype", + null, + "The type to bind String parameters as (usually 'varchar', 'unspecified' allows implicit casting to other types)", + false, + new String[]{"unspecified", "varchar"}), + + TARGET_SERVER_TYPE( + "targetServerType", + "any", + "Specifies what kind of server to connect", + false, + new String []{"any", "primary", "master", "slave", "secondary", "preferSlave", "preferSecondary", "preferPrimary"}), + + /** + * Enable or disable TCP keep-alive. The default is {@code false}. + */ + TCP_KEEP_ALIVE( + "tcpKeepAlive", + "false", + "Enable or disable TCP keep-alive. The default is {@code false}."), + + TCP_NO_DELAY( + "tcpNoDelay", + "true", + "Enable or disable TCP no delay. The default is (@code true}." + ), + /** + * Specifies the length to return for types of unknown length. + */ + UNKNOWN_LENGTH( + "unknownLength", + Integer.toString(Integer.MAX_VALUE), + "Specifies the length to return for types of unknown length"), + + /** + * Username to connect to the database as. 
+ */ + USER( + "user", + null, + "Username to connect to the database as.", + true), + + /** + * Use SPNEGO in SSPI authentication requests. + */ + USE_SPNEGO( + "useSpnego", + "false", + "Use SPNEGO in SSPI authentication requests"), + + /** + * Factory class to instantiate factories for XML processing. + * The default factory disables external entity processing. + * Legacy behavior with external entity processing can be enabled by specifying a value of LEGACY_INSECURE. + * Or specify a custom class that implements {@link org.postgresql.xml.PGXmlFactoryFactory}. + */ + XML_FACTORY_FACTORY( + "xmlFactoryFactory", + "", + "Factory class to instantiate factories for XML processing"), + + ; + + private final String name; + private final String defaultValue; + private final boolean required; + private final String description; + private final String [] choices; + + PGProperty(String name, String defaultValue, String description) { + this(name, defaultValue, description, false); + } + + PGProperty(String name, String defaultValue, String description, boolean required) { + this(name, defaultValue, description, required, (String[]) null); + } + + PGProperty(String name, String defaultValue, String description, boolean required, + String [] choices) { + this.name = name; + this.defaultValue = defaultValue; + this.required = required; + this.description = description; + this.choices = choices; + } + + private static final Map PROPS_BY_NAME = new HashMap<>(); + + static { + for (PGProperty prop : PGProperty.values()) { + if (PROPS_BY_NAME.put(prop.getName(), prop) != null) { + throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName()); + } + } + } + + /** + * Returns the name of the connection parameter. The name is the key that must be used in JDBC URL + * or in Driver properties + * + * @return the name of the connection parameter + */ + public String getName() { + return name; + } + + /** + * Returns the default value for this connection parameter. 
   *
   * @return the default value for this connection parameter or null
   */
  public String getDefaultValue() {
    return defaultValue;
  }

  /**
   * Returns whether this parameter is required.
   *
   * @return whether this parameter is required
   */
  public boolean isRequired() {
    return required;
  }

  /**
   * Returns the description for this connection parameter.
   *
   * @return the description for this connection parameter
   */
  public String getDescription() {
    return description;
  }

  /**
   * Returns the available values for this connection parameter.
   *
   * @return the available values for this connection parameter or null
   */
  public String [] getChoices() {
    return choices;
  }

  /**
   * Returns the value of the connection parameter from the given {@link Properties} or the
   * default value.
   *
   * @param properties properties to take actual value from
   * @return evaluated value for this connection parameter
   */
  public String getOrDefault(Properties properties) {
    return properties.getProperty(name, defaultValue);
  }

  /**
   * Returns the value of the connection parameter from the given {@link Properties} or the
   * default value
   *
   * @param properties properties to take actual value from
   * @return evaluated value for this connection parameter or null
   * @deprecated use {@link #getOrDefault(Properties)} instead
   */
  @Deprecated
  public String get(Properties properties) {
    return getOrDefault(properties);
  }

  /**
   * Returns the value of the connection parameter from the given {@link Properties} or null if there
   * is no default value
   *
   * @param properties properties object to get value from
   * @return evaluated value for this connection parameter
   */
  public String getOrNull(Properties properties) {
    // Deliberately bypasses defaultValue: absent means null here.
    return properties.getProperty(name);
  }

  /**
   * Set the value for this connection parameter in the given {@link Properties}.
   *
   * @param properties properties in which the value should be set
   * @param value value for this connection parameter; a null value removes the property
   */
  public void set(Properties properties, String value) {
    if (value == null) {
      // null acts as "unset" — Properties.setProperty would throw on a null value.
      properties.remove(name);
    } else {
      properties.setProperty(name, value);
    }
  }

  /**
   * Return the boolean value for this connection parameter in the given {@link Properties}.
   *
   * @param properties properties to take actual value from
   * @return evaluated value for this connection parameter converted to boolean
   */
  public boolean getBoolean(Properties properties) {
    return Boolean.parseBoolean(getOrDefault(properties));
  }

  /**
   * Return the int value for this connection parameter in the given {@link Properties}. Prefer the
   * use of {@link #getInt(Properties)} anywhere you can throw an {@link java.sql.SQLException}.
   *
   * @param properties properties to take actual value from
   * @return evaluated value for this connection parameter converted to int
   * @throws NumberFormatException if it cannot be converted to int.
   */
  @SuppressWarnings("nullness:argument")
  public int getIntNoCheck(Properties properties) {
    String value = getOrDefault(properties);
    return Integer.parseInt(value);
  }

  /**
   * Return the int value for this connection parameter in the given {@link Properties}.
   *
   * @param properties properties to take actual value from
   * @return evaluated value for this connection parameter converted to int
   * @throws PSQLException if it cannot be converted to int.
   */
  @SuppressWarnings("nullness:argument")
  public int getInt(Properties properties) throws PSQLException {
    String value = getOrDefault(properties);
    try {
      return Integer.parseInt(value);
    } catch (NumberFormatException nfe) {
      // Re-wrap as a driver exception so callers get a proper SQLState; cause preserved.
      throw new PSQLException(GT.tr("{0} parameter value must be an integer but was: {1}",
          getName(), value), PSQLState.INVALID_PARAMETER_VALUE, nfe);
    }
  }

  /**
   * Return the {@link Integer} value for this connection parameter in the given {@link Properties}.
   *
   * @param properties properties to take actual value from
   * @return evaluated value for this connection parameter converted to Integer or null
   * @throws PSQLException if unable to parse property as integer
   */
  public Integer getInteger(Properties properties) throws PSQLException {
    String value = getOrDefault(properties);
    if (value == null) {
      // Unlike getInt, absence (and no default) yields null instead of an error.
      return null;
    }
    try {
      return Integer.parseInt(value);
    } catch (NumberFormatException nfe) {
      throw new PSQLException(GT.tr("{0} parameter value must be an integer but was: {1}",
          getName(), value), PSQLState.INVALID_PARAMETER_VALUE, nfe);
    }
  }

  /**
   * Set the boolean value for this connection parameter in the given {@link Properties}.
   *
   * @param properties properties in which the value should be set
   * @param value boolean value for this connection parameter
   */
  public void set(Properties properties, boolean value) {
    properties.setProperty(name, Boolean.toString(value));
  }

  /**
   * Set the int value for this connection parameter in the given {@link Properties}.
   *
   * @param properties properties in which the value should be set
   * @param value int value for this connection parameter
   */
  public void set(Properties properties, int value) {
    properties.setProperty(name, Integer.toString(value));
  }

  /**
   * Test whether this property is present in the given {@link Properties}.
   *
   * @param properties set of properties to check current in
   * @return true if the parameter is specified in the given properties
   */
  public boolean isPresent(Properties properties) {
    // Uses getSetString so the built-in default does not count as "present".
    return getSetString(properties) != null;
  }

  /**
   * Convert this connection parameter and the value read from the given {@link Properties} into a
   * {@link DriverPropertyInfo}.
   *
   * @param properties properties to take actual value from
   * @return a DriverPropertyInfo representing this connection parameter
   */
  public DriverPropertyInfo toDriverPropertyInfo(Properties properties) {
    DriverPropertyInfo propertyInfo = new DriverPropertyInfo(name, getOrDefault(properties));
    propertyInfo.required = required;
    propertyInfo.description = description;
    propertyInfo.choices = choices;
    return propertyInfo;
  }

  /**
   * Looks up a PGProperty by its connection-parameter name, or null if unknown.
   */
  public static PGProperty forName(String name) {
    return PROPS_BY_NAME.get(name);
  }

  /**
   * Return the property if exists but avoiding the default. Allowing the caller to detect the lack
   * of a property.
   *
   * @param properties properties bundle
   * @return the value of a set property
   */
  public String getSetString(Properties properties) {
    // Raw get() so a non-String entry (possible via Properties.put) is treated as unset.
    Object o = properties.get(name);
    if (o instanceof String) {
      return (String) o;
    }
    return null;
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java b/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java new file mode 100644 index 0000000..8fc678b --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java @@ -0,0 +1,25 @@
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql;

/**
 * A ref cursor based result set.
 *
 * @deprecated As of 8.0, this interface is only present for backwards-compatibility purposes.
 *     New code should call getString() on the ResultSet that contains the refcursor to obtain
 *     the underlying cursor name.
 */
@Deprecated
public interface PGRefCursorResultSet {

  /**
   * @return the name of the cursor.
   * @deprecated As of 8.0, replaced with calling getString() on the ResultSet that this ResultSet
   *     was obtained from.
   */
  @Deprecated
  String getRefCursor();
}
diff --git a/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java b/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java new file mode 100644 index 0000000..b0575cc --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java @@ -0,0 +1,55 @@
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql;

import org.postgresql.core.Field;

import java.sql.SQLException;

/**
 * PostgreSQL extension of {@link java.sql.ResultSetMetaData} exposing the underlying
 * (pre-alias) column, table and schema names, and the wire format of each column.
 */
public interface PGResultSetMetaData {

  /**
   * Returns the underlying column name of a query result, or "" if it is unable to be determined.
   *
   * @param column column position (1-based)
   * @return underlying column name of a query result
   * @throws SQLException if something wrong happens
   * @since 8.0
   */
  String getBaseColumnName(int column) throws SQLException;

  /**
   * Returns the underlying table name of query result, or "" if it is unable to be determined.
   *
   * @param column column position (1-based)
   * @return underlying table name of query result
   * @throws SQLException if something wrong happens
   * @since 8.0
   */
  String getBaseTableName(int column) throws SQLException;

  /**
   * Returns the underlying schema name of query result, or "" if it is unable to be determined.
   *
   * @param column column position (1-based)
   * @return underlying schema name of query result
   * @throws SQLException if something wrong happens
   * @since 8.0
   */
  String getBaseSchemaName(int column) throws SQLException;

  /**
   * Is a column Text or Binary?
   *
   * @param column column position (1-based)
   * @return 0 if column data format is TEXT, or 1 if BINARY
   * @throws SQLException if something wrong happens
   * @see Field#BINARY_FORMAT
   * @see Field#TEXT_FORMAT
   * @since 9.4
   */
  int getFormat(int column) throws SQLException;
}
diff --git a/pgjdbc/src/main/java/org/postgresql/PGStatement.java b/pgjdbc/src/main/java/org/postgresql/PGStatement.java new file mode 100644 index 0000000..8a79ba9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/PGStatement.java @@ -0,0 +1,97 @@
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql;

import java.sql.SQLException;

/**
 * This interface defines the public PostgreSQL extensions to java.sql.Statement. All Statements
 * constructed by the PostgreSQL driver implement PGStatement.
 */
public interface PGStatement {
  // We can't use Long.MAX_VALUE or Long.MIN_VALUE for java.sql.date
  // because this would break the 'normalization contract' of the
  // java.sql.Date API.
  // The follow values are the nearest MAX/MIN values with hour,
  // minute, second, millisecond set to 0 - this is used for
  // -infinity / infinity representation in Java
  long DATE_POSITIVE_INFINITY = 9223372036825200000L;
  long DATE_NEGATIVE_INFINITY = -9223372036832400000L;
  long DATE_POSITIVE_SMALLER_INFINITY = 185543533774800000L;
  long DATE_NEGATIVE_SMALLER_INFINITY = -185543533774800000L;

  /**
   * Returns the Last inserted/updated oid.
   *
   * @return OID of last insert
   * @throws SQLException if something goes wrong
   * @since 7.3
   */
  long getLastOID() throws SQLException;

  /**
   * Turn on the use of prepared statements in the server (server side prepared statements are
   * unrelated to jdbc PreparedStatements) As of build 302, this method is equivalent to
   * setPrepareThreshold(1).
   *
   * @param flag use server prepare
   * @throws SQLException if something goes wrong
   * @since 7.3
   * @deprecated As of build 302, replaced by {@link #setPrepareThreshold(int)}
   */
  @Deprecated
  void setUseServerPrepare(boolean flag) throws SQLException;

  /**
   * Checks if this statement will be executed as a server-prepared statement. A return value of
   * true indicates that the next execution of the statement will be done as a
   * server-prepared statement, assuming the underlying protocol supports it.
   *
   * @return true if the next reuse of this statement will use a server-prepared statement
   */
  boolean isUseServerPrepare();

  /**
   * <p>Sets the reuse threshold for using server-prepared statements.</p>
   *
   * <p>If threshold is a non-zero value N, the Nth and subsequent reuses of a
   * PreparedStatement will use server-side prepare.</p>
   *
   * <p>If threshold is zero, server-side prepare will not be used.</p>
   *
   * <p>The reuse threshold is only used by PreparedStatement and CallableStatement objects; it is
   * ignored for plain Statements.</p>
   *
   * @param threshold the new threshold for this statement
   * @throws SQLException if an exception occurs while changing the threshold
   * @since build 302
   */
  void setPrepareThreshold(int threshold) throws SQLException;

  /**
   * Gets the server-side prepare reuse threshold in use for this statement.
   *
   * @return the current threshold
   * @see #setPrepareThreshold(int)
   * @since build 302
   */
  int getPrepareThreshold();

  /**
   * Turn on/off adaptive fetch for statement. Existing resultSets won't be affected by change
   * here.
   *
   * @param adaptiveFetch desired state of adaptive fetch.
   */
  void setAdaptiveFetch(boolean adaptiveFetch);

  /**
   * Get state of adaptive fetch for statement.
   *
   * @return state of adaptive fetch (turned on or off)
   */
  boolean getAdaptiveFetch();
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyDual.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyDual.java new file mode 100644 index 0000000..c5db1df --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyDual.java @@ -0,0 +1,16 @@
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.copy;

/**
 * Bidirectional via copy stream protocol. Via bidirectional copy protocol work PostgreSQL
 * replication.
 *
 * @see CopyIn
 * @see CopyOut
 */
public interface CopyDual extends CopyIn, CopyOut {
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java new file mode 100644 index 0000000..b0cd5b4 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java @@ -0,0 +1,52 @@
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.copy;

import org.postgresql.util.ByteStreamWriter;

import java.sql.SQLException;

/**
 * Copy bulk data from client into a PostgreSQL table very fast.
 */
public interface CopyIn extends CopyOperation {

  /**
   * Writes specified part of given byte array to an open and writable copy operation.
   *
   * @param buf array of bytes to write
   * @param off offset of first byte to write (normally zero)
   * @param siz number of bytes to write (normally buf.length)
   * @throws SQLException if the operation fails
   */
  void writeToCopy(byte[] buf, int off, int siz) throws SQLException;

  /**
   * Writes a ByteStreamWriter to an open and writable copy operation.
   *
   * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
   * @throws SQLException if the operation fails
   */
  void writeToCopy(ByteStreamWriter from) throws SQLException;

  /**
   * Force any buffered output to be sent over the network to the backend. In general this is a
   * useless operation as it will get pushed over in due time or when endCopy is called. Some
   * specific modified server versions (Truviso) want this data sooner. If you are unsure if you
   * need to use this method, don't.
   *
   * @throws SQLException if the operation fails.
   */
  void flushCopy() throws SQLException;

  /**
   * Finishes copy operation successfully.
   *
   * @return number of updated rows for server 8.2 or newer (see getHandledRowCount())
   * @throws SQLException if the operation fails.
   */
  long endCopy() throws SQLException;
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java new file mode 100644 index 0000000..8849f19 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java @@ -0,0 +1,256 @@
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.copy;

import org.postgresql.core.BaseConnection;
import org.postgresql.core.Encoding;
import org.postgresql.core.QueryExecutor;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
import java.sql.SQLException;

/**
 * API for PostgreSQL COPY bulk data transfer.
 */
public class CopyManager {
  // I don't know what the best buffer size is, so we let people specify it if
  // they want, and if they don't know, we don't make them guess, so that if we
  // do figure it out we can just set it here and they reap the rewards.
  // Note that this is currently being used for both a number of bytes and a number
  // of characters.
  static final int DEFAULT_BUFFER_SIZE = 65536;

  private final Encoding encoding;
  private final QueryExecutor queryExecutor;
  private final BaseConnection connection;

  public CopyManager(BaseConnection connection) throws SQLException {
    this.encoding = connection.getEncoding();
    this.queryExecutor = connection.getQueryExecutor();
    this.connection = connection;
  }

  /**
   * Starts a COPY ... FROM STDIN operation; returns null if the executor returned no operation.
   * A mismatched operation type is cancelled before throwing.
   */
  public CopyIn copyIn(String sql) throws SQLException {
    CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
    if (op == null || op instanceof CopyIn) {
      return (CopyIn) op;
    } else {
      // Wrong direction: release the connection's copy state before failing.
      op.cancelCopy();
      throw new PSQLException(GT.tr("Requested CopyIn but got {0}", op.getClass().getName()),
          PSQLState.WRONG_OBJECT_TYPE);
    }
  }

  /**
   * Starts a COPY ... TO STDOUT operation; returns null if the executor returned no operation.
   * A mismatched operation type is cancelled before throwing.
   */
  public CopyOut copyOut(String sql) throws SQLException {
    CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
    if (op == null || op instanceof CopyOut) {
      return (CopyOut) op;
    } else {
      op.cancelCopy();
      throw new PSQLException(GT.tr("Requested CopyOut but got {0}", op.getClass().getName()),
          PSQLState.WRONG_OBJECT_TYPE);
    }
  }

  /**
   * Starts a bidirectional COPY operation (used by replication); returns null if the executor
   * returned no operation. A mismatched operation type is cancelled before throwing.
   */
  public CopyDual copyDual(String sql) throws SQLException {
    CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
    if (op == null || op instanceof CopyDual) {
      return (CopyDual) op;
    } else {
      op.cancelCopy();
      throw new PSQLException(GT.tr("Requested CopyDual but got {0}", op.getClass().getName()),
          PSQLState.WRONG_OBJECT_TYPE);
    }
  }

  /**
   * Pass results of a COPY TO STDOUT query from database into a Writer.
   *
   * @param sql COPY TO STDOUT statement
   * @param to the Writer to write the results to (row by row).
   *     The Writer is not closed at the end of the Copy Out operation.
   * @return number of rows updated for server 8.2 or newer; -1 for older
   * @throws SQLException on database usage errors
   * @throws IOException upon writer or database connection failure
   */
  public long copyOut(final String sql, Writer to) throws SQLException, IOException {
    byte[] buf;
    CopyOut cp = copyOut(sql);
    try {
      while ((buf = cp.readFromCopy()) != null) {
        to.write(encoding.decode(buf));
      }
      return cp.getHandledRowCount();
    } catch (IOException ioEX) {
      // if not handled this way the close call will hang, at least in 8.2
      if (cp.isActive()) {
        cp.cancelCopy();
      }
      try { // read until exhausted or operation cancelled SQLException
        while ((buf = cp.readFromCopy()) != null) {
          // deliberately empty: draining data the server already sent
        }
      } catch (SQLException sqlEx) {
        // deliberately swallowed: the cancel above makes a SQLException here expected;
        // the original IOException is what the caller needs to see
      } // typically after several kB
      throw ioEX;
    } finally { // see to it that we do not leave the connection locked
      if (cp.isActive()) {
        cp.cancelCopy();
      }
    }
  }

  /**
   * Pass results of a COPY TO STDOUT query from database into an OutputStream.
   *
   * @param sql COPY TO STDOUT statement
   * @param to the stream to write the results to (row by row)
   *     The stream is not closed at the end of the operation. This is intentional so the
   *     caller can continue to write to the output stream
   * @return number of rows updated for server 8.2 or newer; -1 for older
   * @throws SQLException on database usage errors
   * @throws IOException upon output stream or database connection failure
   */
  public long copyOut(final String sql, OutputStream to) throws SQLException, IOException {
    byte[] buf;
    CopyOut cp = copyOut(sql);
    try {
      while ((buf = cp.readFromCopy()) != null) {
        to.write(buf);
      }
      return cp.getHandledRowCount();
    } catch (IOException ioEX) {
      // if not handled this way the close call will hang, at least in 8.2
      if (cp.isActive()) {
        cp.cancelCopy();
      }
      try { // read until exhausted or operation cancelled SQLException
        while ((buf = cp.readFromCopy()) != null) {
          // deliberately empty: draining data the server already sent
        }
      } catch (SQLException sqlEx) {
        // deliberately swallowed: see copyOut(String, Writer) above
      } // typically after several kB
      throw ioEX;
    } finally { // see to it that we do not leave the connection locked
      if (cp.isActive()) {
        cp.cancelCopy();
      }
    }
  }

  /**
   * Use COPY FROM STDIN for very fast copying from a Reader into a database table.
   *
   * @param sql COPY FROM STDIN statement
   * @param from a CSV file or such
   * @return number of rows updated for server 8.2 or newer; -1 for older
   * @throws SQLException on database usage issues
   * @throws IOException upon reader or database connection failure
   */
  public long copyIn(final String sql, Reader from) throws SQLException, IOException {
    return copyIn(sql, from, DEFAULT_BUFFER_SIZE);
  }

  /**
   * Use COPY FROM STDIN for very fast copying from a Reader into a database table.
   *
   * @param sql COPY FROM STDIN statement
   * @param from a CSV file or such
   * @param bufferSize number of characters to buffer and push over network to server at once
   * @return number of rows updated for server 8.2 or newer; -1 for older
   * @throws SQLException on database usage issues
   * @throws IOException upon reader or database connection failure
   */
  public long copyIn(final String sql, Reader from, int bufferSize)
      throws SQLException, IOException {
    char[] cbuf = new char[bufferSize];
    int len;
    CopyIn cp = copyIn(sql);
    try {
      while ((len = from.read(cbuf)) >= 0) {
        if (len > 0) {
          // Characters must be re-encoded with the connection's encoding before sending.
          byte[] buf = encoding.encode(new String(cbuf, 0, len));
          cp.writeToCopy(buf, 0, buf.length);
        }
      }
      return cp.endCopy();
    } finally { // see to it that we do not leave the connection locked
      if (cp.isActive()) {
        cp.cancelCopy();
      }
    }
  }

  /**
   * Use COPY FROM STDIN for very fast copying from an InputStream into a database table.
   *
   * @param sql COPY FROM STDIN statement
   * @param from a CSV file or such
   * @return number of rows updated for server 8.2 or newer; -1 for older
   * @throws SQLException on database usage issues
   * @throws IOException upon input stream or database connection failure
   */
  public long copyIn(final String sql, InputStream from) throws SQLException, IOException {
    return copyIn(sql, from, DEFAULT_BUFFER_SIZE);
  }

  /**
   * Use COPY FROM STDIN for very fast copying from an InputStream into a database table.
   *
   * @param sql COPY FROM STDIN statement
   * @param from a CSV file or such
   * @param bufferSize number of bytes to buffer and push over network to server at once
   * @return number of rows updated for server 8.2 or newer; -1 for older
   * @throws SQLException on database usage issues
   * @throws IOException upon input stream or database connection failure
   */
  public long copyIn(final String sql, InputStream from, int bufferSize)
      throws SQLException, IOException {
    byte[] buf = new byte[bufferSize];
    int len;
    CopyIn cp = copyIn(sql);
    try {
      while ((len = from.read(buf)) >= 0) {
        if (len > 0) {
          cp.writeToCopy(buf, 0, len);
        }
      }
      return cp.endCopy();
    } finally { // see to it that we do not leave the connection locked
      if (cp.isActive()) {
        cp.cancelCopy();
      }
    }
  }

  /**
   * Use COPY FROM STDIN for very fast copying from an ByteStreamWriter into a database table.
   *
   * @param sql COPY FROM STDIN statement
   * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
   * @return number of rows updated for server 8.2 or newer; -1 for older
   * @throws SQLException on database usage issues
   * @throws IOException upon input stream or database connection failure
   */
  public long copyIn(String sql, ByteStreamWriter from)
      throws SQLException, IOException {
    CopyIn cp = copyIn(sql);
    try {
      cp.writeToCopy(from);
      return cp.endCopy();
    } finally { // see to it that we do not leave the connection locked
      if (cp.isActive()) {
        cp.cancelCopy();
      }
    }
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java new file mode 100644 index 0000000..239c629 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java @@ -0,0 +1,51 @@
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.copy;

import java.sql.SQLException;

/**
 * Exchange bulk data between client and PostgreSQL database tables. See CopyIn and CopyOut for full
 * interfaces for corresponding copy directions.
 */
public interface CopyOperation {

  /**
   * @return number of fields in each row for this operation
   */
  int getFieldCount();

  /**
   * @return overall format of each row: 0 = textual, 1 = binary
   */
  int getFormat();

  /**
   * @param field number of field (0..fieldCount()-1)
   * @return format of requested field: 0 = textual, 1 = binary
   */
  int getFieldFormat(int field);

  /**
   * @return is connection reserved for this Copy operation?
   */
  boolean isActive();

  /**
   * Cancels this copy operation, discarding any exchanged data.
   *
   * @throws SQLException if cancelling fails
   */
  void cancelCopy() throws SQLException;

  /**
   * After successful end of copy, returns the number of database records handled in that operation.
   * Only implemented in PostgreSQL server version 8.2 and up. Otherwise, returns -1.
   *
   * @return number of handled rows or -1
   */
  long getHandledRowCount();
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java new file mode 100644 index 0000000..e7918e1 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java @@ -0,0 +1,29 @@
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.copy;

import java.sql.SQLException;

/**
 * Read side of a COPY operation; rows are delivered as raw byte arrays.
 */
public interface CopyOut extends CopyOperation {
  /**
   * Blocks wait for a row of data to be received from server on an active copy operation.
   *
   * @return byte array received from server, null if server complete copy operation
   * @throws SQLException if something goes wrong for example socket timeout
   */
  byte [] readFromCopy() throws SQLException;

  /**
   * Wait for a row of data to be received from server on an active copy operation.
   *
   * @param block {@code true} if need wait data from server otherwise {@code false} and will read
   *     pending message from server
   * @return byte array received from server, if pending message from server absent and use no
   *     blocking mode return null
   * @throws SQLException if something goes wrong for example socket timeout
   */
  byte [] readFromCopy(boolean block) throws SQLException;
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java new file mode 100644 index 0000000..aefd13a --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java @@ -0,0 +1,178 @@
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.copy;

import org.postgresql.PGConnection;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;

import java.io.IOException;
import java.io.InputStream;
import java.sql.SQLException;
import java.util.Arrays;

/**
 * InputStream for reading from a PostgreSQL COPY TO STDOUT operation.
 */
public class PGCopyInputStream extends InputStream implements CopyOut {
  // Underlying copy operation; nulled out by close().
  private CopyOut op;
  // Current row buffer received from the server, or null when exhausted/unfetched.
  private byte [] buf;
  // Read position within buf; -1 marks end-of-copy (see fillBuffer).
  private int at;
  // Number of valid bytes in buf.
  private int len;

  /**
   * Uses given connection for specified COPY TO STDOUT operation.
   *
   * @param connection database connection to use for copying (protocol version 3 required)
   * @param sql COPY TO STDOUT statement
   * @throws SQLException if initializing the operation fails
   */
  public PGCopyInputStream(PGConnection connection, String sql) throws SQLException {
    this(connection.getCopyAPI().copyOut(sql));
  }

  /**
   * Use given CopyOut operation for reading.
   *
   * @param op COPY TO STDOUT operation
   */
  public PGCopyInputStream(CopyOut op) {
    this.op = op;
  }

  private CopyOut getOp() {
    return op;
  }

  /**
   * Ensures buf holds unread data, fetching the next row from the server if the current
   * buffer is consumed. Returns the (possibly null) buffer; null means end of copy.
   */
  private byte [] fillBuffer() throws IOException {
    if (at >= len) {
      try {
        buf = getOp().readFromCopy();
      } catch (SQLException sqle) {
        throw new IOException(GT.tr("Copying from database failed: {0}", sqle.getMessage()), sqle);
      }
      if (buf == null) {
        // End of copy: -1 sentinel keeps at < len from ever holding again.
        at = -1;
      } else {
        at = 0;
        len = buf.length;
      }
    }
    return buf;
  }

  private void checkClosed() throws IOException {
    if (op == null) {
      throw new IOException(GT.tr("This copy stream is closed."));
    }
  }

  @Override
  public int available() throws IOException {
    checkClosed();
    return buf != null ? len - at : 0;
  }

  @Override
  public int read() throws IOException {
    checkClosed();
    byte[] buf = fillBuffer();
    // Mask to 0..255 per the InputStream contract; -1 only at end of copy.
    return buf != null ? (buf[at++] & 0xFF) : -1;
  }

  @Override
  public int read(byte[] buf) throws IOException {
    return read(buf, 0, buf.length);
  }

  @Override
  public int read(byte[] buf, int off, int siz) throws IOException {
    checkClosed();
    int got = 0;
    byte[] data = fillBuffer();
    // Copy across as many server rows as needed to satisfy siz (or until end of copy).
    for (; got < siz && data != null; data = fillBuffer()) {
      int length = Math.min(siz - got, len - at);
      System.arraycopy(data, at, buf, off + got, length);
      at += length;
      got += length;
    }
    return got == 0 && data == null ? -1 : got;
  }

  @Override
  public byte [] readFromCopy() throws SQLException {
    byte[] result = null;
    try {
      byte[] buf = fillBuffer();
      if (buf != null) {
        if (at > 0 || len < buf.length) {
          // Partially consumed buffer: hand out only the unread slice.
          result = Arrays.copyOfRange(buf, at, len);
        } else {
          result = buf;
        }
        // Mark the buffer as fully read
        at = len;
      }
    } catch (IOException ioe) {
      throw new PSQLException(GT.tr("Read from copy failed."), PSQLState.CONNECTION_FAILURE, ioe);
    }
    return result;
  }

  @Override
  public byte [] readFromCopy(boolean block) throws SQLException {
    // Non-blocking mode is not supported by this stream wrapper; always delegates.
    return readFromCopy();
  }

  @Override
  public void close() throws IOException {
    // Don't complain about a double close.
    CopyOut op = this.op;
    if (op == null) {
      return;
    }

    if (op.isActive()) {
      try {
        op.cancelCopy();
      } catch (SQLException se) {
        throw new IOException("Failed to close copy reader.", se);
      }
    }
    this.op = null;
  }

  @Override
  public void cancelCopy() throws SQLException {
    getOp().cancelCopy();
  }

  @Override
  public int getFormat() {
    return getOp().getFormat();
  }

  @Override
  public int getFieldFormat(int field) {
    return getOp().getFieldFormat(field);
  }

  @Override
  public int getFieldCount() {
    return getOp().getFieldCount();
  }

  @Override
  public boolean isActive() {
    return op != null && op.isActive();
  }

  @Override
  public long getHandledRowCount() {
    return getOp().getHandledRowCount();
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java new file mode 100644 index 0000000..322a5a9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java @@ -0,0 +1,203 @@
/*
 * Copyright (c) 2009, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.copy;

import org.postgresql.PGConnection;
import org.postgresql.util.ByteStreamWriter;
import org.postgresql.util.GT;

import java.io.IOException;
import java.io.OutputStream;
import java.sql.SQLException;

/**
 * OutputStream for buffered input into a PostgreSQL COPY FROM STDIN operation.
 */
public class PGCopyOutputStream extends OutputStream implements CopyIn {
  // Underlying COPY operation; set to null once the stream is closed.
  private CopyIn op;
  // Local buffer; contents are handed to the operation when full, on flush() or on endCopy().
  private final byte[] copyBuffer;
  // Scratch array so single-byte writes can reuse the array-based write path.
  private final byte[] singleByteBuffer = new byte[1];
  // Number of bytes currently held in copyBuffer.
  private int at;

  /**
   * Uses given connection for specified COPY FROM STDIN operation.
   *
   * @param connection database connection to use for copying (protocol version 3 required)
   * @param sql COPY FROM STDIN statement
   * @throws SQLException if initializing the operation fails
   */
  public PGCopyOutputStream(PGConnection connection, String sql) throws SQLException {
    this(connection, sql, CopyManager.DEFAULT_BUFFER_SIZE);
  }

  /**
   * Uses given connection for specified COPY FROM STDIN operation.
   *
   * @param connection database connection to use for copying (protocol version 3 required)
   * @param sql COPY FROM STDIN statement
   * @param bufferSize try to send this many bytes at a time
   * @throws SQLException if initializing the operation fails
   */
  public PGCopyOutputStream(PGConnection connection, String sql, int bufferSize)
      throws SQLException {
    this(connection.getCopyAPI().copyIn(sql), bufferSize);
  }

  /**
   * Use given CopyIn operation for writing.
   *
   * @param op COPY FROM STDIN operation
   */
  public PGCopyOutputStream(CopyIn op) {
    this(op, CopyManager.DEFAULT_BUFFER_SIZE);
  }

  /**
   * Use given CopyIn operation for writing.
   *
   * @param op COPY FROM STDIN operation
   * @param bufferSize try to send this many bytes at a time
   */
  public PGCopyOutputStream(CopyIn op, int bufferSize) {
    this.op = op;
    copyBuffer = new byte[bufferSize];
  }

  private CopyIn getOp() {
    return op;
  }

  /**
   * Writes a single byte to the copy stream.
   *
   * <p>NOTE(review): rejects values outside 0..255 instead of using only the low-order 8 bits as
   * the general {@link OutputStream} contract describes — this stricter check is deliberate
   * here.</p>
   */
  @Override
  public void write(int b) throws IOException {
    checkClosed();
    if (b < 0 || b > 255) {
      throw new IOException(GT.tr("Cannot write to copy a byte of value {0}", b));
    }
    singleByteBuffer[0] = (byte) b;
    write(singleByteBuffer, 0, 1);
  }

  /**
   * Writes the whole array to the copy stream (buffered).
   */
  @Override
  public void write(byte[] buf) throws IOException {
    write(buf, 0, buf.length);
  }

  /**
   * Writes a slice of the array to the copy stream, wrapping any SQLException as IOException.
   */
  @Override
  public void write(byte[] buf, int off, int siz) throws IOException {
    checkClosed();
    try {
      writeToCopy(buf, off, siz);
    } catch (SQLException se) {
      throw new IOException("Write to copy failed.", se);
    }
  }

  private void checkClosed() throws IOException {
    if (op == null) {
      throw new IOException(GT.tr("This copy stream is closed."));
    }
  }

  /**
   * Ends the copy (sending any buffered bytes first) and releases the underlying operation.
   * Safe to call more than once.
   */
  @Override
  public void close() throws IOException {
    // Don't complain about a double close.
    CopyIn op = this.op;
    if (op == null) {
      return;
    }

    if (op.isActive()) {
      try {
        endCopy();
      } catch (SQLException se) {
        throw new IOException("Ending write to copy failed.", se);
      }
    }
    this.op = null;
  }

  /**
   * Sends any locally buffered bytes to the operation and asks the operation to flush to the
   * server.
   */
  @Override
  public void flush() throws IOException {
    checkClosed();
    try {
      getOp().writeToCopy(copyBuffer, 0, at);
      at = 0;
      getOp().flushCopy();
    } catch (SQLException e) {
      throw new IOException("Unable to flush stream", e);
    }
  }

  /**
   * Buffers the given bytes, forwarding to the operation when the local buffer would overflow
   * or the chunk is larger than the buffer itself.
   */
  @Override
  public void writeToCopy(byte[] buf, int off, int siz) throws SQLException {
    if (at > 0
        && siz > copyBuffer.length - at) { // would not fit into rest of our buf, so flush buf
      getOp().writeToCopy(copyBuffer, 0, at);
      at = 0;
    }
    if (siz > copyBuffer.length) { // would still not fit into buf, so just pass it through
      getOp().writeToCopy(buf, off, siz);
    } else { // fits into our buf, so save it there
      System.arraycopy(buf, off, copyBuffer, at, siz);
      at += siz;
    }
  }

  /**
   * Writes from the given writer, draining the local buffer first so byte order is preserved.
   */
  @Override
  public void writeToCopy(ByteStreamWriter from) throws SQLException {
    if (at > 0) {
      // flush existing buffer so order is preserved
      getOp().writeToCopy(copyBuffer, 0, at);
      at = 0;
    }
    getOp().writeToCopy(from);
  }

  @Override
  public int getFormat() {
    return getOp().getFormat();
  }

  @Override
  public int getFieldFormat(int field) {
    return getOp().getFieldFormat(field);
  }

  @Override
  public void cancelCopy() throws SQLException {
    getOp().cancelCopy();
  }

  @Override
  public int getFieldCount() {
    return getOp().getFieldCount();
  }

  @Override
  public boolean isActive() {
    return op != null && getOp().isActive();
  }

  @Override
  public void flushCopy() throws SQLException {
    getOp().flushCopy();
  }

  /**
   * Sends any remaining buffered bytes and completes the copy operation.
   *
   * <p>NOTE(review): {@code at} is not reset to 0 here — presumably endCopy is terminal for the
   * operation; confirm callers never reuse the stream after this call.</p>
   *
   * @return number of rows handled by the completed operation
   */
  @Override
  public long endCopy() throws SQLException {
    if (at > 0) {
      getOp().writeToCopy(copyBuffer, 0, at);
    }
    getOp().endCopy();
    return getHandledRowCount();
  }

  @Override
  public long getHandledRowCount() {
    return
getOp().getHandledRowCount(); + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java b/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java new file mode 100644 index 0000000..3aed133 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import java.io.IOException; +import java.lang.ref.Reference; +import java.lang.ref.ReferenceQueue; +import java.lang.ref.SoftReference; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Provides the canonicalization/interning of {@code String} instances which contain only ascii characters, + * keyed by the {@code byte[]} representation (in ascii). + * + *

+ * The values are stored in {@link SoftReference}s, allowing them to be garbage collected if not in use and there is + * memory pressure. + *

+ * + *

+ * NOTE: Instances are safe for concurrent use. + *

+ * + * @author Brett Okken + */ +final class AsciiStringInterner { + + private abstract static class BaseKey { + private final int hash; + + BaseKey(int hash) { + this.hash = hash; + } + + @Override + public final int hashCode() { + return hash; + } + + @Override + public final boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof BaseKey)) { + return false; + } + final BaseKey other = (BaseKey) obj; + return equalsBytes(other); + } + + abstract boolean equalsBytes(BaseKey other); + + abstract boolean equals(byte[] other, int offset, int length); + + abstract void appendString(StringBuilder sb); + } + + /** + * Only used for lookups, never to actually store entries. + */ + private static class TempKey extends BaseKey { + final byte[] bytes; + final int offset; + final int length; + + TempKey(int hash, byte[] bytes, int offset, int length) { + super(hash); + this.bytes = bytes; + this.offset = offset; + this.length = length; + } + + @Override + boolean equalsBytes(BaseKey other) { + return other.equals(bytes, offset, length); + } + + @Override + public boolean equals(byte[] other, int offset, int length) { + return arrayEquals(this.bytes, this.offset, this.length, other, offset, length); + } + + @Override + void appendString(StringBuilder sb) { + for (int i = offset, j = offset + length; i < j; i++) { + sb.append((char) bytes[i]); + } + } + } + + /** + * Instance used for inserting values into the cache. The {@code byte[]} must be a copy + * that will never be mutated. 
+ */ + private static final class Key extends BaseKey { + final byte[] key; + + Key(byte[] key, int hash) { + super(hash); + this.key = key; + } + + /** + * {@inheritDoc} + */ + @Override + boolean equalsBytes(BaseKey other) { + return other.equals(key, 0, key.length); + } + + @Override + public boolean equals(byte[] other, int offset, int length) { + return arrayEquals(this.key, 0, this.key.length, other, offset, length); + } + + /** + * {@inheritDoc} + */ + @Override + void appendString(StringBuilder sb) { + for (int i = 0; i < key.length; i++) { + sb.append((char) key[i]); + } + } + } + + /** + * Custom {@link SoftReference} implementation which maintains a reference to the key in the cache, + * which allows aggressive cleaning when garbage collector collects the {@code String} instance. + */ + private final class StringReference extends SoftReference { + + private final BaseKey key; + + StringReference(BaseKey key, String referent) { + super(referent, refQueue); + this.key = key; + } + + void dispose() { + cache.remove(key, this); + } + } + + /** + * Contains the canonicalized values, keyed by the ascii {@code byte[]}. + */ + final ConcurrentMap> cache = new ConcurrentHashMap<>(128); + + /** + * Used for {@link Reference} as values in {@code cache}. + */ + final ReferenceQueue refQueue = new ReferenceQueue<>(); + + /** + * Preemptively populates a value into the cache. This is intended to be used with {@code String} constants + * which are frequently used. While this can work with other {@code String} values, if val is ever + * garbage collected, it will not be actively removed from this instance. + * + * @param val The value to intern. Must not be {@code null}. + * @return Indication if val is an ascii String and placed into cache. 
+ */ + public boolean putString(String val) { + //ask for utf-8 so that we can detect if any of the characters are not ascii + final byte[] copy = val.getBytes(StandardCharsets.UTF_8); + final int hash = hashKey(copy, 0, copy.length); + if (hash == 0) { + return false; + } + final Key key = new Key(copy, hash); + //we are assuming this is a java interned string from , so this is unlikely to ever be + //reclaimed. so there is no value in using the custom StringReference or hand off to + //the refQueue. + //on the outside chance it actually does get reclaimed, it will just hang around as an + //empty reference in the map unless/until attempted to be retrieved + cache.put(key, new SoftReference(val)); + return true; + } + + /** + * Produces a {@link String} instance for the given bytes. If all are valid ascii (i.e. {@code >= 0}) + * either an existing value will be returned, or the newly created {@code String} will be stored before being + * returned. + * + *

+ * If non-ascii bytes are discovered, the encoding will be used to + * {@link Encoding#decode(byte[], int, int) decode} and that value will be returned (but not stored). + *

+ * + * @param bytes The bytes of the String. Must not be {@code null}. + * @param offset Offset into bytes to start. + * @param length The number of bytes in bytes which are relevant. + * @param encoding To use if non-ascii bytes seen. + * @return Decoded {@code String} from bytes. + * @throws IOException If error decoding from Encoding. + */ + public String getString(byte[] bytes, int offset, int length, Encoding encoding) throws IOException { + if (length == 0) { + return ""; + } + + final int hash = hashKey(bytes, offset, length); + // 0 indicates the presence of a non-ascii character - defer to encoding to create the string + if (hash == 0) { + return encoding.decode(bytes, offset, length); + } + cleanQueue(); + // create a TempKey with the byte[] given + final TempKey tempKey = new TempKey(hash, bytes, offset, length); + SoftReference ref = cache.get(tempKey); + if (ref != null) { + final String val = ref.get(); + if (val != null) { + return val; + } + } + // in order to insert we need to create a "real" key with copy of bytes that will not be changed + final byte[] copy = Arrays.copyOfRange(bytes, offset, offset + length); + final Key key = new Key(copy, hash); + final String value = new String(copy, StandardCharsets.US_ASCII); + + // handle case where a concurrent thread has populated the map or existing value has cleared reference + ref = cache.compute(key, (k, v) -> { + if (v == null) { + return new StringReference(key, value); + } + final String val = v.get(); + return val != null ? v : new StringReference(key, value); + }); + + return ref.get(); + } + + /** + * Produces a {@link String} instance for the given bytes. + * + *

+ * If all are valid ascii (i.e. {@code >= 0}) and a corresponding {@code String} value exists, it + * will be returned. If no value exists, a {@code String} will be created, but not stored. + *

+ * + *

+ * If non-ascii bytes are discovered, the encoding will be used to + * {@link Encoding#decode(byte[], int, int) decode} and that value will be returned (but not stored). + *

+ * + * @param bytes The bytes of the String. Must not be {@code null}. + * @param offset Offset into bytes to start. + * @param length The number of bytes in bytes which are relevant. + * @param encoding To use if non-ascii bytes seen. + * @return Decoded {@code String} from bytes. + * @throws IOException If error decoding from Encoding. + */ + public String getStringIfPresent(byte[] bytes, int offset, int length, Encoding encoding) throws IOException { + if (length == 0) { + return ""; + } + + final int hash = hashKey(bytes, offset, length); + // 0 indicates the presence of a non-ascii character - defer to encoding to create the string + if (hash == 0) { + return encoding.decode(bytes, offset, length); + } + cleanQueue(); + // create a TempKey with the byte[] given + final TempKey tempKey = new TempKey(hash, bytes, offset, length); + SoftReference ref = cache.get(tempKey); + if (ref != null) { + final String val = ref.get(); + if (val != null) { + return val; + } + } + + return new String(bytes, offset, length, StandardCharsets.US_ASCII); + } + + /** + * Process any entries in {@link #refQueue} to purge from the {@link #cache}. + * @see StringReference#dispose() + */ + private void cleanQueue() { + Reference ref; + while ((ref = refQueue.poll()) != null) { + ((StringReference) ref).dispose(); + } + } + + /** + * Generates a hash value for the relevant entries in bytes as long as all values are ascii ({@code >= 0}). + * @return hash code for relevant bytes, or {@code 0} if non-ascii bytes present. + */ + private static int hashKey(byte[] bytes, int offset, int length) { + int result = 1; + for (int i = offset, j = offset + length; i < j; i++) { + final byte b = bytes[i]; + // bytes are signed values. all ascii values are positive + if (b < 0) { + return 0; + } + result = 31 * result + b; + } + return result; + } + + /** + * Performs equality check between a and b (with corresponding offset/length values). + *

+ * The {@code static boolean equals(byte[].class, int, int, byte[], int, int} method in {@link java.util.Arrays} + * is optimized for longer {@code byte[]} instances than is expected to be seen here. + *

+ */ + static boolean arrayEquals(byte[] a, int aOffset, int aLength, byte[] b, int bOffset, int bLength) { + if (aLength != bLength) { + return false; + } + //TODO: in jdk9, could use VarHandle to read 4 bytes at a time as an int for comparison + // or 8 bytes as a long - though we likely expect short values here + for (int i = 0; i < aLength; i++) { + if (a[aOffset + i] != b[bOffset + i]) { + return false; + } + } + return true; + } + + /** + * {@inheritDoc} + */ + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(32 + (8 * cache.size())); + sb.append("AsciiStringInterner ["); + cache.forEach((k, v) -> { + sb.append('\''); + k.appendString(sb); + sb.append("', "); + }); + //replace trailing ', ' with ']'; + final int length = sb.length(); + if (length > 21) { + sb.setLength(sb.length() - 2); + } + sb.append(']'); + return sb.toString(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java b/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java new file mode 100644 index 0000000..35dcb79 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import org.postgresql.PGConnection; +import org.postgresql.PGProperty; +import org.postgresql.jdbc.FieldMetadata; +import org.postgresql.jdbc.TimestampUtils; +import org.postgresql.util.LruCache; +import org.postgresql.xml.PGXmlFactoryFactory; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.TimerTask; +import java.util.logging.Logger; + +/** + * Driver-internal connection interface. Application code should not use this interface. + */ +public interface BaseConnection extends PGConnection, Connection { + /** + * Cancel the current query executing on this connection. 
+ * + * @throws SQLException if something goes wrong. + */ + @Override + void cancelQuery() throws SQLException; + + /** + * Execute a SQL query that returns a single resultset. Never causes a new transaction to be + * started regardless of the autocommit setting. + * + * @param s the query to execute + * @return the (non-null) returned resultset + * @throws SQLException if something goes wrong. + */ + ResultSet execSQLQuery(String s) throws SQLException; + + ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency) + throws SQLException; + + /** + * Execute a SQL query that does not return results. Never causes a new transaction to be started + * regardless of the autocommit setting. + * + * @param s the query to execute + * @throws SQLException if something goes wrong. + */ + void execSQLUpdate(String s) throws SQLException; + + /** + * Get the QueryExecutor implementation for this connection. + * + * @return the (non-null) executor + */ + QueryExecutor getQueryExecutor(); + + /** + * Internal protocol for work with physical and logical replication. Physical replication available + * only since PostgreSQL version 9.1. Logical replication available only since PostgreSQL version 9.4. + * + * @return not null replication protocol + */ + ReplicationProtocol getReplicationProtocol(); + + /** + *

Construct and return an appropriate object for the given type and value. This only considers + * the types registered via {@link org.postgresql.PGConnection#addDataType(String, Class)} and + * {@link org.postgresql.PGConnection#addDataType(String, String)}.

+ * + *

If no class is registered as handling the given type, then a generic + * {@link org.postgresql.util.PGobject} instance is returned.

+ * + *

value or byteValue must be non-null

+ * @param type the backend typename + * @param value the type-specific string representation of the value + * @param byteValue the type-specific binary representation of the value + * @return an appropriate object; never null. + * @throws SQLException if something goes wrong + */ + Object getObject(String type, String value, byte [] byteValue) + throws SQLException; + + Encoding getEncoding() throws SQLException; + + TypeInfo getTypeInfo(); + + /** + *

Check if we have at least a particular server version.

+ * + *

The input version is of the form xxyyzz, matching a PostgreSQL version like xx.yy.zz. So 9.0.12 + * is 90012.

+ * + * @param ver the server version to check, of the form xxyyzz eg 90401 + * @return true if the server version is at least "ver". + */ + boolean haveMinimumServerVersion(int ver); + + /** + *

Check if we have at least a particular server version.

+ * + *

The input version is of the form xxyyzz, matching a PostgreSQL version like xx.yy.zz. So 9.0.12 + * is 90012.

+ * + * @param ver the server version to check + * @return true if the server version is at least "ver". + */ + boolean haveMinimumServerVersion(Version ver); + + /** + * Encode a string using the database's client_encoding (usually UTF8, but can vary on older + * server versions). This is used when constructing synthetic resultsets (for example, in metadata + * methods). + * + * @param str the string to encode + * @return an encoded representation of the string + * @throws SQLException if something goes wrong. + */ + byte[] encodeString(String str) throws SQLException; + + /** + * Escapes a string for use as string-literal within an SQL command. The method chooses the + * applicable escaping rules based on the value of {@link #getStandardConformingStrings()}. + * + * @param str a string value + * @return the escaped representation of the string + * @throws SQLException if the string contains a {@code \0} character + */ + String escapeString(String str) throws SQLException; + + /** + * Returns whether the server treats string-literals according to the SQL standard or if it uses + * traditional PostgreSQL escaping rules. Versions up to 8.1 always treated backslashes as escape + * characters in string-literals. Since 8.2, this depends on the value of the + * {@code standard_conforming_strings} server variable. + * + * @return true if the server treats string literals according to the SQL standard + * @see QueryExecutor#getStandardConformingStrings() + */ + boolean getStandardConformingStrings(); + + // Ew. Quick hack to give access to the connection-specific utils implementation. + @Deprecated + TimestampUtils getTimestampUtils(); + + // Get the per-connection logger. + Logger getLogger(); + + // Get the bind-string-as-varchar config flag + boolean getStringVarcharFlag(); + + /** + * Get the current transaction state of this connection. 
+ * + * @return current transaction state of this connection + */ + TransactionState getTransactionState(); + + /** + * Returns true if value for the given oid should be sent using binary transfer. False if value + * should be sent using text transfer. + * + * @param oid The oid to check. + * @return True for binary transfer, false for text transfer. + */ + boolean binaryTransferSend(int oid); + + /** + * Return whether to disable column name sanitation. + * + * @return true column sanitizer is disabled + */ + boolean isColumnSanitiserDisabled(); + + /** + * Schedule a TimerTask for later execution. The task will be scheduled with the shared Timer for + * this connection. + * + * @param timerTask timer task to schedule + * @param milliSeconds delay in milliseconds + */ + void addTimerTask(TimerTask timerTask, long milliSeconds); + + /** + * Invoke purge() on the underlying shared Timer so that internal resources will be released. + */ + void purgeTimerTasks(); + + /** + * Return metadata cache for given connection. + * + * @return metadata cache + */ + LruCache getFieldMetadataCache(); + + CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized, + String... columnNames) + throws SQLException; + + /** + * By default, the connection resets statement cache in case deallocate all/discard all + * message is observed. + * This API allows to disable that feature for testing purposes. + * + * @param flushCacheOnDeallocate true if statement cache should be reset when "deallocate/discard" message observed + */ + void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate); + + /** + * Indicates if statements to backend should be hinted as read only. + * + * @return Indication if hints to backend (such as when transaction begins) + * should be read only. + * @see PGProperty#READ_ONLY_MODE + */ + boolean hintReadOnly(); + + /** + * Retrieve the factory to instantiate XML processing factories. 
+ * + * @return The factory to use to instantiate XML processing factories + * @throws SQLException if the class cannot be found or instantiated. + */ + PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException; + + /** + * Indicates if error details from server used in included in logging and exceptions. + * + * @return true if should be included and passed on to other exceptions + */ + boolean getLogServerErrorDetail(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java b/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java new file mode 100644 index 0000000..d9d4aea --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import org.postgresql.util.CanEstimateSize; + +/** + * This class is used as a cache key for simple statements that have no "returning columns". + * Prepared statements that have no returning columns use just {@code String sql} as a key. + * Simple and Prepared statements that have returning columns use {@link QueryWithReturningColumnsKey} + * as a cache key. 
+ */ +class BaseQueryKey implements CanEstimateSize { + public final String sql; + public final boolean isParameterized; + public final boolean escapeProcessing; + + BaseQueryKey(String sql, boolean isParameterized, boolean escapeProcessing) { + this.sql = sql; + this.isParameterized = isParameterized; + this.escapeProcessing = escapeProcessing; + } + + @Override + public String toString() { + return "BaseQueryKey{" + + "sql='" + sql + '\'' + + ", isParameterized=" + isParameterized + + ", escapeProcessing=" + escapeProcessing + + '}'; + } + + @Override + public long getSize() { + if (sql == null) { // just in case + return 16; + } + return 16 + sql.length() * 2L; // 2 bytes per char, revise with Java 9's compact strings + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BaseQueryKey that = (BaseQueryKey) o; + + if (isParameterized != that.isParameterized) { + return false; + } + if (escapeProcessing != that.escapeProcessing) { + return false; + } + return sql != null ? sql.equals(that.sql) : that.sql == null; + + } + + @Override + public int hashCode() { + int result = sql != null ? sql.hashCode() : 0; + result = 31 * result + (isParameterized ? 1 : 0); + result = 31 * result + (escapeProcessing ? 1 : 0); + return result; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java b/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java new file mode 100644 index 0000000..d7f8a66 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import org.postgresql.PGStatement; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; + +/** + * Driver-internal statement interface. Application code should not use this interface. + */ +public interface BaseStatement extends PGStatement, Statement { + /** + * Create a synthetic resultset from data provided by the driver. + * + * @param fields the column metadata for the resultset + * @param tuples the resultset data + * @return the new ResultSet + * @throws SQLException if something goes wrong + */ + ResultSet createDriverResultSet(Field[] fields, List tuples) throws SQLException; + + /** + * Create a resultset from data retrieved from the server. + * + * @param originalQuery the query that generated this resultset; used when dealing with updateable + * resultsets + * @param fields the column metadata for the resultset + * @param tuples the resultset data + * @param cursor the cursor to use to retrieve more data from the server; if null, no additional + * data is present. + * @return the new ResultSet + * @throws SQLException if something goes wrong + */ + ResultSet createResultSet(Query originalQuery, Field[] fields, List tuples, + ResultCursor cursor) throws SQLException; + + /** + * Execute a query, passing additional query flags. + * + * @param sql the query to execute (JDBC-style query) + * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into + * the default flags. + * @return true if there is a result set + * @throws SQLException if something goes wrong. + */ + boolean executeWithFlags(String sql, int flags) throws SQLException; + + /** + * Execute a query, passing additional query flags. + * + * @param cachedQuery the query to execute (native to PostgreSQL) + * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into + * the default flags. 
+ * @return true if there is a result set + * @throws SQLException if something goes wrong. + */ + boolean executeWithFlags(CachedQuery cachedQuery, int flags) throws SQLException; + + /** + * Execute a prepared query, passing additional query flags. + * + * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into + * the default flags. + * @return true if there is a result set + * @throws SQLException if something goes wrong. + */ + boolean executeWithFlags(int flags) throws SQLException; +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java b/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java new file mode 100644 index 0000000..23ac4cd --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2015, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import org.postgresql.util.CanEstimateSize; + +/** + * Stores information on the parsed JDBC query. It is used to cut parsing overhead when executing + * the same query through {@link java.sql.Connection#prepareStatement(String)}. + */ +public class CachedQuery implements CanEstimateSize { + /** + * Cache key. {@link String} or {@code org.postgresql.util.CanEstimateSize}. + */ + public final Object key; + public final Query query; + public final boolean isFunction; + + private int executeCount; + + public CachedQuery(Object key, Query query, boolean isFunction) { + assert key instanceof String || key instanceof CanEstimateSize + : "CachedQuery.key should either be String or implement CanEstimateSize." 
+ + " Actual class is " + key.getClass(); + this.key = key; + this.query = query; + this.isFunction = isFunction; + } + + public void increaseExecuteCount() { + if (executeCount < Integer.MAX_VALUE) { + executeCount++; + } + } + + public void increaseExecuteCount(int inc) { + int newValue = executeCount + inc; + if (newValue > 0) { // if overflows, just ignore the update + executeCount = newValue; + } + } + + /** + * Number of times this statement has been used. + * + * @return number of times this statement has been used + */ + public int getExecuteCount() { + return executeCount; + } + + @Override + public long getSize() { + long queryLength; + if (key instanceof String) { + queryLength = ((String) key).length() * 2L; // 2 bytes per char, revise with Java 9's compact strings + } else { + queryLength = ((CanEstimateSize) key).getSize(); + } + return queryLength * 2 /* original query and native sql */ + + 100L /* entry in hash map, CachedQuery wrapper, etc */; + } + + @Override + public String toString() { + return "CachedQuery{" + + "executeCount=" + executeCount + + ", query=" + query + + ", isFunction=" + isFunction + + '}'; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java b/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java new file mode 100644 index 0000000..90af15d --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2015, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.util.LruCache; + +import java.sql.SQLException; +import java.util.List; + +/** + * Creates an instance of {@link CachedQuery} for a given connection. 
+ */ +class CachedQueryCreateAction implements LruCache.CreateAction { + private static final String[] EMPTY_RETURNING = new String[0]; + private final QueryExecutor queryExecutor; + + CachedQueryCreateAction(QueryExecutor queryExecutor) { + this.queryExecutor = queryExecutor; + } + + @Override + public CachedQuery create(Object key) throws SQLException { + assert key instanceof String || key instanceof BaseQueryKey + : "Query key should be String or BaseQueryKey. Given " + key.getClass() + ", sql: " + + key; + BaseQueryKey queryKey; + String parsedSql; + if (key instanceof BaseQueryKey) { + queryKey = (BaseQueryKey) key; + parsedSql = queryKey.sql; + } else { + queryKey = null; + parsedSql = (String) key; + } + if (key instanceof String || queryKey.escapeProcessing) { + parsedSql = + Parser.replaceProcessing(parsedSql, true, queryExecutor.getStandardConformingStrings()); + } + boolean isFunction; + if (key instanceof CallableQueryKey) { + JdbcCallParseInfo callInfo = + Parser.modifyJdbcCall(parsedSql, queryExecutor.getStandardConformingStrings(), + queryExecutor.getServerVersionNum(), queryExecutor.getProtocolVersion(), queryExecutor.getEscapeSyntaxCallMode()); + parsedSql = callInfo.getSql(); + isFunction = callInfo.isFunction(); + } else { + isFunction = false; + } + boolean isParameterized = key instanceof String || queryKey.isParameterized; + boolean splitStatements = isParameterized || queryExecutor.getPreferQueryMode().compareTo(PreferQueryMode.EXTENDED) >= 0; + + String[] returningColumns; + if (key instanceof QueryWithReturningColumnsKey) { + returningColumns = ((QueryWithReturningColumnsKey) key).columnNames; + } else { + returningColumns = EMPTY_RETURNING; + } + + List queries = Parser.parseJdbcSql(parsedSql, + queryExecutor.getStandardConformingStrings(), isParameterized, splitStatements, + queryExecutor.isReWriteBatchedInsertsEnabled(), queryExecutor.getQuoteReturningIdentifiers(), + returningColumns + ); + + Query query = queryExecutor.wrap(queries); 
+ return new CachedQuery(key, query, isFunction); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/CallableQueryKey.java b/pgjdbc/src/main/java/org/postgresql/core/CallableQueryKey.java new file mode 100644 index 0000000..d65ab18 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/CallableQueryKey.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2015, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +/** + * Serves as a cache key for {@link java.sql.CallableStatement}. + * Callable statements require some special parsing before use (due to JDBC {@code {?= call...}} + * syntax, thus a special cache key class is used to trigger proper parsing for callable statements. + */ +class CallableQueryKey extends BaseQueryKey { + + CallableQueryKey(String sql) { + super(sql, true, true); + } + + @Override + public String toString() { + return "CallableQueryKey{" + + "sql='" + sql + '\'' + + ", isParameterized=" + isParameterized + + ", escapeProcessing=" + escapeProcessing + + '}'; + } + + @Override + public int hashCode() { + return super.hashCode() * 31; + } + + @Override + public boolean equals(Object o) { + // Nothing interesting here, overriding equals to make hashCode and equals paired + return super.equals(o); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/CommandCompleteParser.java b/pgjdbc/src/main/java/org/postgresql/core/CommandCompleteParser.java new file mode 100644 index 0000000..a4e52b3 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/CommandCompleteParser.java @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +/** + * Parses {@code oid} and {@code rows} from a {@code CommandComplete (B)} message (end of Execute). + */ +public final class CommandCompleteParser { + private long oid; + private long rows; + + public CommandCompleteParser() { + } + + public long getOid() { + return oid; + } + + public long getRows() { + return rows; + } + + void set(long oid, long rows) { + this.oid = oid; + this.rows = rows; + } + + /** + * Parses {@code CommandComplete (B)} message. + * Status is in the format of "COMMAND OID ROWS" where both 'OID' and 'ROWS' are optional + * and COMMAND can have spaces within it, like CREATE TABLE. + * + * @param status COMMAND OID ROWS message + * @throws PSQLException in case the status cannot be parsed + */ + public void parse(String status) throws PSQLException { + // Assumption: command neither starts nor ends with a digit + if (!Parser.isDigitAt(status, status.length() - 1)) { + set(0, 0); + return; + } + + // Scan backwards, while searching for a maximum of two number groups + // COMMAND OID ROWS + // COMMAND ROWS + long oid = 0; + long rows = 0; + try { + int lastSpace = status.lastIndexOf(' '); + // Status ends with a digit => it is ROWS + if (Parser.isDigitAt(status, lastSpace + 1)) { + rows = Parser.parseLong(status, lastSpace + 1, status.length()); + + if (Parser.isDigitAt(status, lastSpace - 1)) { + int penultimateSpace = status.lastIndexOf(' ', lastSpace - 1); + if (Parser.isDigitAt(status, penultimateSpace + 1)) { + oid = Parser.parseLong(status, penultimateSpace + 1, lastSpace); + } + } + } + } catch (NumberFormatException e) { + // This should only occur if the oid or rows are out of 0..Long.MAX_VALUE range + throw new PSQLException( + GT.tr("Unable to parse the count in command completion tag: {0}.", status), + PSQLState.CONNECTION_FAILURE, e); + } + set(oid, rows); + } + + @Override + 
public String toString() { + return "CommandStatus{" + + "oid=" + oid + + ", rows=" + rows + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CommandCompleteParser that = (CommandCompleteParser) o; + + if (oid != that.oid) { + return false; + } + return rows == that.rows; + } + + @Override + public int hashCode() { + int result = (int) (oid ^ (oid >>> 32)); + result = 31 * result + (int) (rows ^ (rows >>> 32)); + return result; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/ConnectionFactory.java b/pgjdbc/src/main/java/org/postgresql/core/ConnectionFactory.java new file mode 100644 index 0000000..45a0008 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/ConnectionFactory.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core; + +import org.postgresql.PGProperty; +import org.postgresql.core.v3.ConnectionFactoryImpl; +import org.postgresql.util.GT; +import org.postgresql.util.HostSpec; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.Properties; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Handles protocol-specific connection setup. + * + * @author Oliver Jowett (oliver@opencloud.com) + */ +public abstract class ConnectionFactory { + + private static final Logger LOGGER = Logger.getLogger(ConnectionFactory.class.getName()); + + public ConnectionFactory() { + } + + /** + *

Establishes and initializes a new connection.

+ * + *

If the "protocolVersion" property is specified, only that protocol version is tried. Otherwise, + * all protocols are tried in order, falling back to older protocols as necessary.

+ * + *

Currently, protocol versions 3 (7.4+) is supported.

+ * + * @param hostSpecs at least one host and port to connect to; multiple elements for round-robin + * failover + * @param info extra properties controlling the connection; notably, "password" if present + * supplies the password to authenticate with. + * @return the new, initialized, connection + * @throws SQLException if the connection could not be established. + */ + public static QueryExecutor openConnection(HostSpec[] hostSpecs, + Properties info) throws SQLException { + String protoName = PGProperty.PROTOCOL_VERSION.getOrDefault(info); + + if (protoName == null || protoName.isEmpty() || "3".equals(protoName)) { + ConnectionFactory connectionFactory = new ConnectionFactoryImpl(); + QueryExecutor queryExecutor = connectionFactory.openConnectionImpl( + hostSpecs, info); + if (queryExecutor != null) { + return queryExecutor; + } + } + + throw new PSQLException( + GT.tr("A connection could not be made using the requested protocol {0}.", protoName), + PSQLState.CONNECTION_UNABLE_TO_CONNECT); + } + + /** + * Implementation of {@link #openConnection} for a particular protocol version. Implemented by + * subclasses of {@link ConnectionFactory}. + * + * @param hostSpecs at least one host and port to connect to; multiple elements for round-robin + * failover + * @param info extra properties controlling the connection; notably, "password" if present + * supplies the password to authenticate with. + * @return the new, initialized, connection, or null if this protocol version is not + * supported by the server. + * @throws SQLException if the connection could not be established for a reason other than + * protocol version incompatibility. + */ + public abstract QueryExecutor openConnectionImpl(HostSpec[] hostSpecs, Properties info) throws SQLException; + + /** + * Safely close the given stream. + * + * @param newStream The stream to close. 
+ */ + protected void closeStream(PGStream newStream) { + if (newStream != null) { + try { + newStream.close(); + } catch (IOException e) { + LOGGER.log(Level.WARNING, "Failed to closed stream with error: {0}", e); + } + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/Encoding.java b/pgjdbc/src/main/java/org/postgresql/core/Encoding.java new file mode 100644 index 0000000..0afc258 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/Encoding.java @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Reader; +import java.io.Writer; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Representation of a particular character encoding. + */ +public class Encoding { + + private static final Logger LOGGER = Logger.getLogger(Encoding.class.getName()); + + private static final Encoding DEFAULT_ENCODING = new Encoding(); + + private static final Encoding UTF8_ENCODING = new Encoding(StandardCharsets.UTF_8, true); + + /* + * Preferred JVM encodings for backend encodings. 
+ */ + private static final HashMap encodings = new HashMap<>(); + + static { + //Note: this list should match the set of supported server + // encodings found in backend/util/mb/encnames.c + encodings.put("SQL_ASCII", new String[]{"ASCII", "US-ASCII"}); + encodings.put("UNICODE", new String[]{"UTF-8", "UTF8"}); + encodings.put("UTF8", new String[]{"UTF-8", "UTF8"}); + encodings.put("LATIN1", new String[]{"ISO8859_1"}); + encodings.put("LATIN2", new String[]{"ISO8859_2"}); + encodings.put("LATIN3", new String[]{"ISO8859_3"}); + encodings.put("LATIN4", new String[]{"ISO8859_4"}); + encodings.put("ISO_8859_5", new String[]{"ISO8859_5"}); + encodings.put("ISO_8859_6", new String[]{"ISO8859_6"}); + encodings.put("ISO_8859_7", new String[]{"ISO8859_7"}); + encodings.put("ISO_8859_8", new String[]{"ISO8859_8"}); + encodings.put("LATIN5", new String[]{"ISO8859_9"}); + encodings.put("LATIN7", new String[]{"ISO8859_13"}); + encodings.put("LATIN9", new String[]{"ISO8859_15_FDIS"}); + encodings.put("EUC_JP", new String[]{"EUC_JP"}); + encodings.put("EUC_CN", new String[]{"EUC_CN"}); + encodings.put("EUC_KR", new String[]{"EUC_KR"}); + encodings.put("JOHAB", new String[]{"Johab"}); + encodings.put("EUC_TW", new String[]{"EUC_TW"}); + encodings.put("SJIS", new String[]{"MS932", "SJIS"}); + encodings.put("BIG5", new String[]{"Big5", "MS950", "Cp950"}); + encodings.put("GBK", new String[]{"GBK", "MS936"}); + encodings.put("UHC", new String[]{"MS949", "Cp949", "Cp949C"}); + encodings.put("TCVN", new String[]{"Cp1258"}); + encodings.put("WIN1256", new String[]{"Cp1256"}); + encodings.put("WIN1250", new String[]{"Cp1250"}); + encodings.put("WIN874", new String[]{"MS874", "Cp874"}); + encodings.put("WIN", new String[]{"Cp1251"}); + encodings.put("ALT", new String[]{"Cp866"}); + // We prefer KOI8-U, since it is a superset of KOI8-R. + encodings.put("KOI8", new String[]{"KOI8_U", "KOI8_R"}); + // If the database isn't encoding-aware then we can't have + // any preferred encodings. 
+ encodings.put("UNKNOWN", new String[0]); + // The following encodings do not have a java equivalent + encodings.put("MULE_INTERNAL", new String[0]); + encodings.put("LATIN6", new String[0]); + encodings.put("LATIN8", new String[0]); + encodings.put("LATIN10", new String[0]); + } + + static final AsciiStringInterner INTERNER = new AsciiStringInterner(); + + private final Charset encoding; + private final boolean fastASCIINumbers; + + /** + * Uses the default charset of the JVM. + */ + private Encoding() { + this(Charset.defaultCharset()); + } + + /** + * Subclasses may use this constructor if they know in advance of their ASCII number + * compatibility. + * + * @param encoding charset to use + * @param fastASCIINumbers whether this encoding is compatible with ASCII numbers. + */ + protected Encoding(Charset encoding, boolean fastASCIINumbers) { + if (encoding == null) { + throw new NullPointerException("Null encoding charset not supported"); + } + this.encoding = encoding; + this.fastASCIINumbers = fastASCIINumbers; + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, "Creating new Encoding {0} with fastASCIINumbers {1}", + new Object[]{encoding, fastASCIINumbers}); + } + } + + /** + * Use the charset passed as parameter and tests at creation time whether the specified encoding + * is compatible with ASCII numbers. + * + * @param encoding charset to use + */ + protected Encoding(Charset encoding) { + this(encoding, testAsciiNumbers(encoding)); + } + + /** + * Returns true if this encoding has characters '-' and '0'..'9' in exactly same position as + * ascii. + * + * @return true if the bytes can be scanned directly for ascii numbers. + */ + public boolean hasAsciiNumbers() { + return fastASCIINumbers; + } + + /** + * Construct an Encoding for a given JVM encoding. 
+ * + * @param jvmEncoding the name of the JVM encoding + * @return an Encoding instance for the specified encoding, or an Encoding instance for the + * default JVM encoding if the specified encoding is unavailable. + */ + public static Encoding getJVMEncoding(String jvmEncoding) { + if ("UTF-8".equals(jvmEncoding)) { + return UTF8_ENCODING; + } + if (Charset.isSupported(jvmEncoding)) { + return new Encoding(Charset.forName(jvmEncoding)); + } + return DEFAULT_ENCODING; + } + + /** + * Construct an Encoding for a given database encoding. + * + * @param databaseEncoding the name of the database encoding + * @return an Encoding instance for the specified encoding, or an Encoding instance for the + * default JVM encoding if the specified encoding is unavailable. + */ + public static Encoding getDatabaseEncoding(String databaseEncoding) { + if ("UTF8".equals(databaseEncoding) || "UNICODE".equals(databaseEncoding)) { + return UTF8_ENCODING; + } + // If the backend encoding is known and there is a suitable + // encoding in the JVM we use that. Otherwise we fall back + // to the default encoding of the JVM. + String[] candidates = encodings.get(databaseEncoding); + if (candidates != null) { + for (String candidate : candidates) { + LOGGER.log(Level.FINEST, "Search encoding candidate {0}", candidate); + if (Charset.isSupported(candidate)) { + return new Encoding(Charset.forName(candidate)); + } + } + } + + // Try the encoding name directly -- maybe the charset has been + // provided by the user. + if (Charset.isSupported(databaseEncoding)) { + return new Encoding(Charset.forName(databaseEncoding)); + } + + // Fall back to default JVM encoding. + LOGGER.log(Level.FINEST, "{0} encoding not found, returning default encoding", databaseEncoding); + return DEFAULT_ENCODING; + } + + /** + * Indicates that string should be staged as a canonicalized value. + * + *

+ * This is intended for use with {@code String} constants. + *

+ * + * @param string The string to maintain canonicalized reference to. Must not be {@code null}. + * @see Encoding#decodeCanonicalized(byte[], int, int) + */ + public static void canonicalize(String string) { + INTERNER.putString(string); + } + + /** + * Get the name of the (JVM) encoding used. + * + * @return the JVM encoding name used by this instance. + */ + public String name() { + return encoding.name(); + } + + /** + * Encode a string to an array of bytes. + * + * @param s the string to encode + * @return a bytearray containing the encoded string + * @throws IOException if something goes wrong + */ + public byte [] encode(String s) throws IOException { + if (s == null) { + return null; + } + + return s.getBytes(encoding); + } + + /** + * Decode an array of bytes possibly into a canonicalized string. + * + *

+ * Only ascii compatible encoding support canonicalization and only ascii {@code String} values are eligible + * to be canonicalized. + *

+ * + * @param encodedString a byte array containing the string to decode + * @param offset the offset in encodedString of the first byte of the encoded + * representation + * @param length the length, in bytes, of the encoded representation + * @return the decoded string + * @throws IOException if something goes wrong + */ + public String decodeCanonicalized(byte[] encodedString, int offset, int length) throws IOException { + if (length == 0) { + return ""; + } + // if fastASCIINumbers is false, then no chance of the byte[] being ascii compatible characters + return fastASCIINumbers ? INTERNER.getString(encodedString, offset, length, this) + : decode(encodedString, offset, length); + } + + public String decodeCanonicalizedIfPresent(byte[] encodedString, int offset, int length) throws IOException { + if (length == 0) { + return ""; + } + // if fastASCIINumbers is false, then no chance of the byte[] being ascii compatible characters + return fastASCIINumbers ? INTERNER.getStringIfPresent(encodedString, offset, length, this) + : decode(encodedString, offset, length); + } + + /** + * Decode an array of bytes possibly into a canonicalized string. + * + *

+ * Only ascii compatible encoding support canonicalization and only ascii {@code String} values are eligible + * to be canonicalized. + *

+ * + * @param encodedString a byte array containing the string to decode + * @return the decoded string + * @throws IOException if something goes wrong + */ + public String decodeCanonicalized(byte[] encodedString) throws IOException { + return decodeCanonicalized(encodedString, 0, encodedString.length); + } + + /** + * Decode an array of bytes into a string. + * + * @param encodedString a byte array containing the string to decode + * @param offset the offset in encodedString of the first byte of the encoded + * representation + * @param length the length, in bytes, of the encoded representation + * @return the decoded string + * @throws IOException if something goes wrong + */ + public String decode(byte[] encodedString, int offset, int length) throws IOException { + return new String(encodedString, offset, length, encoding); + } + + /** + * Decode an array of bytes into a string. + * + * @param encodedString a byte array containing the string to decode + * @return the decoded string + * @throws IOException if something goes wrong + */ + public String decode(byte[] encodedString) throws IOException { + return decode(encodedString, 0, encodedString.length); + } + + /** + * Get a Reader that decodes the given InputStream using this encoding. + * + * @param in the underlying stream to decode from + * @return a non-null Reader implementation. + * @throws IOException if something goes wrong + */ + public Reader getDecodingReader(InputStream in) throws IOException { + return new InputStreamReader(in, encoding); + } + + /** + * Get a Writer that encodes to the given OutputStream using this encoding. + * + * @param out the underlying stream to encode to + * @return a non-null Writer implementation. + * @throws IOException if something goes wrong + */ + public Writer getEncodingWriter(OutputStream out) throws IOException { + return new OutputStreamWriter(out, encoding); + } + + /** + * Get an Encoding using the default encoding for the JVM. 
+ * + * @return an Encoding instance + */ + public static Encoding defaultEncoding() { + return DEFAULT_ENCODING; + } + + @Override + public String toString() { + return encoding.name(); + } + + /** + * Checks whether this encoding is compatible with ASCII for the number characters '-' and + * '0'..'9'. Where compatible means that they are encoded with exactly same values. + * + * @return If faster ASCII number parsing can be used with this encoding. + */ + private static boolean testAsciiNumbers(Charset encoding) { + // TODO: test all postgres supported encoding to see if there are + // any which do _not_ have ascii numbers in same location + // at least all the encoding listed in the encodings hashmap have + // working ascii numbers + String test = "-0123456789"; + byte[] bytes = test.getBytes(encoding); + String res = new String(bytes, StandardCharsets.US_ASCII); + return test.equals(res); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/EncodingPredictor.java b/pgjdbc/src/main/java/org/postgresql/core/EncodingPredictor.java new file mode 100644 index 0000000..9116bee --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/EncodingPredictor.java @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import java.io.IOException; + +/** + *

Predicts encoding for error messages based on some heuristics.

+ * + *
    + *
  1. For certain languages, it is known how "FATAL" is translated
  2. + *
  3. For Japanese, several common words are hardcoded
  4. + *
  5. Then try various LATIN encodings
  6. + *
+ */ +public class EncodingPredictor { + + public EncodingPredictor() { + } + + /** + * In certain cases the encoding is not known for sure (e.g. before authentication). + * In such cases, backend might send messages in "native to database" encoding, + * thus pgjdbc has to guess the encoding nad + */ + public static class DecodeResult { + public final String result; + public final String encoding; // JVM name + + DecodeResult(String result, String encoding) { + this.result = result; + this.encoding = encoding; + } + } + + static class Translation { + public final String fatalText; + private final String [] texts; + public final String language; + public final String[] encodings; + + Translation(String fatalText, String [] texts, + String language, String... encodings) { + this.fatalText = fatalText; + this.texts = texts; + this.language = language; + this.encodings = encodings; + } + } + + private static final Translation[] FATAL_TRANSLATIONS = + new Translation[]{ + new Translation("ВАЖНО", null, "ru", "WIN", "ALT", "KOI8"), + new Translation("致命错误", null, "zh_CN", "EUC_CN", "GBK", "BIG5"), + new Translation("KATASTROFALNY", null, "pl", "LATIN2"), + new Translation("FATALE", null, "it", "LATIN1", "LATIN9"), + new Translation("FATAL", new String[]{"は存在しません" /* ~ does not exist */, + "ロール" /* ~ role */, "ユーザ" /* ~ user */}, "ja", "EUC_JP", "SJIS"), + new Translation(null, null, "fr/de/es/pt_BR", "LATIN1", "LATIN3", "LATIN4", "LATIN5", + "LATIN7", "LATIN9"), + }; + + public static DecodeResult decode(byte[] bytes, int offset, int length) { + Encoding defaultEncoding = Encoding.defaultEncoding(); + for (Translation tr : FATAL_TRANSLATIONS) { + for (String encoding : tr.encodings) { + Encoding encoder = Encoding.getDatabaseEncoding(encoding); + if (encoder == defaultEncoding) { + continue; + } + + // If there is a translation for "FATAL", then try typical encodings for that language + if (tr.fatalText != null) { + byte[] encoded; + try { + byte[] tmp = 
encoder.encode(tr.fatalText); + encoded = new byte[tmp.length + 2]; + encoded[0] = 'S'; + encoded[encoded.length - 1] = 0; + System.arraycopy(tmp, 0, encoded, 1, tmp.length); + } catch (IOException e) { + continue;// should not happen + } + + if (!arrayContains(bytes, offset, length, encoded, 0, encoded.length)) { + continue; + } + } + + // No idea how to tell Japanese from Latin languages, thus just hard-code certain Japanese words + if (tr.texts != null) { + boolean foundOne = false; + for (String text : tr.texts) { + try { + byte[] textBytes = encoder.encode(text); + if (arrayContains(bytes, offset, length, textBytes, 0, textBytes.length)) { + foundOne = true; + break; + } + } catch (IOException e) { + // do not care, will try other encodings + } + } + if (!foundOne) { + // Error message does not have key parts, will try other encodings + continue; + } + } + + try { + String decoded = encoder.decode(bytes, offset, length); + if (decoded.indexOf(65533) != -1) { + // bad character in string, try another encoding + continue; + } + return new DecodeResult(decoded, encoder.name()); + } catch (IOException e) { + // do not care + } + } + } + return null; + } + + private static boolean arrayContains( + byte[] first, int firstOffset, int firstLength, + byte[] second, int secondOffset, int secondLength + ) { + if (firstLength < secondLength) { + return false; + } + + for (int i = 0; i < firstLength; i++) { + for (; i < firstLength && first[firstOffset + i] != second[secondOffset]; i++) { + // find the first matching byte + } + + int j = 1; + for (; j < secondLength && first[firstOffset + i + j] == second[secondOffset + j]; j++) { + // compare arrays + } + if (j == secondLength) { + return true; + } + } + return false; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/Field.java b/pgjdbc/src/main/java/org/postgresql/core/Field.java new file mode 100644 index 0000000..987f743 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/Field.java @@ -0,0 +1,176 
@@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import org.postgresql.jdbc.FieldMetadata; + +import java.util.Locale; + +public class Field { + // The V3 protocol defines two constants for the format of data + public static final int TEXT_FORMAT = 0; + public static final int BINARY_FORMAT = 1; + + private final int length; // Internal Length of this field + private final int oid; // OID of the type + private final int mod; // type modifier of this field + private String columnLabel; // Column label + + private int format = TEXT_FORMAT; // In the V3 protocol each field has a format + // 0 = text, 1 = binary + // In the V2 protocol all fields in a + // binary cursor are binary and all + // others are text + + private final int tableOid; // OID of table ( zero if no table ) + private final int positionInTable; + + // Cache fields filled in by AbstractJdbc2ResultSetMetaData.fetchFieldMetaData. + // Don't use unless that has been called. + private FieldMetadata metadata; + + private int sqlType; + private String pgType = NOT_YET_LOADED; + + // New string to avoid clashes with other strings + private static final String NOT_YET_LOADED = new String("pgType is not yet loaded"); + + /** + * Construct a field based on the information fed to it. + * + * @param name the name (column name and label) of the field + * @param oid the OID of the field + * @param length the length of the field + * @param mod modifier + */ + public Field(String name, int oid, int length, int mod) { + this(name, oid, length, mod, 0, 0); + } + + /** + * Constructor without mod parameter. + * + * @param name the name (column name and label) of the field + * @param oid the OID of the field + */ + public Field(String name, int oid) { + this(name, oid, 0, -1); + } + + /** + * Construct a field based on the information fed to it. 
+ * @param columnLabel the column label of the field + * @param oid the OID of the field + * @param length the length of the field + * @param mod modifier + * @param tableOid the OID of the columns' table + * @param positionInTable the position of column in the table (first column is 1, second column is 2, etc...) + */ + public Field(String columnLabel, int oid, int length, int mod, int tableOid, + int positionInTable) { + this.columnLabel = columnLabel; + this.oid = oid; + this.length = length; + this.mod = mod; + this.tableOid = tableOid; + this.positionInTable = positionInTable; + this.metadata = tableOid == 0 ? new FieldMetadata(columnLabel) : null; + } + + /** + * @return the oid of this Field's data type + */ + public int getOID() { + return oid; + } + + /** + * @return the mod of this Field's data type + */ + public int getMod() { + return mod; + } + + /** + * @return the column label of this Field's data type + */ + public String getColumnLabel() { + return columnLabel; + } + + /** + * @return the length of this Field's data type + */ + public int getLength() { + return length; + } + + /** + * @return the format of this Field's data (text=0, binary=1) + */ + public int getFormat() { + return format; + } + + /** + * @param format the format of this Field's data (text=0, binary=1) + */ + public void setFormat(int format) { + this.format = format; + } + + /** + * @return the columns' table oid, zero if no oid available + */ + public int getTableOid() { + return tableOid; + } + + public int getPositionInTable() { + return positionInTable; + } + + public FieldMetadata getMetadata() { + return metadata; + } + + public void setMetadata(FieldMetadata metadata) { + this.metadata = metadata; + } + + @Override + public String toString() { + return "Field(" + (columnLabel != null ? columnLabel : "") + + "," + Oid.toString(oid) + + "," + length + + "," + (format == TEXT_FORMAT ? 
'T' : 'B') + + ")"; + } + + public void setSQLType(int sqlType) { + this.sqlType = sqlType; + } + + public int getSQLType() { + return sqlType; + } + + public void setPGType(String pgType) { + this.pgType = pgType; + } + + public String getPGType() { + return pgType; + } + + public boolean isTypeInitialized() { + return pgType != NOT_YET_LOADED; + } + + public void upperCaseLabel() { + columnLabel = columnLabel.toUpperCase(Locale.ROOT); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/FixedLengthOutputStream.java b/pgjdbc/src/main/java/org/postgresql/core/FixedLengthOutputStream.java new file mode 100644 index 0000000..7e7b4ff --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/FixedLengthOutputStream.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * A stream that refuses to write more than a maximum number of bytes. 
+ */ +public class FixedLengthOutputStream extends OutputStream { + + private final int size; + private final OutputStream target; + private int written; + + public FixedLengthOutputStream(int size, OutputStream target) { + this.size = size; + this.target = target; + } + + @Override + public void write(int b) throws IOException { + verifyAllowed(1); + written++; + target.write(b); + } + + @Override + public void write(byte[] buf, int offset, int len) throws IOException { + if ((offset < 0) || (len < 0) || ((offset + len) > buf.length)) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return; + } + verifyAllowed(len); + target.write(buf, offset, len); + written += len; + } + + public int remaining() { + return size - written; + } + + private void verifyAllowed(int wanted) throws IOException { + if (remaining() < wanted) { + throw new IOException("Attempt to write more than the specified " + size + " bytes"); + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/JavaVersion.java b/pgjdbc/src/main/java/org/postgresql/core/JavaVersion.java new file mode 100644 index 0000000..43d52cc --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/JavaVersion.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +public enum JavaVersion { + // Note: order is important, + v1_8, + other; + + private static final JavaVersion RUNTIME_VERSION = from(System.getProperty("java.version")); + + /** + * Returns enum value that represents current runtime. For instance, when using -jre7.jar via Java + * 8, this would return v18 + * + * @return enum value that represents current runtime. + */ + public static JavaVersion getRuntimeVersion() { + return RUNTIME_VERSION; + } + + /** + * Java version string like in {@code "java.version"} property. 
+ * + * @param version string like 1.6, 1.7, etc + * @return JavaVersion enum + */ + public static JavaVersion from(String version) { + if (version.startsWith("1.8")) { + return v1_8; + } + return other; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/JdbcCallParseInfo.java b/pgjdbc/src/main/java/org/postgresql/core/JdbcCallParseInfo.java new file mode 100644 index 0000000..d7f7028 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/JdbcCallParseInfo.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2015, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +/** + * Contains parse flags from {@link Parser#modifyJdbcCall(String, boolean, int, int, EscapeSyntaxCallMode)}. + */ +public class JdbcCallParseInfo { + private final String sql; + private final boolean isFunction; + + public JdbcCallParseInfo(String sql, boolean isFunction) { + this.sql = sql; + this.isFunction = isFunction; + } + + /** + * SQL in a native for certain backend version. + * + * @return SQL in a native for certain backend version + */ + public String getSql() { + return sql; + } + + /** + * Returns if given SQL is a function. + * + * @return {@code true} if given SQL is a function + */ + public boolean isFunction() { + return isFunction; + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/NativeQuery.java b/pgjdbc/src/main/java/org/postgresql/core/NativeQuery.java new file mode 100644 index 0000000..3e56cc6 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/NativeQuery.java @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core; + +/** + * Represents a query that is ready for execution by backend. The main difference from JDBC is ? are + * replaced with $1, $2, etc. 
+ */ +public class NativeQuery { + private static final String[] BIND_NAMES = new String[128 * 10]; + private static final int[] NO_BINDS = new int[0]; + + public final String nativeSql; + public final int[] bindPositions; + public final SqlCommand command; + public final boolean multiStatement; + + static { + for (int i = 1; i < BIND_NAMES.length; i++) { + BIND_NAMES[i] = "$" + i; + } + } + + public NativeQuery(String nativeSql, SqlCommand dml) { + this(nativeSql, NO_BINDS, true, dml); + } + + public NativeQuery(String nativeSql, int [] bindPositions, boolean multiStatement, SqlCommand dml) { + this.nativeSql = nativeSql; + this.bindPositions = + bindPositions == null || bindPositions.length == 0 ? NO_BINDS : bindPositions; + this.multiStatement = multiStatement; + this.command = dml; + } + + /** + * Stringize this query to a human-readable form, substituting particular parameter values for + * parameter placeholders. + * + * @param parameters a ParameterList returned by this Query's {@link Query#createParameterList} + * method, or {@code null} to leave the parameter placeholders unsubstituted. + * @return a human-readable representation of this query + */ + public String toString(ParameterList parameters) { + if (bindPositions.length == 0) { + return nativeSql; + } + + int queryLength = nativeSql.length(); + String[] params = new String[bindPositions.length]; + for (int i = 1; i <= bindPositions.length; i++) { + String param = parameters == null ? "?" : parameters.toString(i, true); + params[i - 1] = param; + queryLength += param.length() - bindName(i).length(); + } + + StringBuilder sbuf = new StringBuilder(queryLength); + sbuf.append(nativeSql, 0, bindPositions[0]); + for (int i = 1; i <= bindPositions.length; i++) { + sbuf.append(params[i - 1]); + int nextBind = i < bindPositions.length ? 
bindPositions[i] : nativeSql.length(); + sbuf.append(nativeSql, bindPositions[i - 1] + bindName(i).length(), nextBind); + } + return sbuf.toString(); + } + + /** + * Returns $1, $2, etc names of bind variables used by backend. + * + * @param index index of a bind variable + * @return bind variable name + */ + public static String bindName(int index) { + return index < BIND_NAMES.length ? BIND_NAMES[index] : "$" + index; + } + + public static StringBuilder appendBindName(StringBuilder sb, int index) { + if (index < BIND_NAMES.length) { + return sb.append(bindName(index)); + } + sb.append('$'); + sb.append(index); + return sb; + } + + /** + * Calculate the text length required for the given number of bind variables + * including dollars. + * Do this to avoid repeated calls to + * AbstractStringBuilder.expandCapacity(...) and Arrays.copyOf + * + * @param bindCount total number of parameters in a query + * @return int total character length for $xyz kind of binds + */ + public static int calculateBindLength(int bindCount) { + int res = 0; + int bindLen = 2; // $1 + int maxBindsOfLen = 9; // $0 .. $9 + while (bindCount > 0) { + int numBinds = Math.min(maxBindsOfLen, bindCount); + bindCount -= numBinds; + res += bindLen * numBinds; + bindLen++; + maxBindsOfLen *= 10; // $0..$9 (9 items) -> $10..$99 (90 items) + } + return res; + } + + public SqlCommand getCommand() { + return command; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/Notification.java b/pgjdbc/src/main/java/org/postgresql/core/Notification.java new file mode 100644 index 0000000..793a274 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/Notification.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import org.postgresql.PGNotification; + +public class Notification implements PGNotification { + + private final String name; + private final String parameter; + private final int pid; + + public Notification(String name, int pid) { + this(name, pid, ""); + } + + public Notification(String name, int pid, String parameter) { + this.name = name; + this.pid = pid; + this.parameter = parameter; + } + + /* + * Returns name of this notification + */ + @Override + public String getName() { + return name; + } + + /* + * Returns the process id of the backend process making this notification + */ + @Override + public int getPID() { + return pid; + } + + @Override + public String getParameter() { + return parameter; + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/Oid.java b/pgjdbc/src/main/java/org/postgresql/core/Oid.java new file mode 100644 index 0000000..9fbd267 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/Oid.java @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.lang.reflect.Field; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * Provides constants for well-known backend OIDs for the types we commonly use. 
+ */ +public class Oid { + public static final int UNSPECIFIED = 0; + public static final int INT2 = 21; + public static final int INT2_ARRAY = 1005; + public static final int INT4 = 23; + public static final int INT4_ARRAY = 1007; + public static final int INT8 = 20; + public static final int INT8_ARRAY = 1016; + public static final int TEXT = 25; + public static final int TEXT_ARRAY = 1009; + public static final int NUMERIC = 1700; + public static final int NUMERIC_ARRAY = 1231; + public static final int FLOAT4 = 700; + public static final int FLOAT4_ARRAY = 1021; + public static final int FLOAT8 = 701; + public static final int FLOAT8_ARRAY = 1022; + public static final int BOOL = 16; + public static final int BOOL_ARRAY = 1000; + public static final int DATE = 1082; + public static final int DATE_ARRAY = 1182; + public static final int TIME = 1083; + public static final int TIME_ARRAY = 1183; + public static final int TIMETZ = 1266; + public static final int TIMETZ_ARRAY = 1270; + public static final int TIMESTAMP = 1114; + public static final int TIMESTAMP_ARRAY = 1115; + public static final int TIMESTAMPTZ = 1184; + public static final int TIMESTAMPTZ_ARRAY = 1185; + public static final int BYTEA = 17; + public static final int BYTEA_ARRAY = 1001; + public static final int VARCHAR = 1043; + public static final int VARCHAR_ARRAY = 1015; + public static final int OID = 26; + public static final int OID_ARRAY = 1028; + public static final int BPCHAR = 1042; + public static final int BPCHAR_ARRAY = 1014; + public static final int MONEY = 790; + public static final int MONEY_ARRAY = 791; + public static final int NAME = 19; + public static final int NAME_ARRAY = 1003; + public static final int BIT = 1560; + public static final int BIT_ARRAY = 1561; + public static final int VOID = 2278; + public static final int INTERVAL = 1186; + public static final int INTERVAL_ARRAY = 1187; + public static final int CHAR = 18; // This is not char(N), this is "char" a single 
byte type. + public static final int CHAR_ARRAY = 1002; + public static final int VARBIT = 1562; + public static final int VARBIT_ARRAY = 1563; + public static final int UUID = 2950; + public static final int UUID_ARRAY = 2951; + public static final int XML = 142; + public static final int XML_ARRAY = 143; + public static final int POINT = 600; + public static final int POINT_ARRAY = 1017; + public static final int BOX = 603; + public static final int BOX_ARRAY = 1020; + public static final int JSONB = 3802; + public static final int JSONB_ARRAY = 3807; + public static final int JSON = 114; + public static final int JSON_ARRAY = 199; + public static final int REF_CURSOR = 1790; + public static final int REF_CURSOR_ARRAY = 2201; + public static final int LINE = 628; + public static final int LSEG = 601; + public static final int PATH = 602; + public static final int POLYGON = 604; + public static final int CIRCLE = 718; + public static final int CIDR = 650; + public static final int INET = 869; + public static final int MACADDR = 829; + public static final int MACADDR8 = 774; + public static final int TSVECTOR = 3614; + public static final int TSQUERY = 3615; + + private static final Map OID_TO_NAME = new HashMap<>(100); + private static final Map NAME_TO_OID = new HashMap<>(100); + + static { + for (Field field : Oid.class.getFields()) { + try { + int oid = field.getInt(null); + String name = field.getName().toUpperCase(Locale.ROOT); + OID_TO_NAME.put(oid, name); + NAME_TO_OID.put(name, oid); + } catch (IllegalAccessException e) { + // ignore + } + } + } + + public Oid() { + } + + /** + * Returns the name of the oid as string. + * + * @param oid The oid to convert to name. + * @return The name of the oid or {@code ""} if oid no constant for oid value has been + * defined. 
+ */ + public static String toString(int oid) { + String name = OID_TO_NAME.get(oid); + if (name == null) { + name = ""; + } + return name; + } + + public static int valueOf(String oid) throws PSQLException { + if (oid.length() > 0 && !Character.isDigit(oid.charAt(0))) { + Integer id = NAME_TO_OID.get(oid); + if (id == null) { + id = NAME_TO_OID.get(oid.toUpperCase(Locale.ROOT)); + } + if (id != null) { + return id; + } + } else { + try { + // OID are unsigned 32bit integers, so Integer.parseInt is not enough + return (int) Long.parseLong(oid); + } catch (NumberFormatException ex) { + } + } + throw new PSQLException(GT.tr("oid type {0} not known and not a number", oid), + PSQLState.INVALID_PARAMETER_VALUE); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/PGBindException.java b/pgjdbc/src/main/java/org/postgresql/core/PGBindException.java new file mode 100644 index 0000000..1a91133 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/PGBindException.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import java.io.IOException; + +@SuppressWarnings("serial") +public class PGBindException extends IOException { + + private final IOException ioe; + + public PGBindException(IOException ioe) { + this.ioe = ioe; + } + + public IOException getIOException() { + return ioe; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/PGStream.java b/pgjdbc/src/main/java/org/postgresql/core/PGStream.java new file mode 100644 index 0000000..b914b07 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/PGStream.java @@ -0,0 +1,846 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import org.postgresql.gss.GSSInputStream; +import org.postgresql.gss.GSSOutputStream; +import org.postgresql.util.ByteStreamWriter; +import org.postgresql.util.GT; +import org.postgresql.util.HostSpec; +import org.postgresql.util.PGPropertyMaxResultBufferParser; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import org.ietf.jgss.GSSContext; +import org.ietf.jgss.MessageProp; + +import java.io.BufferedOutputStream; +import java.io.Closeable; +import java.io.EOFException; +import java.io.FilterOutputStream; +import java.io.Flushable; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.Writer; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketException; +import java.net.SocketTimeoutException; +import java.sql.SQLException; + +import javax.net.SocketFactory; + +/** + *
 * <p>Wrapper around the raw connection to the server that implements some basic primitives
 * (reading/writing formatted data, doing string encoding, etc).</p>
 *
 * <p>In general, instances of PGStream are not threadsafe; the caller must ensure that only one
 * thread at a time is accessing a particular PGStream instance.</p>
+ */ +public class PGStream implements Closeable, Flushable { + private final SocketFactory socketFactory; + private final HostSpec hostSpec; + + private final byte[] int4Buf; + private final byte[] int2Buf; + + private Socket connection; + private VisibleBufferedInputStream pgInput; + private OutputStream pgOutput; + private byte [] streamBuffer; + + public boolean isGssEncrypted() { + return gssEncrypted; + } + + boolean gssEncrypted; + + public void setSecContext(GSSContext secContext) { + MessageProp messageProp = new MessageProp(0, true); + pgInput = new VisibleBufferedInputStream(new GSSInputStream(pgInput.getWrapped(), secContext, messageProp ), 8192); + pgOutput = new GSSOutputStream(pgOutput, secContext, messageProp, 16384); + gssEncrypted = true; + + } + + private long nextStreamAvailableCheckTime; + // This is a workaround for SSL sockets: sslInputStream.available() might return 0 + // so we perform "1ms reads" once in a while + private int minStreamAvailableCheckDelay = 1000; + + private Encoding encoding; + private Writer encodingWriter; + + private long maxResultBuffer = -1; + private long resultBufferByteCount; + + private int maxRowSizeBytes = -1; + + /** + * Constructor: Connect to the PostgreSQL back end and return a stream connection. + * + * @param socketFactory socket factory to use when creating sockets + * @param hostSpec the host and port to connect to + * @param timeout timeout in milliseconds, or 0 if no timeout set + * @throws IOException if an IOException occurs below it. 
+ */ + @SuppressWarnings("this-escape") + public PGStream(SocketFactory socketFactory, HostSpec hostSpec, int timeout) throws IOException { + this.socketFactory = socketFactory; + this.hostSpec = hostSpec; + + Socket socket = createSocket(timeout); + changeSocket(socket); + setEncoding(Encoding.getJVMEncoding("UTF-8")); + + int2Buf = new byte[2]; + int4Buf = new byte[4]; + } + + @SuppressWarnings("this-escape") + public PGStream(PGStream pgStream, int timeout) throws IOException { + + /* + Some defaults + */ + int sendBufferSize = 1024; + int receiveBufferSize = 1024; + int soTimeout = 0; + boolean keepAlive = false; + boolean tcpNoDelay = true; + + /* + Get the existing values before closing the stream + */ + try { + sendBufferSize = pgStream.getSocket().getSendBufferSize(); + receiveBufferSize = pgStream.getSocket().getReceiveBufferSize(); + soTimeout = pgStream.getSocket().getSoTimeout(); + keepAlive = pgStream.getSocket().getKeepAlive(); + tcpNoDelay = pgStream.getSocket().getTcpNoDelay(); + + } catch ( SocketException ex ) { + // ignore it + } + //close the existing stream + pgStream.close(); + + this.socketFactory = pgStream.socketFactory; + this.hostSpec = pgStream.hostSpec; + + Socket socket = createSocket(timeout); + changeSocket(socket); + setEncoding(Encoding.getJVMEncoding("UTF-8")); + // set the buffer sizes and timeout + socket.setReceiveBufferSize(receiveBufferSize); + socket.setSendBufferSize(sendBufferSize); + setNetworkTimeout(soTimeout); + socket.setKeepAlive(keepAlive); + socket.setTcpNoDelay(tcpNoDelay); + + int2Buf = new byte[2]; + int4Buf = new byte[4]; + + } + + /** + * Constructor: Connect to the PostgreSQL back end and return a stream connection. + * + * @param socketFactory socket factory + * @param hostSpec the host and port to connect to + * @throws IOException if an IOException occurs below it. 
+ * @deprecated use {@link #PGStream(SocketFactory, org.postgresql.util.HostSpec, int)} + */ + @Deprecated + public PGStream(SocketFactory socketFactory, HostSpec hostSpec) throws IOException { + this(socketFactory, hostSpec, 0); + } + + public HostSpec getHostSpec() { + return hostSpec; + } + + public Socket getSocket() { + return connection; + } + + public SocketFactory getSocketFactory() { + return socketFactory; + } + + /** + * Check for pending backend messages without blocking. Might return false when there actually are + * messages waiting, depending on the characteristics of the underlying socket. This is used to + * detect asynchronous notifies from the backend, when available. + * + * @return true if there is a pending backend message + * @throws IOException if something wrong happens + */ + public boolean hasMessagePending() throws IOException { + + boolean available = false; + + // In certain cases, available returns 0, yet there are bytes + if (pgInput.available() > 0) { + return true; + } + long now = System.nanoTime() / 1000000; + + if (now < nextStreamAvailableCheckTime && minStreamAvailableCheckDelay != 0) { + // Do not use ".peek" too often + return false; + } + + int soTimeout = getNetworkTimeout(); + connection.setSoTimeout(1); + try { + if (!pgInput.ensureBytes(1, false)) { + return false; + } + available = pgInput.peek() != -1; + } catch (SocketTimeoutException e) { + return false; + } finally { + connection.setSoTimeout(soTimeout); + } + + /* + If none available then set the next check time + In the event that there more async bytes available we will continue to get them all + see issue 1547 https://github.com/pgjdbc/pgjdbc/issues/1547 + */ + if (!available) { + nextStreamAvailableCheckTime = now + minStreamAvailableCheckDelay; + } + return available; + } + + public void setMinStreamAvailableCheckDelay(int delay) { + this.minStreamAvailableCheckDelay = delay; + } + + private Socket createSocket(int timeout) throws IOException { + Socket 
socket = null; + try { + socket = socketFactory.createSocket(); + String localSocketAddress = hostSpec.getLocalSocketAddress(); + if (localSocketAddress != null) { + socket.bind(new InetSocketAddress(InetAddress.getByName(localSocketAddress), 0)); + } + if (!socket.isConnected()) { + // When using a SOCKS proxy, the host might not be resolvable locally, + // thus we defer resolution until the traffic reaches the proxy. If there + // is no proxy, we must resolve the host to an IP to connect the socket. + InetSocketAddress address = hostSpec.shouldResolve() + ? new InetSocketAddress(hostSpec.getHost(), hostSpec.getPort()) + : InetSocketAddress.createUnresolved(hostSpec.getHost(), hostSpec.getPort()); + socket.connect(address, timeout); + } + return socket; + } catch ( Exception ex ) { + if (socket != null) { + try { + socket.close(); + } catch ( Exception ex1 ) { + ex.addSuppressed(ex1); + } + } + throw ex; + } + } + + /** + * Switch this stream to using a new socket. Any existing socket is not closed; it's + * assumed that we are changing to a new socket that delegates to the original socket (e.g. SSL). + * + * @param socket the new socket to change to + * @throws IOException if something goes wrong + */ + public void changeSocket(Socket socket) throws IOException { + assert connection != socket : "changeSocket is called with the current socket as argument." + + " This is a no-op, however, it re-allocates buffered streams, so refrain from" + + " excessive changeSocket calls"; + + this.connection = socket; + + // Submitted by Jason Venner . Disable Nagle + // as we are selective about flushing output only when we + // really need to. 
+ connection.setTcpNoDelay(true); + + // Buffer sizes submitted by Sverre H Huseby + pgInput = new VisibleBufferedInputStream(connection.getInputStream(), 8192); + pgOutput = new BufferedOutputStream(connection.getOutputStream(), 8192); + + if (encoding != null) { + setEncoding(encoding); + } + } + + public Encoding getEncoding() { + return encoding; + } + + /** + * Change the encoding used by this connection. + * + * @param encoding the new encoding to use + * @throws IOException if something goes wrong + */ + public void setEncoding(Encoding encoding) throws IOException { + if (this.encoding != null && this.encoding.name().equals(encoding.name())) { + return; + } + // Close down any old writer. + if (encodingWriter != null) { + encodingWriter.close(); + } + + this.encoding = encoding; + + // Intercept flush() downcalls from the writer; our caller + // will call PGStream.flush() as needed. + OutputStream interceptor = new FilterOutputStream(pgOutput) { + @Override + public void flush() throws IOException { + } + + @Override + public void close() throws IOException { + super.flush(); + } + }; + + encodingWriter = encoding.getEncodingWriter(interceptor); + } + + /** + *
 * <p>Get a Writer instance that encodes directly onto the underlying stream.</p>
 *
 * <p>The returned Writer should not be closed, as it's a shared object. Writer.flush needs to be
 * called when switching between use of the Writer and use of the PGStream write methods, but it
 * won't actually flush output all the way out -- call {@link #flush} to actually ensure all
 * output has been pushed to the server.</p>
+ * + * @return the shared Writer instance + * @throws IOException if something goes wrong. + */ + public Writer getEncodingWriter() throws IOException { + if (encodingWriter == null) { + throw new IOException("No encoding has been set on this connection"); + } + return encodingWriter; + } + + /** + * Sends a single character to the back end. + * + * @param val the character to be sent + * @throws IOException if an I/O error occurs + */ + public void sendChar(int val) throws IOException { + pgOutput.write(val); + } + + /** + * Sends a 4-byte integer to the back end. + * + * @param val the integer to be sent + * @throws IOException if an I/O error occurs + */ + public void sendInteger4(int val) throws IOException { + int4Buf[0] = (byte) (val >>> 24); + int4Buf[1] = (byte) (val >>> 16); + int4Buf[2] = (byte) (val >>> 8); + int4Buf[3] = (byte) (val); + pgOutput.write(int4Buf); + } + + /** + * Sends a 2-byte integer (short) to the back end. + * + * @param val the integer to be sent + * @throws IOException if an I/O error occurs or {@code val} cannot be encoded in 2 bytes + */ + public void sendInteger2(int val) throws IOException { + if (val < 0 || val > 65535) { + throw new IllegalArgumentException("Tried to send an out-of-range integer as a 2-byte unsigned int value: " + val); + } + int2Buf[0] = (byte) (val >>> 8); + int2Buf[1] = (byte) val; + pgOutput.write(int2Buf); + } + + /** + * Send an array of bytes to the backend. + * + * @param buf The array of bytes to be sent + * @throws IOException if an I/O error occurs + */ + public void send(byte[] buf) throws IOException { + pgOutput.write(buf); + } + + /** + * Send a fixed-size array of bytes to the backend. If {@code buf.length < siz}, pad with zeros. + * If {@code buf.length > siz}, truncate the array. 
+ * + * @param buf the array of bytes to be sent + * @param siz the number of bytes to be sent + * @throws IOException if an I/O error occurs + */ + public void send(byte[] buf, int siz) throws IOException { + send(buf, 0, siz); + } + + /** + * Send a fixed-size array of bytes to the backend. If {@code length < siz}, pad with zeros. If + * {@code length > siz}, truncate the array. + * + * @param buf the array of bytes to be sent + * @param off offset in the array to start sending from + * @param siz the number of bytes to be sent + * @throws IOException if an I/O error occurs + */ + public void send(byte[] buf, int off, int siz) throws IOException { + int bufamt = buf.length - off; + pgOutput.write(buf, off, bufamt < siz ? bufamt : siz); + for (int i = bufamt; i < siz; i++) { + pgOutput.write(0); + } + } + + /** + * Send a fixed-size array of bytes to the backend. If {@code length < siz}, pad with zeros. If + * {@code length > siz}, truncate the array. + * + * @param writer the stream writer to invoke to send the bytes + * @throws IOException if an I/O error occurs + */ + public void send(ByteStreamWriter writer) throws IOException { + final FixedLengthOutputStream fixedLengthStream = new FixedLengthOutputStream(writer.getLength(), pgOutput); + try { + writer.writeTo(new ByteStreamWriter.ByteStreamTarget() { + @Override + public OutputStream getOutputStream() { + return fixedLengthStream; + } + }); + } catch (IOException ioe) { + throw ioe; + } catch (Exception re) { + throw new IOException("Error writing bytes to stream", re); + } + for (int i = fixedLengthStream.remaining(); i > 0; i--) { + pgOutput.write(0); + } + } + + /** + * Receives a single character from the backend, without advancing the current protocol stream + * position. 
+ * + * @return the character received + * @throws IOException if an I/O Error occurs + */ + public int peekChar() throws IOException { + int c = pgInput.peek(); + if (c < 0) { + throw new EOFException(); + } + return c; + } + + /** + * Receives a single character from the backend. + * + * @return the character received + * @throws IOException if an I/O Error occurs + */ + public int receiveChar() throws IOException { + int c = pgInput.read(); + if (c < 0) { + throw new EOFException(); + } + return c; + } + + /** + * Receives a four byte integer from the backend. + * + * @return the integer received from the backend + * @throws IOException if an I/O error occurs + */ + public int receiveInteger4() throws IOException { + if (pgInput.read(int4Buf) != 4) { + throw new EOFException(); + } + + return (int4Buf[0] & 0xFF) << 24 | (int4Buf[1] & 0xFF) << 16 | (int4Buf[2] & 0xFF) << 8 + | int4Buf[3] & 0xFF; + } + + /** + * Receives a two byte integer from the backend. + * + * @return the integer received from the backend + * @throws IOException if an I/O error occurs + */ + public int receiveInteger2() throws IOException { + if (pgInput.read(int2Buf) != 2) { + throw new EOFException(); + } + + return (int2Buf[0] & 0xFF) << 8 | int2Buf[1] & 0xFF; + } + + /** + * Receives a fixed-size string from the backend. + * + * @param len the length of the string to receive, in bytes. + * @return the decoded string + * @throws IOException if something wrong happens + */ + public String receiveString(int len) throws IOException { + if (!pgInput.ensureBytes(len)) { + throw new EOFException(); + } + + String res = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len); + pgInput.skip(len); + return res; + } + + /** + * Receives a fixed-size string from the backend, and tries to avoid "UTF-8 decode failed" + * errors. + * + * @param len the length of the string to receive, in bytes. 
+ * @return the decoded string + * @throws IOException if something wrong happens + */ + public EncodingPredictor.DecodeResult receiveErrorString(int len) throws IOException { + if (!pgInput.ensureBytes(len)) { + throw new EOFException(); + } + + EncodingPredictor.DecodeResult res; + try { + String value = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len); + // no autodetect warning as the message was converted on its own + res = new EncodingPredictor.DecodeResult(value, null); + } catch (IOException e) { + res = EncodingPredictor.decode(pgInput.getBuffer(), pgInput.getIndex(), len); + if (res == null) { + Encoding enc = Encoding.defaultEncoding(); + String value = enc.decode(pgInput.getBuffer(), pgInput.getIndex(), len); + res = new EncodingPredictor.DecodeResult(value, enc.name()); + } + } + pgInput.skip(len); + return res; + } + + /** + * Receives a null-terminated string from the backend. If we don't see a null, then we assume + * something has gone wrong. + * + * @return string from back end + * @throws IOException if an I/O error occurs, or end of file + */ + public String receiveString() throws IOException { + int len = pgInput.scanCStringLength(); + String res = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len - 1); + pgInput.skip(len); + return res; + } + + /** + * Receives a null-terminated string from the backend and attempts to decode to a + * {@link Encoding#decodeCanonicalized(byte[], int, int) canonical} {@code String}. + * If we don't see a null, then we assume something has gone wrong. 
+ * + * @return string from back end + * @throws IOException if an I/O error occurs, or end of file + * @see Encoding#decodeCanonicalized(byte[], int, int) + */ + public String receiveCanonicalString() throws IOException { + int len = pgInput.scanCStringLength(); + String res = encoding.decodeCanonicalized(pgInput.getBuffer(), pgInput.getIndex(), len - 1); + pgInput.skip(len); + return res; + } + + /** + * Receives a null-terminated string from the backend and attempts to decode to a + * {@link Encoding#decodeCanonicalizedIfPresent(byte[], int, int) canonical} {@code String}. + * If we don't see a null, then we assume something has gone wrong. + * + * @return string from back end + * @throws IOException if an I/O error occurs, or end of file + * @see Encoding#decodeCanonicalizedIfPresent(byte[], int, int) + */ + public String receiveCanonicalStringIfPresent() throws IOException { + int len = pgInput.scanCStringLength(); + String res = encoding.decodeCanonicalizedIfPresent(pgInput.getBuffer(), pgInput.getIndex(), len - 1); + pgInput.skip(len); + return res; + } + + /** + * Read a tuple from the back end. A tuple is a two dimensional array of bytes. This variant reads + * the V3 protocol's tuple representation. 
+ * + * @return tuple from the back end + * @throws IOException if a data I/O error occurs + * @throws SQLException if read more bytes than set maxResultBuffer + */ + public Tuple receiveTupleV3() throws IOException, OutOfMemoryError, SQLException { + int messageSize = receiveInteger4(); // MESSAGE SIZE + int nf = receiveInteger2(); + //size = messageSize - 4 bytes of message size - 2 bytes of field count - 4 bytes for each column length + int dataToReadSize = messageSize - 4 - 2 - 4 * nf; + setMaxRowSizeBytes(dataToReadSize); + + byte[][] answer = new byte[nf][]; + + increaseByteCounter(dataToReadSize); + OutOfMemoryError oom = null; + for (int i = 0; i < nf; i++) { + int size = receiveInteger4(); + if (size != -1) { + try { + answer[i] = new byte[size]; + receive(answer[i], 0, size); + } catch (OutOfMemoryError oome) { + oom = oome; + skip(size); + } + } + } + + if (oom != null) { + throw oom; + } + + return new Tuple(answer); + } + + /** + * Reads in a given number of bytes from the backend. + * + * @param siz number of bytes to read + * @return array of bytes received + * @throws IOException if a data I/O error occurs + */ + public byte[] receive(int siz) throws IOException { + byte[] answer = new byte[siz]; + receive(answer, 0, siz); + return answer; + } + + /** + * Reads in a given number of bytes from the backend. + * + * @param buf buffer to store result + * @param off offset in buffer + * @param siz number of bytes to read + * @throws IOException if a data I/O error occurs + */ + public void receive(byte[] buf, int off, int siz) throws IOException { + int s = 0; + + while (s < siz) { + int w = pgInput.read(buf, off + s, siz - s); + if (w < 0) { + throw new EOFException(); + } + s += w; + } + } + + public void skip(int size) throws IOException { + long s = 0; + while (s < size) { + s += pgInput.skip(size - s); + } + } + + /** + * Copy data from an input stream to the connection. 
+ * + * @param inStream the stream to read data from + * @param remaining the number of bytes to copy + * @throws IOException if a data I/O error occurs + */ + public void sendStream(InputStream inStream, int remaining) throws IOException { + int expectedLength = remaining; + byte[] streamBuffer = this.streamBuffer; + if (streamBuffer == null) { + this.streamBuffer = streamBuffer = new byte[8192]; + } + + while (remaining > 0) { + int count = remaining > streamBuffer.length ? streamBuffer.length : remaining; + int readCount; + + try { + readCount = inStream.read(streamBuffer, 0, count); + if (readCount < 0) { + throw new EOFException( + GT.tr("Premature end of input stream, expected {0} bytes, but only read {1}.", + expectedLength, expectedLength - remaining)); + } + } catch (IOException ioe) { + while (remaining > 0) { + send(streamBuffer, count); + remaining -= count; + count = remaining > streamBuffer.length ? streamBuffer.length : remaining; + } + throw new PGBindException(ioe); + } + + send(streamBuffer, readCount); + remaining -= readCount; + } + } + + /** + * Flush any pending output to the backend. + * + * @throws IOException if an I/O error occurs + */ + @Override + public void flush() throws IOException { + if (encodingWriter != null) { + encodingWriter.flush(); + } + pgOutput.flush(); + } + + /** + * Consume an expected EOF from the backend. + * + * @throws IOException if an I/O error occurs + * @throws SQLException if we get something other than an EOF + */ + public void receiveEOF() throws SQLException, IOException { + int c = pgInput.read(); + if (c < 0) { + return; + } + throw new PSQLException(GT.tr("Expected an EOF from server, got: {0}", c), + PSQLState.COMMUNICATION_ERROR); + } + + /** + * Closes the connection. 
+ * + * @throws IOException if an I/O Error occurs + */ + @Override + public void close() throws IOException { + if (encodingWriter != null) { + encodingWriter.close(); + } + + pgOutput.close(); + pgInput.close(); + connection.close(); + } + + public void setNetworkTimeout(int milliseconds) throws IOException { + connection.setSoTimeout(milliseconds); + pgInput.setTimeoutRequested(milliseconds != 0); + } + + public int getNetworkTimeout() throws IOException { + return connection.getSoTimeout(); + } + + /** + * Method to set MaxResultBuffer inside PGStream. + * + * @param value value of new max result buffer as string (cause we can expect % or chars to use + * multiplier) + * @throws PSQLException exception returned when occurred parsing problem. + */ + public void setMaxResultBuffer(String value) throws PSQLException { + maxResultBuffer = PGPropertyMaxResultBufferParser.parseProperty(value); + } + + /** + * Get MaxResultBuffer from PGStream. + * + * @return size of MaxResultBuffer + */ + public long getMaxResultBuffer() { + return maxResultBuffer; + } + + /** + * The idea behind this method is to keep in maxRowSize the size of biggest read data row. As + * there may be many data rows send after each other for a query, then value in maxRowSize would + * contain value noticed so far, because next data rows and their sizes are not read for that + * moment. We want it increasing, because the size of the biggest among data rows will be used + * during computing new adaptive fetch size for the query. + * + * @param rowSizeBytes new value to be set as maxRowSizeBytes + */ + public void setMaxRowSizeBytes(int rowSizeBytes) { + if (rowSizeBytes > maxRowSizeBytes) { + maxRowSizeBytes = rowSizeBytes; + } + } + + /** + * Get actual max row size noticed so far. + * + * @return value of max row size + */ + public int getMaxRowSizeBytes() { + return maxRowSizeBytes; + } + + /** + * Clear value of max row size noticed so far. 
+ */ + public void clearMaxRowSizeBytes() { + maxRowSizeBytes = -1; + } + + /** + * Clear count of byte buffer. + */ + public void clearResultBufferCount() { + resultBufferByteCount = 0; + } + + /** + * Increase actual count of buffer. If buffer count is bigger than max result buffer limit, then + * gonna return an exception. + * + * @param value size of bytes to add to byte buffer. + * @throws SQLException exception returned when result buffer count is bigger than max result + * buffer. + */ + private void increaseByteCounter(long value) throws SQLException { + if (maxResultBuffer != -1) { + resultBufferByteCount += value; + if (resultBufferByteCount > maxResultBuffer) { + throw new PSQLException(GT.tr( + "Result set exceeded maxResultBuffer limit. Received: {0}; Current limit: {1}", + String.valueOf(resultBufferByteCount), String.valueOf(maxResultBuffer)), PSQLState.COMMUNICATION_ERROR); + } + } + } + + public boolean isClosed() { + return connection.isClosed(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/ParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/ParameterList.java new file mode 100644 index 0000000..5288184 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/ParameterList.java @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core; + +import org.postgresql.util.ByteStreamWriter; + +import java.io.InputStream; +import java.sql.SQLException; + +/** + *

Abstraction of a list of parameters to be substituted into a Query. The protocol-specific details + * of how to efficiently store and stream the parameters is hidden behind implementations of this + * interface.

+ * + *

In general, instances of ParameterList are associated with a particular Query object (the one + * that created them) and shouldn't be used against another Query.

+ * + *

Parameter indexes are 1-based to match JDBC's PreparedStatement, i.e. the first parameter has + * index 1.

+ * + * @author Oliver Jowett (oliver@opencloud.com) + */ +public interface ParameterList { + void registerOutParameter(int index, int sqlType) throws SQLException; + + /** + * Get the number of parameters in this list. This value never changes for a particular instance, + * and might be zero. + * + * @return the number of parameters in this list. + */ + int getParameterCount(); + + /** + * Get the number of IN parameters in this list. + * + * @return the number of IN parameters in this list + */ + int getInParameterCount(); + + /** + * Get the number of OUT parameters in this list. + * + * @return the number of OUT parameters in this list + */ + int getOutParameterCount(); + + /** + * Return the oids of the parameters in this list. May be null for a ParameterList that does not + * support typing of parameters. + * + * @return oids of the parameters + */ + int[] getTypeOIDs(); + + /** + * Binds an integer value to a parameter. The type of the parameter is implicitly 'int4'. + * + * @param index the 1-based parameter index to bind. + * @param value the integer value to use. + * @throws SQLException on error or if index is out of range + */ + void setIntParameter(int index, int value) throws SQLException; + + /** + * Binds a String value that is an unquoted literal to the server's query parser (for example, a + * bare integer) to a parameter. Associated with the parameter is a typename for the parameter + * that should correspond to an entry in pg_types. + * + * @param index the 1-based parameter index to bind. + * @param value the unquoted literal string to use. + * @param oid the type OID of the parameter, or 0 to infer the type. + * @throws SQLException on error or if index is out of range + */ + void setLiteralParameter(int index, + String value, int oid) throws SQLException; + + /** + * Binds a String value that needs to be quoted for the server's parser to understand (for + * example, a timestamp) to a parameter. 
Associated with the parameter is a typename for the + * parameter that should correspond to an entry in pg_types. + * + * @param index the 1-based parameter index to bind. + * @param value the quoted string to use. + * @param oid the type OID of the parameter, or 0 to infer the type. + * @throws SQLException on error or if index is out of range + */ + void setStringParameter(int index, String value, int oid) throws SQLException; + + /** + * Binds a binary bytea value stored as a bytearray to a parameter. The parameter's type is + * implicitly set to 'bytea'. The bytearray's contains should remain unchanged until query + * execution has completed. + * + * @param index the 1-based parameter index to bind. + * @param data an array containing the raw data value + * @param offset the offset within data of the start of the parameter data. + * @param length the number of bytes of parameter data within data to use. + * @throws SQLException on error or if index is out of range + */ + void setBytea(int index, byte[] data, + int offset, int length) throws SQLException; + + /** + * Binds a binary bytea value stored as an InputStream. The parameter's type is implicitly set to + * 'bytea'. The stream should remain valid until query execution has completed. + * + * @param index the 1-based parameter index to bind. + * @param stream a stream containing the parameter data. + * @param length the number of bytes of parameter data to read from stream. + * @throws SQLException on error or if index is out of range + */ + void setBytea(int index, InputStream stream, int length) throws SQLException; + + /** + * Binds a binary bytea value stored as an InputStream. The parameter's type is implicitly set to + * 'bytea'. The stream should remain valid until query execution has completed. + * + * @param index the 1-based parameter index to bind. + * @param stream a stream containing the parameter data. 
+ * @throws SQLException on error or if index is out of range + */ + void setBytea(int index, InputStream stream) throws SQLException; + + /** + * Binds a binary bytea value stored as a ByteStreamWriter. The parameter's type is implicitly set to + * 'bytea'. The stream should remain valid until query execution has completed. + * + * @param index the 1-based parameter index to bind. + * @param writer a writer that can write the bytes for the parameter + * @throws SQLException on error or if index is out of range + */ + void setBytea(int index, ByteStreamWriter writer) throws SQLException; + + /** + * Binds a text value stored as an InputStream that is a valid UTF-8 byte stream. + * Any byte-order marks (BOM) in the stream are passed to the backend. + * The parameter's type is implicitly set to 'text'. + * The stream should remain valid until query execution has completed. + * + * @param index the 1-based parameter index to bind. + * @param stream a stream containing the parameter data. + * @throws SQLException on error or if index is out of range + */ + void setText(int index, InputStream stream) throws SQLException; + + /** + * Binds given byte[] value to a parameter. The bytes must already be in correct format matching + * the OID. + * + * @param index the 1-based parameter index to bind. + * @param value the bytes to send. + * @param oid the type OID of the parameter. + * @throws SQLException on error or if index is out of range + */ + void setBinaryParameter(int index, byte[] value, int oid) throws SQLException; + + /** + * Binds a SQL NULL value to a parameter. Associated with the parameter is a typename for the + * parameter that should correspond to an entry in pg_types. + * + * @param index the 1-based parameter index to bind. + * @param oid the type OID of the parameter, or 0 to infer the type. 
+ * @throws SQLException on error or if index is out of range + */ + void setNull(int index, int oid) throws SQLException; + + /** + * Perform a shallow copy of this ParameterList, returning a new instance (still suitable for + * passing to the owning Query). If this ParameterList is immutable, copy() may return the same + * immutable object. + * + * @return a new ParameterList instance + */ + ParameterList copy(); + + /** + * Unbind all parameter values bound in this list. + */ + void clear(); + + /** + * Return a human-readable representation of a particular parameter in this ParameterList. If the + * parameter is not bound, returns "?". + * + * @param index the 1-based parameter index to bind. + * @param standardConformingStrings true if \ is not an escape character in strings literals + * @return a string representation of the parameter. + */ + String toString(int index, boolean standardConformingStrings); + + /** + * Use this operation to append more parameters to the current list. + * @param list of parameters to append with. + * @throws SQLException fault raised if driver or back end throw an exception + */ + void appendAll(ParameterList list) throws SQLException ; + + /** + * Returns the bound parameter values. + * @return Object array containing the parameter values. + */ + Object [] getValues(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/Parser.java b/pgjdbc/src/main/java/org/postgresql/core/Parser.java new file mode 100644 index 0000000..ba0a105 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/Parser.java @@ -0,0 +1,1581 @@ +/* + * Copyright (c) 2006, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import org.postgresql.jdbc.EscapeSyntaxCallMode; +import org.postgresql.jdbc.EscapedFunctions2; +import org.postgresql.util.GT; +import org.postgresql.util.IntList; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * Basic query parser infrastructure. + * Note: This class should not be considered as pgjdbc public API. + * + * @author Michael Paesold (mpaesold@gmx.at) + * @author Christopher Deckers (chrriis@gmail.com) + */ +public class Parser { + + public Parser() { + } + + /** + * Parses JDBC query into PostgreSQL's native format. Several queries might be given if separated + * by semicolon. + * + * @param query jdbc query to parse + * @param standardConformingStrings whether to allow backslashes to be used as escape characters + * in single quote literals + * @param withParameters whether to replace ?, ? with $1, $2, etc + * @param splitStatements whether to split statements by semicolon + * @param isBatchedReWriteConfigured whether re-write optimization is enabled + * @param quoteReturningIdentifiers whether to quote identifiers returned using returning clause + * @param returningColumnNames for simple insert, update, delete add returning with given column names + * @return list of native queries + * @throws SQLException if unable to add returning clause (invalid column names) + */ + public static List parseJdbcSql(String query, boolean standardConformingStrings, + boolean withParameters, boolean splitStatements, + boolean isBatchedReWriteConfigured, + boolean quoteReturningIdentifiers, + String... 
returningColumnNames) throws SQLException { + if (!withParameters && !splitStatements + && returningColumnNames != null && returningColumnNames.length == 0) { + return Collections.singletonList(new NativeQuery(query, + SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK))); + } + + int fragmentStart = 0; + int inParen = 0; + + char[] aChars = query.toCharArray(); + + StringBuilder nativeSql = new StringBuilder(query.length() + 10); + IntList bindPositions = null; // initialized on demand + List nativeQueries = null; + boolean isCurrentReWriteCompatible = false; + boolean isValuesFound = false; + int valuesParenthesisOpenPosition = -1; + int valuesParenthesisClosePosition = -1; + boolean valuesParenthesisCloseFound = false; + boolean isInsertPresent = false; + boolean isReturningPresent = false; + boolean isReturningPresentPrev = false; + boolean isBeginPresent = false; + boolean isBeginAtomicPresent = false; + SqlCommandType currentCommandType = SqlCommandType.BLANK; + SqlCommandType prevCommandType = SqlCommandType.BLANK; + int numberOfStatements = 0; + + boolean whitespaceOnly = true; + int keyWordCount = 0; + int keywordStart = -1; + int keywordEnd = -1; + /* + loop through looking for keywords, single quotes, double quotes, comments, dollar quotes, + parenthesis, ? and ; + for single/double/dollar quotes, and comments we just want to move the index + */ + for (int i = 0; i < aChars.length; i++) { + char aChar = aChars[i]; + boolean isKeyWordChar = false; + // ';' is ignored as it splits the queries. 
We do have to deal with ; in BEGIN ATOMIC functions + whitespaceOnly &= aChar == ';' || Character.isWhitespace(aChar); + keywordEnd = i; // parseSingleQuotes, parseDoubleQuotes, etc move index so we keep old value + switch (aChar) { + case '\'': // single-quotes + i = Parser.parseSingleQuotes(aChars, i, standardConformingStrings); + break; + + case '"': // double-quotes + i = Parser.parseDoubleQuotes(aChars, i); + break; + + case '-': // possibly -- style comment + i = Parser.parseLineComment(aChars, i); + break; + + case '/': // possibly /* */ style comment + i = Parser.parseBlockComment(aChars, i); + break; + + case '$': // possibly dollar quote start + i = Parser.parseDollarQuotes(aChars, i); + break; + + // case '(' moved below to parse "values(" properly + + case ')': + inParen--; + if (inParen == 0 && isValuesFound && !valuesParenthesisCloseFound) { + // If original statement is multi-values like VALUES (...), (...), ... then + // search for the latest closing paren + valuesParenthesisClosePosition = nativeSql.length() + i - fragmentStart; + } + break; + + case '?': + nativeSql.append(aChars, fragmentStart, i - fragmentStart); + if (i + 1 < aChars.length && aChars[i + 1] == '?') /* replace ?? with ? */ { + nativeSql.append('?'); + i++; // make sure the coming ? 
is not treated as a bind + } else { + if (!withParameters) { + nativeSql.append('?'); + } else { + if (bindPositions == null) { + bindPositions = new IntList(); + } + bindPositions.add(nativeSql.length()); + int bindIndex = bindPositions.size(); + nativeSql.append(NativeQuery.bindName(bindIndex)); + } + } + fragmentStart = i + 1; + break; + + case ';': + // we don't split the queries if BEGIN ATOMIC is present + if (!isBeginAtomicPresent && inParen == 0) { + if (!whitespaceOnly) { + numberOfStatements++; + nativeSql.append(aChars, fragmentStart, i - fragmentStart); + whitespaceOnly = true; + } + fragmentStart = i + 1; + if (nativeSql.length() > 0) { + if (addReturning(nativeSql, currentCommandType, returningColumnNames, isReturningPresent, quoteReturningIdentifiers)) { + isReturningPresent = true; + } + + if (splitStatements) { + if (nativeQueries == null) { + nativeQueries = new ArrayList<>(); + } + + if (!isValuesFound || !isCurrentReWriteCompatible || valuesParenthesisClosePosition == -1 + || (bindPositions != null + && valuesParenthesisClosePosition < bindPositions.get(bindPositions.size() - 1))) { + valuesParenthesisOpenPosition = -1; + valuesParenthesisClosePosition = -1; + } + + nativeQueries.add(new NativeQuery(nativeSql.toString(), + toIntArray(bindPositions), false, + SqlCommand.createStatementTypeInfo( + currentCommandType, isBatchedReWriteConfigured, valuesParenthesisOpenPosition, + valuesParenthesisClosePosition, + isReturningPresent, nativeQueries.size()))); + } + } + prevCommandType = currentCommandType; + isReturningPresentPrev = isReturningPresent; + currentCommandType = SqlCommandType.BLANK; + isReturningPresent = false; + if (splitStatements) { + // Prepare for next query + if (bindPositions != null) { + bindPositions.clear(); + } + nativeSql.setLength(0); + isValuesFound = false; + isCurrentReWriteCompatible = false; + valuesParenthesisOpenPosition = -1; + valuesParenthesisClosePosition = -1; + valuesParenthesisCloseFound = false; + } + } + 
break; + + default: + if (keywordStart >= 0) { + // When we are inside a keyword, we need to detect keyword end boundary + // Note that isKeyWordChar is initialized to false before the switch, so + // all other characters would result in isKeyWordChar=false + isKeyWordChar = isIdentifierContChar(aChar); + break; + } + // Not in keyword, so just detect next keyword start + isKeyWordChar = isIdentifierStartChar(aChar); + if (isKeyWordChar) { + keywordStart = i; + if (valuesParenthesisOpenPosition != -1 && inParen == 0) { + // When the statement already has multi-values, stop looking for more of them + // Since values(?,?),(?,?),... should not contain keywords in the middle + valuesParenthesisCloseFound = true; + } + } + break; + } + if (keywordStart >= 0 && (i == aChars.length - 1 || !isKeyWordChar)) { + int wordLength = (isKeyWordChar ? i + 1 : keywordEnd) - keywordStart; + if (currentCommandType == SqlCommandType.BLANK) { + if (wordLength == 6 && parseCreateKeyword(aChars, keywordStart)) { + currentCommandType = SqlCommandType.CREATE; + } else if (wordLength == 5 && parseAlterKeyword(aChars, keywordStart)) { + currentCommandType = SqlCommandType.ALTER; + } else if (wordLength == 6 && parseUpdateKeyword(aChars, keywordStart)) { + currentCommandType = SqlCommandType.UPDATE; + } else if (wordLength == 6 && parseDeleteKeyword(aChars, keywordStart)) { + currentCommandType = SqlCommandType.DELETE; + } else if (wordLength == 4 && parseMoveKeyword(aChars, keywordStart)) { + currentCommandType = SqlCommandType.MOVE; + } else if (wordLength == 6 && parseSelectKeyword(aChars, keywordStart)) { + currentCommandType = SqlCommandType.SELECT; + } else if (wordLength == 4 && parseWithKeyword(aChars, keywordStart)) { + currentCommandType = SqlCommandType.WITH; + } else if (wordLength == 6 && parseInsertKeyword(aChars, keywordStart)) { + if (!isInsertPresent && (nativeQueries == null || nativeQueries.isEmpty())) { + // Only allow rewrite for insert command starting with the insert 
keyword. + // Else, too many risks of wrong interpretation. + isCurrentReWriteCompatible = keyWordCount == 0; + isInsertPresent = true; + currentCommandType = SqlCommandType.INSERT; + } else { + isCurrentReWriteCompatible = false; + } + } + + } else if (currentCommandType == SqlCommandType.WITH + && inParen == 0) { + SqlCommandType command = parseWithCommandType(aChars, i, keywordStart, wordLength); + if (command != null) { + currentCommandType = command; + } + } else if (currentCommandType == SqlCommandType.CREATE) { + /* + We are looking for BEGIN ATOMIC + */ + if (wordLength == 5 && parseBeginKeyword(aChars, keywordStart)) { + isBeginPresent = true; + } else { + // found begin, now look for atomic + if (isBeginPresent) { + if (wordLength == 6 && parseAtomicKeyword(aChars, keywordStart)) { + isBeginAtomicPresent = true; + } + // either way we reset beginFound + isBeginPresent = false; + } + } + } + if (inParen != 0 || aChar == ')') { + // RETURNING and VALUES cannot be present in parentheses + } else if (wordLength == 9 && parseReturningKeyword(aChars, keywordStart)) { + isReturningPresent = true; + } else if (wordLength == 6 && parseValuesKeyword(aChars, keywordStart)) { + isValuesFound = true; + } + keywordStart = -1; + keyWordCount++; + } + if (aChar == '(') { + inParen++; + if (inParen == 1 && isValuesFound && valuesParenthesisOpenPosition == -1) { + valuesParenthesisOpenPosition = nativeSql.length() + i - fragmentStart; + } + } + } + + if (!isValuesFound || !isCurrentReWriteCompatible || valuesParenthesisClosePosition == -1 + || (bindPositions != null + && valuesParenthesisClosePosition < bindPositions.get(bindPositions.size() - 1))) { + valuesParenthesisOpenPosition = -1; + valuesParenthesisClosePosition = -1; + } + + if (fragmentStart < aChars.length && !whitespaceOnly) { + nativeSql.append(aChars, fragmentStart, aChars.length - fragmentStart); + } else { + if (numberOfStatements > 1) { + isReturningPresent = false; + currentCommandType = 
SqlCommandType.BLANK; + } else if (numberOfStatements == 1) { + isReturningPresent = isReturningPresentPrev; + currentCommandType = prevCommandType; + } + } + + if (nativeSql.length() == 0) { + return nativeQueries != null ? nativeQueries : Collections.emptyList(); + } + + if (addReturning(nativeSql, currentCommandType, returningColumnNames, isReturningPresent, quoteReturningIdentifiers)) { + isReturningPresent = true; + } + + NativeQuery lastQuery = new NativeQuery(nativeSql.toString(), + toIntArray(bindPositions), !splitStatements, + SqlCommand.createStatementTypeInfo(currentCommandType, + isBatchedReWriteConfigured, valuesParenthesisOpenPosition, valuesParenthesisClosePosition, + isReturningPresent, (nativeQueries == null ? 0 : nativeQueries.size()))); + + if (nativeQueries == null) { + return Collections.singletonList(lastQuery); + } + + if (!whitespaceOnly) { + nativeQueries.add(lastQuery); + } + return nativeQueries; + } + + private static SqlCommandType parseWithCommandType(char[] aChars, int i, int keywordStart, + int wordLength) { + // This parses `with x as (...) ...` + // Corner case is `with select as (insert ..) 
select * from select + SqlCommandType command; + if (wordLength == 6 && parseUpdateKeyword(aChars, keywordStart)) { + command = SqlCommandType.UPDATE; + } else if (wordLength == 6 && parseDeleteKeyword(aChars, keywordStart)) { + command = SqlCommandType.DELETE; + } else if (wordLength == 6 && parseInsertKeyword(aChars, keywordStart)) { + command = SqlCommandType.INSERT; + } else if (wordLength == 6 && parseSelectKeyword(aChars, keywordStart)) { + command = SqlCommandType.SELECT; + } else { + return null; + } + // update/delete/insert/select keyword detected + // Check if `AS` follows + int nextInd = i; + // The loop should skip whitespace and comments + for (; nextInd < aChars.length; nextInd++) { + char nextChar = aChars[nextInd]; + if (nextChar == '-') { + nextInd = Parser.parseLineComment(aChars, nextInd); + } else if (nextChar == '/') { + nextInd = Parser.parseBlockComment(aChars, nextInd); + } else if (Character.isWhitespace(nextChar)) { + // Skip whitespace + continue; + } else { + break; + } + } + if (nextInd + 2 >= aChars.length + || (!parseAsKeyword(aChars, nextInd) + || isIdentifierContChar(aChars[nextInd + 2]))) { + return command; + } + return null; + } + + private static boolean addReturning(StringBuilder nativeSql, SqlCommandType currentCommandType, + String[] returningColumnNames, boolean isReturningPresent, boolean quoteReturningIdentifiers) throws SQLException { + if (isReturningPresent || returningColumnNames.length == 0) { + return false; + } + if (currentCommandType != SqlCommandType.INSERT + && currentCommandType != SqlCommandType.UPDATE + && currentCommandType != SqlCommandType.DELETE + && currentCommandType != SqlCommandType.WITH) { + return false; + } + + nativeSql.append("\nRETURNING "); + if (returningColumnNames.length == 1 && returningColumnNames[0].charAt(0) == '*') { + nativeSql.append('*'); + return true; + } + for (int col = 0; col < returningColumnNames.length; col++) { + String columnName = returningColumnNames[col]; + if (col > 0) 
{
        nativeSql.append(", ");
      }
      /*
       * If the client quotes identifiers then doing so again would create an error
       */
      if (quoteReturningIdentifiers) {
        // Driver-side quoting requested: escape/quote the identifier before appending
        Utils.escapeIdentifier(nativeSql, columnName);
      } else {
        // Caller is responsible for quoting; append verbatim
        nativeSql.append(columnName);
      }
    }
    return true;
  }

  /**
   * Converts {@link IntList} to {@code int[]}. A {@code null} collection is converted to a
   * {@code null} array.
   *
   * @param list input list, may be {@code null}
   * @return output array, or {@code null} when the input list is {@code null}
   */
  private static int [] toIntArray(IntList list) {
    if (list == null) {
      return null;
    }
    return list.toArray();
  }

  /**
   *
   * Find the end of the single-quoted string starting at the given offset.
   *
   * <p>Note: for {@code 'single '' quote in string'}, this method currently returns the offset of
   * first {@code '} character after the initial one. The caller must call the method a second time
   * for the second part of the quoted string.
   *
   * @param query query
   * @param offset start offset (position of the opening {@code '})
   * @param standardConformingStrings standard conforming strings
   * @return position of the end of the single-quoted string
   */
  public static int parseSingleQuotes(final char[] query, int offset,
      boolean standardConformingStrings) {
    // check for escape string syntax (E''): an E'...' literal always treats
    // backslash as an escape character, regardless of standard_conforming_strings
    if (standardConformingStrings
        && offset >= 2
        && (query[offset - 1] == 'e' || query[offset - 1] == 'E')
        && charTerminatesIdentifier(query[offset - 2])) {
      standardConformingStrings = false;
    }

    if (standardConformingStrings) {
      // do NOT treat backslashes as escape characters
      while (++offset < query.length) {
        if (query[offset] == '\'') {
          return offset;
        }
      }
    } else {
      // treat backslashes as escape characters
      while (++offset < query.length) {
        switch (query[offset]) {
          case '\\':
            // skip the escaped character
            ++offset;
            break;
          case '\'':
            return offset;
          default:
            break;
        }
      }
    }

    // unterminated literal: report the end of the query
    return query.length;
  }

  /**
   *

Find the end of the double-quoted string starting at the given offset.

+ * + *

Note: for {@code "double "" quote in string"}, this method currently + * returns the offset of first {@code "} character after the initial one. The caller must + * call the method a second time for the second part of the quoted string.

   *
   * @param query query
   * @param offset start offset (position of the opening {@code "})
   * @return position of the end of the double-quoted string
   */
  public static int parseDoubleQuotes(final char[] query, int offset) {
    // scan for the terminating double quote; an embedded "" is handled by the
    // caller invoking this method again for the second half
    while (++offset < query.length && query[offset] != '"') {
      // do nothing
    }
    return offset;
  }

  /**
   * Test if the dollar character ({@code $}) at the given offset starts a dollar-quoted string and
   * return the offset of the ending dollar character.
   *
   * @param query query
   * @param offset start offset
   * @return offset of the ending dollar character
   */
  public static int parseDollarQuotes(final char[] query, int offset) {
    // a dollar quote may only start at a token boundary: a preceding identifier
    // character would make the $ part of an identifier instead
    if (offset + 1 < query.length
        && (offset == 0 || !isIdentifierContChar(query[offset - 1]))) {
      int endIdx = -1;
      if (query[offset + 1] == '$') {
        // anonymous tag: $$
        endIdx = offset + 1;
      } else if (isDollarQuoteStartChar(query[offset + 1])) {
        // named tag: $tag$ -- scan for the closing $ of the tag
        for (int d = offset + 2; d < query.length; d++) {
          if (query[d] == '$') {
            endIdx = d;
            break;
          } else if (!isDollarQuoteContChar(query[d])) {
            break;
          }
        }
      }
      if (endIdx > 0) {
        // found; note: tag includes start and end $ character
        int tagIdx = offset;
        int tagLen = endIdx - offset + 1;
        offset = endIdx; // loop continues at endIdx + 1
        // scan forward for an identical closing tag
        for (++offset; offset < query.length; offset++) {
          if (query[offset] == '$'
              && subArraysEqual(query, tagIdx, offset, tagLen)) {
            offset += tagLen - 1;
            break;
          }
        }
      }
    }
    return offset;
  }

  /**
   * Test if the {@code -} character at {@code offset} starts a {@code --} style line comment,
   * and return the position of the first {@code \r} or {@code \n} character.
   *
   * @param query query
   * @param offset start offset
   * @return position of the first {@code \r} or {@code \n} character
   */
  public static int parseLineComment(final char[] query, int offset) {
    if (offset + 1 < query.length && query[offset + 1] == '-') {
      while (offset + 1 < query.length) {
        offset++;
        if (query[offset] == '\r' || query[offset] == '\n') {
          break;
        }
      }
    }
    return offset;
  }

  /**
   * Test if the {@code /} character at {@code offset} starts a block comment, and return the
   * position of the last {@code /} character.
   *
   * @param query query
   * @param offset start offset
   * @return position of the last {@code /} character
   */
  public static int parseBlockComment(final char[] query, int offset) {
    if (offset + 1 < query.length && query[offset + 1] == '*') {
      // /* /* */ */ nest, according to SQL spec
      int level = 1;
      for (offset += 2; offset < query.length; offset++) {
        // look at the previous/current pair so "*/" and "/*" are matched exactly once
        switch (query[offset - 1]) {
          case '*':
            if (query[offset] == '/') {
              --level;
              ++offset; // don't parse / in */* twice
            }
            break;
          case '/':
            if (query[offset] == '*') {
              ++level;
              ++offset; // don't parse * in /*/ twice
            }
            break;
          default:
            break;
        }

        if (level == 0) {
          --offset; // reset position to last '/' char
          break;
        }
      }
    }
    return offset;
  }

  /**
   * Parse string to check presence of DELETE keyword regardless of case. The initial character is
   * assumed to have been matched.
+ * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseDeleteKeyword(final char[] query, int offset) { + if (query.length < (offset + 6)) { + return false; + } + + return (query[offset] | 32) == 'd' + && (query[offset + 1] | 32) == 'e' + && (query[offset + 2] | 32) == 'l' + && (query[offset + 3] | 32) == 'e' + && (query[offset + 4] | 32) == 't' + && (query[offset + 5] | 32) == 'e'; + } + + /** + * Parse string to check presence of INSERT keyword regardless of case. + * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseInsertKeyword(final char[] query, int offset) { + if (query.length < (offset + 7)) { + return false; + } + + return (query[offset] | 32) == 'i' + && (query[offset + 1] | 32) == 'n' + && (query[offset + 2] | 32) == 's' + && (query[offset + 3] | 32) == 'e' + && (query[offset + 4] | 32) == 'r' + && (query[offset + 5] | 32) == 't'; + } + + /** + Parse string to check presence of BEGIN keyword regardless of case. + * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + + public static boolean parseBeginKeyword(final char[] query, int offset) { + if (query.length < (offset + 6)) { + return false; + } + return (query[offset] | 32) == 'b' + && (query[offset + 1] | 32) == 'e' + && (query[offset + 2] | 32) == 'g' + && (query[offset + 3] | 32) == 'i' + && (query[offset + 4] | 32) == 'n'; + } + + /** + Parse string to check presence of ATOMIC keyword regardless of case. 
+ * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseAtomicKeyword(final char[] query, int offset) { + if (query.length < (offset + 7)) { + return false; + } + return (query[offset] | 32) == 'a' + && (query[offset + 1] | 32) == 't' + && (query[offset + 2] | 32) == 'o' + && (query[offset + 3] | 32) == 'm' + && (query[offset + 4] | 32) == 'i' + && (query[offset + 5] | 32) == 'c'; + } + + /** + * Parse string to check presence of MOVE keyword regardless of case. + * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseMoveKeyword(final char[] query, int offset) { + if (query.length < (offset + 4)) { + return false; + } + + return (query[offset] | 32) == 'm' + && (query[offset + 1] | 32) == 'o' + && (query[offset + 2] | 32) == 'v' + && (query[offset + 3] | 32) == 'e'; + } + + /** + * Parse string to check presence of RETURNING keyword regardless of case. + * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseReturningKeyword(final char[] query, int offset) { + if (query.length < (offset + 9)) { + return false; + } + + return (query[offset] | 32) == 'r' + && (query[offset + 1] | 32) == 'e' + && (query[offset + 2] | 32) == 't' + && (query[offset + 3] | 32) == 'u' + && (query[offset + 4] | 32) == 'r' + && (query[offset + 5] | 32) == 'n' + && (query[offset + 6] | 32) == 'i' + && (query[offset + 7] | 32) == 'n' + && (query[offset + 8] | 32) == 'g'; + } + + /** + * Parse string to check presence of SELECT keyword regardless of case. 
+ * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseSelectKeyword(final char[] query, int offset) { + if (query.length < (offset + 6)) { + return false; + } + + return (query[offset] | 32) == 's' + && (query[offset + 1] | 32) == 'e' + && (query[offset + 2] | 32) == 'l' + && (query[offset + 3] | 32) == 'e' + && (query[offset + 4] | 32) == 'c' + && (query[offset + 5] | 32) == 't'; + } + + /** + * Parse string to check presence of CREATE keyword regardless of case. + * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseAlterKeyword(final char[] query, int offset) { + if (query.length < (offset + 5)) { + return false; + } + + return (query[offset] | 32) == 'a' + && (query[offset + 1] | 32) == 'l' + && (query[offset + 2] | 32) == 't' + && (query[offset + 3] | 32) == 'e' + && (query[offset + 4] | 32) == 'r'; + } + + /** + * Parse string to check presence of CREATE keyword regardless of case. + * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseCreateKeyword(final char[] query, int offset) { + if (query.length < (offset + 6)) { + return false; + } + + return (query[offset] | 32) == 'c' + && (query[offset + 1] | 32) == 'r' + && (query[offset + 2] | 32) == 'e' + && (query[offset + 3] | 32) == 'a' + && (query[offset + 4] | 32) == 't' + && (query[offset + 5] | 32) == 'e'; + } + + /** + * Parse string to check presence of UPDATE keyword regardless of case. 
+ * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseUpdateKeyword(final char[] query, int offset) { + if (query.length < (offset + 6)) { + return false; + } + + return (query[offset] | 32) == 'u' + && (query[offset + 1] | 32) == 'p' + && (query[offset + 2] | 32) == 'd' + && (query[offset + 3] | 32) == 'a' + && (query[offset + 4] | 32) == 't' + && (query[offset + 5] | 32) == 'e'; + } + + /** + * Parse string to check presence of VALUES keyword regardless of case. + * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseValuesKeyword(final char[] query, int offset) { + if (query.length < (offset + 6)) { + return false; + } + + return (query[offset] | 32) == 'v' + && (query[offset + 1] | 32) == 'a' + && (query[offset + 2] | 32) == 'l' + && (query[offset + 3] | 32) == 'u' + && (query[offset + 4] | 32) == 'e' + && (query[offset + 5] | 32) == 's'; + } + + /** + * Faster version of {@link Long#parseLong(String)} when parsing a substring is required + * + * @param s string to parse + * @param beginIndex begin index + * @param endIndex end index + * @return long value + */ + public static long parseLong(String s, int beginIndex, int endIndex) { + // Fallback to default implementation in case the string is long + if (endIndex - beginIndex > 16) { + return Long.parseLong(s.substring(beginIndex, endIndex)); + } + long res = digitAt(s, beginIndex); + for (beginIndex++; beginIndex < endIndex; beginIndex++) { + res = res * 10 + digitAt(s, beginIndex); + } + return res; + } + + /** + * Parse string to check presence of WITH keyword regardless of case. 
+ * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseWithKeyword(final char[] query, int offset) { + if (query.length < (offset + 4)) { + return false; + } + + return (query[offset] | 32) == 'w' + && (query[offset + 1] | 32) == 'i' + && (query[offset + 2] | 32) == 't' + && (query[offset + 3] | 32) == 'h'; + } + + /** + * Parse string to check presence of AS keyword regardless of case. + * + * @param query char[] of the query statement + * @param offset position of query to start checking + * @return boolean indicates presence of word + */ + public static boolean parseAsKeyword(final char[] query, int offset) { + if (query.length < (offset + 2)) { + return false; + } + + return (query[offset] | 32) == 'a' + && (query[offset + 1] | 32) == 's'; + } + + /** + * Returns true if a given string {@code s} has digit at position {@code pos}. + * @param s input string + * @param pos position (0-based) + * @return true if input string s has digit at position pos + */ + public static boolean isDigitAt(String s, int pos) { + return pos > 0 && pos < s.length() && Character.isDigit(s.charAt(pos)); + } + + /** + * Converts digit at position {@code pos} in string {@code s} to integer or throws. + * @param s input string + * @param pos position (0-based) + * @return integer value of a digit at position pos + * @throws NumberFormatException if character at position pos is not an integer + */ + public static int digitAt(String s, int pos) { + int c = s.charAt(pos) - '0'; + if (c < 0 || c > 9) { + throw new NumberFormatException("Input string: \"" + s + "\", position: " + pos); + } + return c; + } + + /** + * Identifies characters which the backend scanner considers to be whitespace. + * + *

+ * https://github.com/postgres/postgres/blob/17bb62501787c56e0518e61db13a523d47afd724/src/backend/parser/scan.l#L194-L198 + *

+ * + * @param c character + * @return true if the character is a whitespace character as defined in the backend's parser + */ + public static boolean isSpace(char c) { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f'; + } + + /** + * Identifies white space characters which the backend uses to determine if a + * {@code String} value needs to be quoted in array representation. + * + *

+ * https://github.com/postgres/postgres/blob/f2c587067a8eb9cf1c8f009262381a6576ba3dd0/src/backend/utils/adt/arrayfuncs.c#L421-L438 + *

+ * + * @param c + * Character to examine. + * @return Indication if the character is a whitespace which back end will + * escape. + */ + public static boolean isArrayWhiteSpace(char c) { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == 0x0B; + } + + /** + * @param c character + * @return true if the given character is a valid character for an operator in the backend's + * parser + */ + public static boolean isOperatorChar(char c) { + /* + * Extracted from operators defined by {self} and {op_chars} + * in pgsql/src/backend/parser/scan.l. + */ + return ",()[].;:+-*/%^<>=~!@#&|`?".indexOf(c) != -1; + } + + /** + * Checks if a character is valid as the start of an identifier. + * PostgreSQL 9.4 allows column names like _, ‿, ⁀, ⁔, ︳, ︴, ﹍, ﹎, ﹏, _, so + * it is assumed isJavaIdentifierPart is good enough for PostgreSQL. + * + * @param c the character to check + * @return true if valid as first character of an identifier; false if not + * @see Identifiers and Key Words + */ + public static boolean isIdentifierStartChar(char c) { + /* + * PostgreSQL's implementation is located in + * pgsql/src/backend/parser/scan.l: + * ident_start [A-Za-z\200-\377_] + * ident_cont [A-Za-z\200-\377_0-9\$] + * however it is not clear how that interacts with unicode, so we just use Java's implementation. + */ + return Character.isJavaIdentifierStart(c); + } + + /** + * Checks if a character is valid as the second or later character of an identifier. 
+ * + * @param c the character to check + * @return true if valid as second or later character of an identifier; false if not + */ + public static boolean isIdentifierContChar(char c) { + return Character.isJavaIdentifierPart(c); + } + + /** + * @param c character + * @return true if the character terminates an identifier + */ + public static boolean charTerminatesIdentifier(char c) { + return c == '"' || isSpace(c) || isOperatorChar(c); + } + + /** + * Checks if a character is valid as the start of a dollar quoting tag. + * + * @param c the character to check + * @return true if valid as first character of a dollar quoting tag; false if not + */ + public static boolean isDollarQuoteStartChar(char c) { + /* + * The allowed dollar quote start and continuation characters + * must stay in sync with what the backend defines in + * pgsql/src/backend/parser/scan.l + * + * The quoted string starts with $foo$ where "foo" is an optional string + * in the form of an identifier, except that it may not contain "$", + * and extends to the first occurrence of an identical string. + * There is *no* processing of the quoted text. + */ + return c != '$' && isIdentifierStartChar(c); + } + + /** + * Checks if a character is valid as the second or later character of a dollar quoting tag. + * + * @param c the character to check + * @return true if valid as second or later character of a dollar quoting tag; false if not + */ + public static boolean isDollarQuoteContChar(char c) { + return c != '$' && isIdentifierContChar(c); + } + + /** + * Compares two sub-arrays of the given character array for equalness. If the length is zero, the + * result is true if and only if the offsets are within the bounds of the array. 
   *
   * @param arr a char array
   * @param offA first sub-array start offset
   * @param offB second sub-array start offset
   * @param len length of the sub arrays to compare
   * @return true if the sub-arrays are equal; false if not
   */
  private static boolean subArraysEqual(final char[] arr,
      final int offA, final int offB,
      final int len) {
    // out-of-range offsets or lengths never match
    if (offA < 0 || offB < 0
        || offA >= arr.length || offB >= arr.length
        || offA + len > arr.length || offB + len > arr.length) {
      return false;
    }

    for (int i = 0; i < len; i++) {
      if (arr[offA + i] != arr[offB + i]) {
        return false;
      }
    }

    return true;
  }

  /**
   * Converts JDBC-specific callable statement escapes {@code { [? =] call <some_function> [(?,
   * [?,..])] }} into the PostgreSQL format which is {@code select <some_function> (?, [?, ...]) as
   * result} or {@code select * from <some_function> (?, [?, ...]) as result} (7.3)
   *
   * @param jdbcSql sql text with JDBC escapes
   * @param stdStrings if backslash in single quotes should be regular character or escape one
   * @param serverVersion server version
   * @param protocolVersion protocol version (retained for API compatibility; not consulted by the
   *          visible implementation)
   * @param escapeSyntaxCallMode mode specifying whether JDBC escape call syntax is transformed into a CALL/SELECT statement
   * @return SQL in appropriate for given server format
   * @throws SQLException if given SQL is malformed
   */
  public static JdbcCallParseInfo modifyJdbcCall(String jdbcSql, boolean stdStrings,
      int serverVersion, int protocolVersion, EscapeSyntaxCallMode escapeSyntaxCallMode) throws SQLException {
    // Mini-parser for JDBC function-call syntax (only), implemented as a small
    // hand-rolled state machine over the input characters (states 1..8 below).
    // TODO: Merge with escape processing (and parameter parsing?) so we only parse each query once.
    // RE: frequently used statements are cached (see {@link org.postgresql.jdbc.PgConnection#borrowQuery}), so this "merge" is not that important.
    String sql = jdbcSql;
    boolean isFunction = false;
    boolean outParamBeforeFunc = false;

    int len = jdbcSql.length();
    int state = 1;
    boolean inQuotes = false;
    boolean inEscape = false;
    int startIndex = -1; // start of the "real call" body between "call " and "}"
    int endIndex = -1;   // end (exclusive) of the "real call" body
    boolean syntaxError = false;
    int i = 0;

    while (i < len && !syntaxError) {
      char ch = jdbcSql.charAt(i);

      switch (state) {
        case 1: // Looking for { at start of query
          if (ch == '{') {
            ++i;
            ++state;
          } else if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            // Not function-call syntax. Skip the rest of the string.
            i = len;
          }
          break;

        case 2: // After {, looking for ? or =, skipping whitespace
          if (ch == '?') {
            outParamBeforeFunc =
                isFunction = true; // { ? = call ... } -- function with one out parameter
            ++i;
            ++state;
          } else if (ch == 'c' || ch == 'C') { // { call ... } -- proc with no out parameters
            state += 3; // Don't increase 'i'
          } else if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            // "{ foo ...", doesn't make sense, complain.
            syntaxError = true;
          }
          break;

        case 3: // Looking for = after ?, skipping whitespace
          if (ch == '=') {
            ++i;
            ++state;
          } else if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            syntaxError = true;
          }
          break;

        case 4: // Looking for 'call' after '? =' skipping whitespace
          if (ch == 'c' || ch == 'C') {
            ++state; // Don't increase 'i'.
          } else if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            syntaxError = true;
          }
          break;

        case 5: // Should be at 'call ' either at start of string or after ?=
          if ((ch == 'c' || ch == 'C') && i + 4 <= len && "call"
              .equalsIgnoreCase(jdbcSql.substring(i, i + 4))) {
            isFunction = true;
            i += 4;
            ++state;
          } else if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            syntaxError = true;
          }
          break;

        case 6: // Looking for whitespace char after 'call'
          if (Character.isWhitespace(ch)) {
            // Ok, we found the start of the real call.
            ++i;
            ++state;
            startIndex = i;
          } else {
            syntaxError = true;
          }
          break;

        case 7: // In "body" of the query (after "{ [? =] call ")
          if (ch == '\'') {
            inQuotes = !inQuotes;
            ++i;
          } else if (inQuotes && ch == '\\' && !stdStrings) {
            // Backslash in string constant, skip next character.
            i += 2;
          } else if (!inQuotes && ch == '{') {
            inEscape = !inEscape;
            ++i;
          } else if (!inQuotes && ch == '}') {
            if (!inEscape) {
              // Should be end of string.
              endIndex = i;
              ++i;
              ++state;
            } else {
              inEscape = false;
            }
          } else if (!inQuotes && ch == ';') {
            syntaxError = true;
          } else {
            // Everything else is ok.
            ++i;
          }
          break;

        case 8: // At trailing end of query, eating whitespace
          if (Character.isWhitespace(ch)) {
            ++i;
          } else {
            syntaxError = true;
          }
          break;

        default:
          throw new IllegalStateException("somehow got into bad state " + state);
      }
    }

    // We can only legally end in a couple of states here.
    if (i == len && !syntaxError) {
      if (state == 1) {
        // Not an escaped syntax.

        // Detect PostgreSQL native CALL.
        // (OUT parameter registration, needed for stored procedures with INOUT arguments, will fail without this)
        i = 0;
        while (i < len && Character.isWhitespace(jdbcSql.charAt(i))) {
          i++; // skip any preceding whitespace
        }
        if (i < len - 5) { // 5 == length of "call" + 1 whitespace
          //Check for CALL followed by whitespace
          char ch = jdbcSql.charAt(i);
          if ((ch == 'c' || ch == 'C') && "call".equalsIgnoreCase(jdbcSql.substring(i, i + 4))
              && Character.isWhitespace(jdbcSql.charAt(i + 4))) {
            isFunction = true;
          }
        }
        return new JdbcCallParseInfo(sql, isFunction);
      }
      if (state != 8) {
        syntaxError = true; // Ran out of query while still parsing
      }
    }

    if (syntaxError) {
      throw new PSQLException(
          GT.tr("Malformed function or procedure escape syntax at offset {0}.", i),
          PSQLState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL);
    }

    // Rewrite "{ [? =] call f(...) }" as either "select * from f(...) as result"
    // or, for servers >= 11 when the call mode allows it, a native "call f(...)"
    String prefix;
    String suffix;
    if (escapeSyntaxCallMode == EscapeSyntaxCallMode.SELECT || serverVersion < 110000
        || (outParamBeforeFunc && escapeSyntaxCallMode == EscapeSyntaxCallMode.CALL_IF_NO_RETURN)) {
      prefix = "select * from ";
      suffix = " as result";
    } else {
      prefix = "call ";
      suffix = "";
    }

    String s = jdbcSql.substring(startIndex, endIndex);
    int prefixLength = prefix.length();
    StringBuilder sb = new StringBuilder(prefixLength + jdbcSql.length() + suffix.length() + 10);
    sb.append(prefix);
    sb.append(s);

    int opening = s.indexOf('(') + 1;
    if (opening == 0) {
      // here the function call has no parameters declaration eg : "{ ? = call pack_getValue}"
      sb.append(outParamBeforeFunc ? "(?)" : "()");
    } else if (outParamBeforeFunc) {
      // move the single out parameter into the function call
      // so that it can be treated like all other parameters
      boolean needComma = false;

      // the following loop will check if the function call has parameters
      // eg "{ ? = call pack_getValue(?) }" vs "{ ? = call pack_getValue() }"
      for (int j = opening + prefixLength; j < sb.length(); j++) {
        char c = sb.charAt(j);
        if (c == ')') {
          break;
        }

        if (!Character.isWhitespace(c)) {
          needComma = true;
          break;
        }
      }

      // insert the return parameter as the first parameter of the function call
      if (needComma) {
        sb.insert(opening + prefixLength, "?,");
      } else {
        sb.insert(opening + prefixLength, "?");
      }
    }

    if (!suffix.isEmpty()) {
      sql = sb.append(suffix).toString();
    } else {
      sql = sb.toString();
    }
    return new JdbcCallParseInfo(sql, isFunction);
  }

  /**
   *

Filter the SQL string of Java SQL Escape clauses.

+ * + *

Currently implemented Escape clauses are those mentioned in 11.3 in the specification. + * Basically we look through the sql string for {d xxx}, {t xxx}, {ts xxx}, {oj xxx} or {fn xxx} + * in non-string sql code. When we find them, we just strip the escape part leaving only the xxx + * part. So, something like "select * from x where d={d '2001-10-09'}" would return "select * from + * x where d= '2001-10-09'".

   *
   * @param sql the original query text
   * @param replaceProcessingEnabled whether replace_processing_enabled is on
   * @param standardConformingStrings whether standard_conforming_strings is on
   * @return PostgreSQL-compatible SQL
   * @throws SQLException if given SQL is wrong
   */
  public static String replaceProcessing(String sql, boolean replaceProcessingEnabled,
      boolean standardConformingStrings) throws SQLException {
    if (replaceProcessingEnabled) {
      // Since escape codes can only appear in SQL CODE, we keep track
      // of if we enter a string or not.
      int len = sql.length();
      char[] chars = sql.toCharArray();
      StringBuilder newsql = new StringBuilder(len);
      int i = 0;
      while (i < len) {
        i = parseSql(chars, i, newsql, false, standardConformingStrings);
        // We need to loop here in case we encounter invalid
        // SQL, consider: SELECT a FROM t WHERE (1 > 0)) ORDER BY a
        // We can't end replacing after the extra closing paren
        // because that changes a syntax error to a valid query
        // that isn't what the user specified.
        if (i < len) {
          newsql.append(chars[i]);
          i++;
        }
      }
      return newsql.toString();
    } else {
      return sql;
    }
  }

  /**
   * parse the given sql from index i, appending it to the given buffer until we hit an unmatched
   * right parentheses or end of string. When the stopOnComma flag is set we also stop processing
   * when a comma is found in sql text that isn't inside nested parenthesis.
   *
   * @param sql the original query text
   * @param i starting position for replacing
   * @param newsql where to write the replaced output
   * @param stopOnComma should we stop after hitting the first comma in sql text?
   * @param stdStrings whether standard_conforming_strings is on
   * @return the position we stopped processing at
   * @throws SQLException if given SQL is wrong
   */
  private static int parseSql(char[] sql, int i, StringBuilder newsql, boolean stopOnComma,
      boolean stdStrings) throws SQLException {
    SqlParseState state = SqlParseState.IN_SQLCODE;
    int len = sql.length;
    int nestedParenthesis = 0;
    boolean endOfNested = false;

    // because of the ++i loop
    i--;
    while (!endOfNested && ++i < len) {
      char c = sql[i];

      state_switch:
      switch (state) {
        case IN_SQLCODE:
          if (c == '$') {
            // start of a dollar-quoted string? copy it through verbatim
            int i0 = i;
            i = parseDollarQuotes(sql, i);
            checkParsePosition(i, len, i0, sql,
                "Unterminated dollar quote started at position {0} in SQL {1}. Expected terminating $$");
            newsql.append(sql, i0, i - i0 + 1);
            break;
          } else if (c == '\'') {
            // start of a string?
            int i0 = i;
            i = parseSingleQuotes(sql, i, stdStrings);
            checkParsePosition(i, len, i0, sql,
                "Unterminated string literal started at position {0} in SQL {1}. Expected ' char");
            newsql.append(sql, i0, i - i0 + 1);
            break;
          } else if (c == '"') {
            // start of a identifier?
            int i0 = i;
            i = parseDoubleQuotes(sql, i);
            checkParsePosition(i, len, i0, sql,
                "Unterminated identifier started at position {0} in SQL {1}. Expected \" char");
            newsql.append(sql, i0, i - i0 + 1);
            break;
          } else if (c == '/') {
            // possible block comment; copied through verbatim
            int i0 = i;
            i = parseBlockComment(sql, i);
            checkParsePosition(i, len, i0, sql,
                "Unterminated block comment started at position {0} in SQL {1}. Expected */ sequence");
            newsql.append(sql, i0, i - i0 + 1);
            break;
          } else if (c == '-') {
            // possible line comment; copied through verbatim
            int i0 = i;
            i = parseLineComment(sql, i);
            newsql.append(sql, i0, i - i0 + 1);
            break;
          } else if (c == '(') { // begin nested sql
            nestedParenthesis++;
          } else if (c == ')') { // end of nested sql
            nestedParenthesis--;
            if (nestedParenthesis < 0) {
              endOfNested = true;
              break;
            }
          } else if (stopOnComma && c == ',' && nestedParenthesis == 0) {
            endOfNested = true;
            break;
          } else if (c == '{') { // start of an escape code?
            if (i + 1 < len) {
              SqlParseState[] availableStates = SqlParseState.VALUES;
              // skip first state, it's not a escape code state
              for (int j = 1; j < availableStates.length; j++) {
                SqlParseState availableState = availableStates[j];
                int matchedPosition = availableState.getMatchedPosition(sql, i + 1);
                if (matchedPosition == 0) {
                  continue;
                }
                i += matchedPosition;
                if (availableState.replacementKeyword != null) {
                  newsql.append(availableState.replacementKeyword);
                }
                state = availableState;
                break state_switch;
              }
            }
          }
          newsql.append(c);
          break;

        case ESC_FUNCTION:
          // extract function name
          i = escapeFunction(sql, i, newsql, stdStrings);
          state = SqlParseState.IN_SQLCODE; // end of escaped function (or query)
          break;
        case ESC_DATE:
        case ESC_TIME:
        case ESC_TIMESTAMP:
        case ESC_OUTERJOIN:
        case ESC_ESCAPECHAR:
          // inside {d ...}, {t ...}, {ts ...}, {oj ...} or {escape ...}:
          // copy content until the closing brace, dropping the brace itself
          if (c == '}') {
            state = SqlParseState.IN_SQLCODE; // end of escape code.
          } else {
            newsql.append(c);
          }
          break;
      } // end switch
    }
    return i;
  }

  /**
   * Finds the position of the first {@code (} at or after index {@code i},
   * or {@code sql.length} when there is none.
   */
  private static int findOpenParenthesis(char[] sql, int i) {
    int posArgs = i;
    while (posArgs < sql.length && sql[posArgs] != '(') {
      posArgs++;
    }
    return posArgs;
  }

  /**
   * Throws a syntax-error {@link PSQLException} with the given message when the parse
   * position {@code i} ran off the end of the query ({@code i >= len}).
   */
  private static void checkParsePosition(int i, int len, int i0, char[] sql,
      String message)
      throws PSQLException {
    if (i < len) {
      return;
    }
    throw new PSQLException(
        GT.tr(message, i0, new String(sql)),
        PSQLState.SYNTAX_ERROR);
  }

  /**
   * Rewrites a {@code {fn name(args)}} escape starting at index {@code i} (just after "fn"),
   * appending the PostgreSQL equivalent to {@code newsql}.
   */
  private static int escapeFunction(char[] sql, int i, StringBuilder newsql, boolean stdStrings) throws SQLException {
    String functionName;
    int argPos = findOpenParenthesis(sql, i);
    if (argPos < sql.length) {
      functionName = new String(sql, i, argPos - i).trim();
      // extract arguments
      i = argPos + 1;// we start the scan after the first (
      i = escapeFunctionArguments(newsql, functionName, sql, i, stdStrings);
    }
    // go to the end of the function copying anything found
    i++;
    while (i < sql.length && sql[i] != '}') {
      newsql.append(sql[i++]);
    }
    return i;
  }

  /**
   * Generate sql for escaped functions.
   *
   * @param newsql destination StringBuilder
   * @param functionName the escaped function name
   * @param sql input SQL text (containing arguments of a function call with possible JDBC escapes)
   * @param i position in the input SQL
   * @param stdStrings whether standard_conforming_strings is on
   * @return the right PostgreSQL sql
   * @throws SQLException if something goes wrong
   */
  private static int escapeFunctionArguments(StringBuilder newsql, String functionName, char[] sql, int i,
      boolean stdStrings)
      throws SQLException {
    // Maximum arity of functions in EscapedFunctions is 3
    List<CharSequence> parsedArgs = new ArrayList<>(3);
    while (true) {
      StringBuilder arg = new StringBuilder();
      int lastPos = i;
      // parse one argument, stopping at an unnested comma or the closing paren
      i = parseSql(sql, i, arg, true, stdStrings);
      if (i != lastPos) {
        // empty argument positions (i == lastPos) are skipped
        parsedArgs.add(arg);
      }
      if (i >= sql.length // should not happen
          || sql[i] != ',') {
        break;
      }
      i++;
    }
    Method method = EscapedFunctions2.getFunction(functionName);
    if (method == null) {
      // unknown escape function: emit it verbatim as a regular function call
      newsql.append(functionName);
      EscapedFunctions2.appendCall(newsql, "(", ",", ")", parsedArgs);
      return i;
    }
    try {
      method.invoke(null, newsql, parsedArgs);
    } catch (InvocationTargetException e) {
      // unwrap SQLExceptions thrown by the escape-function translator
      Throwable targetException = e.getTargetException();
      if (targetException instanceof SQLException) {
        throw (SQLException) targetException;
      } else {
        String message = targetException == null ? "no message" : targetException.getMessage();
        throw new PSQLException(message, PSQLState.SYSTEM_ERROR);
      }
    } catch (IllegalAccessException e) {
      throw new PSQLException(e.getMessage(), PSQLState.SYSTEM_ERROR);
    }
    return i;
  }

  // Allowed first characters of the escape value: a double quote or (marker '0') any letter
  private static final char[] QUOTE_OR_ALPHABETIC_MARKER = {'\"', '0'};
  private static final char[] QUOTE_OR_ALPHABETIC_MARKER_OR_PARENTHESIS = {'\"', '0', '('};
  private static final char[] SINGLE_QUOTE = {'\''};

  // Static variables for parsing SQL when replaceProcessing is true.

  /**
   * States of the JDBC-escape mini parser used by {@code parseSql}. Each escape state
   * carries the escape keyword (e.g. "ts"), the characters allowed to start its value,
   * and an optional replacement keyword emitted in place of the escape.
   */
  private enum SqlParseState {
    IN_SQLCODE,
    ESC_DATE("d", SINGLE_QUOTE, "DATE "),
    ESC_TIME("t", SINGLE_QUOTE, "TIME "),

    ESC_TIMESTAMP("ts", SINGLE_QUOTE, "TIMESTAMP "),
    ESC_FUNCTION("fn", QUOTE_OR_ALPHABETIC_MARKER, null),
    ESC_OUTERJOIN("oj", QUOTE_OR_ALPHABETIC_MARKER_OR_PARENTHESIS, null),
    ESC_ESCAPECHAR("escape", SINGLE_QUOTE, "ESCAPE ");

    private static final SqlParseState[] VALUES = values();

    // keyword that introduces the escape, e.g. {ts ...} -> "ts"
    private final char[] escapeKeyword;
    // characters permitted to start the escape's value ('0' is a letter marker)
    private final char[] allowedValues;
    // keyword emitted instead of the escape, or null to emit nothing
    private final String replacementKeyword;

    SqlParseState() {
      this("", new char[0], null);
    }

    SqlParseState(String escapeKeyword, char[] allowedValues,
        String replacementKeyword) {
      this.escapeKeyword = escapeKeyword.toCharArray();
      this.allowedValues = allowedValues;
      this.replacementKeyword = replacementKeyword;
    }

    private boolean startMatches(char[] sql, int pos) {
      // check for the keyword
      for (char c : escapeKeyword) {
        if (pos >= sql.length) {
          return false;
        }
        char curr = sql[pos++];
        if (curr != c && curr != Character.toUpperCase(c)) {
          return false;
        }
      }
      // require at least one more character after the keyword for the value
      return pos < sql.length;
    }

    private int getMatchedPosition(char[] sql, int pos) {
      // check for the keyword
      if (!startMatches(sql, pos)) {
        return 0;
      }

      int newPos = pos + escapeKeyword.length;

      // check for the beginning of the value
      char curr = sql[newPos];
      // ignore any in-between whitespace
      while (curr == ' ') {
        newPos++;
        if (newPos >= sql.length) {
          return 0;
        }
        curr = sql[newPos];
      }
      for (char c : allowedValues) {
        if (curr == c || (c == '0' && Character.isLetter(curr))) {
          return newPos - pos;
        }
      }
      return 0;
    }
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Provider.java b/pgjdbc/src/main/java/org/postgresql/core/Provider.java new file mode 100644 index 0000000..94c44e9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/Provider.java @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +/** + * Represents a provider of results. + * + * @param <T> the type of results provided by this provider + */ +public interface Provider<T> { + + /** + * Gets a result. + * + * @return a result + */ + T get(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/Query.java b/pgjdbc/src/main/java/org/postgresql/core/Query.java new file mode 100644 index 0000000..5322a09 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/Query.java @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core; + +import java.util.Map; + +/** + *

Abstraction of a generic Query, hiding the details of any protocol-version-specific data needed + * to execute the query efficiently.

+ * + *

Query objects should be explicitly closed when no longer needed; if resources are allocated on + * the server for this query, their cleanup is triggered by closing the Query.

+ * + * @author Oliver Jowett (oliver@opencloud.com) + */ +public interface Query { + /** + *

Create a ParameterList suitable for storing parameters associated with this Query.

+ * + *

If this query has no parameters, a ParameterList will be returned, but it may be a shared + * immutable object. If this query does have parameters, the returned ParameterList is a new list, + * unshared by other callers.

+ * + * @return a suitable ParameterList instance for this query + */ + ParameterList createParameterList(); + + /** + * Stringize this query to a human-readable form, substituting particular parameter values for + * parameter placeholders. + * + * @param parameters a ParameterList returned by this Query's {@link #createParameterList} method, + * or null to leave the parameter placeholders unsubstituted. + * @return a human-readable representation of this query + */ + String toString(ParameterList parameters); + + /** + * Returns SQL in native for database format. + * @return SQL in native for database format + */ + String getNativeSql(); + + /** + * Returns properties of the query (sql keyword, and some other parsing info). + * @return returns properties of the query (sql keyword, and some other parsing info) or null if not applicable + */ + SqlCommand getSqlCommand(); + + /** + *

Close this query and free any server-side resources associated with it. The resources may not + * be immediately deallocated, but closing a Query may make the deallocation more prompt.

+ * + *

A closed Query should not be executed.

+ */ + void close(); + + boolean isStatementDescribed(); + + boolean isEmpty(); + + /** + * Get the number of times this Query has been batched. + * @return number of times addBatch() has been called. + */ + int getBatchSize(); + + /** + * Get a map that a result set can use to find the index associated to a name. + * + * @return null if the query implementation does not support this method. + */ + Map getResultSetColumnNameIndexMap(); + + /** + * Return a list of the Query objects that make up this query. If this object is already a + * SimpleQuery, returns null (avoids an extra array construction in the common case). + * + * @return an array of single-statement queries, or null if this object is already a + * single-statement query. + */ + Query [] getSubqueries(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/QueryExecutor.java b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutor.java new file mode 100644 index 0000000..b5d96d7 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutor.java @@ -0,0 +1,623 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core; + +import org.postgresql.PGNotification; +import org.postgresql.copy.CopyOperation; +import org.postgresql.core.v3.TypeTransferModeRegistry; +import org.postgresql.jdbc.AutoSave; +import org.postgresql.jdbc.BatchResultHandler; +import org.postgresql.jdbc.EscapeSyntaxCallMode; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.util.HostSpec; + +import java.io.Closeable; +import java.io.IOException; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TimeZone; + +/** + *

Abstracts the protocol-specific details of executing a query.

+ * + *

Every connection has a single QueryExecutor implementation associated with it. This object + * provides:

+ * + *
    + *
  • factory methods for Query objects ({@link #createSimpleQuery(String)} and + * {@link #createQuery(String, boolean, boolean, String...)}) + *
  • execution methods for created Query objects ( + * {@link #execute(Query, ParameterList, ResultHandler, int, int, int)} for single queries and + * {@link #execute(Query[], ParameterList[], BatchResultHandler, int, int, int)} for batches of queries) + *
  • a fastpath call interface ({@link #createFastpathParameters} and {@link #fastpathCall}). + *
+ * + *

Query objects may represent a query that has parameter placeholders. To provide actual values for + * these parameters, a {@link ParameterList} object is created via a factory method ( + * {@link Query#createParameterList}). The parameters are filled in by the caller and passed along + * with the query to the query execution methods. Several ParameterLists for a given query might + * exist at one time (or over time); this allows the underlying Query to be reused for several + * executions, or for batch execution of the same Query.

+ * + *

In general, a Query created by a particular QueryExecutor may only be executed by that + * QueryExecutor, and a ParameterList created by a particular Query may only be used as parameters + * to that Query. Unpredictable things will happen if this isn't done.

+ * + * @author Oliver Jowett (oliver@opencloud.com) + */ +public interface QueryExecutor extends TypeTransferModeRegistry { + /** + * Flag for query execution that indicates the given Query object is unlikely to be reused. + */ + int QUERY_ONESHOT = 1; + + /** + * Flag for query execution that indicates that resultset metadata isn't needed and can be safely + * omitted. + */ + int QUERY_NO_METADATA = 2; + + /** + * Flag for query execution that indicates that a resultset isn't expected and the query executor + * can safely discard any rows (although the resultset should still appear to be from a + * resultset-returning query). + */ + int QUERY_NO_RESULTS = 4; + + /** + * Flag for query execution that indicates a forward-fetch-capable cursor should be used if + * possible. + */ + int QUERY_FORWARD_CURSOR = 8; + + /** + * Flag for query execution that indicates the automatic BEGIN on the first statement when outside + * a transaction should not be done. + */ + int QUERY_SUPPRESS_BEGIN = 16; + + /** + * Flag for query execution when we don't really want to execute, we just want to get the + * parameter metadata for the statement. + */ + int QUERY_DESCRIBE_ONLY = 32; + + /** + * Flag for query execution used by generated keys where we want to receive both the ResultSet and + * associated update count from the command status. + */ + int QUERY_BOTH_ROWS_AND_STATUS = 64; + + /** + * Force this query to be described at each execution. This is done in pipelined batches where we + * might need to detect mismatched result types. + */ + int QUERY_FORCE_DESCRIBE_PORTAL = 512; + + /** + * Flag to disable batch execution when we expect results (generated keys) from a statement. + * + * @deprecated in PgJDBC 9.4 as we now auto-size batches. + */ + @Deprecated + int QUERY_DISALLOW_BATCHING = 128; + + /** + * Flag for query execution to avoid using binary transfer. 
+ */ + int QUERY_NO_BINARY_TRANSFER = 256; + + /** + * Execute the query via simple 'Q' command (not parse, bind, exec, but simple execute). + * This sends query text on each execution, however it supports sending multiple queries + * separated with ';' as a single command. + */ + int QUERY_EXECUTE_AS_SIMPLE = 1024; + + int MAX_SAVE_POINTS = 1000; + + /** + * Flag indicating that when beginning a transaction, it should be read only. + */ + int QUERY_READ_ONLY_HINT = 2048; + + /** + * Execute a Query, passing results to a provided ResultHandler. + * + * @param query the query to execute; must be a query returned from calling + * {@link #wrap(List)} on this QueryExecutor object. + * @param parameters the parameters for the query. Must be non-null if the query + * takes parameters. Must be a parameter object returned by + * {@link org.postgresql.core.Query#createParameterList()}. + * @param handler a ResultHandler responsible for handling results generated by this query + * @param maxRows the maximum number of rows to retrieve + * @param fetchSize if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve + * before suspending + * @param flags a combination of QUERY_* flags indicating how to handle the query. + * @throws SQLException if query execution fails + */ + void execute(Query query, ParameterList parameters, ResultHandler handler, int maxRows, + int fetchSize, int flags) throws SQLException; + + /** + * Execute a Query with adaptive fetch, passing results to a provided ResultHandler. + * + * @param query the query to execute; must be a query returned from calling + * {@link #wrap(List)} on this QueryExecutor object. + * @param parameters the parameters for the query. Must be non-null if the query + * takes parameters. Must be a parameter object returned by + * {@link org.postgresql.core.Query#createParameterList()}. 
+ * @param handler a ResultHandler responsible for handling results generated by this query + * @param maxRows the maximum number of rows to retrieve + * @param fetchSize if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve + * before suspending + * @param flags a combination of QUERY_* flags indicating how to handle the query. + * @param adaptiveFetch state of adaptiveFetch to use during execution + * @throws SQLException if query execution fails + */ + void execute(Query query, ParameterList parameters, ResultHandler handler, int maxRows, + int fetchSize, int flags, boolean adaptiveFetch) throws SQLException; + + /** + * Execute several Query, passing results to a provided ResultHandler. + * + * @param queries the queries to execute; each must be a query returned from calling + * {@link #wrap(List)} on this QueryExecutor object. + * @param parameterLists the parameter lists for the queries. The parameter lists correspond 1:1 + * to the queries passed in the queries array. Each must be non- + * null if the corresponding query takes parameters, and must be a parameter + * object returned by {@link Query#createParameterList()} created by + * the corresponding query. + * @param handler a ResultHandler responsible for handling results generated by this query + * @param maxRows the maximum number of rows to retrieve + * @param fetchSize if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve + * before suspending + * @param flags a combination of QUERY_* flags indicating how to handle the query. + * @throws SQLException if query execution fails + */ + void execute(Query[] queries, ParameterList[] parameterLists, + BatchResultHandler handler, int maxRows, + int fetchSize, int flags) throws SQLException; + + /** + * Execute several Query with adaptive fetch, passing results to a provided ResultHandler. 
+ * + * @param queries the queries to execute; each must be a query returned from calling + * {@link #wrap(List)} on this QueryExecutor object. + * @param parameterLists the parameter lists for the queries. The parameter lists correspond 1:1 + * to the queries passed in the queries array. Each must be non- + * null if the corresponding query takes parameters, and must be a parameter + * object returned by {@link Query#createParameterList()} created by + * the corresponding query. + * @param handler a ResultHandler responsible for handling results generated by this query + * @param maxRows the maximum number of rows to retrieve + * @param fetchSize if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve + * before suspending + * @param flags a combination of QUERY_* flags indicating how to handle the query. + * @param adaptiveFetch state of adaptiveFetch to use during execution + * @throws SQLException if query execution fails + */ + void execute(Query[] queries, ParameterList[] parameterLists, + BatchResultHandler handler, int maxRows, + int fetchSize, int flags, boolean adaptiveFetch) throws SQLException; + + /** + * Fetch additional rows from a cursor. + * + * @param cursor the cursor to fetch from + * @param handler the handler to feed results to + * @param fetchSize the preferred number of rows to retrieve before suspending + * @param adaptiveFetch state of adaptiveFetch to use during fetching + * @throws SQLException if query execution fails + */ + void fetch(ResultCursor cursor, ResultHandler handler, int fetchSize, boolean adaptiveFetch) throws SQLException; + + /** + * Create an unparameterized Query object suitable for execution by this QueryExecutor. The + * provided query string is not parsed for parameter placeholders ('?' characters), and the + * {@link Query#createParameterList} of the returned object will always return an empty + * ParameterList. 
+ * + * @param sql the SQL for the query to create + * @return a new Query object + * @throws SQLException if something goes wrong + */ + Query createSimpleQuery(String sql) throws SQLException; + + boolean isReWriteBatchedInsertsEnabled(); + + CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized, + String ... columnNames) + throws SQLException; + + Object createQueryKey(String sql, boolean escapeProcessing, boolean isParameterized, + String ... columnNames); + + CachedQuery createQueryByKey(Object key) throws SQLException; + + CachedQuery borrowQueryByKey(Object key) throws SQLException; + + CachedQuery borrowQuery(String sql) throws SQLException; + + CachedQuery borrowCallableQuery(String sql) throws SQLException; + + CachedQuery borrowReturningQuery(String sql, String [] columnNames) throws SQLException; + + void releaseQuery(CachedQuery cachedQuery); + + /** + * Wrap given native query into a ready for execution format. + * @param queries list of queries in native to database syntax + * @return query object ready for execution by this query executor + */ + Query wrap(List queries); + + /** + * Prior to attempting to retrieve notifications, we need to pull any recently received + * notifications off of the network buffers. The notification retrieval in ProtocolConnection + * cannot do this as it is prone to deadlock, so the higher level caller must be responsible which + * requires exposing this method. + * + * @throws SQLException if an error occurs while fetching notifications + */ + void processNotifies() throws SQLException; + + /** + * Prior to attempting to retrieve notifications, we need to pull any recently received + * notifications off of the network buffers. The notification retrieval in ProtocolConnection + * cannot do this as it is prone to deadlock, so the higher level caller must be responsible which + * requires exposing this method. This variant supports blocking for the given time in millis. 
+ * + * @param timeoutMillis number of milliseconds to block for + * @throws SQLException if an error occurs while fetching notifications + */ + void processNotifies(int timeoutMillis) throws SQLException; + + // + // Fastpath interface. + // + + /** + * Create a new ParameterList implementation suitable for invoking a fastpath function via + * {@link #fastpathCall}. + * + * @param count the number of parameters the fastpath call will take + * @return a ParameterList suitable for passing to {@link #fastpathCall}. + * @deprecated This API is somewhat obsolete, as one may achieve similar performance + * and greater functionality by setting up a prepared statement to define + * the function call. Then, executing the statement with binary transmission of parameters + * and results substitutes for a fast-path function call. + */ + @Deprecated + ParameterList createFastpathParameters(int count); + + /** + * Invoke a backend function via the fastpath interface. + * + * @param fnid the OID of the backend function to invoke + * @param params a ParameterList returned from {@link #createFastpathParameters} containing the + * parameters to pass to the backend function + * @param suppressBegin if begin should be suppressed + * @return the binary-format result of the fastpath call, or null if a void result + * was returned + * @throws SQLException if an error occurs while executing the fastpath call + * @deprecated This API is somewhat obsolete, as one may achieve similar performance + * and greater functionality by setting up a prepared statement to define + * the function call. Then, executing the statement with binary transmission of parameters + * and results substitutes for a fast-path function call. + */ + @Deprecated + byte [] fastpathCall(int fnid, ParameterList params, boolean suppressBegin) + throws SQLException; + + /** + * Issues a COPY FROM STDIN / COPY TO STDOUT statement and returns handler for associated + * operation. 
Until the copy operation completes, no other database operation may be performed. + * Implemented for protocol version 3 only. + * + * @param sql input sql + * @param suppressBegin if begin should be suppressed + * @return handler for associated operation + * @throws SQLException when initializing the given query fails + */ + CopyOperation startCopy(String sql, boolean suppressBegin) throws SQLException; + + /** + * @return the version of the implementation + */ + int getProtocolVersion(); + + /** + * Adds a single oid that should be received using binary encoding. + * + * @param oid The oid to request with binary encoding. + */ + void addBinaryReceiveOid(int oid); + + /** + * Remove given oid from the list of oids for binary receive encoding. + *

Note: the binary receive for the oid can be re-activated later.

+ * + * @param oid The oid to request with binary encoding. + */ + void removeBinaryReceiveOid(int oid); + + /** + * Gets the oids that should be received using binary encoding. + *

Note: this returns an unmodifiable set, and its contents might not reflect the current state.

+ * + * @return The oids to request with binary encoding. + * @deprecated the method returns a copy of the set, so it is not efficient. Use {@link #useBinaryForReceive(int)} + */ + @Deprecated + Set getBinaryReceiveOids(); + + /** + * Sets the oids that should be received using binary encoding. + * + * @param useBinaryForOids The oids to request with binary encoding. + */ + void setBinaryReceiveOids(Set useBinaryForOids); + + /** + * Adds a single oid that should be sent using binary encoding. + * + * @param oid The oid to send with binary encoding. + */ + void addBinarySendOid(int oid); + + /** + * Remove given oid from the list of oids for binary send encoding. + *

Note: the binary send for the oid can be re-activated later.

+ * + * @param oid The oid to send with binary encoding. + */ + void removeBinarySendOid(int oid); + + /** + * Gets the oids that should be sent using binary encoding. + *

Note: this returns an unmodifiable set, and its contents might not reflect the current state.

+ * + * @return useBinaryForOids The oids to send with binary encoding. + * @deprecated the method returns a copy of the set, so it is not efficient. Use {@link #useBinaryForSend(int)} + */ + @Deprecated + Set getBinarySendOids(); + + /** + * Sets the oids that should be sent using binary encoding. + * + * @param useBinaryForOids The oids to send with binary encoding. + */ + void setBinarySendOids(Set useBinaryForOids); + + /** + * Returns true if server uses integer instead of double for binary date and time encodings. + * + * @return the server integer_datetime setting. + */ + boolean getIntegerDateTimes(); + + /** + * @return the host and port this connection is connected to. + */ + HostSpec getHostSpec(); + + /** + * @return the user this connection authenticated as. + */ + String getUser(); + + /** + * @return the database this connection is connected to. + */ + String getDatabase(); + + /** + * Sends a query cancellation for this connection. + * + * @throws SQLException if something goes wrong. + */ + void sendQueryCancel() throws SQLException; + + /** + * Return the process ID (PID) of the backend server process handling this connection. + * + * @return process ID (PID) of the backend server process handling this connection + */ + int getBackendPID(); + + /** + * Abort at network level without sending the Terminate message to the backend. + */ + void abort(); + + /** + * Close this connection cleanly. + */ + void close(); + + /** + * Returns an action that would close the connection cleanly. + * The returned object should refer only the minimum subset of objects required + * for proper resource cleanup. For instance, it should better not hold a strong reference to + * {@link QueryExecutor}. + * @return action that would close the connection cleanly. + */ + Closeable getCloseAction(); + + /** + * Check if this connection is closed. + * + * @return true iff the connection is closed. + */ + boolean isClosed(); + + /** + *

Return the server version from the server_version GUC.

+ * + *

Note that there's no requirement for this to be numeric or of the form x.y.z. PostgreSQL + * development releases usually have the format x.ydevel e.g. 9.4devel; betas usually x.ybetan + * e.g. 9.4beta1. The --with-extra-version configure option may add an arbitrary string to this.

+ * + *

Don't use this string for logic, only use it when displaying the server version to the user. + * Prefer getServerVersionNum() for all logic purposes.

+ * + * @return the server version string from the server_version GUC + */ + String getServerVersion(); + + /** + * Retrieve and clear the set of asynchronous notifications pending on this connection. + * + * @return an array of notifications; if there are no notifications, an empty array is returned. + * @throws SQLException if an error occurs while fetching notifications + */ + PGNotification[] getNotifications() throws SQLException; + + /** + * Retrieve and clear the chain of warnings accumulated on this connection. + * + * @return the first SQLWarning in the chain; subsequent warnings can be found via + * SQLWarning.getNextWarning(). + */ + SQLWarning getWarnings(); + + /** + *

Get a machine-readable server version.

+ * + *

This returns the value of the server_version_num GUC. If no such GUC exists, it falls back on + * attempting to parse the text server version for the major version. If there's no minor version + * (e.g. a devel or beta release) then the minor version is set to zero. If the version could not + * be parsed, zero is returned.

+ * + * @return the server version in numeric XXYYZZ form, eg 090401, from server_version_num + */ + int getServerVersionNum(); + + /** + * Get the current transaction state of this connection. + * + * @return a ProtocolConnection.TRANSACTION_* constant. + */ + TransactionState getTransactionState(); + + /** + * Returns whether the server treats string-literals according to the SQL standard or if it uses + * traditional PostgreSQL escaping rules. Versions up to 8.1 always treated backslashes as escape + * characters in string-literals. Since 8.2, this depends on the value of the + * {@code standard_conforming_strings} server variable. + * + * @return true if the server treats string literals according to the SQL standard + */ + boolean getStandardConformingStrings(); + + /** + * + * @return true if we are going to quote identifier provided in the returning array default is true + */ + boolean getQuoteReturningIdentifiers(); + + /** + * Returns backend timezone in java format. + * @return backend timezone in java format. + */ + TimeZone getTimeZone(); + + /** + * @return the current encoding in use by this connection + */ + Encoding getEncoding(); + + /** + * Returns application_name connection property. + * @return application_name connection property + */ + String getApplicationName(); + + boolean isColumnSanitiserDisabled(); + + EscapeSyntaxCallMode getEscapeSyntaxCallMode(); + + PreferQueryMode getPreferQueryMode(); + + void setPreferQueryMode(PreferQueryMode mode); + + AutoSave getAutoSave(); + + void setAutoSave(AutoSave autoSave); + + boolean willHealOnRetry(SQLException e); + + /** + * By default, the connection resets statement cache in case deallocate all/discard all + * message is observed. + * This API allows to disable that feature for testing purposes. 
+ * + * @param flushCacheOnDeallocate true if statement cache should be reset when "deallocate/discard" message observed + */ + void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate); + + /** + * @return the ReplicationProtocol instance for this connection. + */ + ReplicationProtocol getReplicationProtocol(); + + void setNetworkTimeout(int milliseconds) throws IOException; + + int getNetworkTimeout() throws IOException; + + // Expose parameter status to PGConnection + Map getParameterStatuses(); + + String getParameterStatus(String parameterName); + + /** + * Get fetch size computed by adaptive fetch size for given query. + * + * @param adaptiveFetch state of adaptive fetch, which should be used during retrieving + * @param cursor Cursor used by resultSet, containing query, have to be able to cast to + * Portal class. + * @return fetch size computed by adaptive fetch size for given query passed inside cursor + */ + int getAdaptiveFetchSize(boolean adaptiveFetch, ResultCursor cursor); + + /** + * Get state of adaptive fetch inside QueryExecutor. + * + * @return state of adaptive fetch inside QueryExecutor + */ + boolean getAdaptiveFetch(); + + /** + * Set state of adaptive fetch inside QueryExecutor. + * + * @param adaptiveFetch desired state of adaptive fetch + */ + void setAdaptiveFetch(boolean adaptiveFetch); + + /** + * Add query to adaptive fetch cache inside QueryExecutor. + * + * @param adaptiveFetch state of adaptive fetch used during adding query + * @param cursor Cursor used by resultSet, containing query, have to be able to cast to + * Portal class. + */ + void addQueryToAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor); + + /** + * Remove query from adaptive fetch cache inside QueryExecutor + * + * @param adaptiveFetch state of adaptive fetch used during removing query + * @param cursor Cursor used by resultSet, containing query, have to be able to cast to + * Portal class. 
+ */ + void removeQueryFromAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorBase.java b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorBase.java new file mode 100644 index 0000000..bafc8f1 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorBase.java @@ -0,0 +1,499 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import org.postgresql.PGNotification; +import org.postgresql.PGProperty; +import org.postgresql.jdbc.AutoSave; +import org.postgresql.jdbc.EscapeSyntaxCallMode; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.jdbc.ResourceLock; +import org.postgresql.util.HostSpec; +import org.postgresql.util.LruCache; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.util.ServerErrorMessage; + +import java.io.Closeable; +import java.io.IOException; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Map; +import java.util.Properties; +import java.util.TreeMap; +import java.util.concurrent.locks.Condition; +import java.util.logging.Level; +import java.util.logging.Logger; + +@SuppressWarnings("try") +public abstract class QueryExecutorBase implements QueryExecutor { + + private static final Logger LOGGER = Logger.getLogger(QueryExecutorBase.class.getName()); + protected final PGStream pgStream; + private final String user; + private final String database; + private final int cancelSignalTimeout; + + private int cancelPid; + private int cancelKey; + protected final QueryExecutorCloseAction closeAction; + private String serverVersion; + private int serverVersionNum; + private TransactionState transactionState = TransactionState.IDLE; + private final boolean reWriteBatchedInserts; + 
private final boolean columnSanitiserDisabled; + private final EscapeSyntaxCallMode escapeSyntaxCallMode; + private final boolean quoteReturningIdentifiers; + private PreferQueryMode preferQueryMode; + private AutoSave autoSave; + private boolean flushCacheOnDeallocate = true; + protected final boolean logServerErrorDetail; + + // default value for server versions that don't report standard_conforming_strings + private boolean standardConformingStrings; + + private SQLWarning warnings; + private final ArrayList notifications = new ArrayList<>(); + + private final LruCache statementCache; + private final CachedQueryCreateAction cachedQueryCreateAction; + + // For getParameterStatuses(), GUC_REPORT tracking + private final TreeMap parameterStatuses + = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + + protected final ResourceLock lock = new ResourceLock(); + protected final Condition lockCondition = lock.newCondition(); + + @SuppressWarnings("this-escape") + protected QueryExecutorBase(PGStream pgStream, int cancelSignalTimeout, Properties info) throws SQLException { + this.pgStream = pgStream; + this.user = PGProperty.USER.getOrDefault(info); + this.database = PGProperty.PG_DBNAME.getOrDefault(info); + this.cancelSignalTimeout = cancelSignalTimeout; + this.reWriteBatchedInserts = PGProperty.REWRITE_BATCHED_INSERTS.getBoolean(info); + this.columnSanitiserDisabled = PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(info); + String callMode = PGProperty.ESCAPE_SYNTAX_CALL_MODE.getOrDefault(info); + this.escapeSyntaxCallMode = EscapeSyntaxCallMode.of(callMode); + this.quoteReturningIdentifiers = PGProperty.QUOTE_RETURNING_IDENTIFIERS.getBoolean(info); + String preferMode = PGProperty.PREFER_QUERY_MODE.getOrDefault(info); + this.preferQueryMode = PreferQueryMode.of(preferMode); + this.autoSave = AutoSave.of(PGProperty.AUTOSAVE.getOrDefault(info)); + this.logServerErrorDetail = PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info); + // assignment, argument + 
this.cachedQueryCreateAction = new CachedQueryCreateAction(this); + statementCache = new LruCache<>( + Math.max(0, PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.getInt(info)), + Math.max(0, PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.getInt(info) * 1024L * 1024L), + false, + cachedQueryCreateAction, + new LruCache.EvictAction() { + @Override + public void evict(CachedQuery cachedQuery) throws SQLException { + cachedQuery.query.close(); + } + }); + this.closeAction = createCloseAction(); + } + + protected QueryExecutorCloseAction createCloseAction() { + return new QueryExecutorCloseAction(pgStream); + } + + /** + * Sends "terminate connection" message to the backend. + * @throws IOException in case connection termination fails + * @deprecated use {@link #getCloseAction()} instead + */ + @Deprecated + protected abstract void sendCloseMessage() throws IOException; + + @Override + public void setNetworkTimeout(int milliseconds) throws IOException { + pgStream.setNetworkTimeout(milliseconds); + } + + @Override + public int getNetworkTimeout() throws IOException { + return pgStream.getNetworkTimeout(); + } + + @Override + public HostSpec getHostSpec() { + return pgStream.getHostSpec(); + } + + @Override + public String getUser() { + return user; + } + + @Override + public String getDatabase() { + return database; + } + + public void setBackendKeyData(int cancelPid, int cancelKey) { + this.cancelPid = cancelPid; + this.cancelKey = cancelKey; + } + + @Override + public int getBackendPID() { + return cancelPid; + } + + @Override + public void abort() { + closeAction.abort(); + } + + @Override + public Closeable getCloseAction() { + return closeAction; + } + + @Override + public void close() { + if (closeAction.isClosed()) { + return; + } + + try { + getCloseAction().close(); + } catch (IOException ioe) { + LOGGER.log(Level.FINEST, "Discarding IOException on close:", ioe); + } + } + + @Override + public boolean isClosed() { + return closeAction.isClosed(); + } + + @Override + 
public void sendQueryCancel() throws SQLException { + + PGStream cancelStream = null; + + // Now we need to construct and send a cancel packet + try { + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " FE=> CancelRequest(pid={0},ckey={1})", new Object[]{cancelPid, cancelKey}); + } + + cancelStream = + new PGStream(pgStream.getSocketFactory(), pgStream.getHostSpec(), cancelSignalTimeout); + if (cancelSignalTimeout > 0) { + cancelStream.setNetworkTimeout(cancelSignalTimeout); + } + cancelStream.sendInteger4(16); + cancelStream.sendInteger2(1234); + cancelStream.sendInteger2(5678); + cancelStream.sendInteger4(cancelPid); + cancelStream.sendInteger4(cancelKey); + cancelStream.flush(); + cancelStream.receiveEOF(); + } catch (IOException e) { + // Safe to ignore. + LOGGER.log(Level.FINEST, "Ignoring exception on cancel request:", e); + } finally { + if (cancelStream != null) { + try { + cancelStream.close(); + } catch (IOException e) { + // Ignored. + } + } + } + } + + public void addWarning(SQLWarning newWarning) { + try (ResourceLock ignore = lock.obtain()) { + if (warnings == null) { + warnings = newWarning; + } else { + warnings.setNextWarning(newWarning); + } + } + } + + public void addNotification(PGNotification notification) { + try (ResourceLock ignore = lock.obtain()) { + notifications.add(notification); + } + } + + @Override + public PGNotification[] getNotifications() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + PGNotification[] array = notifications.toArray(new PGNotification[0]); + notifications.clear(); + return array; + } + } + + @Override + public SQLWarning getWarnings() { + try (ResourceLock ignore = lock.obtain()) { + SQLWarning chain = warnings; + warnings = null; + return chain; + } + } + + @Override + public String getServerVersion() { + String serverVersion = this.serverVersion; + if (serverVersion == null) { + throw new IllegalStateException("serverVersion must not be null"); + } + return serverVersion; 
+ } + + @SuppressWarnings("deprecation") + @Override + public int getServerVersionNum() { + if (serverVersionNum != 0) { + return serverVersionNum; + } + return serverVersionNum = Utils.parseServerVersionStr(getServerVersion()); + } + + public void setServerVersion(String serverVersion) { + this.serverVersion = serverVersion; + } + + public void setServerVersionNum(int serverVersionNum) { + this.serverVersionNum = serverVersionNum; + } + + public void setTransactionState(TransactionState state) { + try (ResourceLock ignore = lock.obtain()) { + transactionState = state; + } + } + + public void setStandardConformingStrings(boolean value) { + try (ResourceLock ignore = lock.obtain()) { + standardConformingStrings = value; + } + } + + @Override + public boolean getStandardConformingStrings() { + try (ResourceLock ignore = lock.obtain()) { + return standardConformingStrings; + } + } + + @Override + public boolean getQuoteReturningIdentifiers() { + return quoteReturningIdentifiers; + } + + @Override + public TransactionState getTransactionState() { + try (ResourceLock ignore = lock.obtain()) { + return transactionState; + } + } + + public void setEncoding(Encoding encoding) throws IOException { + pgStream.setEncoding(encoding); + } + + @Override + public Encoding getEncoding() { + return pgStream.getEncoding(); + } + + @Override + public boolean isReWriteBatchedInsertsEnabled() { + return this.reWriteBatchedInserts; + } + + @Override + public final CachedQuery borrowQuery(String sql) throws SQLException { + return statementCache.borrow(sql); + } + + @Override + public final CachedQuery borrowCallableQuery(String sql) throws SQLException { + return statementCache.borrow(new CallableQueryKey(sql)); + } + + @Override + public final CachedQuery borrowReturningQuery(String sql, String [] columnNames) + throws SQLException { + return statementCache.borrow(new QueryWithReturningColumnsKey(sql, true, true, + columnNames + )); + } + + @Override + public CachedQuery 
borrowQueryByKey(Object key) throws SQLException { + return statementCache.borrow(key); + } + + @Override + public void releaseQuery(CachedQuery cachedQuery) { + statementCache.put(cachedQuery.key, cachedQuery); + } + + @Override + public final Object createQueryKey(String sql, boolean escapeProcessing, + boolean isParameterized, String ... columnNames) { + Object key; + if (columnNames == null || columnNames.length != 0) { + // Null means "return whatever sensible columns are" (e.g. primary key, or serial, or something like that) + key = new QueryWithReturningColumnsKey(sql, isParameterized, escapeProcessing, columnNames); + } else if (isParameterized) { + // If no generated columns requested, just use the SQL as a cache key + key = sql; + } else { + key = new BaseQueryKey(sql, false, escapeProcessing); + } + return key; + } + + @Override + public CachedQuery createQueryByKey(Object key) throws SQLException { + return cachedQueryCreateAction.create(key); + } + + @Override + public final CachedQuery createQuery(String sql, boolean escapeProcessing, + boolean isParameterized, String ... 
columnNames) + throws SQLException { + Object key = createQueryKey(sql, escapeProcessing, isParameterized, columnNames); + // Note: cache is not reused here for two reasons: + // 1) Simplify initial implementation for simple statements + // 2) Non-prepared statements are likely to have literals, thus query reuse would not be often + return createQueryByKey(key); + } + + @Override + public boolean isColumnSanitiserDisabled() { + return columnSanitiserDisabled; + } + + @Override + public EscapeSyntaxCallMode getEscapeSyntaxCallMode() { + return escapeSyntaxCallMode; + } + + @Override + public PreferQueryMode getPreferQueryMode() { + return preferQueryMode; + } + + public void setPreferQueryMode(PreferQueryMode mode) { + preferQueryMode = mode; + } + + @Override + public AutoSave getAutoSave() { + return autoSave; + } + + @Override + public void setAutoSave(AutoSave autoSave) { + this.autoSave = autoSave; + } + + protected boolean willHealViaReparse(SQLException e) { + if (e == null || e.getSQLState() == null) { + return false; + } + + // "prepared statement \"S_2\" does not exist" + if (PSQLState.INVALID_SQL_STATEMENT_NAME.getState().equals(e.getSQLState())) { + return true; + } + if (!PSQLState.NOT_IMPLEMENTED.getState().equals(e.getSQLState())) { + return false; + } + + if (!(e instanceof PSQLException)) { + return false; + } + + PSQLException pe = (PSQLException) e; + + ServerErrorMessage serverErrorMessage = pe.getServerErrorMessage(); + if (serverErrorMessage == null) { + return false; + } + // "cached plan must not change result type" + String routine = serverErrorMessage.getRoutine(); + return "RevalidateCachedQuery".equals(routine) // 9.2+ + || "RevalidateCachedPlan".equals(routine); // <= 9.1 + } + + @Override + public boolean willHealOnRetry(SQLException e) { + if (autoSave == AutoSave.NEVER && getTransactionState() == TransactionState.FAILED) { + // If autorollback is not activated, then every statement will fail with + // 'transaction is aborted', etc, 
etc + return false; + } + return willHealViaReparse(e); + } + + public boolean isFlushCacheOnDeallocate() { + return flushCacheOnDeallocate; + } + + @Override + public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) { + this.flushCacheOnDeallocate = flushCacheOnDeallocate; + } + + protected boolean hasNotifications() { + return !notifications.isEmpty(); + } + + @Override + public final Map getParameterStatuses() { + return Collections.unmodifiableMap(parameterStatuses); + } + + @Override + public final String getParameterStatus(String parameterName) { + return parameterStatuses.get(parameterName); + } + + /** + * Update the parameter status map in response to a new ParameterStatus + * wire protocol message. + * + *

The server sends ParameterStatus messages when GUC_REPORT settings are + * initially assigned and whenever they change.

+ * + *

A future version may invoke a client-defined listener class at this point, + * so this should be the only access path.

+ * + *

Keys are case-insensitive and case-preserving.

+ * + *

The server doesn't provide a way to report deletion of a reportable + * parameter so we don't expose one here.

+ * + * @param parameterName case-insensitive case-preserving name of parameter to create or update + * @param parameterStatus new value of parameter + * @see org.postgresql.PGConnection#getParameterStatuses + * @see org.postgresql.PGConnection#getParameterStatus + */ + protected void onParameterStatus(String parameterName, String parameterStatus) { + if (parameterName == null || "".equals(parameterName)) { + throw new IllegalStateException("attempt to set GUC_REPORT parameter with null or empty-string name"); + } + + parameterStatuses.put(parameterName, parameterStatus); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorCloseAction.java b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorCloseAction.java new file mode 100644 index 0000000..d28eac9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorCloseAction.java @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2023, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * The action performs connection cleanup, so it is properly terminated from the backend + * point of view. + * Implementation note: it should keep only the minimum number of object references + * to reduce heap usage in case the user abandons connection without closing it first. 
+ */ +public class QueryExecutorCloseAction implements Closeable { + private static final Logger LOGGER = Logger.getLogger(QueryExecutorBase.class.getName()); + + @SuppressWarnings("cast") + private static final AtomicReferenceFieldUpdater PG_STREAM_UPDATER = + AtomicReferenceFieldUpdater.newUpdater( + QueryExecutorCloseAction.class, (Class) PGStream.class, "pgStream"); + + private volatile PGStream pgStream; + + public QueryExecutorCloseAction(PGStream pgStream) { + this.pgStream = pgStream; + } + + public boolean isClosed() { + PGStream pgStream = this.pgStream; + return pgStream == null || pgStream.isClosed(); + } + + public void abort() { + PGStream pgStream = this.pgStream; + if (pgStream == null || !PG_STREAM_UPDATER.compareAndSet(this, pgStream, null)) { + // The connection has already been closed + return; + } + try { + LOGGER.log(Level.FINEST, " FE=> close socket"); + pgStream.getSocket().close(); + } catch (IOException e) { + // ignore + } + } + + @Override + public void close() throws IOException { + LOGGER.log(Level.FINEST, " FE=> Terminate"); + PGStream pgStream = this.pgStream; + if (pgStream == null || !PG_STREAM_UPDATER.compareAndSet(this, pgStream, null)) { + // The connection has already been closed + return; + } + sendCloseMessage(pgStream); + + // Technically speaking, this check should not be needed, + // however org.postgresql.test.jdbc2.ConnectionTest.testPGStreamSettings + // closes pgStream reflectively, so here's an extra check to prevent failures + // when getNetworkTimeout is called on a closed stream + if (pgStream.isClosed()) { + return; + } + pgStream.flush(); + pgStream.close(); + } + + public void sendCloseMessage(PGStream pgStream) throws IOException { + // Technically speaking, this check should not be needed, + // however org.postgresql.test.jdbc2.ConnectionTest.testPGStreamSettings + // closes pgStream reflectively, so here's an extra check to prevent failures + // when getNetworkTimeout is called on a closed stream + if 
(pgStream.isClosed()) { + return; + } + // Prevent blocking the thread for too long + // The connection will be discarded anyway, so there's no much sense in waiting long + int timeout = pgStream.getNetworkTimeout(); + if (timeout == 0 || timeout > 1000) { + pgStream.setNetworkTimeout(1000); + } + pgStream.sendChar('X'); + pgStream.sendInteger4(4); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/QueryWithReturningColumnsKey.java b/pgjdbc/src/main/java/org/postgresql/core/QueryWithReturningColumnsKey.java new file mode 100644 index 0000000..0b18c0e --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/QueryWithReturningColumnsKey.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import java.util.Arrays; + +/** + * Cache key for a query that have some returning columns. + * {@code columnNames} should contain non-quoted column names. + * The parser will quote them automatically. + *

There's a special case of {@code columnNames == new String[]{"*"}} that means all columns + * should be returned. {@link Parser} is aware of that and does not quote {@code *}

+ */ +class QueryWithReturningColumnsKey extends BaseQueryKey { + public final String[] columnNames; + private int size; // query length cannot exceed MAX_INT + + QueryWithReturningColumnsKey(String sql, boolean isParameterized, boolean escapeProcessing, + String [] columnNames) { + super(sql, isParameterized, escapeProcessing); + if (columnNames == null) { + // TODO: teach parser to fetch key columns somehow when no column names were given + columnNames = new String[]{"*"}; + } + this.columnNames = columnNames; + } + + @Override + public long getSize() { + int size = this.size; + if (size != 0) { + return size; + } + size = (int) super.getSize(); + if (columnNames != null) { + size += 16; // array itself + for (String columnName: columnNames) { + size += columnName.length() * 2; // 2 bytes per char, revise with Java 9's compact strings + } + } + this.size = size; + return size; + } + + @Override + public String toString() { + return "QueryWithReturningColumnsKey{" + + "sql='" + sql + '\'' + + ", isParameterized=" + isParameterized + + ", escapeProcessing=" + escapeProcessing + + ", columnNames=" + Arrays.toString(columnNames) + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + QueryWithReturningColumnsKey that = (QueryWithReturningColumnsKey) o; + + // Probably incorrect - comparing Object[] arrays with Arrays.equals + return Arrays.equals(columnNames, that.columnNames); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Arrays.hashCode(columnNames); + return result; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/ReplicationProtocol.java b/pgjdbc/src/main/java/org/postgresql/core/ReplicationProtocol.java new file mode 100644 index 0000000..8c67c06 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/ReplicationProtocol.java @@ 
-0,0 +1,34 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import org.postgresql.replication.PGReplicationStream; +import org.postgresql.replication.fluent.logical.LogicalReplicationOptions; +import org.postgresql.replication.fluent.physical.PhysicalReplicationOptions; + +import java.sql.SQLException; + +/** + *

Abstracts the protocol-specific details of physical and logical replication.

+ * + *

Each connection opened with replication options has its own associated ReplicationProtocol instance.

+ */ +public interface ReplicationProtocol { + /** + * @param options not null options for logical replication stream + * @return not null stream instance from which available fetch wal logs that was decode by output + * plugin + * @throws SQLException on error + */ + PGReplicationStream startLogical(LogicalReplicationOptions options) throws SQLException; + + /** + * @param options not null options for physical replication stream + * @return not null stream instance from which available fetch wal logs + * @throws SQLException on error + */ + PGReplicationStream startPhysical(PhysicalReplicationOptions options) throws SQLException; +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/ResultCursor.java b/pgjdbc/src/main/java/org/postgresql/core/ResultCursor.java new file mode 100644 index 0000000..7bd88c6 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/ResultCursor.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core; + +/** + * Abstraction of a cursor over a returned resultset. This is an opaque interface that only provides + * a way to close the cursor; all other operations are done by passing a ResultCursor to + * QueryExecutor methods. + * + * @author Oliver Jowett (oliver@opencloud.com) + */ +public interface ResultCursor { + /** + * Close this cursor. This may not immediately free underlying resources but may make it happen + * more promptly. Closed cursors should not be passed to QueryExecutor methods. 
+ */ + void close(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/ResultHandler.java b/pgjdbc/src/main/java/org/postgresql/core/ResultHandler.java new file mode 100644 index 0000000..c462bf1 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/ResultHandler.java @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core; + +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.util.List; + +/** + *

Callback interface for passing query results from the protocol-specific layer to the + * protocol-independent JDBC implementation code.

+ * + *

In general, a single query execution will consist of a number of calls to handleResultRows, + * handleCommandStatus, handleWarning, and handleError, followed by a single call to + * handleCompletion when query execution is complete. If the caller wants to throw SQLException, + * this can be done in handleCompletion.

+ * + *

Each executed query ends with a call to handleResultRows, handleCommandStatus, or handleError. If + * an error occurs, subsequent queries won't generate callbacks.

+ * + * @author Oliver Jowett (oliver@opencloud.com) + */ +public interface ResultHandler { + /** + * Called when result rows are received from a query. + * + * @param fromQuery the underlying query that generated these results; this may not be very + * specific (e.g. it may be a query that includes multiple statements). + * @param fields column metadata for the resultset; might be null if + * Query.QUERY_NO_METADATA was specified. + * @param tuples the actual data + * @param cursor a cursor to use to fetch additional data; null if no further results + * are present. + */ + void handleResultRows(Query fromQuery, Field[] fields, List tuples, + ResultCursor cursor); + + /** + * Called when a query that did not return a resultset completes. + * + * @param status the command status string (e.g. "SELECT") returned by the backend + * @param updateCount the number of rows affected by an INSERT, UPDATE, DELETE, FETCH, or MOVE + * command; -1 if not available. + * @param insertOID for a single-row INSERT query, the OID of the newly inserted row; 0 if not + * available. + */ + void handleCommandStatus(String status, long updateCount, long insertOID); + + /** + * Called when a warning is emitted. + * + * @param warning the warning that occurred. + */ + void handleWarning(SQLWarning warning); + + /** + * Called when an error occurs. Subsequent queries are abandoned; in general the only calls + * between a handleError call and a subsequent handleCompletion call are handleError or + * handleWarning. + * + * @param error the error that occurred + */ + void handleError(SQLException error); + + /** + * Called before a QueryExecutor method returns. This method may throw a SQLException if desired; + * if it does, the QueryExecutor method will propagate that exception to the original caller. + * + * @throws SQLException if the handler wishes the original method to throw an exception. + */ + void handleCompletion() throws SQLException; + + /** + * Callback for batch statements. 
In case batch statement is executed in autocommit==true mode, + * the executor might commit "as it this it is best", so the result handler should track which + * statements are executed successfully and which are not. + */ + void secureProgress(); + + /** + * Returns the first encountered exception. The rest are chained via {@link SQLException#setNextException(SQLException)} + * @return the first encountered exception + */ + SQLException getException(); + + /** + * Returns the first encountered warning. The rest are chained via {@link SQLException#setNextException(SQLException)} + * @return the first encountered warning + */ + SQLWarning getWarning(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerBase.java b/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerBase.java new file mode 100644 index 0000000..9caf01a --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerBase.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core; + +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.util.List; + +/** + * Empty implementation of {@link ResultHandler} interface. + * {@link SQLException#setNextException(SQLException)} has {@code O(N)} complexity, + * so this class tracks the last exception object to speedup {@code setNextException}. + */ +public class ResultHandlerBase implements ResultHandler { + // Last exception is tracked to avoid O(N) SQLException#setNextException just in case there + // will be lots of exceptions (e.g. 
all batch rows fail with constraint violation or so) + private SQLException firstException; + private SQLException lastException; + + private SQLWarning firstWarning; + private SQLWarning lastWarning; + + public ResultHandlerBase() { + } + + @Override + public void handleResultRows(Query fromQuery, Field[] fields, List tuples, + ResultCursor cursor) { + } + + @Override + public void handleCommandStatus(String status, long updateCount, long insertOID) { + } + + @Override + public void secureProgress() { + } + + @Override + public void handleWarning(SQLWarning warning) { + if (firstWarning == null) { + firstWarning = lastWarning = warning; + return; + } + SQLWarning lastWarning = this.lastWarning; + lastWarning.setNextException(warning); + this.lastWarning = warning; + } + + @Override + public void handleError(SQLException error) { + if (firstException == null) { + firstException = lastException = error; + return; + } + lastException.setNextException(error); + this.lastException = error; + } + + @Override + public void handleCompletion() throws SQLException { + SQLException firstException = this.firstException; + if (firstException != null) { + throw firstException; + } + } + + @Override + public SQLException getException() { + return firstException; + } + + @Override + public SQLWarning getWarning() { + return firstWarning; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerDelegate.java b/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerDelegate.java new file mode 100644 index 0000000..456ce1e --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerDelegate.java @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.util.List; + +/** + * Internal to the driver class, please do not use in the application. + * + *

The class simplifies creation of ResultHandler delegates: it provides a default implementation + * for the interface methods

+ */ +public class ResultHandlerDelegate implements ResultHandler { + private final ResultHandler delegate; + + public ResultHandlerDelegate(ResultHandler delegate) { + this.delegate = delegate; + } + + @Override + public void handleResultRows(Query fromQuery, Field[] fields, List tuples, + ResultCursor cursor) { + if (delegate != null) { + delegate.handleResultRows(fromQuery, fields, tuples, cursor); + } + } + + @Override + public void handleCommandStatus(String status, long updateCount, long insertOID) { + if (delegate != null) { + delegate.handleCommandStatus(status, updateCount, insertOID); + } + } + + @Override + public void handleWarning(SQLWarning warning) { + if (delegate != null) { + delegate.handleWarning(warning); + } + } + + @Override + public void handleError(SQLException error) { + if (delegate != null) { + delegate.handleError(error); + } + } + + @Override + public void handleCompletion() throws SQLException { + if (delegate != null) { + delegate.handleCompletion(); + } + } + + @Override + public void secureProgress() { + if (delegate != null) { + delegate.secureProgress(); + } + } + + @Override + public SQLException getException() { + if (delegate != null) { + return delegate.getException(); + } + return null; + } + + @Override + public SQLWarning getWarning() { + if (delegate != null) { + return delegate.getWarning(); + } + return null; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java b/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java new file mode 100644 index 0000000..d8ec12a --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import java.text.NumberFormat; +import java.text.ParsePosition; + +/** + * Enumeration for PostgreSQL versions. 
+ */ +public enum ServerVersion implements Version { + + INVALID("0.0.0"), + v8_2("8.2.0"), + v8_3("8.3.0"), + v8_4("8.4.0"), + v9_0("9.0.0"), + v9_1("9.1.0"), + v9_2("9.2.0"), + v9_3("9.3.0"), + v9_4("9.4.0"), + v9_5("9.5.0"), + v9_6("9.6.0"), + v10("10"), + v11("11"), + v12("12"), + v13("13"), + v14("14"), + v15("15"), + v16("16") + ; + + private final int version; + + ServerVersion(String version) { + this.version = parseServerVersionStr(version); + } + + /** + * Get a machine-readable version number. + * + * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1 + */ + @Override + public int getVersionNum() { + return version; + } + + /** + *

Attempt to parse the server version string into an XXYYZZ form version number, returned as a + * {@link Version}.

+ * + *

If the specified version cannot be parsed, the {@link Version#getVersionNum()} will return 0.

+ * + * @param version version in numeric XXYYZZ form, e.g. "090401" for 9.4.1 + * @return a {@link Version} representing the specified version string. + */ + public static Version from(String version) { + final int versionNum = parseServerVersionStr(version); + return new Version() { + @Override + public int getVersionNum() { + return versionNum; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof Version) { + return this.getVersionNum() == ((Version) obj).getVersionNum(); + } + return false; + } + + @Override + public int hashCode() { + return getVersionNum(); + } + + @Override + public String toString() { + return Integer.toString(versionNum); + } + }; + } + + /** + *

Attempt to parse the server version string into an XXYYZZ form version number.

+ * + *

Returns 0 if the version could not be parsed.

+ * + *

Returns minor version 0 if the minor version could not be determined, e.g. devel or beta + * releases.

+ * + *

If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and + * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).

+ * + *

The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a + * version part is out of range.

+ * + * @param serverVersion server version in a XXYYZZ form + * @return server version in number form + */ + static int parseServerVersionStr(String serverVersion) throws NumberFormatException { + if (serverVersion == null) { + return 0; + } + + NumberFormat numformat = NumberFormat.getIntegerInstance(); + numformat.setGroupingUsed(false); + ParsePosition parsepos = new ParsePosition(0); + + int[] parts = new int[3]; + int versionParts; + for (versionParts = 0; versionParts < 3; versionParts++) { + Number part = (Number) numformat.parseObject(serverVersion, parsepos); + if (part == null) { + break; + } + parts[versionParts] = part.intValue(); + if (parsepos.getIndex() == serverVersion.length() + || serverVersion.charAt(parsepos.getIndex()) != '.') { + break; + } + // Skip . + parsepos.setIndex(parsepos.getIndex() + 1); + } + versionParts++; + + if (parts[0] >= 10000) { + /* + * PostgreSQL version 1000? I don't think so. We're seeing a version like 90401; return it + * verbatim, but only if there's nothing else in the version. If there is, treat it as a parse + * error. + */ + if (parsepos.getIndex() == serverVersion.length() && versionParts == 1) { + return parts[0]; + } else { + throw new NumberFormatException( + "First major-version part equal to or greater than 10000 in invalid version string: " + + serverVersion); + } + } + + /* #667 - Allow for versions with greater than 3 parts. + For versions with more than 3 parts, still return 3 parts (4th part ignored for now + as no functionality is dependent on the 4th part . 
+ Allows for future versions of the server to utilize more than 3 part version numbers + without upgrading the jdbc driver */ + + if (versionParts >= 3) { + if (parts[1] > 99) { + throw new NumberFormatException( + "Unsupported second part of major version > 99 in invalid version string: " + + serverVersion); + } + if (parts[2] > 99) { + throw new NumberFormatException( + "Unsupported second part of minor version > 99 in invalid version string: " + + serverVersion); + } + return (parts[0] * 100 + parts[1]) * 100 + parts[2]; + } + if (versionParts == 2) { + if (parts[0] >= 10) { + return parts[0] * 100 * 100 + parts[1]; + } + if (parts[1] > 99) { + throw new NumberFormatException( + "Unsupported second part of major version > 99 in invalid version string: " + + serverVersion); + } + return (parts[0] * 100 + parts[1]) * 100; + } + if (versionParts == 1) { + if (parts[0] >= 10) { + return parts[0] * 100 * 100; + } + } + return 0; /* unknown */ + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java b/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java new file mode 100644 index 0000000..739043e --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core; + +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.util.List; + +/** + * Poor man's Statement & ResultSet, used for initial queries while we're still initializing the + * system. 
+ */ +public class SetupQueryRunner { + + public SetupQueryRunner() { + } + + private static class SimpleResultHandler extends ResultHandlerBase { + private List tuples; + + List getResults() { + return tuples; + } + + @Override + public void handleResultRows(Query fromQuery, Field[] fields, List tuples, + ResultCursor cursor) { + this.tuples = tuples; + } + + @Override + public void handleWarning(SQLWarning warning) { + // We ignore warnings. We assume we know what we're + // doing in the setup queries. + } + } + + public static Tuple run(QueryExecutor executor, String queryString, + boolean wantResults) throws SQLException { + Query query = executor.createSimpleQuery(queryString); + SimpleResultHandler handler = new SimpleResultHandler(); + + int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_SUPPRESS_BEGIN + | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE; + if (!wantResults) { + flags |= QueryExecutor.QUERY_NO_RESULTS | QueryExecutor.QUERY_NO_METADATA; + } + + try { + executor.execute(query, null, handler, 0, 0, flags); + } finally { + query.close(); + } + + if (!wantResults) { + return null; + } + + List tuples = handler.getResults(); + if (tuples == null || tuples.size() != 1) { + throw new PSQLException(GT.tr("An unexpected result was returned by a query."), + PSQLState.CONNECTION_UNABLE_TO_CONNECT); + } + + return tuples.get(0); + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java b/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java new file mode 100644 index 0000000..f54fb00 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import org.postgresql.PGProperty; +import org.postgresql.ssl.LibPQFactory; +import org.postgresql.util.GT; +import org.postgresql.util.ObjectFactory; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.util.Properties; + +import javax.net.SocketFactory; +import javax.net.ssl.SSLSocketFactory; + +/** + * Instantiates {@link SocketFactory} based on the {@link PGProperty#SOCKET_FACTORY}. + */ +public class SocketFactoryFactory { + + public SocketFactoryFactory() { + } + + /** + * Instantiates {@link SocketFactory} based on the {@link PGProperty#SOCKET_FACTORY}. + * + * @param info connection properties + * @return socket factory + * @throws PSQLException if something goes wrong + */ + public static SocketFactory getSocketFactory(Properties info) throws PSQLException { + // Socket factory + String socketFactoryClassName = PGProperty.SOCKET_FACTORY.getOrDefault(info); + if (socketFactoryClassName == null) { + return SocketFactory.getDefault(); + } + try { + return ObjectFactory.instantiate(SocketFactory.class, socketFactoryClassName, info, true, + PGProperty.SOCKET_FACTORY_ARG.getOrDefault(info)); + } catch (Exception e) { + throw new PSQLException( + GT.tr("The SocketFactory class provided {0} could not be instantiated.", + socketFactoryClassName), + PSQLState.CONNECTION_FAILURE, e); + } + } + + /** + * Instantiates {@link SSLSocketFactory} based on the {@link PGProperty#SSL_FACTORY}. 
+ * + * @param info connection properties + * @return SSL socket factory + * @throws PSQLException if something goes wrong + */ + @SuppressWarnings("deprecation") + public static SSLSocketFactory getSslSocketFactory(Properties info) throws PSQLException { + String classname = PGProperty.SSL_FACTORY.getOrDefault(info); + if (classname == null + || "org.postgresql.ssl.jdbc4.LibPQFactory".equals(classname) + || "org.postgresql.ssl.LibPQFactory".equals(classname)) { + return new LibPQFactory(info); + } + try { + return ObjectFactory.instantiate(SSLSocketFactory.class, classname, info, true, + PGProperty.SSL_FACTORY_ARG.getOrDefault(info)); + } catch (Exception e) { + throw new PSQLException( + GT.tr("The SSLSocketFactory class provided {0} could not be instantiated.", classname), + PSQLState.CONNECTION_FAILURE, e); + } + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java b/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java new file mode 100644 index 0000000..90201fa --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import static org.postgresql.core.SqlCommandType.INSERT; +import static org.postgresql.core.SqlCommandType.SELECT; +import static org.postgresql.core.SqlCommandType.WITH; + +/** + * Data Modification Language inspection support. 
+ * + * @author Jeremy Whiting jwhiting@redhat.com + * @author Christopher Deckers (chrriis@gmail.com) + * + */ +public class SqlCommand { + public static final SqlCommand BLANK = SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK); + + public boolean isBatchedReWriteCompatible() { + return valuesBraceOpenPosition >= 0; + } + + public int getBatchRewriteValuesBraceOpenPosition() { + return valuesBraceOpenPosition; + } + + public int getBatchRewriteValuesBraceClosePosition() { + return valuesBraceClosePosition; + } + + public SqlCommandType getType() { + return commandType; + } + + public boolean isReturningKeywordPresent() { + return parsedSQLhasRETURNINGKeyword; + } + + public boolean returnsRows() { + return parsedSQLhasRETURNINGKeyword || commandType == SELECT || commandType == WITH; + } + + public static SqlCommand createStatementTypeInfo(SqlCommandType type, + boolean isBatchedReWritePropertyConfigured, + int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isRETURNINGkeywordPresent, + int priorQueryCount) { + return new SqlCommand(type, isBatchedReWritePropertyConfigured, + valuesBraceOpenPosition, valuesBraceClosePosition, isRETURNINGkeywordPresent, + priorQueryCount); + } + + public static SqlCommand createStatementTypeInfo(SqlCommandType type) { + return new SqlCommand(type, false, -1, -1, false, 0); + } + + public static SqlCommand createStatementTypeInfo(SqlCommandType type, + boolean isRETURNINGkeywordPresent) { + return new SqlCommand(type, false, -1, -1, isRETURNINGkeywordPresent, 0); + } + + private SqlCommand(SqlCommandType type, boolean isBatchedReWriteConfigured, + int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isPresent, + int priorQueryCount) { + commandType = type; + parsedSQLhasRETURNINGKeyword = isPresent; + boolean batchedReWriteCompatible = (type == INSERT) && isBatchedReWriteConfigured + && valuesBraceOpenPosition >= 0 && valuesBraceClosePosition > valuesBraceOpenPosition + && !isPresent && 
priorQueryCount == 0; + this.valuesBraceOpenPosition = batchedReWriteCompatible ? valuesBraceOpenPosition : -1; + this.valuesBraceClosePosition = batchedReWriteCompatible ? valuesBraceClosePosition : -1; + } + + private final SqlCommandType commandType; + private final boolean parsedSQLhasRETURNINGKeyword; + private final int valuesBraceOpenPosition; + private final int valuesBraceClosePosition; + +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java b/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java new file mode 100644 index 0000000..3a4fc43 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +/** + * Type information inspection support. + * @author Jeremy Whiting jwhiting@redhat.com + * + */ + +public enum SqlCommandType { + + /** + * Use BLANK for empty sql queries or when parsing the sql string is not + * necessary. + */ + BLANK, + INSERT, + UPDATE, + DELETE, + MOVE, + SELECT, + WITH, + CREATE, + ALTER +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java b/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java new file mode 100644 index 0000000..b819026 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +public enum TransactionState { + IDLE, + OPEN, + FAILED +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/Tuple.java b/pgjdbc/src/main/java/org/postgresql/core/Tuple.java new file mode 100644 index 0000000..5f6e488 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/Tuple.java @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +/** + * Class representing a row in a {@link java.sql.ResultSet}. + */ +public class Tuple { + private final boolean forUpdate; + final byte[] [] data; + + /** + * Construct an empty tuple. Used in updatable result sets. + * @param length the number of fields in the tuple. + */ + public Tuple(int length) { + this(new byte[length][], true); + } + + /** + * Construct a populated tuple. Used when returning results. + * @param data the tuple data + */ + public Tuple(byte[] [] data) { + this(data, false); + } + + private Tuple(byte[] [] data, boolean forUpdate) { + this.data = data; + this.forUpdate = forUpdate; + } + + /** + * Number of fields in the tuple + * @return number of fields + */ + public int fieldCount() { + return data.length; + } + + /** + * Total length in bytes of the tuple data. + * @return the number of bytes in this tuple + */ + public int length() { + int length = 0; + for (byte[] field : data) { + if (field != null) { + length += field.length; + } + } + return length; + } + + /** + * Get the data for the given field + * @param index 0-based field position in the tuple + * @return byte array of the data + */ + public byte [] get(int index) { + return data[index]; + } + + /** + * Create a copy of the tuple for updating. 
+ * @return a copy of the tuple that allows updates + */ + public Tuple updateableCopy() { + return copy(true); + } + + /** + * Create a read-only copy of the tuple + * @return a copy of the tuple that does not allow updates + */ + public Tuple readOnlyCopy() { + return copy(false); + } + + private Tuple copy(boolean forUpdate) { + byte[][] dataCopy = new byte[data.length][]; + System.arraycopy(data, 0, dataCopy, 0, data.length); + return new Tuple(dataCopy, forUpdate); + } + + /** + * Set the given field to the given data. + * @param index 0-based field position + * @param fieldData the data to set + */ + public void set(int index, byte [] fieldData) { + if (!forUpdate) { + throw new IllegalArgumentException("Attempted to write to readonly tuple"); + } + data[index] = fieldData; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java b/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java new file mode 100644 index 0000000..f41b407 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2008, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import org.postgresql.util.PGobject; + +import java.sql.SQLException; +import java.util.Iterator; + +public interface TypeInfo { + void addCoreType(String pgTypeName, Integer oid, Integer sqlType, String javaClass, + Integer arrayOid); + + void addDataType(String type, Class klass) throws SQLException; + + /** + * Look up the SQL typecode for a given type oid. + * + * @param oid the type's OID + * @return the SQL type code (a constant from {@link java.sql.Types}) for the type + * @throws SQLException if an error occurs when retrieving sql type + */ + int getSQLType(int oid) throws SQLException; + + /** + * Look up the SQL typecode for a given postgresql type name. 
+ * + * @param pgTypeName the server type name to look up + * @return the SQL type code (a constant from {@link java.sql.Types}) for the type + * @throws SQLException if an error occurs when retrieving sql type + */ + int getSQLType(String pgTypeName) throws SQLException; + + int getJavaArrayType(String className) throws SQLException; + + /** + * Look up the oid for a given postgresql type name. This is the inverse of + * {@link #getPGType(int)}. + * + * @param pgTypeName the server type name to look up + * @return the type's OID, or 0 if unknown + * @throws SQLException if an error occurs when retrieving PG type + */ + int getPGType(String pgTypeName) throws SQLException; + + /** + * Look up the postgresql type name for a given oid. This is the inverse of + * {@link #getPGType(String)}. + * + * @param oid the type's OID + * @return the server type name for that OID or null if unknown + * @throws SQLException if an error occurs when retrieving PG type + */ + String getPGType(int oid) throws SQLException; + + /** + * Look up the oid of an array's base type given the array's type oid. + * + * @param oid the array type's OID + * @return the base type's OID, or 0 if unknown + * @throws SQLException if an error occurs when retrieving array element + */ + int getPGArrayElement(int oid) throws SQLException; + + /** + * Determine the oid of the given base postgresql type's array type. + * + * @param elementTypeName the base type's + * @return the array type's OID, or 0 if unknown + * @throws SQLException if an error occurs when retrieving array type + */ + int getPGArrayType(String elementTypeName) throws SQLException; + + /** + * Determine the delimiter for the elements of the given array type oid. 
+ * + * @param oid the array type's OID + * @return the base type's array type delimiter + * @throws SQLException if an error occurs when retrieving array delimiter + */ + char getArrayDelimiter(int oid) throws SQLException; + + Iterator getPGTypeNamesWithSQLTypes(); + + Iterator getPGTypeOidsWithSQLTypes(); + + Class getPGobject(String type); + + String getJavaClass(int oid) throws SQLException; + + String getTypeForAlias(String alias); + + int getPrecision(int oid, int typmod); + + int getScale(int oid, int typmod); + + boolean isCaseSensitive(int oid); + + boolean isSigned(int oid); + + int getDisplaySize(int oid, int typmod); + + int getMaximumPrecision(int oid); + + boolean requiresQuoting(int oid) throws SQLException; + + /** + * Returns true if particular sqlType requires quoting. + * This method is used internally by the driver, so it might disappear without notice. + * + * @param sqlType sql type as in java.sql.Types + * @return true if the type requires quoting + * @throws SQLException if something goes wrong + */ + boolean requiresQuotingSqlType(int sqlType) throws SQLException; + + /** + *

Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers. + * We therefore read them as positive long values and then force them into signed integers + * (wrapping around into negative values when required) or we'd be unable to correctly + * handle the upper half of the oid space.

+ * + *

This function handles the mapping of uint32-values in the long to java integers, and + * throws for values that are out of range.

+ * + * @param oid the oid as a long. + * @return the (internal) signed integer representation of the (unsigned) oid. + * @throws SQLException if the long has a value outside of the range representable by uint32 + */ + int longOidToInt(long oid) throws SQLException; + + /** + * Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers. + * We must therefore first map the (internal) integer representation to a positive long + * value before sending it to postgresql, or we would be unable to correctly handle the + * upper half of the oid space because these negative values are disallowed as OID values. + * + * @param oid the (signed) integer oid to convert into a long. + * @return the non-negative value of this oid, stored as a java long. + */ + long intOidToLong(int oid); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/Utils.java b/pgjdbc/src/main/java/org/postgresql/core/Utils.java new file mode 100644 index 0000000..d96c6e3 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/Utils.java @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core; + +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.IOException; +import java.sql.SQLException; + +/** + * Collection of utilities used by the protocol-level code. + */ +public class Utils { + + public Utils() { + } + + /** + * Turn a bytearray into a printable form, representing each byte in hex. 
+ * + * @param data the bytearray to stringize + * @return a hex-encoded printable representation of {@code data} + */ + public static String toHexString(byte[] data) { + StringBuilder sb = new StringBuilder(data.length * 2); + for (byte element : data) { + sb.append(Integer.toHexString((element >> 4) & 15)); + sb.append(Integer.toHexString(element & 15)); + } + return sb.toString(); + } + + /** + * Escape the given literal {@code value} and append it to the string builder {@code sbuf}. If + * {@code sbuf} is {@code null}, a new StringBuilder will be returned. The argument + * {@code standardConformingStrings} defines whether the backend expects standard-conforming + * string literals or allows backslash escape sequences. + * + * @param sbuf the string builder to append to; or {@code null} + * @param value the string value + * @param standardConformingStrings if standard conforming strings should be used + * @return the sbuf argument; or a new string builder for sbuf == null + * @throws SQLException if the string contains a {@code \0} character + */ + public static StringBuilder escapeLiteral(StringBuilder sbuf, String value, + boolean standardConformingStrings) throws SQLException { + if (sbuf == null) { + sbuf = new StringBuilder((value.length() + 10) / 10 * 11); // Add 10% for escaping. + } + doAppendEscapedLiteral(sbuf, value, standardConformingStrings); + return sbuf; + } + + /** + * Common part for {@link #escapeLiteral(StringBuilder, String, boolean)}. + * + * @param sbuf Either StringBuffer or StringBuilder as we do not expect any IOException to be + * thrown + * @param value value to append + * @param standardConformingStrings if standard conforming strings should be used + */ + private static void doAppendEscapedLiteral(Appendable sbuf, String value, + boolean standardConformingStrings) throws SQLException { + try { + if (standardConformingStrings) { + // With standard_conforming_strings on, escape only single-quotes. 
+ for (int i = 0; i < value.length(); i++) { + char ch = value.charAt(i); + if (ch == '\0') { + throw new PSQLException(GT.tr("Zero bytes may not occur in string parameters."), + PSQLState.INVALID_PARAMETER_VALUE); + } + if (ch == '\'') { + sbuf.append('\''); + } + sbuf.append(ch); + } + } else { + // With standard_conforming_string off, escape backslashes and + // single-quotes, but still escape single-quotes by doubling, to + // avoid a security hazard if the reported value of + // standard_conforming_strings is incorrect, or an error if + // backslash_quote is off. + for (int i = 0; i < value.length(); i++) { + char ch = value.charAt(i); + if (ch == '\0') { + throw new PSQLException(GT.tr("Zero bytes may not occur in string parameters."), + PSQLState.INVALID_PARAMETER_VALUE); + } + if (ch == '\\' || ch == '\'') { + sbuf.append(ch); + } + sbuf.append(ch); + } + } + } catch (IOException e) { + throw new PSQLException(GT.tr("No IOException expected from StringBuffer or StringBuilder"), + PSQLState.UNEXPECTED_ERROR, e); + } + } + + /** + * Escape the given identifier {@code value} and append it to the string builder {@code sbuf}. + * If {@code sbuf} is {@code null}, a new StringBuilder will be returned. This method is + * different from appendEscapedLiteral in that it includes the quoting required for the identifier + * while {@link #escapeLiteral(StringBuilder, String, boolean)} does not. + * + * @param sbuf the string builder to append to; or {@code null} + * @param value the string value + * @return the sbuf argument; or a new string builder for sbuf == null + * @throws SQLException if the string contains a {@code \0} character + */ + public static StringBuilder escapeIdentifier(StringBuilder sbuf, String value) + throws SQLException { + if (sbuf == null) { + sbuf = new StringBuilder(2 + (value.length() + 10) / 10 * 11); // Add 10% for escaping. + } + doAppendEscapedIdentifier(sbuf, value); + return sbuf; + } + + /** + * Common part for appendEscapedIdentifier. 
+ * + * @param sbuf Either StringBuffer or StringBuilder as we do not expect any IOException to be + * thrown. + * @param value value to append + */ + private static void doAppendEscapedIdentifier(Appendable sbuf, String value) throws SQLException { + try { + sbuf.append('"'); + + for (int i = 0; i < value.length(); i++) { + char ch = value.charAt(i); + if (ch == '\0') { + throw new PSQLException(GT.tr("Zero bytes may not occur in identifiers."), + PSQLState.INVALID_PARAMETER_VALUE); + } + if (ch == '"') { + sbuf.append(ch); + } + sbuf.append(ch); + } + + sbuf.append('"'); + } catch (IOException e) { + throw new PSQLException(GT.tr("No IOException expected from StringBuffer or StringBuilder"), + PSQLState.UNEXPECTED_ERROR, e); + } + } + + /** + *

Attempt to parse the server version string into an XXYYZZ form version number.

+ * + *

Returns 0 if the version could not be parsed.

+ * + *

Returns minor version 0 if the minor version could not be determined, e.g. devel or beta + * releases.

+ * + *

If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and + * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).

+ * + *

The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a + * version part is out of range.

+ * + * @param serverVersion server version in a XXYYZZ form + * @return server version in number form + * @deprecated use specific {@link Version} instance + */ + @Deprecated + public static int parseServerVersionStr(String serverVersion) throws NumberFormatException { + return ServerVersion.parseServerVersionStr(serverVersion); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/Version.java b/pgjdbc/src/main/java/org/postgresql/core/Version.java new file mode 100644 index 0000000..639226a --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/Version.java @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +public interface Version { + + /** + * Get a machine-readable version number. + * + * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1 + */ + int getVersionNum(); + +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java b/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java new file mode 100644 index 0000000..c78623f --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2006, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.net.SocketTimeoutException; + +/** + * A faster version of BufferedInputStream. Does no synchronisation and allows direct access to the + * used byte[] buffer. + * + * @author Mikko Tiihonen + */ +public class VisibleBufferedInputStream extends InputStream { + + /** + * If a direct read to byte array is called that would require a smaller read from the wrapped + * stream that MINIMUM_READ then first fill the buffer and serve the bytes from there. 
/**
 * A faster version of BufferedInputStream. Does no synchronisation and allows direct access to the
 * used byte[] buffer.
 *
 * @author Mikko Tiihonen
 */
public class VisibleBufferedInputStream extends InputStream {

  /**
   * If a direct read to byte array is called that would require a smaller read from the wrapped
   * stream than MINIMUM_READ, then first fill the buffer and serve the bytes from there. Larger
   * reads are directly done to the provided byte array.
   */
  private static final int MINIMUM_READ = 1024;

  /**
   * In how large spans is the C string zero-byte scanned.
   */
  private static final int STRING_SCAN_SPAN = 1024;

  /**
   * The wrapped input stream.
   */
  private final InputStream wrapped;

  /**
   * The buffer.
   */
  private byte[] buffer;

  /**
   * Current read position in the buffer.
   */
  private int index;

  /**
   * How far is the buffer filled with valid data.
   */
  private int endIndex;

  /**
   * socket timeout has been requested
   */
  private boolean timeoutRequested;

  /**
   * Creates a new buffer around the given stream.
   *
   * @param in The stream to buffer.
   * @param bufferSize The initial size of the buffer; raised to MINIMUM_READ if smaller.
   */
  public VisibleBufferedInputStream(InputStream in, int bufferSize) {
    wrapped = in;
    buffer = new byte[bufferSize < MINIMUM_READ ? MINIMUM_READ : bufferSize];
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int read() throws IOException {
    if (ensureBytes(1)) {
      return buffer[index++] & 0xFF;
    }
    return -1;
  }

  /**
   * Reads a byte from the buffer without advancing the index pointer.
   *
   * @return byte from the buffer without advancing the index pointer
   * @throws IOException if something wrong happens
   */
  public int peek() throws IOException {
    if (ensureBytes(1)) {
      return buffer[index] & 0xFF;
    }
    return -1;
  }

  /**
   * Reads byte from the buffer without any checks. This method never reads from the underlaying
   * stream. Before calling this method the {@link #ensureBytes} method must have been called.
   *
   * @return The next byte from the buffer.
   * @throws ArrayIndexOutOfBoundsException If ensureBytes was not called to make sure the buffer
   *         contains the byte.
   */
  public byte readRaw() {
    return buffer[index++];
  }

  /**
   * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
   * fields.
   *
   * @param n The amount of bytes to ensure exists in buffer
   * @return true if required bytes are available and false if EOF
   * @throws IOException If reading of the wrapped stream failed.
   */
  public boolean ensureBytes(int n) throws IOException {
    return ensureBytes(n, true);
  }

  /**
   * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
   * fields.
   *
   * @param n The amount of bytes to ensure exists in buffer
   * @param block whether or not to block the IO
   * @return true if required bytes are available and false if EOF or the parameter block was false
   *         and socket timeout occurred.
   * @throws IOException If reading of the wrapped stream failed.
   */
  public boolean ensureBytes(int n, boolean block) throws IOException {
    int required = n - endIndex + index;
    while (required > 0) {
      if (!readMore(required, block)) {
        return false;
      }
      required = n - endIndex + index;
    }
    return true;
  }

  /**
   * Reads more bytes into the buffer.
   *
   * @param wanted How much should be at least read.
   * @param block whether to block on the underlying read
   * @return True if at least some bytes were read; false on EOF, or on a
   *         non-blocking call that got no data or timed out.
   * @throws IOException If reading of the wrapped stream failed.
   */
  private boolean readMore(int wanted, boolean block) throws IOException {
    if (endIndex == index) {
      // Buffer fully drained; rewind so the whole buffer is usable.
      index = 0;
      endIndex = 0;
    }
    int canFit = buffer.length - endIndex;
    if (canFit < wanted) {
      // would the wanted bytes fit if we compacted the buffer
      // and still leave some slack
      if (index + canFit > wanted + MINIMUM_READ) {
        compact();
      } else {
        doubleBuffer();
      }
      canFit = buffer.length - endIndex;
    }
    int read = 0;
    try {
      read = wrapped.read(buffer, endIndex, canFit);
      if (!block && read == 0) {
        return false;
      }
    } catch (SocketTimeoutException e) {
      if (!block) {
        return false;
      }
      if (timeoutRequested) {
        throw e;
      }
      // Timeout not requested by the caller: swallow and report "no progress
      // yet" as a successful zero-byte read so callers retry.
    }
    if (read < 0) {
      return false;
    }
    endIndex += read;
    return true;
  }

  /**
   * Doubles the size of the buffer, preserving the unread bytes.
   */
  private void doubleBuffer() {
    byte[] buf = new byte[buffer.length * 2];
    moveBufferTo(buf);
    buffer = buf;
  }

  /**
   * Compacts the unread bytes of the buffer to the beginning of the buffer.
   */
  private void compact() {
    moveBufferTo(buffer);
  }

  /**
   * Moves bytes from the buffer to the beginning of the destination buffer. Also sets the index and
   * endIndex variables.
   *
   * @param dest The destination buffer.
   */
  private void moveBufferTo(byte[] dest) {
    int size = endIndex - index;
    System.arraycopy(buffer, index, dest, 0, size);
    index = 0;
    endIndex = size;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int read(byte[] to, int off, int len) throws IOException {
    if ((off | len | (off + len) | (to.length - (off + len))) < 0) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      return 0;
    }

    // if the read would go to wrapped stream, but would result
    // in a small read then try read to the buffer instead
    int avail = endIndex - index;
    if (len - avail < MINIMUM_READ) {
      ensureBytes(len);
      avail = endIndex - index;
    }

    // first copy from buffer
    if (avail > 0) {
      if (len <= avail) {
        System.arraycopy(buffer, index, to, off, len);
        index += len;
        return len;
      }
      System.arraycopy(buffer, index, to, off, avail);
      len -= avail;
      off += avail;
    }
    int read = avail;

    // good place to reset index because the buffer is fully drained
    index = 0;
    endIndex = 0;

    // then directly from wrapped stream
    do {
      int r;
      try {
        r = wrapped.read(to, off, len);
      } catch (SocketTimeoutException e) {
        if (read == 0 && timeoutRequested) {
          throw e;
        }
        return read;
      }
      if (r <= 0) {
        return read == 0 ? r : read;
      }
      read += r;
      off += r;
      len -= r;
    } while (len > 0);

    return read;
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public long skip(long n) throws IOException {
    if (n <= 0) {
      // Fix: InputStream.skip is specified to skip nothing for a non-positive
      // count; previously a negative n moved the read index backwards
      // (corrupting the buffer state) and returned a negative value.
      return 0;
    }
    if (n >= Integer.MAX_VALUE) {
      throw new IllegalArgumentException("n is too large");
    }
    int avail = endIndex - index;
    if (avail >= n) {
      index = index + (int) n;
      return n;
    }
    n -= avail;
    index = 0;
    endIndex = 0;
    return avail + wrapped.skip(n);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public int available() throws IOException {
    int avail = endIndex - index;
    return avail > 0 ? avail : wrapped.available();
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void close() throws IOException {
    wrapped.close();
  }

  /**
   * Returns direct handle to the used buffer. Use the {@link #ensureBytes} to prefill required
   * bytes the buffer and {@link #getIndex} to fetch the current position of the buffer.
   *
   * @return The underlaying buffer.
   */
  public byte[] getBuffer() {
    return buffer;
  }

  /**
   * Returns the current read position in the buffer.
   *
   * @return the current read position in the buffer.
   */
  public int getIndex() {
    return index;
  }

  /**
   * Scans the length of the next null terminated string (C-style string) from the stream without
   * consuming it.
   *
   * @return The length of the next null terminated string, including the terminator.
   * @throws IOException If reading of stream fails.
   * @throws EOFException If the stream did not contain any null terminators.
   */
  public int scanCStringLength() throws IOException {
    int pos = index;
    while (true) {
      while (pos < endIndex) {
        if (buffer[pos++] == '\0') {
          return pos - index;
        }
      }
      if (!readMore(STRING_SCAN_SPAN, true)) {
        throw new EOFException();
      }
      pos = index;
    }
  }

  public void setTimeoutRequested(boolean timeoutRequested) {
    this.timeoutRequested = timeoutRequested;
  }

  /**
   * @return the wrapped stream
   */
  public InputStream getWrapped() {
    return wrapped;
  }
}
password. This method will only be + * invoked if the server actually requests a password, e.g. trust authentication + * will skip it entirely. + * + *

The caller provides a action method that will be invoked with the {@code char[]} + * password. After completion, for security reasons the {@code char[]} array will be + * wiped by filling it with zeroes. Callers must not rely on being able to read + * the password {@code char[]} after the action has completed.

+ * + * @param type The authentication type that is being requested + * @param info The connection properties for the connection + * @param action The action to invoke with the password + * @throws PSQLException Throws a PSQLException if the plugin class cannot be instantiated + * @throws IOException Bubbles up any thrown IOException from the provided action + */ + public static T withPassword(AuthenticationRequestType type, Properties info, + PasswordAction action) throws PSQLException, IOException { + char[] password = null; + + String authPluginClassName = PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info); + + if (authPluginClassName == null || "".equals(authPluginClassName)) { + // Default auth plugin simply pulls password directly from connection properties + String passwordText = PGProperty.PASSWORD.getOrDefault(info); + if (passwordText != null) { + password = passwordText.toCharArray(); + } + } else { + AuthenticationPlugin authPlugin; + try { + authPlugin = ObjectFactory.instantiate(AuthenticationPlugin.class, authPluginClassName, info, + false, null); + } catch (Exception ex) { + String msg = GT.tr("Unable to load Authentication Plugin {0}", authPluginClassName); + LOGGER.log(Level.FINE, msg, ex); + throw new PSQLException(msg, PSQLState.INVALID_PARAMETER_VALUE, ex); + } + + password = authPlugin.getPassword(type); + } + + try { + return action.apply(password); + } finally { + if (password != null) { + Arrays.fill(password, (char) 0); + } + } + } + + /** + * Helper that wraps {@link #withPassword(AuthenticationRequestType, Properties, PasswordAction)}, checks that it is not-null, and encodes + * it as a byte array. Used by internal code paths that require an encoded password + * that may be an empty string, but not null. + * + *

The caller provides a callback method that will be invoked with the {@code byte[]} + * encoded password. After completion, for security reasons the {@code byte[]} array will be + * wiped by filling it with zeroes. Callers must not rely on being able to read + * the password {@code byte[]} after the callback has completed.

+ + * @param type The authentication type that is being requested + * @param info The connection properties for the connection + * @param action The action to invoke with the encoded password + * @throws PSQLException Throws a PSQLException if the plugin class cannot be instantiated or if the retrieved password is null. + * @throws IOException Bubbles up any thrown IOException from the provided callback + */ + public static T withEncodedPassword(AuthenticationRequestType type, Properties info, + PasswordAction action) throws PSQLException, IOException { + byte[] encodedPassword = withPassword(type, info, password -> { + if (password == null) { + throw new PSQLException( + GT.tr("The server requested password-based authentication, but no password was provided by plugin {0}", + PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info)), + PSQLState.CONNECTION_REJECTED); + } + ByteBuffer buf = StandardCharsets.UTF_8.encode(CharBuffer.wrap(password)); + byte[] bytes = new byte[buf.limit()]; + buf.get(bytes); + return bytes; + }); + + try { + return action.apply(encodedPassword); + } finally { + Arrays.fill(encodedPassword, (byte) 0); + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java b/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java new file mode 100644 index 0000000..ed57f75 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core.v3; + +import org.postgresql.core.NativeQuery; +import org.postgresql.core.ParameterList; + +/** + * Purpose of this object is to support batched query re write behaviour. Responsibility for + * tracking the batch size and implement the clean up of the query fragments after the batch execute + * is complete. 
Intended to be used to wrap a Query that is present in the batchStatements + * collection. + * + * @author Jeremy Whiting jwhiting@redhat.com + * @author Christopher Deckers (chrriis@gmail.com) + * + */ +public class BatchedQuery extends SimpleQuery { + + private String sql; + private final int valuesBraceOpenPosition; + private final int valuesBraceClosePosition; + private final int batchSize; + private BatchedQuery [] blocks; + + public BatchedQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry, + int valuesBraceOpenPosition, + int valuesBraceClosePosition, boolean sanitiserDisabled) { + super(query, transferModeRegistry, sanitiserDisabled); + this.valuesBraceOpenPosition = valuesBraceOpenPosition; + this.valuesBraceClosePosition = valuesBraceClosePosition; + this.batchSize = 1; + } + + private BatchedQuery(BatchedQuery src, int batchSize) { + super(src); + this.valuesBraceOpenPosition = src.valuesBraceOpenPosition; + this.valuesBraceClosePosition = src.valuesBraceClosePosition; + this.batchSize = batchSize; + } + + public BatchedQuery deriveForMultiBatch(int valueBlock) { + if (getBatchSize() != 1) { + throw new IllegalStateException("Only the original decorator can be derived."); + } + if (valueBlock == 1) { + return this; + } + int index = Integer.numberOfTrailingZeros(valueBlock) - 1; + if (valueBlock > 128 || valueBlock != (1 << (index + 1))) { + throw new IllegalArgumentException( + "Expected value block should be a power of 2 smaller or equal to 128. Actual block is " + + valueBlock); + } + if (blocks == null) { + blocks = new BatchedQuery[7]; + } + BatchedQuery bq = blocks[index]; + if (bq == null) { + bq = new BatchedQuery(this, valueBlock); + blocks[index] = bq; + } + return bq; + } + + @Override + public int getBatchSize() { + return batchSize; + } + + /** + * Method to return the sql based on number of batches. Skipping the initial + * batch. 
+ */ + @Override + public String getNativeSql() { + if (sql != null) { + return sql; + } + sql = buildNativeSql(null); + return sql; + } + + private String buildNativeSql(ParameterList params) { + String sql = null; + // dynamically build sql with parameters for batches + String nativeSql = super.getNativeSql(); + int batchSize = getBatchSize(); + if (batchSize < 2) { + sql = nativeSql; + return sql; + } + if (nativeSql == null) { + sql = ""; + return sql; + } + int valuesBlockCharCount = 0; + // Split the values section around every dynamic parameter. + int[] bindPositions = getNativeQuery().bindPositions; + int[] chunkStart = new int[1 + bindPositions.length]; + int[] chunkEnd = new int[1 + bindPositions.length]; + chunkStart[0] = valuesBraceOpenPosition; + if (bindPositions.length == 0) { + valuesBlockCharCount = valuesBraceClosePosition - valuesBraceOpenPosition + 1; + chunkEnd[0] = valuesBraceClosePosition + 1; + } else { + chunkEnd[0] = bindPositions[0]; + // valuesBlockCharCount += chunks[0].length; + valuesBlockCharCount += chunkEnd[0] - chunkStart[0]; + for (int i = 0; i < bindPositions.length; i++) { + int startIndex = bindPositions[i] + 2; + int endIndex = + i < bindPositions.length - 1 ? 
bindPositions[i + 1] : valuesBraceClosePosition + 1; + for (; startIndex < endIndex; startIndex++) { + if (!Character.isDigit(nativeSql.charAt(startIndex))) { + break; + } + } + chunkStart[i + 1] = startIndex; + chunkEnd[i + 1] = endIndex; + // valuesBlockCharCount += chunks[i + 1].length; + valuesBlockCharCount += chunkEnd[i + 1] - chunkStart[i + 1]; + } + } + int length = nativeSql.length(); + //valuesBraceOpenPosition + valuesBlockCharCount; + length += NativeQuery.calculateBindLength(bindPositions.length * batchSize); + length -= NativeQuery.calculateBindLength(bindPositions.length); + length += (valuesBlockCharCount + 1 /*comma*/) * (batchSize - 1 /* initial sql */); + + StringBuilder s = new StringBuilder(length); + // Add query until end of values parameter block. + int pos; + if (bindPositions.length > 0 && params == null) { + // Add the first values (...) clause, it would be values($1,..., $n), and it matches with + // the values clause of a simple non-rewritten SQL + s.append(nativeSql, 0, valuesBraceClosePosition + 1); + pos = bindPositions.length + 1; + } else { + pos = 1; + batchSize++; // do not use super.toString(params) as it does not work if query ends with -- + // We need to carefully add (...),(...), and we do not want to get (...) --, (...) + // s.append(super.toString(params)); + s.append(nativeSql, 0, valuesBraceOpenPosition); + } + for (int i = 2; i <= batchSize; i++) { + if (i > 2 || pos != 1) { + // For "has binds" the first valuds + s.append(','); + } + s.append(nativeSql, chunkStart[0], chunkEnd[0]); + for (int j = 1; j < chunkStart.length; j++) { + if (params == null) { + NativeQuery.appendBindName(s, pos++); + } else { + s.append(params.toString(pos++, true)); + } + s.append(nativeSql, chunkStart[j], chunkEnd[j]); + } + } + // Add trailing content: final query is like original with multi values. + // This could contain "--" comments, so it is important to add them at end. 
+ s.append(nativeSql, valuesBraceClosePosition + 1, nativeSql.length()); + sql = s.toString(); + // Predict length only when building sql with $1, $2, ... (that is no specific params given) + assert params != null || s.length() == length + : "Predicted length != actual: " + length + " !=" + s.length(); + return sql; + } + + @Override + public String toString(ParameterList params) { + if (getBatchSize() < 2) { + return super.toString(params); + } + return buildNativeSql(params); + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java new file mode 100644 index 0000000..8075834 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core.v3; + +import org.postgresql.core.ParameterList; +import org.postgresql.util.ByteStreamWriter; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.InputStream; +import java.sql.SQLException; + +/** + * Parameter list for V3 query strings that contain multiple statements. We delegate to one + * SimpleParameterList per statement, and translate parameter indexes as needed. 
+ * + * @author Oliver Jowett (oliver@opencloud.com) + */ +class CompositeParameterList implements V3ParameterList { + CompositeParameterList(SimpleParameterList[] subparams, int[] offsets) { + this.subparams = subparams; + this.offsets = offsets; + this.total = offsets[offsets.length - 1] + subparams[offsets.length - 1].getInParameterCount(); + } + + private int findSubParam(int index) throws SQLException { + if (index < 1 || index > total) { + throw new PSQLException( + GT.tr("The column index is out of range: {0}, number of columns: {1}.", index, total), + PSQLState.INVALID_PARAMETER_VALUE); + } + + for (int i = offsets.length - 1; i >= 0; i--) { + if (offsets[i] < index) { + return i; + } + } + + throw new IllegalArgumentException("I am confused; can't find a subparam for index " + index); + } + + @Override + public void registerOutParameter(int index, int sqlType) { + + } + + public int getDirection(int i) { + return 0; + } + + @Override + public int getParameterCount() { + return total; + } + + @Override + public int getInParameterCount() { + return total; + } + + @Override + public int getOutParameterCount() { + return 0; + } + + @Override + public int[] getTypeOIDs() { + int[] oids = new int[total]; + for (int i = 0; i < offsets.length; i++) { + int[] subOids = subparams[i].getTypeOIDs(); + System.arraycopy(subOids, 0, oids, offsets[i], subOids.length); + } + return oids; + } + + @Override + public void setIntParameter(int index, int value) throws SQLException { + int sub = findSubParam(index); + subparams[sub].setIntParameter(index - offsets[sub], value); + } + + @Override + public void setLiteralParameter(int index, String value, int oid) throws SQLException { + int sub = findSubParam(index); + subparams[sub].setStringParameter(index - offsets[sub], value, oid); + } + + @Override + public void setStringParameter(int index, String value, int oid) throws SQLException { + int sub = findSubParam(index); + subparams[sub].setStringParameter(index - 
offsets[sub], value, oid); + } + + @Override + public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException { + int sub = findSubParam(index); + subparams[sub].setBinaryParameter(index - offsets[sub], value, oid); + } + + @Override + public void setBytea(int index, byte[] data, int offset, int length) throws SQLException { + int sub = findSubParam(index); + subparams[sub].setBytea(index - offsets[sub], data, offset, length); + } + + @Override + public void setBytea(int index, InputStream stream, int length) throws SQLException { + int sub = findSubParam(index); + subparams[sub].setBytea(index - offsets[sub], stream, length); + } + + @Override + public void setBytea(int index, InputStream stream) throws SQLException { + int sub = findSubParam(index); + subparams[sub].setBytea(index - offsets[sub], stream); + } + + @Override + public void setBytea(int index, ByteStreamWriter writer) throws SQLException { + int sub = findSubParam(index); + subparams[sub].setBytea(index - offsets[sub], writer); + } + + @Override + public void setText(int index, InputStream stream) throws SQLException { + int sub = findSubParam(index); + subparams[sub].setText(index - offsets[sub], stream); + } + + @Override + public void setNull(int index, int oid) throws SQLException { + int sub = findSubParam(index); + subparams[sub].setNull(index - offsets[sub], oid); + } + + @Override + public String toString(int index, boolean standardConformingStrings) { + try { + int sub = findSubParam(index); + return subparams[sub].toString(index - offsets[sub], standardConformingStrings); + } catch (SQLException e) { + throw new IllegalStateException(e.getMessage()); + } + } + + @Override + public ParameterList copy() { + SimpleParameterList[] copySub = new SimpleParameterList[subparams.length]; + for (int sub = 0; sub < subparams.length; sub++) { + copySub[sub] = (SimpleParameterList) subparams[sub].copy(); + } + + return new CompositeParameterList(copySub, offsets); + } + + @Override 
+ public void clear() { + for (SimpleParameterList subparam : subparams) { + subparam.clear(); + } + } + + @Override + public SimpleParameterList [] getSubparams() { + return subparams; + } + + @Override + public void checkAllParametersSet() throws SQLException { + for (SimpleParameterList subparam : subparams) { + subparam.checkAllParametersSet(); + } + } + + @Override + public byte [][] getEncoding() { + return null; // unsupported + } + + @Override + public byte [] getFlags() { + return null; // unsupported + } + + @Override + public int [] getParamTypes() { + return null; // unsupported + } + + @Override + public Object [] getValues() { + return null; // unsupported + } + + @Override + public void appendAll(ParameterList list) throws SQLException { + // no-op, unsupported + } + + @Override + public void convertFunctionOutParameters() { + for (SimpleParameterList subparam : subparams) { + subparam.convertFunctionOutParameters(); + } + } + + private final int total; + private final SimpleParameterList[] subparams; + private final int[] offsets; +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java new file mode 100644 index 0000000..bb34876 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core.v3; + +import org.postgresql.core.ParameterList; +import org.postgresql.core.Query; +import org.postgresql.core.SqlCommand; + +import java.util.Map; + +/** + * V3 Query implementation for queries that involve multiple statements. We split it up into one + * SimpleQuery per statement, and wrap the corresponding per-statement SimpleParameterList objects + * in a CompositeParameterList. 
+ * + * @author Oliver Jowett (oliver@opencloud.com) + */ +class CompositeQuery implements Query { + CompositeQuery(SimpleQuery[] subqueries, int[] offsets) { + this.subqueries = subqueries; + this.offsets = offsets; + } + + @Override + public ParameterList createParameterList() { + SimpleParameterList[] subparams = new SimpleParameterList[subqueries.length]; + for (int i = 0; i < subqueries.length; i++) { + subparams[i] = (SimpleParameterList) subqueries[i].createParameterList(); + } + return new CompositeParameterList(subparams, offsets); + } + + @Override + public String toString(ParameterList parameters) { + StringBuilder sbuf = new StringBuilder(subqueries[0].toString()); + for (int i = 1; i < subqueries.length; i++) { + sbuf.append(';'); + sbuf.append(subqueries[i]); + } + return sbuf.toString(); + } + + @Override + public String getNativeSql() { + StringBuilder sbuf = new StringBuilder(subqueries[0].getNativeSql()); + for (int i = 1; i < subqueries.length; i++) { + sbuf.append(';'); + sbuf.append(subqueries[i].getNativeSql()); + } + return sbuf.toString(); + } + + @Override + public SqlCommand getSqlCommand() { + return null; + } + + @Override + public String toString() { + return toString(null); + } + + @Override + public void close() { + for (SimpleQuery subquery : subqueries) { + subquery.close(); + } + } + + @Override + public Query[] getSubqueries() { + return subqueries; + } + + @Override + public boolean isStatementDescribed() { + for (SimpleQuery subquery : subqueries) { + if (!subquery.isStatementDescribed()) { + return false; + } + } + return true; + } + + @Override + public boolean isEmpty() { + for (SimpleQuery subquery : subqueries) { + if (!subquery.isEmpty()) { + return false; + } + } + return true; + } + + @Override + public int getBatchSize() { + return 0; // no-op, unsupported + } + + @Override + public Map getResultSetColumnNameIndexMap() { + return null; // unsupported + } + + private final SimpleQuery[] subqueries; + private final int[] 
offsets; +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java new file mode 100644 index 0000000..1815a91 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java @@ -0,0 +1,907 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core.v3; + +import org.postgresql.PGProperty; +import org.postgresql.core.ConnectionFactory; +import org.postgresql.core.PGStream; +import org.postgresql.core.QueryExecutor; +import org.postgresql.core.ServerVersion; +import org.postgresql.core.SetupQueryRunner; +import org.postgresql.core.SocketFactoryFactory; +import org.postgresql.core.Tuple; +import org.postgresql.core.Utils; +import org.postgresql.core.Version; +import org.postgresql.gss.MakeGSS; +import org.postgresql.hostchooser.CandidateHost; +import org.postgresql.hostchooser.GlobalHostStatusTracker; +import org.postgresql.hostchooser.HostChooser; +import org.postgresql.hostchooser.HostChooserFactory; +import org.postgresql.hostchooser.HostRequirement; +import org.postgresql.hostchooser.HostStatus; +import org.postgresql.jdbc.GSSEncMode; +import org.postgresql.jdbc.SslMode; +import org.postgresql.plugin.AuthenticationRequestType; +import org.postgresql.scram.ScramAuthenticator; +import org.postgresql.ssl.MakeSSL; +import org.postgresql.util.GT; +import org.postgresql.util.HostSpec; +import org.postgresql.util.MD5Digest; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.util.ServerErrorMessage; + +import java.io.IOException; +import java.net.ConnectException; +import java.nio.charset.StandardCharsets; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import 
java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.TimeZone; +import java.util.logging.Level; +import java.util.logging.LogRecord; +import java.util.logging.Logger; + +import javax.net.SocketFactory; + +/** + * ConnectionFactory implementation for version 3 (7.4+) connections. + * + * @author Oliver Jowett (oliver@opencloud.com), based on the previous implementation + */ +public class ConnectionFactoryImpl extends ConnectionFactory { + + private static class StartupParam { + private final String key; + private final String value; + + StartupParam(String key, String value) { + this.key = key; + this.value = value; + } + + @Override + public String toString() { + return this.key + "=" + this.value; + } + + public byte[] getEncodedKey() { + return this.key.getBytes(StandardCharsets.UTF_8); + } + + public byte[] getEncodedValue() { + return this.value.getBytes(StandardCharsets.UTF_8); + } + } + + private static final Logger LOGGER = Logger.getLogger(ConnectionFactoryImpl.class.getName()); + private static final int AUTH_REQ_OK = 0; + private static final int AUTH_REQ_KRB4 = 1; + private static final int AUTH_REQ_KRB5 = 2; + private static final int AUTH_REQ_PASSWORD = 3; + private static final int AUTH_REQ_CRYPT = 4; + private static final int AUTH_REQ_MD5 = 5; + private static final int AUTH_REQ_SCM = 6; + private static final int AUTH_REQ_GSS = 7; + private static final int AUTH_REQ_GSS_CONTINUE = 8; + private static final int AUTH_REQ_SSPI = 9; + private static final int AUTH_REQ_SASL = 10; + private static final int AUTH_REQ_SASL_CONTINUE = 11; + private static final int AUTH_REQ_SASL_FINAL = 12; + + private static final String IN_HOT_STANDBY = "in_hot_standby"; + + public ConnectionFactoryImpl() { + } + + private PGStream tryConnect(Properties info, SocketFactory socketFactory, HostSpec hostSpec, + SslMode sslMode, GSSEncMode gssEncMode) + throws SQLException, IOException { + int connectTimeout = 
PGProperty.CONNECT_TIMEOUT.getInt(info) * 1000; + String user = PGProperty.USER.getOrDefault(info); + String database = PGProperty.PG_DBNAME.getOrDefault(info); + if (user == null) { + throw new PSQLException(GT.tr("User cannot be null"), PSQLState.INVALID_NAME); + } + if (database == null) { + throw new PSQLException(GT.tr("Database cannot be null"), PSQLState.INVALID_NAME); + } + + PGStream newStream = new PGStream(socketFactory, hostSpec, connectTimeout); + try { + // Set the socket timeout if the "socketTimeout" property has been set. + int socketTimeout = PGProperty.SOCKET_TIMEOUT.getInt(info); + if (socketTimeout > 0) { + newStream.setNetworkTimeout(socketTimeout * 1000); + } + + String maxResultBuffer = PGProperty.MAX_RESULT_BUFFER.getOrDefault(info); + newStream.setMaxResultBuffer(maxResultBuffer); + + // Enable TCP keep-alive probe if required. + boolean requireTCPKeepAlive = PGProperty.TCP_KEEP_ALIVE.getBoolean(info); + newStream.getSocket().setKeepAlive(requireTCPKeepAlive); + + // Enable TCP no delay if required + boolean requireTCPNoDelay = PGProperty.TCP_NO_DELAY.getBoolean(info); + newStream.getSocket().setTcpNoDelay(requireTCPNoDelay); + + // Try to set SO_SNDBUF and SO_RECVBUF socket options, if requested. + // If receiveBufferSize and send_buffer_size are set to a value greater + // than 0, adjust. -1 means use the system default, 0 is ignored since not + // supported. 
+ + // Set SO_RECVBUF read buffer size + int receiveBufferSize = PGProperty.RECEIVE_BUFFER_SIZE.getInt(info); + if (receiveBufferSize > -1) { + // value of 0 not a valid buffer size value + if (receiveBufferSize > 0) { + newStream.getSocket().setReceiveBufferSize(receiveBufferSize); + } else { + LOGGER.log(Level.WARNING, "Ignore invalid value for receiveBufferSize: {0}", + receiveBufferSize); + } + } + + // Set SO_SNDBUF write buffer size + int sendBufferSize = PGProperty.SEND_BUFFER_SIZE.getInt(info); + if (sendBufferSize > -1) { + if (sendBufferSize > 0) { + newStream.getSocket().setSendBufferSize(sendBufferSize); + } else { + LOGGER.log(Level.WARNING, "Ignore invalid value for sendBufferSize: {0}", sendBufferSize); + } + } + + if (LOGGER.isLoggable(Level.FINE)) { + LOGGER.log(Level.FINE, "Receive Buffer Size is {0}", + newStream.getSocket().getReceiveBufferSize()); + LOGGER.log(Level.FINE, "Send Buffer Size is {0}", + newStream.getSocket().getSendBufferSize()); + } + + newStream = enableGSSEncrypted(newStream, gssEncMode, hostSpec.getHost(), info, connectTimeout); + + // if we have a security context then gss negotiation succeeded. Do not attempt SSL + // negotiation + if (!newStream.isGssEncrypted()) { + // Construct and send an ssl startup packet if requested. + newStream = enableSSL(newStream, sslMode, info, connectTimeout); + } + + // Make sure to set network timeout again, in case the stream changed due to GSS or SSL + if (socketTimeout > 0) { + newStream.setNetworkTimeout(socketTimeout * 1000); + } + + List paramList = getParametersForStartup(user, database, info); + sendStartupPacket(newStream, paramList); + + // Do authentication (until AuthenticationOk). 
+ doAuthentication(newStream, hostSpec.getHost(), user, info); + + return newStream; + } catch (Exception e) { + closeStream(newStream); + throw e; + } + } + + @Override + public QueryExecutor openConnectionImpl(HostSpec[] hostSpecs, Properties info) throws SQLException { + SslMode sslMode = SslMode.of(info); + GSSEncMode gssEncMode = GSSEncMode.of(info); + + HostRequirement targetServerType; + String targetServerTypeStr = PGProperty.TARGET_SERVER_TYPE.getOrDefault(info); + try { + targetServerType = HostRequirement.getTargetServerType(targetServerTypeStr); + } catch (IllegalArgumentException ex) { + throw new PSQLException( + GT.tr("Invalid targetServerType value: {0}", targetServerTypeStr), + PSQLState.CONNECTION_UNABLE_TO_CONNECT); + } + + SocketFactory socketFactory = SocketFactoryFactory.getSocketFactory(info); + + HostChooser hostChooser = + HostChooserFactory.createHostChooser(hostSpecs, targetServerType, info); + Iterator hostIter = hostChooser.iterator(); + Map knownStates = new HashMap<>(); + while (hostIter.hasNext()) { + CandidateHost candidateHost = hostIter.next(); + HostSpec hostSpec = candidateHost.hostSpec; + LOGGER.log(Level.FINE, "Trying to establish a protocol version 3 connection to {0}", hostSpec); + + // Note: per-connect-attempt status map is used here instead of GlobalHostStatusTracker + // for the case when "no good hosts" match (e.g. all the hosts are known as "connectfail") + // In that case, the system tries to connect to each host in order, thus it should not look into + // GlobalHostStatusTracker + HostStatus knownStatus = knownStates.get(hostSpec); + if (knownStatus != null && !candidateHost.targetServerType.allowConnectingTo(knownStatus)) { + if (LOGGER.isLoggable(Level.FINER)) { + LOGGER.log(Level.FINER, "Known status of host {0} is {1}, and required status was {2}. Will try next host", + new Object[]{hostSpec, knownStatus, candidateHost.targetServerType}); + } + continue; + } + + // + // Establish a connection. 
+ // + + PGStream newStream = null; + try { + try { + newStream = tryConnect(info, socketFactory, hostSpec, sslMode, gssEncMode); + } catch (SQLException e) { + if (sslMode == SslMode.PREFER + && PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) { + // Try non-SSL connection to cover case like "non-ssl only db" + // Note: PREFER allows loss of encryption, so no significant harm is made + Throwable ex = null; + try { + newStream = + tryConnect(info, socketFactory, hostSpec, SslMode.DISABLE, gssEncMode); + LOGGER.log(Level.FINE, "Downgraded to non-encrypted connection for host {0}", + hostSpec); + } catch (SQLException | IOException ee) { + ex = ee; + } + + if (ex != null) { + log(Level.FINE, "sslMode==PREFER, however non-SSL connection failed as well", ex); + // non-SSL failed as well, so re-throw original exception + // Add non-SSL exception as suppressed + e.addSuppressed(ex); + throw e; + } + } else if (sslMode == SslMode.ALLOW + && PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) { + // Try using SSL + Throwable ex = null; + try { + newStream = + tryConnect(info, socketFactory, hostSpec, SslMode.REQUIRE, gssEncMode); + LOGGER.log(Level.FINE, "Upgraded to encrypted connection for host {0}", + hostSpec); + } catch (SQLException ee) { + ex = ee; + } catch (IOException ee) { + ex = ee; // Can't use multi-catch in Java 6 :( + } + if (ex != null) { + log(Level.FINE, "sslMode==ALLOW, however SSL connection failed as well", ex); + // non-SSL failed as well, so re-throw original exception + // Add SSL exception as suppressed + e.addSuppressed(ex); + throw e; + } + + } else { + throw e; + } + } + + int cancelSignalTimeout = PGProperty.CANCEL_SIGNAL_TIMEOUT.getInt(info) * 1000; + + // Do final startup. 
+ QueryExecutor queryExecutor = new QueryExecutorImpl(newStream, cancelSignalTimeout, info);
+
+ // Check Primary or Secondary
+ HostStatus hostStatus = HostStatus.ConnectOK;
+ if (candidateHost.targetServerType != HostRequirement.any) {
+ hostStatus = isPrimary(queryExecutor) ? HostStatus.Primary : HostStatus.Secondary;
+ }
+ GlobalHostStatusTracker.reportHostStatus(hostSpec, hostStatus);
+ knownStates.put(hostSpec, hostStatus);
+ if (!candidateHost.targetServerType.allowConnectingTo(hostStatus)) {
+ queryExecutor.close();
+ continue;
+ }
+
+ runInitialQueries(queryExecutor, info);
+
+ // And we're done.
+ return queryExecutor;
+ } catch (ConnectException cex) {
+ // Added by Peter Mount
+ // ConnectException is thrown when the connection cannot be made.
+ // we trap this and return a more meaningful message for the end user
+ GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
+ knownStates.put(hostSpec, HostStatus.ConnectFail);
+ if (hostIter.hasNext()) {
+ log(Level.FINE, "ConnectException occurred while connecting to {0}", cex, hostSpec);
+ // still more addresses to try
+ continue;
+ }
+ throw new PSQLException(GT.tr(
+ "Connection to {0} refused. 
Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.", + hostSpec), PSQLState.CONNECTION_UNABLE_TO_CONNECT, cex); + } catch (IOException ioe) { + closeStream(newStream); + GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail); + knownStates.put(hostSpec, HostStatus.ConnectFail); + if (hostIter.hasNext()) { + log(Level.FINE, "IOException occurred while connecting to {0}", ioe, hostSpec); + // still more addresses to try + continue; + } + throw new PSQLException(GT.tr("The connection attempt failed."), + PSQLState.CONNECTION_UNABLE_TO_CONNECT, ioe); + } catch (SQLException se) { + closeStream(newStream); + GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail); + knownStates.put(hostSpec, HostStatus.ConnectFail); + if (hostIter.hasNext()) { + log(Level.FINE, "SQLException occurred while connecting to {0}", se, hostSpec); + // still more addresses to try + continue; + } + throw se; + } + } + throw new PSQLException(GT + .tr("Could not find a server with specified targetServerType: {0}", targetServerType), + PSQLState.CONNECTION_UNABLE_TO_CONNECT); + } + + private List getParametersForStartup(String user, String database, Properties info) { + List paramList = new ArrayList<>(); + paramList.add(new StartupParam("user", user)); + paramList.add(new StartupParam("database", database)); + paramList.add(new StartupParam("client_encoding", "UTF8")); + paramList.add(new StartupParam("DateStyle", "ISO")); + paramList.add(new StartupParam("TimeZone", createPostgresTimeZone())); + + Version assumeVersion = ServerVersion.from(PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(info)); + + if (assumeVersion.getVersionNum() >= ServerVersion.v9_0.getVersionNum()) { + // User is explicitly telling us this is a 9.0+ server so set properties here: + paramList.add(new StartupParam("extra_float_digits", "3")); + String appName = PGProperty.APPLICATION_NAME.getOrDefault(info); + if (appName != null) { + 
paramList.add(new StartupParam("application_name", appName));
+ }
+ } else {
+ // User has not explicitly told us that this is a 9.0+ server so stick to old default:
+ paramList.add(new StartupParam("extra_float_digits", "2"));
+ }
+
+ String replication = PGProperty.REPLICATION.getOrDefault(info);
+ if (replication != null && assumeVersion.getVersionNum() >= ServerVersion.v9_4.getVersionNum()) {
+ paramList.add(new StartupParam("replication", replication));
+ }
+
+ String currentSchema = PGProperty.CURRENT_SCHEMA.getOrDefault(info);
+ if (currentSchema != null) {
+ paramList.add(new StartupParam("search_path", currentSchema));
+ }
+
+ String options = PGProperty.OPTIONS.getOrDefault(info);
+ if (options != null) {
+ paramList.add(new StartupParam("options", options));
+ }
+
+ return paramList;
+ }
+
+ private static void log(Level level, String msg, Throwable thrown, Object... params) {
+ if (!LOGGER.isLoggable(level)) {
+ return;
+ }
+ LogRecord rec = new LogRecord(level, msg);
+ // Set the loggerName of the LogRecord with the current logger
+ rec.setLoggerName(LOGGER.getName());
+ rec.setParameters(params);
+ rec.setThrown(thrown);
+ LOGGER.log(rec);
+ }
+
+ /**
+ * Convert Java time zone to postgres time zone. All others stay the same except that GMT+nn
+ * changes to GMT-nn and vice versa.
+ * If you provide GMT+/-nn postgres uses POSIX rules which have a positive sign for west of Greenwich
+ * JAVA uses ISO rules where the positive sign is east of Greenwich
+ * To make matters more interesting postgres will always report in ISO
+ *
+ * @return The current JVM time zone in postgresql format. 
+ */ + private static String createPostgresTimeZone() { + String tz = TimeZone.getDefault().getID(); + if (tz.length() <= 3 || !tz.startsWith("GMT")) { + return tz; + } + char sign = tz.charAt(3); + String start; + switch (sign) { + case '+': + start = "GMT-"; + break; + case '-': + start = "GMT+"; + break; + default: + // unknown type + return tz; + } + + return start + tz.substring(4); + } + + @SuppressWarnings("fallthrough") + private PGStream enableGSSEncrypted(PGStream pgStream, GSSEncMode gssEncMode, String host, Properties info, + int connectTimeout) + throws IOException, PSQLException { + + if ( gssEncMode == GSSEncMode.DISABLE ) { + return pgStream; + } + + if (gssEncMode == GSSEncMode.ALLOW ) { + // start with plain text and let the server request it + return pgStream; + } + + /* + at this point gssEncMode is either PREFER or REQUIRE + libpq looks to see if there is a ticket in the cache before asking + the server if it supports encrypted GSS connections or not. + since the user has specifically asked or either prefer or require we can + assume they want it. + */ + /* + let's see if the server will allow a GSS encrypted connection + */ + String user = PGProperty.USER.getOrDefault(info); + if (user == null) { + throw new PSQLException("GSSAPI encryption required but was impossible user is null", PSQLState.CONNECTION_REJECTED); + } + + // attempt to acquire a GSS encrypted connection + LOGGER.log(Level.FINEST, " FE=> GSSENCRequest"); + + int gssTimeout = PGProperty.SSL_RESPONSE_TIMEOUT.getInt(info); + int currentTimeout = pgStream.getNetworkTimeout(); + + // if the current timeout is less than sslTimeout then + // use the smaller timeout. 
We could do something tricky + // here to not set it in that case but this is pretty readable + if (currentTimeout > 0 && currentTimeout < gssTimeout) { + gssTimeout = currentTimeout; + } + + pgStream.setNetworkTimeout(gssTimeout); + + // Send GSSEncryption request packet + pgStream.sendInteger4(8); + pgStream.sendInteger2(1234); + pgStream.sendInteger2(5680); + pgStream.flush(); + // Now get the response from the backend, one of N, E, S. + int beresp = pgStream.receiveChar(); + pgStream.setNetworkTimeout(currentTimeout); + switch (beresp) { + case 'E': + LOGGER.log(Level.FINEST, " <=BE GSSEncrypted Error"); + + // Server doesn't even know about the SSL handshake protocol + if (gssEncMode.requireEncryption()) { + throw new PSQLException(GT.tr("The server does not support GSS Encoding."), + PSQLState.CONNECTION_REJECTED); + } + + // We have to reconnect to continue. + pgStream.close(); + return new PGStream(pgStream.getSocketFactory(), pgStream.getHostSpec(), connectTimeout); + + case 'N': + LOGGER.log(Level.FINEST, " <=BE GSSEncrypted Refused"); + + // Server does not support gss encryption + if (gssEncMode.requireEncryption()) { + throw new PSQLException(GT.tr("The server does not support GSS Encryption."), + PSQLState.CONNECTION_REJECTED); + } + + return pgStream; + + case 'G': + LOGGER.log(Level.FINEST, " <=BE GSSEncryptedOk"); + try { + AuthenticationPluginManager.withPassword(AuthenticationRequestType.GSS, info, password -> { + MakeGSS.authenticate(true, pgStream, host, user, password, + PGProperty.JAAS_APPLICATION_NAME.getOrDefault(info), + PGProperty.KERBEROS_SERVER_NAME.getOrDefault(info), false, // TODO: fix this + PGProperty.JAAS_LOGIN.getBoolean(info), + PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info)); + return void.class; + }); + return pgStream; + } catch (PSQLException ex) { + // allow the connection to proceed + if (gssEncMode == GSSEncMode.PREFER) { + // we have to reconnect to continue + return new PGStream(pgStream, connectTimeout); + } + } + 
// fallthrough + + default: + throw new PSQLException(GT.tr("An error occurred while setting up the GSS Encoded connection."), + PSQLState.PROTOCOL_VIOLATION); + } + } + + private PGStream enableSSL(PGStream pgStream, SslMode sslMode, Properties info, + int connectTimeout) + throws IOException, PSQLException { + if (sslMode == SslMode.DISABLE) { + return pgStream; + } + if (sslMode == SslMode.ALLOW) { + // Allow ==> start with plaintext, use encryption if required by server + return pgStream; + } + + LOGGER.log(Level.FINEST, " FE=> SSLRequest"); + + int sslTimeout = PGProperty.SSL_RESPONSE_TIMEOUT.getInt(info); + int currentTimeout = pgStream.getNetworkTimeout(); + + // if the current timeout is less than sslTimeout then + // use the smaller timeout. We could do something tricky + // here to not set it in that case but this is pretty readable + if (currentTimeout > 0 && currentTimeout < sslTimeout) { + sslTimeout = currentTimeout; + } + + pgStream.setNetworkTimeout(sslTimeout); + // Send SSL request packet + pgStream.sendInteger4(8); + pgStream.sendInteger2(1234); + pgStream.sendInteger2(5679); + pgStream.flush(); + + // Now get the response from the backend, one of N, E, S. + int beresp = pgStream.receiveChar(); + pgStream.setNetworkTimeout(currentTimeout); + + switch (beresp) { + case 'E': + LOGGER.log(Level.FINEST, " <=BE SSLError"); + + // Server doesn't even know about the SSL handshake protocol + if (sslMode.requireEncryption()) { + throw new PSQLException(GT.tr("The server does not support SSL."), + PSQLState.CONNECTION_REJECTED); + } + + // We have to reconnect to continue. 
+ return new PGStream(pgStream, connectTimeout); + + case 'N': + LOGGER.log(Level.FINEST, " <=BE SSLRefused"); + + // Server does not support ssl + if (sslMode.requireEncryption()) { + throw new PSQLException(GT.tr("The server does not support SSL."), + PSQLState.CONNECTION_REJECTED); + } + + return pgStream; + + case 'S': + LOGGER.log(Level.FINEST, " <=BE SSLOk"); + + // Server supports ssl + MakeSSL.convert(pgStream, info); + return pgStream; + + default: + throw new PSQLException(GT.tr("An error occurred while setting up the SSL connection."), + PSQLState.PROTOCOL_VIOLATION); + } + } + + private void sendStartupPacket(PGStream pgStream, List params) + throws IOException { + if (LOGGER.isLoggable(Level.FINEST)) { + StringBuilder details = new StringBuilder(); + for (int i = 0; i < params.size(); i++) { + if (i != 0) { + details.append(", "); + } + details.append(params.get(i).toString()); + } + LOGGER.log(Level.FINEST, " FE=> StartupPacket({0})", details); + } + + // Precalculate message length and encode params. + int length = 4 + 4; + byte[][] encodedParams = new byte[params.size() * 2][]; + for (int i = 0; i < params.size(); i++) { + encodedParams[i * 2] = params.get(i).getEncodedKey(); + encodedParams[i * 2 + 1] = params.get(i).getEncodedValue(); + length += encodedParams[i * 2].length + 1 + encodedParams[i * 2 + 1].length + 1; + } + + length += 1; // Terminating \0 + + // Send the startup message. 
+ pgStream.sendInteger4(length); + pgStream.sendInteger2(3); // protocol major + pgStream.sendInteger2(0); // protocol minor + for (byte[] encodedParam : encodedParams) { + pgStream.send(encodedParam); + pgStream.sendChar(0); + } + + pgStream.sendChar(0); + pgStream.flush(); + } + + private void doAuthentication(PGStream pgStream, String host, String user, Properties info) throws IOException, SQLException { + // Now get the response from the backend, either an error message + // or an authentication request + + /* SCRAM authentication state, if used */ + ScramAuthenticator scramAuthenticator = null; + + authloop: + while (true) { + int beresp = pgStream.receiveChar(); + + switch (beresp) { + case 'E': + // An error occurred, so pass the error message to the + // user. + // + // The most common one to be thrown here is: + // "User authentication failed" + // + int elen = pgStream.receiveInteger4(); + + ServerErrorMessage errorMsg = + new ServerErrorMessage(pgStream.receiveErrorString(elen - 4)); + LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg); + throw new PSQLException(errorMsg, PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info)); + + case 'R': + // Authentication request. + // Get the message length + int msgLen = pgStream.receiveInteger4(); + + // Get the type of request + int areq = pgStream.receiveInteger4(); + + // Process the request. 
+ switch (areq) { + case AUTH_REQ_MD5: { + byte[] md5Salt = pgStream.receive(4); + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " <=BE AuthenticationReqMD5(salt={0})", Utils.toHexString(md5Salt)); + } + + byte[] digest = AuthenticationPluginManager.withEncodedPassword( + AuthenticationRequestType.MD5_PASSWORD, info, + encodedPassword -> MD5Digest.encode(user.getBytes(StandardCharsets.UTF_8), + encodedPassword, md5Salt) + ); + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " FE=> Password(md5digest={0})", new String(digest, StandardCharsets.US_ASCII)); + } + + try { + pgStream.sendChar('p'); + pgStream.sendInteger4(4 + digest.length + 1); + pgStream.send(digest); + } finally { + Arrays.fill(digest, (byte) 0); + } + pgStream.sendChar(0); + pgStream.flush(); + + break; + } + + case AUTH_REQ_PASSWORD: { + LOGGER.log(Level.FINEST, "<=BE AuthenticationReqPassword"); + LOGGER.log(Level.FINEST, " FE=> Password(password=)"); + + AuthenticationPluginManager.withEncodedPassword(AuthenticationRequestType.CLEARTEXT_PASSWORD, info, encodedPassword -> { + pgStream.sendChar('p'); + pgStream.sendInteger4(4 + encodedPassword.length + 1); + pgStream.send(encodedPassword); + return void.class; + }); + pgStream.sendChar(0); + pgStream.flush(); + + break; + } + + case AUTH_REQ_GSS: + /* + * Use GSSAPI if requested on all platforms, via JSSE. + * + * Note that this is slightly different to libpq, which uses SSPI for GSSAPI where + * supported. We prefer to use the existing Java JSSE Kerberos support rather than + * going to native (via JNA) calls where possible, so that JSSE system properties + * etc continue to work normally. + * + * Note that while SSPI is often Kerberos-based there's no guarantee it will be; it + * may be NTLM or anything else. If the client responds to an SSPI request via + * GSSAPI and the other end isn't using Kerberos for SSPI then authentication will + * fail. 
+ */ + final String gsslib = PGProperty.GSS_LIB.getOrDefault(info); + final boolean usespnego = PGProperty.USE_SPNEGO.getBoolean(info); + + /* + * Use gssapi. If the user has specified a Kerberos server + * name we'll always use JSSE GSSAPI. + */ + if ("gssapi".equals(gsslib)) { + LOGGER.log(Level.FINE, "Using JSSE GSSAPI, param gsslib=gssapi"); + } + + /* Use JGSS's GSSAPI for this request */ + AuthenticationPluginManager.withPassword(AuthenticationRequestType.GSS, info, password -> { + MakeGSS.authenticate(false, pgStream, host, user, password, + PGProperty.JAAS_APPLICATION_NAME.getOrDefault(info), + PGProperty.KERBEROS_SERVER_NAME.getOrDefault(info), usespnego, + PGProperty.JAAS_LOGIN.getBoolean(info), + PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info)); + return void.class; + }); + break; + + case AUTH_REQ_GSS_CONTINUE: + // unused + break; + + case AUTH_REQ_SASL: + + LOGGER.log(Level.FINEST, " <=BE AuthenticationSASL"); + + scramAuthenticator = AuthenticationPluginManager.withPassword(AuthenticationRequestType.SASL, info, password -> { + if (password == null) { + throw new PSQLException( + GT.tr( + "The server requested SCRAM-based authentication, but no password was provided."), + PSQLState.CONNECTION_REJECTED); + } + if (password.length == 0) { + throw new PSQLException( + GT.tr( + "The server requested SCRAM-based authentication, but the password is an empty string."), + PSQLState.CONNECTION_REJECTED); + } + return new ScramAuthenticator(user, String.valueOf(password), pgStream); + }); + scramAuthenticator.processServerMechanismsAndInit(); + scramAuthenticator.sendScramClientFirstMessage(); + // This works as follows: + // 1. When tests is run from IDE, it is assumed SCRAM library is on the classpath + // 2. In regular build for Java < 8 this `if` is deactivated and the code always throws + if (false) { + throw new PSQLException(GT.tr( + "SCRAM authentication is not supported by this driver. 
You need JDK >= 8 and pgjdbc >= 42.2.0 (not \".jre\" versions)", + areq), PSQLState.CONNECTION_REJECTED); + } + break; + + case AUTH_REQ_SASL_CONTINUE: + scramAuthenticator.processServerFirstMessage(msgLen - 4 - 4); + break; + + case AUTH_REQ_SASL_FINAL: + scramAuthenticator.verifyServerSignature(msgLen - 4 - 4); + break; + + case AUTH_REQ_OK: + /* Cleanup after successful authentication */ + LOGGER.log(Level.FINEST, " <=BE AuthenticationOk"); + break authloop; // We're done. + + default: + LOGGER.log(Level.FINEST, " <=BE AuthenticationReq (unsupported type {0})", areq); + throw new PSQLException(GT.tr( + "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.", + areq), PSQLState.CONNECTION_REJECTED); + } + + break; + + default: + throw new PSQLException(GT.tr("Protocol error. Session setup failed."), + PSQLState.PROTOCOL_VIOLATION); + } + } + } + + @SuppressWarnings("deprecation") + private void runInitialQueries(QueryExecutor queryExecutor, Properties info) + throws SQLException { + String assumeMinServerVersion = PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(info); + if (Utils.parseServerVersionStr(assumeMinServerVersion) >= ServerVersion.v9_0.getVersionNum()) { + // We already sent the parameter values in the StartupMessage so skip this + return; + } + + final int dbVersion = queryExecutor.getServerVersionNum(); + + if (PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(info) && dbVersion >= ServerVersion.v9_0.getVersionNum()) { + SetupQueryRunner.run(queryExecutor, "BEGIN", false); + } + + if (dbVersion >= ServerVersion.v9_0.getVersionNum()) { + SetupQueryRunner.run(queryExecutor, "SET extra_float_digits = 3", false); + } + + String appName = PGProperty.APPLICATION_NAME.getOrDefault(info); + if (appName != null && dbVersion >= ServerVersion.v9_0.getVersionNum()) { + StringBuilder sql = new 
StringBuilder(); + sql.append("SET application_name = '"); + Utils.escapeLiteral(sql, appName, queryExecutor.getStandardConformingStrings()); + sql.append("'"); + SetupQueryRunner.run(queryExecutor, sql.toString(), false); + } + + if (PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(info) && dbVersion >= ServerVersion.v9_0.getVersionNum()) { + SetupQueryRunner.run(queryExecutor, "COMMIT", false); + } + } + + /** + * Since PG14 there is GUC_REPORT ParamStatus {@code in_hot_standby} which is set to "on" + * when the server is in archive recovery or standby mode. In driver's lingo such server is called + * {@link org.postgresql.hostchooser.HostRequirement#secondary}. + * Previously {@code transaction_read_only} was used as a workable substitute. + * However {@code transaction_read_only} could have been manually overridden on the primary server + * by database user leading to a false positives: ie server is effectively read-only but + * technically is "primary" (not in a recovery/standby mode). + * + *

+ * <p>This method checks whether {@code in_hot_standby} GUC was reported by the server
+ * during initial connection:</p>
+ *
+ * <ul>
+ * <li>{@code in_hot_standby} was reported and the value was "on" then the server is a replica
+ * and database is read-only by definition, false is returned.</li>
+ * <li>{@code in_hot_standby} was reported and the value was "off"
+ * then the server is indeed primary but database may be in
+ * read-only mode nevertheless. We proceed to conservatively {@code show transaction_read_only}
+ * since users may not be expecting a readonly connection for {@code targetServerType=primary}</li>
+ * <li>If {@code in_hot_standby} has not been reported we fall back to pre v14 behavior.</li>
+ * </ul>
+ *
+ * <p>Do not confuse {@code hot_standby} and {@code in_hot_standby} ParamStatuses</p>

+ * + * @see GUC_REPORT documentation + * @see Hot standby documentation + * @see in_hot_standby patch thread v10 + * @see in_hot_standby patch thread v14 + * + */ + private boolean isPrimary(QueryExecutor queryExecutor) throws SQLException, IOException { + String inHotStandby = queryExecutor.getParameterStatus(IN_HOT_STANDBY); + if ("on".equalsIgnoreCase(inHotStandby)) { + return false; + } + Tuple results = SetupQueryRunner.run(queryExecutor, "show transaction_read_only", true); + Tuple nonNullResults = results; + String queriedTransactionReadonly = queryExecutor.getEncoding().decode(nonNullResults.get(0)); + return "off".equalsIgnoreCase(queriedTransactionReadonly); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyDualImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyDualImpl.java new file mode 100644 index 0000000..6ed13b8 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyDualImpl.java @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core.v3; + +import org.postgresql.copy.CopyDual; +import org.postgresql.util.ByteStreamWriter; +import org.postgresql.util.PSQLException; + +import java.sql.SQLException; +import java.util.ArrayDeque; +import java.util.Queue; + +public class CopyDualImpl extends CopyOperationImpl implements CopyDual { + private final Queue received = new ArrayDeque<>(); + + public CopyDualImpl() { + } + + @Override + public void writeToCopy(byte[] data, int off, int siz) throws SQLException { + getQueryExecutor().writeToCopy(this, data, off, siz); + } + + @Override + public void writeToCopy(ByteStreamWriter from) throws SQLException { + getQueryExecutor().writeToCopy(this, from); + } + + @Override + public void flushCopy() throws SQLException { + getQueryExecutor().flushCopy(this); + } + + @Override + public long endCopy() throws SQLException { + return getQueryExecutor().endCopy(this); + } + + @Override + public byte [] readFromCopy() throws SQLException { + return readFromCopy(true); + } + + @Override + public byte [] readFromCopy(boolean block) throws SQLException { + if (received.isEmpty()) { + getQueryExecutor().readFromCopy(this, block); + } + + return received.poll(); + } + + @Override + public void handleCommandStatus(String status) throws PSQLException { + } + + @Override + protected void handleCopydata(byte[] data) { + received.add(data); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyInImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyInImpl.java new file mode 100644 index 0000000..50b73d7 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyInImpl.java @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2009, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core.v3; + +import org.postgresql.copy.CopyIn; +import org.postgresql.util.ByteStreamWriter; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.SQLException; + +/** + *

COPY FROM STDIN operation.

+ * + *

Anticipated flow: + * + * CopyManager.copyIn() ->QueryExecutor.startCopy() - sends given query to server + * ->processCopyResults(): - receives CopyInResponse from Server - creates new CopyInImpl + * ->initCopy(): - receives copy metadata from server ->CopyInImpl.init() ->lock() + * connection for this operation - if query fails an exception is thrown - if query returns wrong + * CopyOperation, copyIn() cancels it before throwing exception <-return: new CopyInImpl holding + * lock on connection repeat CopyIn.writeToCopy() for all data ->CopyInImpl.writeToCopy() + * ->QueryExecutorImpl.writeToCopy() - sends given data ->processCopyResults() - parameterized + * not to block, just peek for new messages from server - on ErrorResponse, waits until protocol is + * restored and unlocks connection CopyIn.endCopy() ->CopyInImpl.endCopy() + * ->QueryExecutorImpl.endCopy() - sends CopyDone - processCopyResults() - on CommandComplete + * ->CopyOperationImpl.handleCommandComplete() - sets updatedRowCount when applicable - on + * ReadyForQuery unlock() connection for use by other operations <-return: + * CopyInImpl.getUpdatedRowCount()

+ */ +public class CopyInImpl extends CopyOperationImpl implements CopyIn { + + public CopyInImpl() { + } + + @Override + public void writeToCopy(byte[] data, int off, int siz) throws SQLException { + getQueryExecutor().writeToCopy(this, data, off, siz); + } + + @Override + public void writeToCopy(ByteStreamWriter from) throws SQLException { + getQueryExecutor().writeToCopy(this, from); + } + + @Override + public void flushCopy() throws SQLException { + getQueryExecutor().flushCopy(this); + } + + @Override + public long endCopy() throws SQLException { + return getQueryExecutor().endCopy(this); + } + + @Override + protected void handleCopydata(byte[] data) throws PSQLException { + throw new PSQLException(GT.tr("CopyIn copy direction can't receive data"), + PSQLState.PROTOCOL_VIOLATION); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java new file mode 100644 index 0000000..680c6d2 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2009, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core.v3; + +import org.postgresql.copy.CopyOperation; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.SQLException; + +public abstract class CopyOperationImpl implements CopyOperation { + QueryExecutorImpl queryExecutor; + int rowFormat; + int [] fieldFormats; + long handledRowCount = -1; + + public CopyOperationImpl() { + } + + void init(QueryExecutorImpl q, int fmt, int[] fmts) { + queryExecutor = q; + rowFormat = fmt; + fieldFormats = fmts; + } + + protected QueryExecutorImpl getQueryExecutor() { + return queryExecutor; + } + + @Override + public void cancelCopy() throws SQLException { + queryExecutor.cancelCopy(this); + } + + @Override + public int getFieldCount() { + return fieldFormats.length; + } + + @Override + public int getFieldFormat(int field) { + return fieldFormats[field]; + } + + @Override + public int getFormat() { + return rowFormat; + } + + @Override + public boolean isActive() { + return queryExecutor.hasLockOn(this); + } + + public void handleCommandStatus(String status) throws PSQLException { + if (status.startsWith("COPY")) { + int i = status.lastIndexOf(' '); + handledRowCount = i > 3 ? Long.parseLong(status.substring(i + 1)) : -1; + } else { + throw new PSQLException(GT.tr("CommandComplete expected COPY but got: " + status), + PSQLState.COMMUNICATION_ERROR); + } + } + + /** + * Consume received copy data. 
+ * + * @param data data that was receive by copy protocol + * @throws PSQLException if some internal problem occurs + */ + protected abstract void handleCopydata(byte[] data) throws PSQLException; + + @Override + public long getHandledRowCount() { + return handledRowCount; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java new file mode 100644 index 0000000..f7898bf --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2009, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core.v3; + +import org.postgresql.copy.CopyOut; + +import java.sql.SQLException; + +/** + *

Anticipated flow of a COPY TO STDOUT operation:

+ * + *

CopyManager.copyOut() ->QueryExecutor.startCopy() - sends given query to server + * ->processCopyResults(): - receives CopyOutResponse from Server - creates new CopyOutImpl + * ->initCopy(): - receives copy metadata from server ->CopyOutImpl.init() ->lock() + * connection for this operation - if query fails an exception is thrown - if query returns wrong + * CopyOperation, copyOut() cancels it before throwing exception <-returned: new CopyOutImpl + * holding lock on connection repeat CopyOut.readFromCopy() until null + * ->CopyOutImpl.readFromCopy() ->QueryExecutorImpl.readFromCopy() ->processCopyResults() - + * on copydata row from server ->CopyOutImpl.handleCopydata() stores reference to byte array - on + * CopyDone, CommandComplete, ReadyForQuery ->unlock() connection for use by other operations + * <-returned: byte array of data received from server or null at end.

+ */ +public class CopyOutImpl extends CopyOperationImpl implements CopyOut { + private byte [] currentDataRow; + + public CopyOutImpl() { + } + + @Override + public byte [] readFromCopy() throws SQLException { + return readFromCopy(true); + } + + @Override + public byte [] readFromCopy(boolean block) throws SQLException { + currentDataRow = null; + getQueryExecutor().readFromCopy(this, block); + return currentDataRow; + } + + @Override + protected void handleCopydata(byte[] data) { + currentDataRow = data; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java b/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java new file mode 100644 index 0000000..14a0fef --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2015, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core.v3; + +/** + * Information for "pending describe queue". + * + */ +class DescribeRequest { + public final SimpleQuery query; + public final SimpleParameterList parameterList; + public final boolean describeOnly; + public final String statementName; + + DescribeRequest(SimpleQuery query, SimpleParameterList parameterList, + boolean describeOnly, String statementName) { + this.query = query; + this.parameterList = parameterList; + this.describeOnly = describeOnly; + this.statementName = statementName; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java b/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java new file mode 100644 index 0000000..e01190b --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2015, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core.v3; + +/** + * Information for "pending execute queue". + * + */ +class ExecuteRequest { + public final SimpleQuery query; + public final Portal portal; + public final boolean asSimple; + + ExecuteRequest(SimpleQuery query, Portal portal, boolean asSimple) { + this.query = query; + this.portal = portal; + this.asSimple = asSimple; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java b/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java new file mode 100644 index 0000000..1300355 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core.v3; + +import org.postgresql.core.ResultCursor; + +import java.lang.ref.PhantomReference; +import java.nio.charset.StandardCharsets; + +/** + * V3 ResultCursor implementation in terms of backend Portals. This holds the state of a single + * Portal. We use a PhantomReference managed by our caller to handle resource cleanup. 
+ * + * @author Oliver Jowett (oliver@opencloud.com) + */ +class Portal implements ResultCursor { + Portal(SimpleQuery query, String portalName) { + this.query = query; + this.portalName = portalName; + this.encodedName = portalName.getBytes(StandardCharsets.UTF_8); + } + + @Override + public void close() { + PhantomReference cleanupRef = this.cleanupRef; + if (cleanupRef != null) { + cleanupRef.clear(); + cleanupRef.enqueue(); + this.cleanupRef = null; + } + } + + String getPortalName() { + return portalName; + } + + byte[] getEncodedPortalName() { + return encodedName; + } + + SimpleQuery getQuery() { + return query; + } + + void setCleanupRef(PhantomReference cleanupRef) { + this.cleanupRef = cleanupRef; + } + + @Override + public String toString() { + return portalName; + } + + // Holding on to a reference to the generating query has + // the nice side-effect that while this Portal is referenced, + // so is the SimpleQuery, so the underlying statement won't + // be closed while the portal is open (the backend closes + // all open portals when the statement is closed) + + private final SimpleQuery query; + private final String portalName; + private final byte[] encodedName; + private PhantomReference cleanupRef; +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java new file mode 100644 index 0000000..e1e12b6 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java @@ -0,0 +1,3102 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. 
+ +package org.postgresql.core.v3; + +import org.postgresql.PGProperty; +import org.postgresql.copy.CopyIn; +import org.postgresql.copy.CopyOperation; +import org.postgresql.copy.CopyOut; +import org.postgresql.core.CommandCompleteParser; +import org.postgresql.core.Encoding; +import org.postgresql.core.EncodingPredictor; +import org.postgresql.core.Field; +import org.postgresql.core.NativeQuery; +import org.postgresql.core.Notification; +import org.postgresql.core.Oid; +import org.postgresql.core.PGBindException; +import org.postgresql.core.PGStream; +import org.postgresql.core.ParameterList; +import org.postgresql.core.Parser; +import org.postgresql.core.Query; +import org.postgresql.core.QueryExecutor; +import org.postgresql.core.QueryExecutorBase; +import org.postgresql.core.ReplicationProtocol; +import org.postgresql.core.ResultCursor; +import org.postgresql.core.ResultHandler; +import org.postgresql.core.ResultHandlerBase; +import org.postgresql.core.ResultHandlerDelegate; +import org.postgresql.core.SqlCommand; +import org.postgresql.core.SqlCommandType; +import org.postgresql.core.TransactionState; +import org.postgresql.core.Tuple; +import org.postgresql.core.v3.adaptivefetch.AdaptiveFetchCache; +import org.postgresql.core.v3.replication.V3ReplicationProtocol; +import org.postgresql.jdbc.AutoSave; +import org.postgresql.jdbc.BatchResultHandler; +import org.postgresql.jdbc.ResourceLock; +import org.postgresql.jdbc.TimestampUtils; +import org.postgresql.util.ByteStreamWriter; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.util.PSQLWarning; +import org.postgresql.util.ServerErrorMessage; + +import java.io.IOException; +import java.lang.ref.PhantomReference; +import java.lang.ref.Reference; +import java.lang.ref.ReferenceQueue; +import java.net.Socket; +import java.net.SocketException; +import java.net.SocketTimeoutException; +import java.nio.charset.StandardCharsets; 
+import java.sql.SQLException; +import java.sql.SQLWarning; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.Properties; +import java.util.Set; +import java.util.TimeZone; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * QueryExecutor implementation for the V3 protocol. + */ +@SuppressWarnings("try") +public class QueryExecutorImpl extends QueryExecutorBase { + + private static final Logger LOGGER = Logger.getLogger(QueryExecutorImpl.class.getName()); + + private static final Field[] NO_FIELDS = new Field[0]; + + static { + //canonicalize commonly seen strings to reduce memory and speed comparisons + Encoding.canonicalize("application_name"); + Encoding.canonicalize("client_encoding"); + Encoding.canonicalize("DateStyle"); + Encoding.canonicalize("integer_datetimes"); + Encoding.canonicalize("off"); + Encoding.canonicalize("on"); + Encoding.canonicalize("server_encoding"); + Encoding.canonicalize("server_version"); + Encoding.canonicalize("server_version_num"); + Encoding.canonicalize("standard_conforming_strings"); + Encoding.canonicalize("TimeZone"); + Encoding.canonicalize("UTF8"); + Encoding.canonicalize("UTF-8"); + Encoding.canonicalize("in_hot_standby"); + } + + /** + * TimeZone of the current connection (TimeZone backend parameter). + */ + private TimeZone timeZone; + + /** + * application_name connection property. + */ + private String applicationName; + + /** + * True if server uses integers for date and time fields. False if server uses double. + */ + private boolean integerDateTimes; + + /** + * Bit set that has a bit set for each oid which should be received using binary format. 
+ */ + private final Set useBinaryReceiveForOids = new HashSet<>(); + + /** + * Bit set that has a bit set for each oid which should be sent using binary format. + */ + private final Set useBinarySendForOids = new HashSet<>(); + + /** + * This is a fake query object so processResults can distinguish "ReadyForQuery" messages + * from Sync messages vs from simple execute (aka 'Q'). + */ + private final SimpleQuery sync; + + private short deallocateEpoch; + + /** + * This caches the latest observed {@code set search_path} query so the reset of prepared + * statement cache can be skipped if using repeated calls for the same {@code set search_path} + * value. + */ + private String lastSetSearchPathQuery; + + /** + * The exception that caused the last transaction to fail. + */ + private SQLException transactionFailCause; + + private final ReplicationProtocol replicationProtocol; + + /** + * {@code CommandComplete(B)} messages are quite common, so we reuse instance to parse those + */ + private final CommandCompleteParser commandCompleteParser = new CommandCompleteParser(); + + private final AdaptiveFetchCache adaptiveFetchCache; + + @SuppressWarnings("this-escape") + public QueryExecutorImpl(PGStream pgStream, + int cancelSignalTimeout, Properties info) throws SQLException, IOException { + super(pgStream, cancelSignalTimeout, info); + + this.sync = (SimpleQuery) createQuery("SYNC", false, true).query; + + long maxResultBuffer = pgStream.getMaxResultBuffer(); + this.adaptiveFetchCache = new AdaptiveFetchCache(maxResultBuffer, info); + + this.allowEncodingChanges = PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(info); + this.cleanupSavePoints = PGProperty.CLEANUP_SAVEPOINTS.getBoolean(info); + // assignment, argument + this.replicationProtocol = new V3ReplicationProtocol(this, pgStream); + readStartupMessages(); + } + + @Override + public int getProtocolVersion() { + return 3; + } + + /** + *

Supplement to synchronization of public methods on current QueryExecutor.

+ * + *

Necessary for keeping the connection intact between calls to public methods sharing a state + * such as COPY subprotocol. waitOnLock() must be called at beginning of each connection access + * point.

+ * + *

Public methods sharing that state must then be synchronized among themselves. Normal method + * synchronization typically suffices for that.

+ * + *

See notes on related methods as well as currentCopy() below.

+ */ + private Object lockedFor; + + /** + * Obtain lock over this connection for given object, blocking to wait if necessary. + * + * @param obtainer object that gets the lock. Normally current thread. + * @throws PSQLException when already holding the lock or getting interrupted. + */ + private void lock(Object obtainer) throws PSQLException { + if (lockedFor == obtainer) { + throw new PSQLException(GT.tr("Tried to obtain lock while already holding it"), + PSQLState.OBJECT_NOT_IN_STATE); + + } + waitOnLock(); + lockedFor = obtainer; + } + + /** + * Release lock on this connection presumably held by given object. + * + * @param holder object that holds the lock. Normally current thread. + * @throws PSQLException when this thread does not hold the lock + */ + private void unlock(Object holder) throws PSQLException { + if (lockedFor != holder) { + throw new PSQLException(GT.tr("Tried to break lock on database connection"), + PSQLState.OBJECT_NOT_IN_STATE); + } + lockedFor = null; + lockCondition.signal(); + } + + /** + * Wait until our lock is released. Execution of a single synchronized method can then continue + * without further ado. Must be called at beginning of each synchronized public method. 
+ */ + private void waitOnLock() throws PSQLException { + while (lockedFor != null) { + try { + lockCondition.await(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new PSQLException( + GT.tr("Interrupted while waiting to obtain lock on database connection"), + PSQLState.OBJECT_NOT_IN_STATE, ie); + } + } + } + + /** + * @param holder object assumed to hold the lock + * @return whether given object actually holds the lock + */ + boolean hasLockOn(Object holder) { + try (ResourceLock ignore = lock.obtain()) { + return lockedFor == holder; + } + } + + /** + * @param holder object assumed to hold the lock + * @return whether given object actually holds the lock + */ + private boolean hasLock(Object holder) { + return lockedFor == holder; + } + + // + // Query parsing + // + + @Override + public Query createSimpleQuery(String sql) throws SQLException { + List queries = Parser.parseJdbcSql(sql, + getStandardConformingStrings(), false, true, + isReWriteBatchedInsertsEnabled(), getQuoteReturningIdentifiers()); + return wrap(queries); + } + + @Override + public Query wrap(List queries) { + if (queries.isEmpty()) { + // Empty query + return emptyQuery; + } + if (queries.size() == 1) { + NativeQuery firstQuery = queries.get(0); + if (isReWriteBatchedInsertsEnabled() + && firstQuery.getCommand().isBatchedReWriteCompatible()) { + int valuesBraceOpenPosition = + firstQuery.getCommand().getBatchRewriteValuesBraceOpenPosition(); + int valuesBraceClosePosition = + firstQuery.getCommand().getBatchRewriteValuesBraceClosePosition(); + return new BatchedQuery(firstQuery, this, valuesBraceOpenPosition, + valuesBraceClosePosition, isColumnSanitiserDisabled()); + } else { + return new SimpleQuery(firstQuery, this, isColumnSanitiserDisabled()); + } + } + + // Multiple statements. 
+ SimpleQuery[] subqueries = new SimpleQuery[queries.size()]; + int[] offsets = new int[subqueries.length]; + int offset = 0; + for (int i = 0; i < queries.size(); i++) { + NativeQuery nativeQuery = queries.get(i); + offsets[i] = offset; + subqueries[i] = new SimpleQuery(nativeQuery, this, isColumnSanitiserDisabled()); + offset += nativeQuery.bindPositions.length; + } + + return new CompositeQuery(subqueries, offsets); + } + + // + // Query execution + // + + private int updateQueryMode(int flags) { + switch (getPreferQueryMode()) { + case SIMPLE: + return flags | QUERY_EXECUTE_AS_SIMPLE; + case EXTENDED: + return flags & ~QUERY_EXECUTE_AS_SIMPLE; + default: + return flags; + } + } + + @Override + public void execute(Query query, ParameterList parameters, + ResultHandler handler, + int maxRows, int fetchSize, int flags) throws SQLException { + execute(query, parameters, handler, maxRows, fetchSize, flags, false); + } + + @Override + public void execute(Query query, ParameterList parameters, + ResultHandler handler, + int maxRows, int fetchSize, int flags, boolean adaptiveFetch) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + waitOnLock(); + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " simple execute, handler={0}, maxRows={1}, fetchSize={2}, flags={3}", + new Object[]{handler, maxRows, fetchSize, flags}); + } + + if (parameters == null) { + parameters = SimpleQuery.NO_PARAMETERS; + } + + flags = updateQueryMode(flags); + + boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0; + + ((V3ParameterList) parameters).convertFunctionOutParameters(); + + // Check parameters are all set.. 
+ if (!describeOnly) { + ((V3ParameterList) parameters).checkAllParametersSet(); + } + + boolean autosave = false; + try { + try { + handler = sendQueryPreamble(handler, flags); + autosave = sendAutomaticSavepoint(query, flags); + sendQuery(query, (V3ParameterList) parameters, maxRows, fetchSize, flags, + handler, null, adaptiveFetch); + if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) { + // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message + // on its own + } else { + sendSync(); + } + processResults(handler, flags, adaptiveFetch); + estimatedReceiveBufferBytes = 0; + } catch (PGBindException se) { + // There are three causes of this error, an + // invalid total Bind message length, a + // BinaryStream that cannot provide the amount + // of data claimed by the length argument, and + // a BinaryStream that throws an Exception + // when reading. + // + // We simply do not send the Execute message + // so we can just continue on as if nothing + // has happened. Perhaps we need to + // introduce an error here to force the + // caller to rollback if there is a + // transaction in progress? 
+ // + sendSync(); + processResults(handler, flags, adaptiveFetch); + estimatedReceiveBufferBytes = 0; + handler + .handleError(new PSQLException(GT.tr("Unable to bind parameter values for statement."), + PSQLState.INVALID_PARAMETER_VALUE, se.getIOException())); + } + } catch (IOException e) { + abort(); + handler.handleError( + new PSQLException(GT.tr("An I/O error occurred while sending to the backend."), + PSQLState.CONNECTION_FAILURE, e)); + } + + try { + handler.handleCompletion(); + if (cleanupSavePoints) { + releaseSavePoint(autosave, flags); + } + } catch (SQLException e) { + rollbackIfRequired(autosave, e); + } + } + } + + private boolean sendAutomaticSavepoint(Query query, int flags) throws IOException { + if (((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) == 0 + || getTransactionState() == TransactionState.OPEN) + && query != restoreToAutoSave + && !"COMMIT".equalsIgnoreCase(query.getNativeSql()) + && getAutoSave() != AutoSave.NEVER + // If query has no resulting fields, it cannot fail with 'cached plan must not change result type' + // thus no need to set a savepoint before such query + && (getAutoSave() == AutoSave.ALWAYS + // If CompositeQuery is observed, just assume it might fail and set the savepoint + || !(query instanceof SimpleQuery) + || ((SimpleQuery) query).getFields() != null)) { + + /* + create a different SAVEPOINT the first time so that all subsequent SAVEPOINTS can be released + easily. There have been reports of server resources running out if there are too many + SAVEPOINTS. 
+ */ + sendOneQuery(autoSaveQuery, SimpleQuery.NO_PARAMETERS, 1, 0, + QUERY_NO_RESULTS | QUERY_NO_METADATA + // PostgreSQL does not support bind, exec, simple, sync message flow, + // so we force autosavepoint to use simple if the main query is using simple + | QUERY_EXECUTE_AS_SIMPLE); + return true; + } + return false; + } + + private void releaseSavePoint(boolean autosave, int flags) throws SQLException { + if ( autosave + && getAutoSave() == AutoSave.ALWAYS + && getTransactionState() == TransactionState.OPEN) { + try { + sendOneQuery(releaseAutoSave, SimpleQuery.NO_PARAMETERS, 1, 0, + QUERY_NO_RESULTS | QUERY_NO_METADATA + | QUERY_EXECUTE_AS_SIMPLE); + + } catch (IOException ex) { + throw new PSQLException(GT.tr("Error releasing savepoint"), PSQLState.IO_ERROR); + } + } + } + + private void rollbackIfRequired(boolean autosave, SQLException e) throws SQLException { + if (autosave + && getTransactionState() == TransactionState.FAILED + && (getAutoSave() == AutoSave.ALWAYS || willHealOnRetry(e))) { + try { + // ROLLBACK and AUTOSAVE are executed as simple always to overcome "statement no longer exists S_xx" + execute(restoreToAutoSave, SimpleQuery.NO_PARAMETERS, new ResultHandlerDelegate(null), + 1, 0, QUERY_NO_RESULTS | QUERY_NO_METADATA | QUERY_EXECUTE_AS_SIMPLE); + } catch (SQLException e2) { + // That's O(N), sorry + e.setNextException(e2); + } + } + throw e; + } + + // Deadlock avoidance: + // + // It's possible for the send and receive streams to get "deadlocked" against each other since + // we do not have a separate thread. The scenario is this: we have two streams: + // + // driver -> TCP buffering -> server + // server -> TCP buffering -> driver + // + // The server behaviour is roughly: + // while true: + // read message + // execute message + // write results + // + // If the server -> driver stream has a full buffer, the write will block. 
+ // If the driver is still writing when this happens, and the driver -> server + // stream also fills up, we deadlock: the driver is blocked on write() waiting + // for the server to read some more data, and the server is blocked on write() + // waiting for the driver to read some more data. + // + // To avoid this, we guess at how much response data we can request from the + // server before the server -> driver stream's buffer is full (MAX_BUFFERED_RECV_BYTES). + // This is the point where the server blocks on write and stops reading data. If we + // reach this point, we force a Sync message and read pending data from the server + // until ReadyForQuery, then go back to writing more queries unless we saw an error. + // + // This is not 100% reliable -- it's only done in the batch-query case and only + // at a reasonably high level (per query, not per message), and it's only an estimate + // -- so it might break. To do it correctly in all cases would seem to require a + // separate send or receive thread as we can only do the Sync-and-read-results + // operation at particular points, and also as we don't really know how much data + // the server is sending. + // + // Our message size estimation is coarse, and disregards asynchronous + // notifications, warnings/info/debug messages, etc, so the response size may be + // quite different from the 250 bytes assumed here even for queries that don't + // return data. + // + // See github issue #194 and #195 . + // + // Assume 64k server->client buffering, which is extremely conservative. A typical + // system will have 200kb or more of buffers for its receive buffers, and the sending + // system will typically have the same on the send side, giving us 400kb or to work + // with. (We could check Java's receive buffer size, but prefer to assume a very + // conservative buffer instead, and we don't know how big the server's send + // buffer is.) 
+ // + private static final int MAX_BUFFERED_RECV_BYTES = 64000; + private static final int NODATA_QUERY_RESPONSE_SIZE_BYTES = 250; + + @Override + public void execute(Query[] queries, ParameterList[] parameterLists, + BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags) throws SQLException { + execute(queries, parameterLists, batchHandler, maxRows, fetchSize, flags, false); + } + + @Override + public void execute(Query[] queries, ParameterList[] parameterLists, + BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags, boolean adaptiveFetch) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + waitOnLock(); + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " batch execute {0} queries, handler={1}, maxRows={2}, fetchSize={3}, flags={4}", + new Object[]{queries.length, batchHandler, maxRows, fetchSize, flags}); + } + + flags = updateQueryMode(flags); + + boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0; + // Check parameters and resolve OIDs. 
+ if (!describeOnly) { + for (ParameterList parameterList : parameterLists) { + if (parameterList != null) { + ((V3ParameterList) parameterList).checkAllParametersSet(); + } + } + } + + boolean autosave = false; + ResultHandler handler = batchHandler; + try { + handler = sendQueryPreamble(batchHandler, flags); + autosave = sendAutomaticSavepoint(queries[0], flags); + estimatedReceiveBufferBytes = 0; + + for (int i = 0; i < queries.length; i++) { + Query query = queries[i]; + V3ParameterList parameters = (V3ParameterList) parameterLists[i]; + if (parameters == null) { + parameters = SimpleQuery.NO_PARAMETERS; + } + + sendQuery(query, parameters, maxRows, fetchSize, flags, handler, batchHandler, adaptiveFetch); + + if (handler.getException() != null) { + break; + } + } + + if (handler.getException() == null) { + if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) { + // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message + // on its own + } else { + sendSync(); + } + processResults(handler, flags, adaptiveFetch); + estimatedReceiveBufferBytes = 0; + } + } catch (IOException e) { + abort(); + handler.handleError( + new PSQLException(GT.tr("An I/O error occurred while sending to the backend."), + PSQLState.CONNECTION_FAILURE, e)); + } + + try { + handler.handleCompletion(); + if (cleanupSavePoints) { + releaseSavePoint(autosave, flags); + } + } catch (SQLException e) { + rollbackIfRequired(autosave, e); + } + } + } + + private ResultHandler sendQueryPreamble(final ResultHandler delegateHandler, int flags) + throws IOException { + // First, send CloseStatements for finalized SimpleQueries that had statement names assigned. + processDeadParsedQueries(); + processDeadPortals(); + + // Send BEGIN on first statement in transaction. 
+ if ((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) != 0 + || getTransactionState() != TransactionState.IDLE) { + return delegateHandler; + } + + int beginFlags = QueryExecutor.QUERY_NO_METADATA; + if ((flags & QueryExecutor.QUERY_ONESHOT) != 0) { + beginFlags |= QueryExecutor.QUERY_ONESHOT; + } + + beginFlags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE; + + beginFlags = updateQueryMode(beginFlags); + + final SimpleQuery beginQuery = (flags & QueryExecutor.QUERY_READ_ONLY_HINT) == 0 ? beginTransactionQuery : beginReadOnlyTransactionQuery; + + sendOneQuery(beginQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags); + + // Insert a handler that intercepts the BEGIN. + return new ResultHandlerDelegate(delegateHandler) { + private boolean sawBegin = false; + + @Override + public void handleResultRows(Query fromQuery, Field[] fields, List tuples, + ResultCursor cursor) { + if (sawBegin) { + super.handleResultRows(fromQuery, fields, tuples, cursor); + } + } + + @Override + public void handleCommandStatus(String status, long updateCount, long insertOID) { + if (!sawBegin) { + sawBegin = true; + if (!"BEGIN".equals(status)) { + handleError(new PSQLException(GT.tr("Expected command status BEGIN, got {0}.", status), + PSQLState.PROTOCOL_VIOLATION)); + } + } else { + super.handleCommandStatus(status, updateCount, insertOID); + } + } + }; + } + + // + // Fastpath + // + + @Override + @SuppressWarnings("deprecation") + public byte [] fastpathCall(int fnid, ParameterList parameters, + boolean suppressBegin) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + waitOnLock(); + if (!suppressBegin) { + doSubprotocolBegin(); + } + try { + sendFastpathCall(fnid, (SimpleParameterList) parameters); + return receiveFastpathResult(); + } catch (IOException ioe) { + abort(); + throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."), + PSQLState.CONNECTION_FAILURE, ioe); + } + } + } + + public void doSubprotocolBegin() throws SQLException { + if 
(getTransactionState() == TransactionState.IDLE) { + + LOGGER.log(Level.FINEST, "Issuing BEGIN before fastpath or copy call."); + + ResultHandler handler = new ResultHandlerBase() { + private boolean sawBegin = false; + + @Override + public void handleCommandStatus(String status, long updateCount, long insertOID) { + if (!sawBegin) { + if (!"BEGIN".equals(status)) { + handleError( + new PSQLException(GT.tr("Expected command status BEGIN, got {0}.", status), + PSQLState.PROTOCOL_VIOLATION)); + } + sawBegin = true; + } else { + handleError(new PSQLException(GT.tr("Unexpected command status: {0}.", status), + PSQLState.PROTOCOL_VIOLATION)); + } + } + + @Override + public void handleWarning(SQLWarning warning) { + // we don't want to ignore warnings and it would be tricky + // to chain them back to the connection, so since we don't + // expect to get them in the first place, we just consider + // them errors. + handleError(warning); + } + }; + + try { + /* Send BEGIN with simple protocol preferred */ + int beginFlags = QueryExecutor.QUERY_NO_METADATA + | QueryExecutor.QUERY_ONESHOT + | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE; + beginFlags = updateQueryMode(beginFlags); + sendOneQuery(beginTransactionQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags); + sendSync(); + processResults(handler, 0); + estimatedReceiveBufferBytes = 0; + } catch (IOException ioe) { + throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."), + PSQLState.CONNECTION_FAILURE, ioe); + } + } + + } + + @Override + @SuppressWarnings("deprecation") + public ParameterList createFastpathParameters(int count) { + return new SimpleParameterList(count, this); + } + + private void sendFastpathCall(int fnid, SimpleParameterList params) + throws SQLException, IOException { + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " FE=> FunctionCall({0}, {1} params)", new Object[]{fnid, params.getParameterCount()}); + } + + // + // Total size = 4 (length) + // + 4 (function 
OID) + // + 2 (format code count) + N * 2 (format codes) + // + 2 (parameter count) + encodedSize (parameters) + // + 2 (result format) + + int paramCount = params.getParameterCount(); + int encodedSize = 0; + for (int i = 1; i <= paramCount; i++) { + if (params.isNull(i)) { + encodedSize += 4; + } else { + encodedSize += 4 + params.getV3Length(i); + } + } + + pgStream.sendChar('F'); + pgStream.sendInteger4(4 + 4 + 2 + 2 * paramCount + 2 + encodedSize + 2); + pgStream.sendInteger4(fnid); + pgStream.sendInteger2(paramCount); + for (int i = 1; i <= paramCount; i++) { + pgStream.sendInteger2(params.isBinary(i) ? 1 : 0); + } + pgStream.sendInteger2(paramCount); + for (int i = 1; i <= paramCount; i++) { + if (params.isNull(i)) { + pgStream.sendInteger4(-1); + } else { + pgStream.sendInteger4(params.getV3Length(i)); // Parameter size + params.writeV3Value(i, pgStream); + } + } + pgStream.sendInteger2(1); // Binary result format + pgStream.flush(); + } + + // Just for API compatibility with previous versions. + @Override + public void processNotifies() throws SQLException { + processNotifies(-1); + } + + /** + * @param timeoutMillis when > 0, block for this time + * when =0, block forever + * when < 0, don't block + */ + @Override + public void processNotifies(int timeoutMillis) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + waitOnLock(); + // Asynchronous notifies only arrive when we are not in a transaction + if (getTransactionState() != TransactionState.IDLE) { + return; + } + + if (hasNotifications()) { + // No need to timeout when there are already notifications. We just check for more in this case. 
+ timeoutMillis = -1; + } + + boolean useTimeout = timeoutMillis > 0; + long startTime = 0L; + int oldTimeout = 0; + if (useTimeout) { + startTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); + try { + oldTimeout = pgStream.getSocket().getSoTimeout(); + } catch (SocketException e) { + throw new PSQLException(GT.tr("An error occurred while trying to get the socket " + + "timeout."), PSQLState.CONNECTION_FAILURE, e); + } + } + + try { + while (timeoutMillis >= 0 || pgStream.hasMessagePending()) { + if (useTimeout && timeoutMillis >= 0) { + setSocketTimeout(timeoutMillis); + } + int c = pgStream.receiveChar(); + if (useTimeout && timeoutMillis >= 0) { + setSocketTimeout(0); // Don't timeout after first char + } + switch (c) { + case 'A': // Asynchronous Notify + receiveAsyncNotify(); + timeoutMillis = -1; + continue; + case 'E': + // Error Response (response to pretty much everything; backend then skips until Sync) + throw receiveErrorResponse(); + case 'N': // Notice Response (warnings / info) + SQLWarning warning = receiveNoticeResponse(); + addWarning(warning); + if (useTimeout) { + long newTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); + timeoutMillis = timeoutMillis + (int)(startTime - newTimeMillis); // Overflows after 49 days, ignore that + startTime = newTimeMillis; + if (timeoutMillis == 0) { + timeoutMillis = -1; // Don't accidentally wait forever + } + } + break; + default: + throw new PSQLException(GT.tr("Unknown Response Type {0}.", (char) c), + PSQLState.CONNECTION_FAILURE); + } + } + } catch (SocketTimeoutException ioe) { + // No notifications this time... 
+ } catch (IOException ioe) { + throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."), + PSQLState.CONNECTION_FAILURE, ioe); + } finally { + if (useTimeout) { + setSocketTimeout(oldTimeout); + } + } + } + } + + private void setSocketTimeout(int millis) throws PSQLException { + try { + Socket s = pgStream.getSocket(); + if (!s.isClosed()) { // Is this check required? + pgStream.setNetworkTimeout(millis); + } + } catch (IOException e) { + throw new PSQLException(GT.tr("An error occurred while trying to reset the socket timeout."), + PSQLState.CONNECTION_FAILURE, e); + } + } + + private byte [] receiveFastpathResult() throws IOException, SQLException { + boolean endQuery = false; + SQLException error = null; + byte[] returnValue = null; + + while (!endQuery) { + int c = pgStream.receiveChar(); + switch (c) { + case 'A': // Asynchronous Notify + receiveAsyncNotify(); + break; + + case 'E': + // Error Response (response to pretty much everything; backend then skips until Sync) + SQLException newError = receiveErrorResponse(); + if (error == null) { + error = newError; + } else { + error.setNextException(newError); + } + // keep processing + break; + + case 'N': // Notice Response (warnings / info) + SQLWarning warning = receiveNoticeResponse(); + addWarning(warning); + break; + + case 'Z': // Ready For Query (eventual response to Sync) + receiveRFQ(); + endQuery = true; + break; + + case 'V': // FunctionCallResponse + int msgLen = pgStream.receiveInteger4(); + int valueLen = pgStream.receiveInteger4(); + + LOGGER.log(Level.FINEST, " <=BE FunctionCallResponse({0} bytes)", valueLen); + + if (valueLen != -1) { + byte[] buf = new byte[valueLen]; + pgStream.receive(buf, 0, valueLen); + returnValue = buf; + } + + break; + + case 'S': // Parameter Status + try { + receiveParameterStatus(); + } catch (SQLException e) { + if (error == null) { + error = e; + } else { + error.setNextException(e); + } + endQuery = true; + } + break; + + default: + 
throw new PSQLException(GT.tr("Unknown Response Type {0}.", (char) c), + PSQLState.CONNECTION_FAILURE); + } + + } + + // did we get an error during this query? + if (error != null) { + throw error; + } + + return returnValue; + } + + // + // Copy subprotocol implementation + // + + /** + * Sends given query to BE to start, initialize and lock connection for a CopyOperation. + * + * @param sql COPY FROM STDIN / COPY TO STDOUT statement + * @return CopyIn or CopyOut operation object + * @throws SQLException on failure + */ + @Override + public CopyOperation startCopy(String sql, boolean suppressBegin) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + waitOnLock(); + if (!suppressBegin) { + doSubprotocolBegin(); + } + byte[] buf = sql.getBytes(StandardCharsets.UTF_8); + + try { + LOGGER.log(Level.FINEST, " FE=> Query(CopyStart)"); + + pgStream.sendChar('Q'); + pgStream.sendInteger4(buf.length + 4 + 1); + pgStream.send(buf); + pgStream.sendChar(0); + pgStream.flush(); + + return processCopyResults(null, true); + // expect a CopyInResponse or CopyOutResponse to our query above + } catch (IOException ioe) { + throw new PSQLException(GT.tr("Database connection failed when starting copy"), + PSQLState.CONNECTION_FAILURE, ioe); + } + } + } + + /** + * Locks connection and calls initializer for a new CopyOperation Called via startCopy -> + * processCopyResults. 
   *
   * @param op an uninitialized CopyOperation
   * @throws SQLException on locking failure
   * @throws IOException on database connection failure
   */
  private void initCopy(CopyOperationImpl op) throws SQLException, IOException {
    try (ResourceLock ignore = lock.obtain()) {
      pgStream.receiveInteger4(); // length not used
      int rowFormat = pgStream.receiveChar();
      int numFields = pgStream.receiveInteger2();
      int[] fieldFormats = new int[numFields];

      for (int i = 0; i < numFields; i++) {
        fieldFormats[i] = pgStream.receiveInteger2();
      }

      lock(op);
      op.init(this, rowFormat, fieldFormats);
    }
  }

  /**
   * Finishes a copy operation and unlocks connection discarding any exchanged data.
   *
   * @param op the copy operation presumably currently holding lock on this connection
   * @throws SQLException on any additional failure
   */
  public void cancelCopy(CopyOperationImpl op) throws SQLException {
    if (!hasLock(op)) {
      throw new PSQLException(GT.tr("Tried to cancel an inactive copy operation"),
          PSQLState.OBJECT_NOT_IN_STATE);
    }

    SQLException error = null;
    int errors = 0;

    try {
      // NOTE(review): CopyDualImpl presumably implements CopyIn and therefore takes this
      // branch as well — confirm that CopyFail is the intended cancel path for dual mode.
      if (op instanceof CopyIn) {
        try (ResourceLock ignore = lock.obtain()) {
          LOGGER.log(Level.FINEST, "FE => CopyFail");
          final byte[] msg = "Copy cancel requested".getBytes(StandardCharsets.US_ASCII);
          pgStream.sendChar('f'); // CopyFail
          pgStream.sendInteger4(5 + msg.length); // 4 length bytes + payload + NUL terminator
          pgStream.send(msg);
          pgStream.sendChar(0);
          pgStream.flush();
          do {
            try {
              processCopyResults(op, true); // discard rest of input
            } catch (SQLException se) { // expected error response to failing copy
              errors++;
              // Append any previously collected exception to the end of the new one's
              // chain, so the most recent error is reported first.
              if (error != null) {
                SQLException e = se;
                SQLException next;
                while ((next = e.getNextException()) != null) {
                  e = next;
                }
                e.setNextException(error);
              }
              error = se;
            }
          } while (hasLock(op));
        }
      } else if (op instanceof CopyOut) {
        sendQueryCancel();
      }

    } catch (IOException ioe) {
      throw new PSQLException(GT.tr("Database connection failed when canceling copy operation"),
          PSQLState.CONNECTION_FAILURE, ioe);
    } finally {
      // Need to ensure the lock isn't held anymore, or else
      // future operations, rather than failing due to the
      // broken connection, will simply hang waiting for this
      // lock.
      try (ResourceLock ignore = lock.obtain()) {
        if (hasLock(op)) {
          unlock(op);
        }
      }
    }

    if (op instanceof CopyIn) {
      if (errors < 1) {
        throw new PSQLException(GT.tr("Missing expected error response to copy cancel request"),
            PSQLState.COMMUNICATION_ERROR);
      } else if (errors > 1) {
        throw new PSQLException(
            GT.tr("Got {0} error responses to single copy cancel request", String.valueOf(errors)),
            PSQLState.COMMUNICATION_ERROR, error);
      }
    }
  }

  /**
   * Finishes writing to copy and unlocks connection.
   *
   * @param op the copy operation presumably currently holding lock on this connection
   * @return number of rows updated for server versions 8.2 or newer
   * @throws SQLException on failure
   */
  public long endCopy(CopyOperationImpl op) throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      if (!hasLock(op)) {
        throw new PSQLException(GT.tr("Tried to end inactive copy"), PSQLState.OBJECT_NOT_IN_STATE);
      }

      try {
        LOGGER.log(Level.FINEST, " FE=> CopyDone");

        pgStream.sendChar('c'); // CopyDone
        pgStream.sendInteger4(4);
        pgStream.flush();

        // Drain remaining backend messages; processCopyResults unlocks when the
        // copy completes, which terminates this loop.
        do {
          processCopyResults(op, true);
        } while (hasLock(op));
        return op.getHandledRowCount();
      } catch (IOException ioe) {
        throw new PSQLException(GT.tr("Database connection failed when ending copy"),
            PSQLState.CONNECTION_FAILURE, ioe);
      }
    }
  }

  /**
   * Sends data during a live COPY IN operation.
   * Only unlocks the connection if server suddenly
   * returns CommandComplete, which should not happen
   *
   * @param op the CopyIn operation presumably currently holding lock on this connection
   * @param data bytes to send
   * @param off index of first byte to send (usually 0)
   * @param siz number of bytes to send (usually data.length)
   * @throws SQLException on failure
   */
  public void writeToCopy(CopyOperationImpl op, byte[] data, int off, int siz)
      throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      if (!hasLock(op)) {
        throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
            PSQLState.OBJECT_NOT_IN_STATE);
      }

      LOGGER.log(Level.FINEST, " FE=> CopyData({0})", siz);

      try {
        // CopyData message: 'd' + int32 length (4 length bytes + payload) + payload.
        // Note: not flushed here; see flushCopy().
        pgStream.sendChar('d');
        pgStream.sendInteger4(siz + 4);
        pgStream.send(data, off, siz);
      } catch (IOException ioe) {
        throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
            PSQLState.CONNECTION_FAILURE, ioe);
      }
    }
  }

  /**
   * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
   * returns CommandComplete, which should not happen
   *
   * @param op the CopyIn operation presumably currently holding lock on this connection
   * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
   * @throws SQLException on failure
   */
  public void writeToCopy(CopyOperationImpl op, ByteStreamWriter from)
      throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      if (!hasLock(op)) {
        throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
            PSQLState.OBJECT_NOT_IN_STATE);
      }

      int siz = from.getLength();
      LOGGER.log(Level.FINEST, " FE=> CopyData({0})", siz);

      try {
        // Same CopyData framing as the byte[] overload, streaming from the writer.
        pgStream.sendChar('d');
        pgStream.sendInteger4(siz + 4);
        pgStream.send(from);
      } catch (IOException ioe) {
        throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
            PSQLState.CONNECTION_FAILURE, ioe);
      }
    }
  }

  /**
   * Flushes any buffered CopyData to the backend during an active copy operation.
   *
   * @param op the copy operation presumably currently holding lock on this connection
   * @throws SQLException if the operation is inactive or the flush fails
   */
  public void flushCopy(CopyOperationImpl op) throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      if (!hasLock(op)) {
        throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
            PSQLState.OBJECT_NOT_IN_STATE);
      }

      try {
        pgStream.flush();
      } catch (IOException ioe) {
        throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
            PSQLState.CONNECTION_FAILURE, ioe);
      }
    }
  }

  /**
   * Wait for a row of data to be received from server on an active copy operation
   * Connection gets unlocked by processCopyResults() at end of operation.
   *
   * @param op the copy operation presumably currently holding lock on this connection
   * @param block whether to block waiting for input
   * @throws SQLException on any failure
   */
  void readFromCopy(CopyOperationImpl op, boolean block) throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      if (!hasLock(op)) {
        throw new PSQLException(GT.tr("Tried to read from inactive copy"),
            PSQLState.OBJECT_NOT_IN_STATE);
      }

      try {
        processCopyResults(op, block); // expect a call to handleCopydata() to store the data
      } catch (IOException ioe) {
        throw new PSQLException(GT.tr("Database connection failed when reading from copy"),
            PSQLState.CONNECTION_FAILURE, ioe);
      }
    }
  }

  // Re-entrancy guard for processCopyResults; see the "hack" comment below.
  // NOTE(review): field is package-private and not final — presumably could be
  // private final; confirm no external access before tightening.
  AtomicBoolean processingCopyResults = new AtomicBoolean(false);

  /**
   * Handles copy sub protocol responses from server. Unlocks at end of sub protocol, so operations
   * on pgStream or QueryExecutor are not allowed in a method after calling this!
   *
   * @param block whether to block waiting for input
   * @return CopyIn when COPY FROM STDIN starts; CopyOut when COPY TO STDOUT starts; null when copy
   *     ends; otherwise, the operation given as parameter.
   * @throws SQLException in case of misuse
   * @throws IOException from the underlying connection
   */
  CopyOperationImpl processCopyResults(CopyOperationImpl op, boolean block)
      throws SQLException, IOException {

    /*
     * fixes issue #1592 where one thread closes the stream and another is reading it
     */
    if (pgStream.isClosed()) {
      throw new PSQLException(GT.tr("PGStream is closed"),
          PSQLState.CONNECTION_DOES_NOT_EXIST);
    }
    /*
     * This is a hack as we should not end up here, but sometimes do with large copy operations.
     */
    if (!processingCopyResults.compareAndSet(false, true)) {
      LOGGER.log(Level.INFO, "Ignoring request to process copy results, already processing");
      return null;
    }

    // put this all in a try, finally block and reset the processingCopyResults in the finally clause
    try {
      boolean endReceiving = false;
      SQLException error = null;
      SQLException errors = null;
      int len;

      // Note: the 'block' parameter is deliberately reassigned below once we know
      // a terminating message (CommandComplete / ErrorResponse / CopyDone) is due.
      while (!endReceiving && (block || pgStream.hasMessagePending())) {

        // There is a bug in the server's implementation of the copy
        // protocol. It returns command complete immediately upon
        // receiving the EOF marker in the binary protocol,
        // potentially before we've issued CopyDone. When we are not
        // blocking, we don't think we are done, so we hold off on
        // processing command complete and any subsequent messages
        // until we actually are done with the copy.
        //
        if (!block) {
          int c = pgStream.peekChar();
          if (c == 'C') {
            // CommandComplete
            LOGGER.log(Level.FINEST, " <=BE CommandStatus, Ignored until CopyDone");
            break;
          }
        }

        int c = pgStream.receiveChar();
        switch (c) {

          case 'A': // Asynchronous Notify

            LOGGER.log(Level.FINEST, " <=BE Asynchronous Notification while copying");

            receiveAsyncNotify();
            break;

          case 'N': // Notice Response

            LOGGER.log(Level.FINEST, " <=BE Notification while copying");

            addWarning(receiveNoticeResponse());
            break;

          case 'C': // Command Complete

            String status = receiveCommandStatus();

            try {
              if (op == null) {
                throw new PSQLException(GT
                    .tr("Received CommandComplete ''{0}'' without an active copy operation", status),
                    PSQLState.OBJECT_NOT_IN_STATE);
              }
              op.handleCommandStatus(status);
            } catch (SQLException se) {
              error = se;
            }

            block = true;
            break;

          case 'E': // ErrorMessage (expected response to CopyFail)

            error = receiveErrorResponse();
            // We've received the error and we now expect to receive
            // Ready for query, but we must block because it might still be
            // on the wire and not here yet.
            block = true;
            break;

          case 'G': // CopyInResponse

            LOGGER.log(Level.FINEST, " <=BE CopyInResponse");

            if (op != null) {
              error = new PSQLException(GT.tr("Got CopyInResponse from server during an active {0}",
                  op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
            }

            op = new CopyInImpl();
            initCopy(op);
            endReceiving = true;
            break;

          case 'H': // CopyOutResponse

            LOGGER.log(Level.FINEST, " <=BE CopyOutResponse");

            if (op != null) {
              error = new PSQLException(GT.tr("Got CopyOutResponse from server during an active {0}",
                  op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
            }

            op = new CopyOutImpl();
            initCopy(op);
            endReceiving = true;
            break;

          case 'W': // CopyBothResponse

            LOGGER.log(Level.FINEST, " <=BE CopyBothResponse");

            if (op != null) {
              error = new PSQLException(GT.tr("Got CopyBothResponse from server during an active {0}",
                  op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
            }

            op = new CopyDualImpl();
            initCopy(op);
            endReceiving = true;
            break;

          case 'd': // CopyData

            LOGGER.log(Level.FINEST, " <=BE CopyData");

            len = pgStream.receiveInteger4() - 4;

            // NOTE(review): the message text says "greater than 4" but the condition
            // checks len > 0 AFTER subtracting the 4-byte length header — the check
            // and the message describe the same constraint from different baselines.
            assert len > 0 : "Copy Data length must be greater than 4";

            byte[] buf = pgStream.receive(len);
            if (op == null) {
              error = new PSQLException(GT.tr("Got CopyData without an active copy operation"),
                  PSQLState.OBJECT_NOT_IN_STATE);
            } else if (!(op instanceof CopyOut)) {
              error = new PSQLException(
                  GT.tr("Unexpected copydata from server for {0}", op.getClass().getName()),
                  PSQLState.COMMUNICATION_ERROR);
            } else {
              op.handleCopydata(buf);
            }
            endReceiving = true;
            break;

          case 'c': // CopyDone (expected after all copydata received)

            LOGGER.log(Level.FINEST, " <=BE CopyDone");

            len = pgStream.receiveInteger4() - 4;
            if (len > 0) {
              pgStream.receive(len); // not in specification; should never appear
            }

            if (!(op instanceof CopyOut)) {
              error = new PSQLException("Got CopyDone while not copying from server",
                  PSQLState.OBJECT_NOT_IN_STATE);
            }

            // keep receiving since we expect a CommandComplete
            block = true;
            break;
          case 'S': // Parameter Status
            try {
              receiveParameterStatus();
            } catch (SQLException e) {
              error = e;
              endReceiving = true;
            }
            break;

          case 'Z': // ReadyForQuery: After FE:CopyDone => BE:CommandComplete

            receiveRFQ();
            if (op != null && hasLock(op)) {
              unlock(op);
            }
            op = null;
            endReceiving = true;
            break;

          // If the user sends a non-copy query, we've got to handle some additional things.
          //
          case 'T': // Row Description (response to Describe)
            LOGGER.log(Level.FINEST, " <=BE RowDescription (during copy ignored)");

            skipMessage();
            break;

          case 'D': // DataRow
            LOGGER.log(Level.FINEST, " <=BE DataRow (during copy ignored)");

            skipMessage();
            break;

          default:
            throw new IOException(
                GT.tr("Unexpected packet type during copy: {0}", Integer.toString(c)));
        }

        // Collect errors into a neat chain for completeness
        if (error != null) {
          if (errors != null) {
            error.setNextException(errors);
          }
          errors = error;
          error = null;
        }
      }

      if (errors != null) {
        throw errors;
      }
      return op;

    } finally {
      /*
      reset here in the finally block to make sure it really is cleared
      */
      processingCopyResults.set(false);
    }
  }

  /*
   * To prevent client/server protocol deadlocks, we try to manage the estimated recv buffer size
   * and force a sync +flush and process results if we think it might be getting too full.
   *
   * See the comments above MAX_BUFFERED_RECV_BYTES's declaration for details.
   */
  private void flushIfDeadlockRisk(Query query, boolean disallowBatching,
      ResultHandler resultHandler,
      BatchResultHandler batchHandler,
      final int flags) throws IOException {
    // Assume all statements need at least this much reply buffer space,
    // plus params
    estimatedReceiveBufferBytes += NODATA_QUERY_RESPONSE_SIZE_BYTES;

    SimpleQuery sq = (SimpleQuery) query;
    if (sq.isStatementDescribed()) {
      /*
       * Estimate the response size of the fields and add it to the expected response size.
       *
       * It's impossible for us to estimate the rowcount. We'll assume one row, as that's the common
       * case for batches and we're leaving plenty of breathing room in this approach. It's still
       * not deadlock-proof though; see pgjdbc github issues #194 and #195.
       */
      int maxResultRowSize = sq.getMaxResultRowSize();
      if (maxResultRowSize >= 0) {
        estimatedReceiveBufferBytes += maxResultRowSize;
      } else {
        LOGGER.log(Level.FINEST, "Couldn't estimate result size or result size unbounded, "
            + "disabling batching for this query.");
        disallowBatching = true;
      }
    } else {
      /*
       * We only describe a statement if we're expecting results from it, so it's legal to batch
       * unprepared statements. We'll abort later if we get any results from them where none are
       * expected. For now all we can do is hope the user told us the truth and assume that
       * NODATA_QUERY_RESPONSE_SIZE_BYTES is enough to cover it.
       */
    }

    if (disallowBatching || estimatedReceiveBufferBytes >= MAX_BUFFERED_RECV_BYTES) {
      LOGGER.log(Level.FINEST, "Forcing Sync, receive buffer full or batching disallowed");
      sendSync();
      processResults(resultHandler, flags);
      estimatedReceiveBufferBytes = 0;
      if (batchHandler != null) {
        batchHandler.secureProgress();
      }
    }

  }

  /*
   * Send a query to the backend.
   */
  private void sendQuery(Query query, V3ParameterList parameters, int maxRows, int fetchSize,
      int flags, ResultHandler resultHandler,
      BatchResultHandler batchHandler, boolean adaptiveFetch) throws IOException, SQLException {
    // Now the query itself.
    Query[] subqueries = query.getSubqueries();
    SimpleParameterList[] subparams = parameters.getSubparams();

    // We know this is deprecated, but still respect it in case anyone's using it.
    // PgJDBC itself no longer does.
    @SuppressWarnings("deprecation")
    boolean disallowBatching = (flags & QueryExecutor.QUERY_DISALLOW_BATCHING) != 0;

    if (subqueries == null) {
      flushIfDeadlockRisk(query, disallowBatching, resultHandler, batchHandler, flags);

      // If we saw errors, don't send anything more.
      if (resultHandler.getException() == null) {
        if (fetchSize != 0) {
          adaptiveFetchCache.addNewQuery(adaptiveFetch, query);
        }
        sendOneQuery((SimpleQuery) query, (SimpleParameterList) parameters, maxRows, fetchSize,
            flags);
      }
    } else {
      for (int i = 0; i < subqueries.length; i++) {
        final Query subquery = subqueries[i];
        flushIfDeadlockRisk(subquery, disallowBatching, resultHandler, batchHandler, flags);

        // If we saw errors, don't send anything more.
        if (resultHandler.getException() != null) {
          break;
        }

        // In the situation where parameters is already
        // NO_PARAMETERS it cannot know the correct
        // number of array elements to return in the
        // above call to getSubparams(), so it must
        // return null which we check for here.
        //
        SimpleParameterList subparam = SimpleQuery.NO_PARAMETERS;
        if (subparams != null) {
          subparam = subparams[i];
        }
        if (fetchSize != 0) {
          adaptiveFetchCache.addNewQuery(adaptiveFetch, subquery);
        }
        sendOneQuery((SimpleQuery) subquery, subparam, maxRows, fetchSize, flags);
      }
    }
  }

  //
  // Message sending
  //

  /** Sends a Sync message ('S') and flushes, prompting the backend to emit ReadyForQuery. */
  private void sendSync() throws IOException {
    LOGGER.log(Level.FINEST, " FE=> Sync");

    pgStream.sendChar('S'); // Sync
    pgStream.sendInteger4(4); // Length
    pgStream.flush();
    // Below "add queues" are likely not required at all
    pendingExecuteQueue.add(new ExecuteRequest(sync, null, true));
    pendingDescribePortalQueue.add(sync);
  }

  /**
   * Sends a Parse message for the query unless an equivalent prepared statement already exists.
   *
   * @param query the query to parse
   * @param params parameter list whose type OIDs are sent with the Parse
   * @param oneShot true to use the unnamed statement instead of allocating a server-side name
   * @throws IOException on communication failure
   */
  private void sendParse(SimpleQuery query, SimpleParameterList params, boolean oneShot)
      throws IOException {
    // Already parsed, or we have a Parse pending and the types are right?
    int[] typeOIDs = params.getTypeOIDs();
    if (query.isPreparedFor(typeOIDs, deallocateEpoch)) {
      return;
    }

    // Clean up any existing statement, as we can't use it.
    query.unprepare();
    processDeadParsedQueries();

    // Remove any cached Field values. The re-parsed query might report different
    // fields because input parameter types may result in different type inferences
    // for unspecified types.
    query.setFields(null);

    String statementName = null;
    if (!oneShot) {
      // Generate a statement name to use.
      statementName = "S_" + (nextUniqueID++);

      // And prepare the new statement.
      // NB: Must clone the OID array, as it's a direct reference to
      // the SimpleParameterList's internal array that might be modified
      // under us.
      query.setStatementName(statementName, deallocateEpoch);
      query.setPrepareTypes(typeOIDs);
      registerParsedQuery(query, statementName);
    }

    byte[] encodedStatementName = query.getEncodedStatementName();
    String nativeSql = query.getNativeSql();

    if (LOGGER.isLoggable(Level.FINEST)) {
      StringBuilder sbuf = new StringBuilder(" FE=> Parse(stmt=" + statementName + ",query=\"");
      sbuf.append(nativeSql);
      sbuf.append("\",oids={");
      for (int i = 1; i <= params.getParameterCount(); i++) {
        if (i != 1) {
          sbuf.append(",");
        }
        sbuf.append(params.getTypeOID(i));
      }
      sbuf.append("})");
      LOGGER.log(Level.FINEST, sbuf.toString());
    }

    //
    // Send Parse.
    //

    byte[] queryUtf8 = nativeSql.getBytes(StandardCharsets.UTF_8);

    // Total size = 4 (size field)
    // + N + 1 (statement name, zero-terminated)
    // + N + 1 (query, zero terminated)
    // + 2 (parameter count) + N * 4 (parameter types)
    int encodedSize = 4
        + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
        + queryUtf8.length + 1
        + 2 + 4 * params.getParameterCount();

    pgStream.sendChar('P'); // Parse
    pgStream.sendInteger4(encodedSize);
    if (encodedStatementName != null) {
      pgStream.send(encodedStatementName);
    }
    pgStream.sendChar(0); // End of statement name
    pgStream.send(queryUtf8); // Query string
    pgStream.sendChar(0); // End of query string.
    pgStream.sendInteger2(params.getParameterCount()); // # of parameter types specified
    for (int i = 1; i <= params.getParameterCount(); i++) {
      pgStream.sendInteger4(params.getTypeOID(i));
    }

    pendingParseQueue.add(query);
  }

  /**
   * Sends a Bind message binding the parameter values to a (possibly unnamed) portal.
   *
   * @param query the parsed query to bind
   * @param params the parameter values
   * @param portal destination portal, or null for the unnamed portal
   * @param noBinaryTransfer true to force text format for all result fields
   * @throws IOException on communication failure
   */
  private void sendBind(SimpleQuery query, SimpleParameterList params, Portal portal,
      boolean noBinaryTransfer) throws IOException {
    //
    // Send Bind.
    //

    String statementName = query.getStatementName();
    byte[] encodedStatementName = query.getEncodedStatementName();
    byte[] encodedPortalName = portal == null ?
        null : portal.getEncodedPortalName();

    if (LOGGER.isLoggable(Level.FINEST)) {
      StringBuilder sbuf = new StringBuilder(" FE=> Bind(stmt=" + statementName + ",portal=" + portal);
      for (int i = 1; i <= params.getParameterCount(); i++) {
        sbuf.append(",$").append(i).append("=<")
            .append(params.toString(i, true))
            .append(">,type=").append(Oid.toString(params.getTypeOID(i)));
      }
      sbuf.append(")");
      LOGGER.log(Level.FINEST, sbuf.toString());
    }

    // Total size = 4 (size field) + N + 1 (destination portal)
    // + N + 1 (statement name)
    // + 2 (param format code count) + N * 2 (format codes)
    // + 2 (param value count) + N (encoded param value size)
    // + 2 (result format code count, 0)
    // First pass: accumulate only the encoded parameter value sizes (long to avoid overflow).
    long encodedSize = 0;
    for (int i = 1; i <= params.getParameterCount(); i++) {
      if (params.isNull(i)) {
        encodedSize += 4;
      } else {
        encodedSize += (long) 4 + params.getV3Length(i);
      }
    }

    Field[] fields = query.getFields();
    if (!noBinaryTransfer && query.needUpdateFieldFormats() && fields != null) {
      for (Field field : fields) {
        if (useBinary(field)) {
          field.setFormat(Field.BINARY_FORMAT);
          query.setHasBinaryFields(true);
        }
      }
    }
    // If text-only results are required (e.g. updateable resultset), and the query has binary columns,
    // flip to text format.
    if (noBinaryTransfer && query.hasBinaryFields() && fields != null) {
      for (Field field : fields) {
        if (field.getFormat() != Field.TEXT_FORMAT) {
          field.setFormat(Field.TEXT_FORMAT);
        }
      }
      query.resetNeedUpdateFieldFormats();
      query.setHasBinaryFields(false);
    }

    // This is not the number of binary fields, but the total number
    // of fields if any of them are binary or zero if all of them
    // are text.
    int numBinaryFields = !noBinaryTransfer && query.hasBinaryFields() && fields != null
        ? fields.length : 0;

    // Second pass: fold the fixed-size parts of the Bind message into the total.
    encodedSize = 4
        + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1
        + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
        + 2 + params.getParameterCount() * 2
        + 2 + encodedSize
        + 2 + numBinaryFields * 2;

    // backend's MaxAllocSize is the largest message that can
    // be received from a client. If we have a bigger value
    // from either very large parameters or incorrect length
    // descriptions of setXXXStream we do not send the bind
    // message.
    //
    if (encodedSize > 0x3fffffff) {
      throw new PGBindException(new IOException(GT.tr(
          "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters.",
          encodedSize)));
    }

    pgStream.sendChar('B'); // Bind
    pgStream.sendInteger4((int) encodedSize); // Message size
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName); // Destination portal name.
    }
    pgStream.sendChar(0); // End of portal name.
    if (encodedStatementName != null) {
      pgStream.send(encodedStatementName); // Source statement name.
    }
    pgStream.sendChar(0); // End of statement name.

    pgStream.sendInteger2(params.getParameterCount()); // # of parameter format codes
    for (int i = 1; i <= params.getParameterCount(); i++) {
      pgStream.sendInteger2(params.isBinary(i) ? 1 : 0); // Parameter format code
    }

    pgStream.sendInteger2(params.getParameterCount()); // # of parameter values

    // If an error occurs when reading a stream we have to
    // continue pumping out data to match the length we
    // said we would. Once we've done that we throw
    // this exception. Multiple exceptions can occur and
    // it really doesn't matter which one is reported back
    // to the caller.
    //
    PGBindException bindException = null;

    for (int i = 1; i <= params.getParameterCount(); i++) {
      if (params.isNull(i)) {
        pgStream.sendInteger4(-1); // Magic size of -1 means NULL
      } else {
        pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
        try {
          params.writeV3Value(i, pgStream); // Parameter value
        } catch (PGBindException be) {
          // Remember the failure but keep streaming filler bytes so the message
          // length we already sent stays honest; rethrown after the message ends.
          bindException = be;
        }
      }
    }

    pgStream.sendInteger2(numBinaryFields); // # of result format codes
    for (int i = 0; fields != null && i < numBinaryFields; i++) {
      pgStream.sendInteger2(fields[i].getFormat());
    }

    pendingBindQueue.add(portal == null ? UNNAMED_PORTAL : portal);

    if (bindException != null) {
      throw bindException;
    }
  }

  /**
   * Returns true if the specified field should be retrieved using binary encoding.
   *
   * @param field The field whose Oid type to analyse.
   * @return True if {@link Field#BINARY_FORMAT} should be used, false if
   *         {@link Field#TEXT_FORMAT} should be used.
   */
  private boolean useBinary(Field field) {
    int oid = field.getOID();
    return useBinaryForReceive(oid);
  }

  /**
   * Sends a Describe (Portal) message so the backend reports the portal's row description.
   *
   * @param query the query whose portal is described; marked portal-described on send
   * @param portal the portal to describe, or null for the unnamed portal
   * @throws IOException on communication failure
   */
  private void sendDescribePortal(SimpleQuery query, Portal portal) throws IOException {
    //
    // Send Describe.
    //

    LOGGER.log(Level.FINEST, " FE=> Describe(portal={0})", portal);

    byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();

    // Total size = 4 (size field) + 1 (describe type, 'P') + N + 1 (portal name)
    int encodedSize = 4 + 1 + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1;

    pgStream.sendChar('D'); // Describe
    pgStream.sendInteger4(encodedSize); // message size
    pgStream.sendChar('P'); // Describe (Portal)
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName); // portal name to close
    }
    pgStream.sendChar(0); // end of portal name

    pendingDescribePortalQueue.add(query);
    query.setPortalDescribed(true);
  }

  /**
   * Sends a Describe (Statement) message so the backend reports parameter types and row metadata.
   *
   * @param query the query whose prepared statement is described
   * @param params parameter list to resolve from the backend's ParameterDescription
   * @param describeOnly true when the caller only wants metadata, not execution
   * @throws IOException on communication failure
   */
  private void sendDescribeStatement(SimpleQuery query, SimpleParameterList params,
      boolean describeOnly) throws IOException {
    // Send Statement Describe

    LOGGER.log(Level.FINEST, " FE=> Describe(statement={0})", query.getStatementName());

    byte[] encodedStatementName = query.getEncodedStatementName();

    // Total size = 4 (size field) + 1 (describe type, 'S') + N + 1 (portal name)
    int encodedSize = 4 + 1 + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1;

    pgStream.sendChar('D'); // Describe
    pgStream.sendInteger4(encodedSize); // Message size
    pgStream.sendChar('S'); // Describe (Statement);
    if (encodedStatementName != null) {
      pgStream.send(encodedStatementName); // Statement name
    }
    pgStream.sendChar(0); // end message

    // Note: statement name can change over time for the same query object
    // Thus we take a snapshot of the query name
    pendingDescribeStatementQueue.add(
        new DescribeRequest(query, params, describeOnly, query.getStatementName()));
    pendingDescribePortalQueue.add(query);
    query.setStatementDescribed(true);
    query.setPortalDescribed(true);
  }

  /**
   * Sends an Execute message for the given portal with a row limit.
   *
   * @param query the query being executed (queued for result processing)
   * @param portal the portal to execute, or null for the unnamed portal
   * @param limit maximum number of rows to return; 0 means no limit
   * @throws IOException on communication failure
   */
  private void sendExecute(SimpleQuery query, Portal portal, int limit)
      throws IOException {
    //
    // Send Execute.
    //
    if (LOGGER.isLoggable(Level.FINEST)) {
      LOGGER.log(Level.FINEST, " FE=> Execute(portal={0},limit={1})", new Object[]{portal, limit});
    }

    byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();
    int encodedSize = encodedPortalName == null ?
        0 : encodedPortalName.length;

    // Total size = 4 (size field) + 1 + N (source portal) + 4 (max rows)
    pgStream.sendChar('E'); // Execute
    pgStream.sendInteger4(4 + 1 + encodedSize + 4); // message size
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName); // portal name
    }
    pgStream.sendChar(0); // portal name terminator
    pgStream.sendInteger4(limit); // row limit

    pendingExecuteQueue.add(new ExecuteRequest(query, portal, false));
  }

  /**
   * Sends a Close (Portal) message releasing the named portal on the backend.
   *
   * @param portalName portal to close, or null for the unnamed portal
   * @throws IOException on communication failure
   */
  private void sendClosePortal(String portalName) throws IOException {
    //
    // Send Close.
    //

    LOGGER.log(Level.FINEST, " FE=> ClosePortal({0})", portalName);

    byte[] encodedPortalName = portalName == null ? null : portalName.getBytes(StandardCharsets.UTF_8);
    int encodedSize = encodedPortalName == null ? 0 : encodedPortalName.length;

    // Total size = 4 (size field) + 1 (close type, 'P') + 1 + N (portal name)
    pgStream.sendChar('C'); // Close
    pgStream.sendInteger4(4 + 1 + 1 + encodedSize); // message size
    pgStream.sendChar('P'); // Close (Portal)
    if (encodedPortalName != null) {
      pgStream.send(encodedPortalName);
    }
    pgStream.sendChar(0); // unnamed portal
  }

  /**
   * Sends a Close (Statement) message releasing the named prepared statement on the backend.
   *
   * @param statementName statement to close; must not be null
   * @throws IOException on communication failure
   */
  private void sendCloseStatement(String statementName) throws IOException {
    //
    // Send Close.
    //

    LOGGER.log(Level.FINEST, " FE=> CloseStatement({0})", statementName);

    byte[] encodedStatementName = statementName.getBytes(StandardCharsets.UTF_8);

    // Total size = 4 (size field) + 1 (close type, 'S') + N + 1 (statement name)
    pgStream.sendChar('C'); // Close
    pgStream.sendInteger4(4 + 1 + encodedStatementName.length + 1); // message size
    pgStream.sendChar('S'); // Close (Statement)
    pgStream.send(encodedStatementName); // statement to close
    pgStream.sendChar(0); // statement name terminator
  }

  // sendOneQuery sends a single statement via the extended query protocol.
  // Per the FE/BE docs this is essentially the same as how a simple query runs
  // (except that it generates some extra acknowledgement messages, and we
  // can send several queries before doing the Sync)
  //
  // Parse S_n from "query string with parameter placeholders"; skipped if already done previously
  // or if oneshot
  // Bind C_n from S_n plus parameters (or from unnamed statement for oneshot queries)
  // Describe C_n; skipped if caller doesn't want metadata
  // Execute C_n with maxRows limit; maxRows = 1 if caller doesn't want results
  // (above repeats once per call to sendOneQuery)
  // Sync (sent by caller)
  //
  private void sendOneQuery(SimpleQuery query, SimpleParameterList params, int maxRows,
      int fetchSize, int flags) throws IOException {
    boolean asSimple = (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0;
    if (asSimple) {
      assert (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) == 0
          : "Simple mode does not support describe requests. sql = " + query.getNativeSql()
          + ", flags = " + flags;
      sendSimpleQuery(query, params);
      return;
    }

    assert !query.getNativeQuery().multiStatement
        : "Queries that might contain ; must be executed with QueryExecutor.QUERY_EXECUTE_AS_SIMPLE mode. "
        + "Given query is " + query.getNativeSql();

    // Per https://www.postgresql.org/docs/current/static/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY
    // A Bind message can use the unnamed prepared statement to create a named portal.
    // If the Bind is successful, an Execute message can reference that named portal until either
    // the end of the current transaction
    // or the named portal is explicitly destroyed

    boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
    boolean noMeta = (flags & QueryExecutor.QUERY_NO_METADATA) != 0;
    boolean describeOnly = (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) != 0;
    // extended queries always use a portal
    // the usePortal flag controls whether or not we use a *named* portal
    boolean usePortal = (flags & QueryExecutor.QUERY_FORWARD_CURSOR) != 0 && !noResults && !noMeta
        && fetchSize > 0 && !describeOnly;
    boolean oneShot = (flags & QueryExecutor.QUERY_ONESHOT) != 0;
    boolean noBinaryTransfer = (flags & QUERY_NO_BINARY_TRANSFER) != 0;
    boolean forceDescribePortal = (flags & QUERY_FORCE_DESCRIBE_PORTAL) != 0;

    // Work out how many rows to fetch in this pass.

    int rows;
    if (noResults) {
      rows = 1; // We're discarding any results anyway, so limit data transfer to a minimum
    } else if (!usePortal) {
      rows = maxRows; // Not using a portal -- fetchSize is irrelevant
    } else if (maxRows != 0 && fetchSize > maxRows) {
      // fetchSize > maxRows, use maxRows (nb: fetchSize cannot be 0 if usePortal == true)
      rows = maxRows;
    } else {
      rows = fetchSize; // maxRows > fetchSize
    }

    sendParse(query, params, oneShot);

    // Must do this after sendParse to pick up any changes to the
    // query's state.
    //
    boolean queryHasUnknown = query.hasUnresolvedTypes();
    boolean paramsHasUnknown = params.hasUnresolvedTypes();

    boolean describeStatement = describeOnly
        || (!oneShot && paramsHasUnknown && queryHasUnknown && !query.isStatementDescribed());

    if (!describeStatement && paramsHasUnknown && !queryHasUnknown) {
      int[] queryOIDs = query.getPrepareTypes();
      int[] paramOIDs = params.getTypeOIDs();
      for (int i = 0; i < paramOIDs.length; i++) {
        // Only supply type information when there isn't any
        // already, don't arbitrarily overwrite user supplied
        // type information.
        if (paramOIDs[i] == Oid.UNSPECIFIED) {
          params.setResolvedType(i + 1, queryOIDs[i]);
        }
      }
    }

    if (describeStatement) {
      sendDescribeStatement(query, params, describeOnly);
      if (describeOnly) {
        return;
      }
    }

    // Construct a new portal if needed.
    Portal portal = null;
    if (usePortal) {
      String portalName = "C_" + (nextUniqueID++);
      portal = new Portal(query, portalName);
    }

    sendBind(query, params, portal, noBinaryTransfer);

    // A statement describe will also output a RowDescription,
    // so don't reissue it here if we've already done so.
    //
    if (!noMeta && !describeStatement) {
      /*
       * don't send describe if we already have cached the row description from previous executions
       *
       * XXX Clearing the fields / unpreparing the query (in sendParse) is incorrect, see bug #267.
       * We might clear the cached fields in a later execution of this query if the bind parameter
       * types change, but we're assuming here that they'll still be valid when we come to process
       * the results of this query, so we don't send a new describe here. We re-describe after the
       * fields are cleared, but the result of that gets processed after processing the results from
       * earlier executions that we didn't describe because we didn't think we had to.
       *
       * To work around this, force a Describe at each execution in batches where this can be a
       * problem. It won't cause more round trips so the performance impact is low, and it'll ensure
       * that the field information is available when we decode the results. This is undeniably a
       * hack, but there aren't many good alternatives.
       */
      if (!query.isPortalDescribed() || forceDescribePortal) {
        sendDescribePortal(query, portal);
      }
    }

    sendExecute(query, portal, rows);
  }

  /**
   * Sends the query via the simple query protocol (single 'Q' message), used for
   * QUERY_EXECUTE_AS_SIMPLE mode where multi-statement strings are permitted.
   *
   * @param query the query; parameters are substituted into the SQL text
   * @param params parameter values interpolated by query.toString
   * @throws IOException on communication failure
   */
  private void sendSimpleQuery(SimpleQuery query, SimpleParameterList params) throws IOException {
    String nativeSql = query.toString(params);

    LOGGER.log(Level.FINEST, " FE=> SimpleQuery(query=\"{0}\")", nativeSql);
    Encoding encoding = pgStream.getEncoding();

    byte[] encoded = encoding.encode(nativeSql);
    pgStream.sendChar('Q');
    pgStream.sendInteger4(encoded.length + 4 + 1);
    pgStream.send(encoded);
    pgStream.sendChar(0);
    pgStream.flush();
    pendingExecuteQueue.add(new ExecuteRequest(query, null, true));
    pendingDescribePortalQueue.add(query);
  }

  //
  // Garbage collection of parsed statements.
  //
  // When a statement is successfully parsed, registerParsedQuery is called.
  // This creates a PhantomReference referring to the "owner" of the statement
  // (the originating Query object) and inserts that reference as a key in
  // parsedQueryMap. The values of parsedQueryMap are the corresponding allocated
  // statement names. The originating Query object also holds a reference to the
  // PhantomReference.
  //
  // When the owning Query object is closed, it enqueues and clears the associated
  // PhantomReference.
  //
  // If the owning Query object becomes unreachable (see java.lang.ref javadoc) before
  // being closed, the corresponding PhantomReference is enqueued on
  // parsedQueryCleanupQueue. In the Sun JVM, phantom references are only enqueued
  // when a GC occurs, so this is not necessarily prompt but should eventually happen.
  //
  // Periodically (currently, just before query execution), the parsedQueryCleanupQueue
  // is polled.
For each enqueued PhantomReference we find, we remove the corresponding + // entry from parsedQueryMap, obtaining the name of the underlying statement in the + // process. Then we send a message to the backend to deallocate that statement. + // + + private final HashMap, String> parsedQueryMap = + new HashMap<>(); + private final ReferenceQueue parsedQueryCleanupQueue = + new ReferenceQueue<>(); + + private void registerParsedQuery(SimpleQuery query, String statementName) { + if (statementName == null) { + return; + } + + PhantomReference cleanupRef = + new PhantomReference<>(query, parsedQueryCleanupQueue); + parsedQueryMap.put(cleanupRef, statementName); + query.setCleanupRef(cleanupRef); + } + + private void processDeadParsedQueries() throws IOException { + Reference deadQuery; + while ((deadQuery = parsedQueryCleanupQueue.poll()) != null) { + String statementName = parsedQueryMap.remove(deadQuery); + sendCloseStatement(statementName); + deadQuery.clear(); + } + } + + // + // Essentially the same strategy is used for the cleanup of portals. + // Note that each Portal holds a reference to the corresponding Query + // that generated it, so the Query won't be collected (and the statement + // closed) until all the Portals are, too. This is required by the mechanics + // of the backend protocol: when a statement is closed, all dependent portals + // are also closed. + // + + private final HashMap, String> openPortalMap = + new HashMap<>(); + private final ReferenceQueue openPortalCleanupQueue = new ReferenceQueue<>(); + + private static final Portal UNNAMED_PORTAL = new Portal(null, "unnamed"); + + private void registerOpenPortal(Portal portal) { + if (portal == UNNAMED_PORTAL) { + return; // Using the unnamed portal. 
+ } + + String portalName = portal.getPortalName(); + PhantomReference cleanupRef = + new PhantomReference<>(portal, openPortalCleanupQueue); + openPortalMap.put(cleanupRef, portalName); + portal.setCleanupRef(cleanupRef); + } + + private void processDeadPortals() throws IOException { + Reference deadPortal; + while ((deadPortal = openPortalCleanupQueue.poll()) != null) { + String portalName = openPortalMap.remove(deadPortal); + sendClosePortal(portalName); + deadPortal.clear(); + } + } + + protected void processResults(ResultHandler handler, int flags) throws IOException { + processResults(handler, flags, false); + } + + protected void processResults(ResultHandler handler, int flags, boolean adaptiveFetch) + throws IOException { + boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0; + boolean bothRowsAndStatus = (flags & QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS) != 0; + + List tuples = null; + + int c; + boolean endQuery = false; + + // At the end of a command execution we have the CommandComplete + // message to tell us we're done, but with a describeOnly command + // we have no real flag to let us know we're done. We've got to + // look for the next RowDescription or NoData message and return + // from there. 
+ boolean doneAfterRowDescNoData = false; + + while (!endQuery) { + c = pgStream.receiveChar(); + switch (c) { + case 'A': // Asynchronous Notify + receiveAsyncNotify(); + break; + + case '1': // Parse Complete (response to Parse) + pgStream.receiveInteger4(); // len, discarded + + SimpleQuery parsedQuery = pendingParseQueue.removeFirst(); + String parsedStatementName = parsedQuery.getStatementName(); + + LOGGER.log(Level.FINEST, " <=BE ParseComplete [{0}]", parsedStatementName); + + break; + + case 't': { // ParameterDescription + pgStream.receiveInteger4(); // len, discarded + + LOGGER.log(Level.FINEST, " <=BE ParameterDescription"); + + DescribeRequest describeData = pendingDescribeStatementQueue.getFirst(); + SimpleQuery query = describeData.query; + SimpleParameterList params = describeData.parameterList; + boolean describeOnly = describeData.describeOnly; + // This might differ from query.getStatementName if the query was re-prepared + String origStatementName = describeData.statementName; + + int numParams = pgStream.receiveInteger2(); + + for (int i = 1; i <= numParams; i++) { + int typeOid = pgStream.receiveInteger4(); + params.setResolvedType(i, typeOid); + } + + // Since we can issue multiple Parse and DescribeStatement + // messages in a single network trip, we need to make + // sure the describe results we requested are still + // applicable to the latest parsed query. 
+ // + if ((origStatementName == null && query.getStatementName() == null) + || (origStatementName != null + && origStatementName.equals(query.getStatementName()))) { + query.setPrepareTypes(params.getTypeOIDs()); + } + + if (describeOnly) { + doneAfterRowDescNoData = true; + } else { + pendingDescribeStatementQueue.removeFirst(); + } + break; + } + + case '2': // Bind Complete (response to Bind) + pgStream.receiveInteger4(); // len, discarded + + Portal boundPortal = pendingBindQueue.removeFirst(); + LOGGER.log(Level.FINEST, " <=BE BindComplete [{0}]", boundPortal); + + registerOpenPortal(boundPortal); + break; + + case '3': // Close Complete (response to Close) + pgStream.receiveInteger4(); // len, discarded + LOGGER.log(Level.FINEST, " <=BE CloseComplete"); + break; + + case 'n': // No Data (response to Describe) + pgStream.receiveInteger4(); // len, discarded + LOGGER.log(Level.FINEST, " <=BE NoData"); + + pendingDescribePortalQueue.removeFirst(); + + if (doneAfterRowDescNoData) { + DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst(); + SimpleQuery currentQuery = describeData.query; + + Field[] fields = currentQuery.getFields(); + + if (fields != null) { // There was a resultset. + tuples = new ArrayList<>(); + handler.handleResultRows(currentQuery, fields, tuples, null); + tuples = null; + } + } + break; + + case 's': { // Portal Suspended (end of Execute) + // nb: this appears *instead* of CommandStatus. + // Must be a SELECT if we suspended, so don't worry about it. + + pgStream.receiveInteger4(); // len, discarded + LOGGER.log(Level.FINEST, " <=BE PortalSuspended"); + + ExecuteRequest executeData = pendingExecuteQueue.removeFirst(); + SimpleQuery currentQuery = executeData.query; + Portal currentPortal = executeData.portal; + + if (currentPortal != null) { + // Existence of portal defines if query was using fetching. 
+ adaptiveFetchCache + .updateQueryFetchSize(adaptiveFetch, currentQuery, pgStream.getMaxRowSizeBytes()); + } + pgStream.clearMaxRowSizeBytes(); + + Field[] fields = currentQuery.getFields(); + if (fields != null && tuples == null) { + // When no results expected, pretend an empty resultset was returned + // Not sure if new ArrayList can be always replaced with emptyList + tuples = noResults ? Collections.emptyList() : new ArrayList(); + } + + if (fields != null && tuples != null) { + handler.handleResultRows(currentQuery, fields, tuples, currentPortal); + } + tuples = null; + break; + } + + case 'C': { // Command Status (end of Execute) + // Handle status. + String status = receiveCommandStatus(); + if (isFlushCacheOnDeallocate() + && (status.startsWith("DEALLOCATE ALL") || status.startsWith("DISCARD ALL"))) { + deallocateEpoch++; + } + + doneAfterRowDescNoData = false; + + ExecuteRequest executeData = pendingExecuteQueue.peekFirst(); + SimpleQuery currentQuery = executeData.query; + Portal currentPortal = executeData.portal; + + if (currentPortal != null) { + // Existence of portal defines if query was using fetching. + + // Command executed, adaptive fetch size can be removed for this query, max row size can be cleared + adaptiveFetchCache.removeQuery(adaptiveFetch, currentQuery); + // Update to change fetch size for other fetch portals of this query + adaptiveFetchCache + .updateQueryFetchSize(adaptiveFetch, currentQuery, pgStream.getMaxRowSizeBytes()); + } + pgStream.clearMaxRowSizeBytes(); + + if (status.startsWith("SET")) { + String nativeSql = currentQuery.getNativeQuery().nativeSql; + // Scan only the first 1024 characters to + // avoid big overhead for long queries. 
+ if (nativeSql.lastIndexOf("search_path", 1024) != -1 + && !nativeSql.equals(lastSetSearchPathQuery)) { + // Search path was changed, invalidate prepared statement cache + lastSetSearchPathQuery = nativeSql; + deallocateEpoch++; + } + } + + if (!executeData.asSimple) { + pendingExecuteQueue.removeFirst(); + } else { + // For simple 'Q' queries, executeQueue is cleared via ReadyForQuery message + } + + // we want to make sure we do not add any results from these queries to the result set + if (currentQuery == autoSaveQuery + || currentQuery == releaseAutoSave) { + // ignore "SAVEPOINT" or RELEASE SAVEPOINT status from autosave query + break; + } + + Field[] fields = currentQuery.getFields(); + if (fields != null && tuples == null) { + // When no results expected, pretend an empty resultset was returned + // Not sure if new ArrayList can be always replaced with emptyList + tuples = noResults ? Collections.emptyList() : new ArrayList(); + } + + // If we received tuples we must know the structure of the + // resultset, otherwise we won't be able to fetch columns + // from it, etc, later. + if (fields == null && tuples != null) { + throw new IllegalStateException( + "Received resultset tuples, but no field structure for them"); + } + + if (fields != null && tuples != null) { + // There was a resultset. 
+ handler.handleResultRows(currentQuery, fields, tuples, null); + tuples = null; + + if (bothRowsAndStatus) { + interpretCommandStatus(status, handler); + } + } else { + interpretCommandStatus(status, handler); + } + + if (executeData.asSimple) { + // Simple queries might return several resultsets, thus we clear + // fields, so queries like "select 1;update; select2" will properly + // identify that "update" did not return any results + currentQuery.setFields(null); + } + + if (currentPortal != null) { + currentPortal.close(); + } + break; + } + + case 'D': // Data Transfer (ongoing Execute response) + Tuple tuple = null; + try { + tuple = pgStream.receiveTupleV3(); + } catch (OutOfMemoryError oome) { + if (!noResults) { + handler.handleError( + new PSQLException(GT.tr("Ran out of memory retrieving query results."), + PSQLState.OUT_OF_MEMORY, oome)); + } + } catch (SQLException e) { + handler.handleError(e); + } + if (!noResults) { + if (tuples == null) { + tuples = new ArrayList<>(); + } + if (tuple != null) { + tuples.add(tuple); + } + } + + if (LOGGER.isLoggable(Level.FINEST)) { + int length; + if (tuple == null) { + length = -1; + } else { + length = tuple.length(); + } + LOGGER.log(Level.FINEST, " <=BE DataRow(len={0})", length); + } + + break; + + case 'E': + // Error Response (response to pretty much everything; backend then skips until Sync) + SQLException error = receiveErrorResponse(); + handler.handleError(error); + if (willHealViaReparse(error)) { + // prepared statement ... is not valid kind of error + // Technically speaking, the error is unexpected, thus we invalidate other + // server-prepared statements just in case. + deallocateEpoch++; + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " FE: received {0}, will invalidate statements. 
deallocateEpoch is now {1}", + new Object[]{error.getSQLState(), deallocateEpoch}); + } + } + // keep processing + break; + + case 'I': { // Empty Query (end of Execute) + pgStream.receiveInteger4(); + + LOGGER.log(Level.FINEST, " <=BE EmptyQuery"); + + ExecuteRequest executeData = pendingExecuteQueue.removeFirst(); + Portal currentPortal = executeData.portal; + handler.handleCommandStatus("EMPTY", 0, 0); + if (currentPortal != null) { + currentPortal.close(); + } + break; + } + + case 'N': // Notice Response + SQLWarning warning = receiveNoticeResponse(); + handler.handleWarning(warning); + break; + + case 'S': // Parameter Status + try { + receiveParameterStatus(); + } catch (SQLException e) { + handler.handleError(e); + endQuery = true; + } + break; + + case 'T': // Row Description (response to Describe) + Field[] fields = receiveFields(); + tuples = new ArrayList<>(); + + SimpleQuery query = pendingDescribePortalQueue.peekFirst(); + if (!pendingExecuteQueue.isEmpty() + && !pendingExecuteQueue.peekFirst().asSimple) { + pendingDescribePortalQueue.removeFirst(); + } + query.setFields(fields); + + if (doneAfterRowDescNoData) { + DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst(); + SimpleQuery currentQuery = describeData.query; + currentQuery.setFields(fields); + + handler.handleResultRows(currentQuery, fields, tuples, null); + tuples = null; + } + break; + + case 'Z': // Ready For Query (eventual response to Sync) + receiveRFQ(); + if (!pendingExecuteQueue.isEmpty() + && pendingExecuteQueue.peekFirst().asSimple) { + tuples = null; + pgStream.clearResultBufferCount(); + + ExecuteRequest executeRequest = pendingExecuteQueue.removeFirst(); + // Simple queries might return several resultsets, thus we clear + // fields, so queries like "select 1;update; select2" will properly + // identify that "update" did not return any results + executeRequest.query.setFields(null); + + pendingDescribePortalQueue.removeFirst(); + if 
(!pendingExecuteQueue.isEmpty()) { + if (getTransactionState() == TransactionState.IDLE) { + handler.secureProgress(); + } + // process subsequent results (e.g. for cases like batched execution of simple 'Q' queries) + break; + } + } + endQuery = true; + + // Reset the statement name of Parses that failed. + while (!pendingParseQueue.isEmpty()) { + SimpleQuery failedQuery = pendingParseQueue.removeFirst(); + failedQuery.unprepare(); + } + + pendingParseQueue.clear(); // No more ParseComplete messages expected. + // Pending "describe" requests might be there in case of error + // If that is the case, reset "described" status, so the statement is properly + // described on next execution + while (!pendingDescribeStatementQueue.isEmpty()) { + DescribeRequest request = pendingDescribeStatementQueue.removeFirst(); + LOGGER.log(Level.FINEST, " FE marking setStatementDescribed(false) for query {0}", request.query); + request.query.setStatementDescribed(false); + } + while (!pendingDescribePortalQueue.isEmpty()) { + SimpleQuery describePortalQuery = pendingDescribePortalQueue.removeFirst(); + LOGGER.log(Level.FINEST, " FE marking setPortalDescribed(false) for query {0}", describePortalQuery); + describePortalQuery.setPortalDescribed(false); + } + pendingBindQueue.clear(); // No more BindComplete messages expected. + pendingExecuteQueue.clear(); // No more query executions expected. + break; + + case 'G': // CopyInResponse + LOGGER.log(Level.FINEST, " <=BE CopyInResponse"); + LOGGER.log(Level.FINEST, " FE=> CopyFail"); + + // COPY sub-protocol is not implemented yet + // We'll send a CopyFail message for COPY FROM STDIN so that + // server does not wait for the data. 
+ + byte[] buf = "COPY commands are only supported using the CopyManager API.".getBytes(StandardCharsets.US_ASCII); + pgStream.sendChar('f'); + pgStream.sendInteger4(buf.length + 4 + 1); + pgStream.send(buf); + pgStream.sendChar(0); + pgStream.flush(); + sendSync(); // send sync message + skipMessage(); // skip the response message + break; + + case 'H': // CopyOutResponse + LOGGER.log(Level.FINEST, " <=BE CopyOutResponse"); + + skipMessage(); + // In case of CopyOutResponse, we cannot abort data transfer, + // so just throw an error and ignore CopyData messages + handler.handleError( + new PSQLException(GT.tr("COPY commands are only supported using the CopyManager API."), + PSQLState.NOT_IMPLEMENTED)); + break; + + case 'c': // CopyDone + skipMessage(); + LOGGER.log(Level.FINEST, " <=BE CopyDone"); + break; + + case 'd': // CopyData + skipMessage(); + LOGGER.log(Level.FINEST, " <=BE CopyData"); + break; + + default: + throw new IOException("Unexpected packet type: " + c); + } + + } + } + + /** + * Ignore the response message by reading the message length and skipping over those bytes in the + * communication stream. + */ + private void skipMessage() throws IOException { + int len = pgStream.receiveInteger4(); + + assert len >= 4 : "Length from skip message must be at least 4 "; + + // skip len-4 (length includes the 4 bytes for message length itself + pgStream.skip(len - 4); + } + + @Override + public void fetch(ResultCursor cursor, ResultHandler handler, int fetchSize, + boolean adaptiveFetch) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + waitOnLock(); + final Portal portal = (Portal) cursor; + + // Insert a ResultHandler that turns bare command statuses into empty datasets + // (if the fetch returns no rows, we see just a CommandStatus..) 
+ final ResultHandler delegateHandler = handler; + final SimpleQuery query = portal.getQuery(); + handler = new ResultHandlerDelegate(delegateHandler) { + @Override + public void handleCommandStatus(String status, long updateCount, long insertOID) { + handleResultRows(query, NO_FIELDS, new ArrayList<>(), null); + } + }; + + // Now actually run it. + + try { + processDeadParsedQueries(); + processDeadPortals(); + + sendExecute(query, portal, fetchSize); + sendSync(); + + processResults(handler, 0, adaptiveFetch); + estimatedReceiveBufferBytes = 0; + } catch (IOException e) { + abort(); + handler.handleError( + new PSQLException(GT.tr("An I/O error occurred while sending to the backend."), + PSQLState.CONNECTION_FAILURE, e)); + } + + handler.handleCompletion(); + } + } + + @Override + public int getAdaptiveFetchSize(boolean adaptiveFetch, ResultCursor cursor) { + if (cursor instanceof Portal) { + Query query = ((Portal) cursor).getQuery(); + if (Objects.nonNull(query)) { + return adaptiveFetchCache + .getFetchSizeForQuery(adaptiveFetch, query); + } + } + return -1; + } + + @Override + public void setAdaptiveFetch(boolean adaptiveFetch) { + this.adaptiveFetchCache.setAdaptiveFetch(adaptiveFetch); + } + + @Override + public boolean getAdaptiveFetch() { + return this.adaptiveFetchCache.getAdaptiveFetch(); + } + + @Override + public void addQueryToAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor) { + if (cursor instanceof Portal) { + Query query = ((Portal) cursor).getQuery(); + if (Objects.nonNull(query)) { + adaptiveFetchCache.addNewQuery(adaptiveFetch, query); + } + } + } + + @Override + public void removeQueryFromAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor) { + if (cursor instanceof Portal) { + Query query = ((Portal) cursor).getQuery(); + if (Objects.nonNull(query)) { + adaptiveFetchCache.removeQuery(adaptiveFetch, query); + } + } + } + + /* + * Receive the field descriptions from the back end. 
+ */ + private Field[] receiveFields() throws IOException { + pgStream.receiveInteger4(); // MESSAGE SIZE + int size = pgStream.receiveInteger2(); + Field[] fields = new Field[size]; + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " <=BE RowDescription({0})", size); + } + + for (int i = 0; i < fields.length; i++) { + String columnLabel = pgStream.receiveCanonicalString(); + int tableOid = pgStream.receiveInteger4(); + short positionInTable = (short) pgStream.receiveInteger2(); + int typeOid = pgStream.receiveInteger4(); + int typeLength = pgStream.receiveInteger2(); + int typeModifier = pgStream.receiveInteger4(); + int formatType = pgStream.receiveInteger2(); + fields[i] = new Field(columnLabel, + typeOid, typeLength, typeModifier, tableOid, positionInTable); + fields[i].setFormat(formatType); + + LOGGER.log(Level.FINEST, " {0}", fields[i]); + } + + return fields; + } + + private void receiveAsyncNotify() throws IOException { + int len = pgStream.receiveInteger4(); // MESSAGE SIZE + assert len > 4 : "Length for AsyncNotify must be at least 4"; + + int pid = pgStream.receiveInteger4(); + String msg = pgStream.receiveCanonicalString(); + String param = pgStream.receiveString(); + addNotification(new Notification(msg, pid, param)); + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " <=BE AsyncNotify({0},{1},{2})", new Object[]{pid, msg, param}); + } + } + + private SQLException receiveErrorResponse() throws IOException { + // it's possible to get more than one error message for a query + // see libpq comments wrt backend closing a connection + // so, append messages to a string buffer and keep processing + // check at the bottom to see if we need to throw an exception + + int elen = pgStream.receiveInteger4(); + assert elen > 4 : "Error response length must be greater than 4"; + + EncodingPredictor.DecodeResult totalMessage = pgStream.receiveErrorString(elen - 4); + ServerErrorMessage errorMsg = new 
ServerErrorMessage(totalMessage); + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg.toString()); + } + + PSQLException error = new PSQLException(errorMsg, this.logServerErrorDetail); + if (transactionFailCause == null) { + transactionFailCause = error; + } else { + error.initCause(transactionFailCause); + } + return error; + } + + private SQLWarning receiveNoticeResponse() throws IOException { + int nlen = pgStream.receiveInteger4(); + assert nlen > 4 : "Notice Response length must be greater than 4"; + + ServerErrorMessage warnMsg = new ServerErrorMessage(pgStream.receiveString(nlen - 4)); + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " <=BE NoticeResponse({0})", warnMsg.toString()); + } + + return new PSQLWarning(warnMsg); + } + + private String receiveCommandStatus() throws IOException { + // TODO: better handle the msg len + int len = pgStream.receiveInteger4(); + // read len -5 bytes (-4 for len and -1 for trailing \0) + String status = pgStream.receiveString(len - 5); + // now read and discard the trailing \0 + pgStream.receiveChar(); // Receive(1) would allocate new byte[1], so avoid it + + LOGGER.log(Level.FINEST, " <=BE CommandStatus({0})", status); + + return status; + } + + private void interpretCommandStatus(String status, ResultHandler handler) { + try { + commandCompleteParser.parse(status); + } catch (SQLException e) { + handler.handleError(e); + return; + } + long oid = commandCompleteParser.getOid(); + long count = commandCompleteParser.getRows(); + + handler.handleCommandStatus(status, count, oid); + } + + private void receiveRFQ() throws IOException { + if (pgStream.receiveInteger4() != 5) { + throw new IOException("unexpected length of ReadyForQuery message"); + } + + char tStatus = (char) pgStream.receiveChar(); + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " <=BE ReadyForQuery({0})", tStatus); + } + + // Update connection state. 
+ switch (tStatus) { + case 'I': + transactionFailCause = null; + setTransactionState(TransactionState.IDLE); + break; + case 'T': + transactionFailCause = null; + setTransactionState(TransactionState.OPEN); + break; + case 'E': + setTransactionState(TransactionState.FAILED); + break; + default: + throw new IOException( + "unexpected transaction state in ReadyForQuery message: " + (int) tStatus); + } + } + + @Override + @SuppressWarnings("deprecation") + protected void sendCloseMessage() throws IOException { + closeAction.sendCloseMessage(pgStream); + } + + public void readStartupMessages() throws IOException, SQLException { + for (int i = 0; i < 1000; i++) { + int beresp = pgStream.receiveChar(); + switch (beresp) { + case 'Z': + receiveRFQ(); + // Ready For Query; we're done. + return; + + case 'K': + // BackendKeyData + int msgLen = pgStream.receiveInteger4(); + if (msgLen != 12) { + throw new PSQLException(GT.tr("Protocol error. Session setup failed."), + PSQLState.PROTOCOL_VIOLATION); + } + + int pid = pgStream.receiveInteger4(); + int ckey = pgStream.receiveInteger4(); + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " <=BE BackendKeyData(pid={0},ckey={1})", new Object[]{pid, ckey}); + } + + setBackendKeyData(pid, ckey); + break; + + case 'E': + // Error + throw receiveErrorResponse(); + + case 'N': + // Warning + addWarning(receiveNoticeResponse()); + break; + + case 'S': + // ParameterStatus + receiveParameterStatus(); + + break; + + default: + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " invalid message type={0}", (char) beresp); + } + throw new PSQLException(GT.tr("Protocol error. Session setup failed."), + PSQLState.PROTOCOL_VIOLATION); + } + } + throw new PSQLException(GT.tr("Protocol error. 
Session setup failed."), + PSQLState.PROTOCOL_VIOLATION); + } + + public void receiveParameterStatus() throws IOException, SQLException { + // ParameterStatus + pgStream.receiveInteger4(); // MESSAGE SIZE + final String name = pgStream.receiveCanonicalStringIfPresent(); + final String value = pgStream.receiveCanonicalStringIfPresent(); + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " <=BE ParameterStatus({0} = {1})", new Object[]{name, value}); + } + + // if the name is empty, there is nothing to do + if (name.isEmpty()) { + return; + } + + // Update client-visible parameter status map for getParameterStatuses() + onParameterStatus(name, value); + + if ("client_encoding".equals(name)) { + if (allowEncodingChanges) { + if (!"UTF8".equalsIgnoreCase(value) && !"UTF-8".equalsIgnoreCase(value)) { + LOGGER.log(Level.FINE, + "pgjdbc expects client_encoding to be UTF8 for proper operation. Actual encoding is {0}", + value); + } + pgStream.setEncoding(Encoding.getDatabaseEncoding(value)); + } else if (!"UTF8".equalsIgnoreCase(value) && !"UTF-8".equalsIgnoreCase(value)) { + close(); // we're screwed now; we can't trust any subsequent string. + throw new PSQLException(GT.tr( + "The server''s client_encoding parameter was changed to {0}. The JDBC driver requires client_encoding to be UTF8 for correct operation.", + value), PSQLState.CONNECTION_FAILURE); + + } + } + + if ("DateStyle".equals(name) && !value.startsWith("ISO") + && !value.toUpperCase(Locale.ROOT).startsWith("ISO")) { + close(); // we're screwed now; we can't trust any subsequent date. + throw new PSQLException(GT.tr( + "The server''s DateStyle parameter was changed to {0}. 
The JDBC driver requires DateStyle to begin with ISO for correct operation.", + value), PSQLState.CONNECTION_FAILURE); + } + + if ("standard_conforming_strings".equals(name)) { + if ("on".equals(value)) { + setStandardConformingStrings(true); + } else if ("off".equals(value)) { + setStandardConformingStrings(false); + } else { + close(); + // we're screwed now; we don't know how to escape string literals + throw new PSQLException(GT.tr( + "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.", + value), PSQLState.CONNECTION_FAILURE); + } + return; + } + + if ("TimeZone".equals(name)) { + setTimeZone(TimestampUtils.parseBackendTimeZone(value)); + } else if ("application_name".equals(name)) { + setApplicationName(value); + } else if ("server_version_num".equals(name)) { + setServerVersionNum(Integer.parseInt(value)); + } else if ("server_version".equals(name)) { + setServerVersion(value); + } else if ("integer_datetimes".equals(name)) { + if ("on".equals(value)) { + setIntegerDateTimes(true); + } else if ("off".equals(value)) { + setIntegerDateTimes(false); + } else { + throw new PSQLException(GT.tr("Protocol error. 
Session setup failed."), + PSQLState.PROTOCOL_VIOLATION); + } + } + } + + public void setTimeZone(TimeZone timeZone) { + this.timeZone = timeZone; + } + + @Override + public TimeZone getTimeZone() { + return timeZone; + } + + public void setApplicationName(String applicationName) { + this.applicationName = applicationName; + } + + @Override + public String getApplicationName() { + if (applicationName == null) { + return ""; + } + return applicationName; + } + + @Override + public ReplicationProtocol getReplicationProtocol() { + return replicationProtocol; + } + + @Override + public void addBinaryReceiveOid(int oid) { + synchronized (useBinaryReceiveForOids) { + useBinaryReceiveForOids.add(oid); + } + } + + @Override + public void removeBinaryReceiveOid(int oid) { + synchronized (useBinaryReceiveForOids) { + useBinaryReceiveForOids.remove(oid); + } + } + + @Override + @SuppressWarnings("deprecation") + public Set getBinaryReceiveOids() { + // copy the values to prevent ConcurrentModificationException when reader accesses the elements + synchronized (useBinaryReceiveForOids) { + return new HashSet<>(useBinaryReceiveForOids); + } + } + + @Override + public boolean useBinaryForReceive(int oid) { + synchronized (useBinaryReceiveForOids) { + return useBinaryReceiveForOids.contains(oid); + } + } + + @Override + public void setBinaryReceiveOids(Set oids) { + synchronized (useBinaryReceiveForOids) { + useBinaryReceiveForOids.clear(); + useBinaryReceiveForOids.addAll(oids); + } + } + + @Override + public void addBinarySendOid(int oid) { + synchronized (useBinarySendForOids) { + useBinarySendForOids.add(oid); + } + } + + @Override + public void removeBinarySendOid(int oid) { + synchronized (useBinarySendForOids) { + useBinarySendForOids.remove(oid); + } + } + + @Override + @SuppressWarnings("deprecation") + public Set getBinarySendOids() { + // copy the values to prevent ConcurrentModificationException when reader accesses the elements + synchronized (useBinarySendForOids) { 
+ return new HashSet<>(useBinarySendForOids); + } + } + + @Override + public boolean useBinaryForSend(int oid) { + synchronized (useBinarySendForOids) { + return useBinarySendForOids.contains(oid); + } + } + + @Override + public void setBinarySendOids(Set oids) { + synchronized (useBinarySendForOids) { + useBinarySendForOids.clear(); + useBinarySendForOids.addAll(oids); + } + } + + private void setIntegerDateTimes(boolean state) { + integerDateTimes = state; + } + + @Override + public boolean getIntegerDateTimes() { + return integerDateTimes; + } + + private final Deque pendingParseQueue = new ArrayDeque<>(); + private final Deque pendingBindQueue = new ArrayDeque<>(); + private final Deque pendingExecuteQueue = new ArrayDeque<>(); + private final Deque pendingDescribeStatementQueue = + new ArrayDeque<>(); + private final Deque pendingDescribePortalQueue = new ArrayDeque<>(); + + private long nextUniqueID = 1; + private final boolean allowEncodingChanges; + private final boolean cleanupSavePoints; + + /** + *

 * <p>The estimated server response size since we last consumed the input stream from the server,
 * in bytes.</p>
 *
 * <p>Starts at zero, reset by every Sync message. Mainly used for batches.</p>
 *
 * <p>Used to avoid deadlocks; see MAX_BUFFERED_RECV_BYTES.</p>

+ */ + private int estimatedReceiveBufferBytes; + + private final SimpleQuery beginTransactionQuery = + new SimpleQuery( + new NativeQuery("BEGIN", null, false, SqlCommand.BLANK), + null, false); + + private final SimpleQuery beginReadOnlyTransactionQuery = + new SimpleQuery( + new NativeQuery("BEGIN READ ONLY", null, false, SqlCommand.BLANK), + null, false); + + private final SimpleQuery emptyQuery = + new SimpleQuery( + new NativeQuery("", null, false, + SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK) + ), null, false); + + private final SimpleQuery autoSaveQuery = + new SimpleQuery( + new NativeQuery("SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK), + null, false); + + private final SimpleQuery releaseAutoSave = + new SimpleQuery( + new NativeQuery("RELEASE SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK), + null, false); + + /* + In autosave mode we use this query to roll back errored transactions + */ + private final SimpleQuery restoreToAutoSave = + new SimpleQuery( + new NativeQuery("ROLLBACK TO SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK), + null, false); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java new file mode 100644 index 0000000..1e6571f --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java @@ -0,0 +1,623 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. 
+ +package org.postgresql.core.v3; + +import org.postgresql.core.Oid; +import org.postgresql.core.PGStream; +import org.postgresql.core.ParameterList; +import org.postgresql.core.Utils; +import org.postgresql.geometric.PGbox; +import org.postgresql.geometric.PGpoint; +import org.postgresql.jdbc.UUIDArrayAssistant; +import org.postgresql.util.ByteConverter; +import org.postgresql.util.ByteStreamWriter; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.util.StreamWrapper; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.sql.SQLException; +import java.util.Arrays; + +/** + * Parameter list for a single-statement V3 query. + * + * @author Oliver Jowett (oliver@opencloud.com) + */ +class SimpleParameterList implements V3ParameterList { + + private static final byte IN = 1; + private static final byte OUT = 2; + private static final byte INOUT = IN | OUT; + + private static final byte TEXT = 0; + private static final byte BINARY = 4; + + SimpleParameterList(int paramCount, TypeTransferModeRegistry transferModeRegistry) { + this.paramValues = new Object[paramCount]; + this.paramTypes = new int[paramCount]; + this.encoded = new byte[paramCount][]; + this.flags = new byte[paramCount]; + this.transferModeRegistry = transferModeRegistry; + } + + @Override + public void registerOutParameter(int index, int sqlType) throws SQLException { + if (index < 1 || index > paramValues.length) { + throw new PSQLException( + GT.tr("The column index is out of range: {0}, number of columns: {1}.", + index, paramValues.length), + PSQLState.INVALID_PARAMETER_VALUE); + } + + flags[index - 1] |= OUT; + } + + private void bind(int index, Object value, int oid, byte binary) throws SQLException { + if (index < 1 || index > paramValues.length) { + throw new PSQLException( + GT.tr("The column index is out of range: {0}, number of columns: {1}.", + 
index, paramValues.length), + PSQLState.INVALID_PARAMETER_VALUE); + } + + --index; + + encoded[index] = null; + paramValues[index] = value; + flags[index] = (byte) (direction(index) | IN | binary); + + // If we are setting something to an UNSPECIFIED NULL, don't overwrite + // our existing type for it. We don't need the correct type info to + // send this value, and we don't want to overwrite and require a + // reparse. + if (oid == Oid.UNSPECIFIED && paramTypes[index] != Oid.UNSPECIFIED && value == NULL_OBJECT) { + return; + } + + paramTypes[index] = oid; + pos = index + 1; + } + + @Override + public int getParameterCount() { + return paramValues.length; + } + + @Override + public int getOutParameterCount() { + int count = 0; + for (int i = 0; i < paramTypes.length; i++) { + if ((direction(i) & OUT) == OUT) { + count++; + } + } + // Every function has at least one output. + if (count == 0) { + count = 1; + } + return count; + + } + + @Override + public int getInParameterCount() { + int count = 0; + for (int i = 0; i < paramTypes.length; i++) { + if (direction(i) != OUT) { + count++; + } + } + return count; + } + + @Override + public void setIntParameter(int index, int value) throws SQLException { + byte[] data = new byte[4]; + ByteConverter.int4(data, 0, value); + bind(index, data, Oid.INT4, BINARY); + } + + @Override + public void setLiteralParameter(int index, String value, int oid) throws SQLException { + bind(index, value, oid, TEXT); + } + + @Override + public void setStringParameter(int index, String value, int oid) throws SQLException { + bind(index, value, oid, TEXT); + } + + @Override + public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException { + bind(index, value, oid, BINARY); + } + + @Override + public void setBytea(int index, byte[] data, int offset, int length) throws SQLException { + bind(index, new StreamWrapper(data, offset, length), Oid.BYTEA, BINARY); + } + + @Override + public void setBytea(int index, InputStream 
stream, int length) throws SQLException { + bind(index, new StreamWrapper(stream, length), Oid.BYTEA, BINARY); + } + + @Override + public void setBytea(int index, InputStream stream) throws SQLException { + bind(index, new StreamWrapper(stream), Oid.BYTEA, BINARY); + } + + @Override + public void setBytea(int index, ByteStreamWriter writer) throws SQLException { + bind(index, writer, Oid.BYTEA, BINARY); + } + + @Override + public void setText(int index, InputStream stream) throws SQLException { + bind(index, new StreamWrapper(stream), Oid.TEXT, TEXT); + } + + @Override + public void setNull(int index, int oid) throws SQLException { + + byte binaryTransfer = TEXT; + + if (transferModeRegistry != null && transferModeRegistry.useBinaryForReceive(oid)) { + binaryTransfer = BINARY; + } + bind(index, NULL_OBJECT, oid, binaryTransfer); + } + + /** + *

 * <p>Escapes a given text value as a literal, wraps it in single quotes, casts it to the
 * given data type, and finally wraps the whole thing in parentheses.</p>
 *
 * <p>For example, "123" and "int4" becomes "('123'::int4)"</p>
 *
 * <p>The additional parentheses are added to ensure that the surrounding text of where the
 * parameter value is entered does not modify the interpretation of the value.</p>
 *
 * <p>For example if our input SQL is: SELECT ?b</p>
 *
 * <p>Using a parameter value of '{}' and type of json we'd get:</p>
 *
 * <pre>
 * test=# SELECT ('{}'::json)b;
 *  b
 * ----
 *  {}
 * </pre>
 *
 * <p>But without the parentheses the result changes:</p>
 *
 * <pre>
 * test=# SELECT '{}'::jsonb;
 * jsonb
 * -------
 * {}
 * </pre>
+   * 
+ **/ + private static String quoteAndCast(String text, String type, boolean standardConformingStrings) { + StringBuilder sb = new StringBuilder((text.length() + 10) / 10 * 11); // Add 10% for escaping. + sb.append("('"); + try { + Utils.escapeLiteral(sb, text, standardConformingStrings); + } catch (SQLException e) { + // This should only happen if we have an embedded null + // and there's not much we can do if we do hit one. + // + // To force a server side failure, we deliberately include + // a zero byte character in the literal to force the server + // to reject the command. + sb.append('\u0000'); + } + sb.append("'"); + if (type != null) { + sb.append("::"); + sb.append(type); + } + sb.append(")"); + return sb.toString(); + } + + @Override + public String toString(int index, boolean standardConformingStrings) { + --index; + Object paramValue = paramValues[index]; + if (paramValue == null) { + return "?"; + } else if (paramValue == NULL_OBJECT) { + return "(NULL)"; + } + String textValue; + String type; + if ((flags[index] & BINARY) == BINARY) { + // handle some of the numeric types + switch (paramTypes[index]) { + case Oid.INT2: + short s = ByteConverter.int2((byte[]) paramValue, 0); + textValue = Short.toString(s); + type = "int2"; + break; + + case Oid.INT4: + int i = ByteConverter.int4((byte[]) paramValue, 0); + textValue = Integer.toString(i); + type = "int4"; + break; + + case Oid.INT8: + long l = ByteConverter.int8((byte[]) paramValue, 0); + textValue = Long.toString(l); + type = "int8"; + break; + + case Oid.FLOAT4: + float f = ByteConverter.float4((byte[]) paramValue, 0); + if (Float.isNaN(f)) { + return "('NaN'::real)"; + } + textValue = Float.toString(f); + type = "real"; + break; + + case Oid.FLOAT8: + double d = ByteConverter.float8((byte[]) paramValue, 0); + if (Double.isNaN(d)) { + return "('NaN'::double precision)"; + } + textValue = Double.toString(d); + type = "double precision"; + break; + + case Oid.NUMERIC: + Number n = 
ByteConverter.numeric((byte[]) paramValue); + if (n instanceof Double) { + assert ((Double) n).isNaN(); + return "('NaN'::numeric)"; + } + textValue = n.toString(); + type = "numeric"; + break; + + case Oid.UUID: + textValue = + new UUIDArrayAssistant().buildElement((byte[]) paramValue, 0, 16).toString(); + type = "uuid"; + break; + + case Oid.POINT: + PGpoint pgPoint = new PGpoint(); + pgPoint.setByteValue((byte[]) paramValue, 0); + textValue = pgPoint.toString(); + type = "point"; + break; + + case Oid.BOX: + PGbox pgBox = new PGbox(); + pgBox.setByteValue((byte[]) paramValue, 0); + textValue = pgBox.toString(); + type = "box"; + break; + + default: + return "?"; + } + } else { + textValue = paramValue.toString(); + switch (paramTypes[index]) { + case Oid.INT2: + type = "int2"; + break; + case Oid.INT4: + type = "int4"; + break; + case Oid.INT8: + type = "int8"; + break; + case Oid.FLOAT4: + type = "real"; + break; + case Oid.FLOAT8: + type = "double precision"; + break; + case Oid.TIMESTAMP: + type = "timestamp"; + break; + case Oid.TIMESTAMPTZ: + type = "timestamp with time zone"; + break; + case Oid.TIME: + type = "time"; + break; + case Oid.TIMETZ: + type = "time with time zone"; + break; + case Oid.DATE: + type = "date"; + break; + case Oid.INTERVAL: + type = "interval"; + break; + case Oid.NUMERIC: + type = "numeric"; + break; + case Oid.UUID: + type = "uuid"; + break; + case Oid.BOOL: + type = "boolean"; + break; + case Oid.BOX: + type = "box"; + break; + case Oid.POINT: + type = "point"; + break; + default: + type = null; + } + } + return quoteAndCast(textValue, type, standardConformingStrings); + } + + @Override + public void checkAllParametersSet() throws SQLException { + for (int i = 0; i < paramTypes.length; i++) { + if (direction(i) != OUT && paramValues[i] == null) { + throw new PSQLException(GT.tr("No value specified for parameter {0}.", i + 1), + PSQLState.INVALID_PARAMETER_VALUE); + } + } + } + + @Override + public void 
convertFunctionOutParameters() { + for (int i = 0; i < paramTypes.length; i++) { + if (direction(i) == OUT) { + paramTypes[i] = Oid.VOID; + paramValues[i] = NULL_OBJECT; + } + } + } + + // + // bytea helper + // + + private static void streamBytea(PGStream pgStream, StreamWrapper wrapper) throws IOException { + byte[] rawData = wrapper.getBytes(); + if (rawData != null) { + pgStream.send(rawData, wrapper.getOffset(), wrapper.getLength()); + return; + } + + pgStream.sendStream(wrapper.getStream(), wrapper.getLength()); + } + + // + // byte stream writer support + // + + private static void streamBytea(PGStream pgStream, ByteStreamWriter writer) throws IOException { + pgStream.send(writer); + } + + @Override + public int[] getTypeOIDs() { + return paramTypes; + } + + // + // Package-private V3 accessors + // + + int getTypeOID(int index) { + return paramTypes[index - 1]; + } + + boolean hasUnresolvedTypes() { + for (int paramType : paramTypes) { + if (paramType == Oid.UNSPECIFIED) { + return true; + } + } + return false; + } + + void setResolvedType(int index, int oid) { + // only allow overwriting an unknown value or VOID value + if (paramTypes[index - 1] == Oid.UNSPECIFIED || paramTypes[index - 1] == Oid.VOID) { + paramTypes[index - 1] = oid; + } else if (paramTypes[index - 1] != oid) { + throw new IllegalArgumentException("Can't change resolved type for param: " + index + " from " + + paramTypes[index - 1] + " to " + oid); + } + } + + boolean isNull(int index) { + return paramValues[index - 1] == NULL_OBJECT; + } + + boolean isBinary(int index) { + return (flags[index - 1] & BINARY) != 0; + } + + private byte direction(int index) { + return (byte) (flags[index] & INOUT); + } + + int getV3Length(int index) { + --index; + + // Null? + Object value = paramValues[index]; + if (value == null || value == NULL_OBJECT) { + throw new IllegalArgumentException("can't getV3Length() on a null parameter"); + } + + // Directly encoded? 
+ if (value instanceof byte[]) { + return ((byte[]) value).length; + } + + // Binary-format bytea? + if (value instanceof StreamWrapper) { + return ((StreamWrapper) value).getLength(); + } + + // Binary-format bytea? + if (value instanceof ByteStreamWriter) { + return ((ByteStreamWriter) value).getLength(); + } + + // Already encoded? + byte[] encoded = this.encoded[index]; + if (encoded == null) { + // Encode value and compute actual length using UTF-8. + this.encoded[index] = encoded = value.toString().getBytes(StandardCharsets.UTF_8); + } + + return encoded.length; + } + + void writeV3Value(int index, PGStream pgStream) throws IOException { + --index; + + // Null? + Object paramValue = paramValues[index]; + if (paramValue == null || paramValue == NULL_OBJECT) { + throw new IllegalArgumentException("can't writeV3Value() on a null parameter"); + } + + // Directly encoded? + if (paramValue instanceof byte[]) { + pgStream.send((byte[]) paramValue); + return; + } + + // Binary-format bytea? + if (paramValue instanceof StreamWrapper) { + try (StreamWrapper streamWrapper = (StreamWrapper) paramValue) { + streamBytea(pgStream, streamWrapper); + } + return; + } + + // Streamed bytea? + if (paramValue instanceof ByteStreamWriter) { + streamBytea(pgStream, (ByteStreamWriter) paramValue); + return; + } + + // Encoded string. 
+ if (encoded[index] == null) { + encoded[index] = ((String) paramValue).getBytes(StandardCharsets.UTF_8); + } + pgStream.send(encoded[index]); + } + + @Override + public ParameterList copy() { + SimpleParameterList newCopy = new SimpleParameterList(paramValues.length, transferModeRegistry); + System.arraycopy(paramValues, 0, newCopy.paramValues, 0, paramValues.length); + System.arraycopy(paramTypes, 0, newCopy.paramTypes, 0, paramTypes.length); + System.arraycopy(flags, 0, newCopy.flags, 0, flags.length); + newCopy.pos = pos; + return newCopy; + } + + @Override + public void clear() { + Arrays.fill(paramValues, null); + Arrays.fill(paramTypes, 0); + Arrays.fill(encoded, null); + Arrays.fill(flags, (byte) 0); + pos = 0; + } + + @Override + public SimpleParameterList [] getSubparams() { + return null; + } + + @Override + public Object[] getValues() { + return paramValues; + } + + @Override + public int[] getParamTypes() { + return paramTypes; + } + + @Override + public byte[] getFlags() { + return flags; + } + + @Override + public byte[] [] getEncoding() { + return encoded; + } + + @Override + public void appendAll(ParameterList list) throws SQLException { + if (list instanceof SimpleParameterList ) { + /* only v3.SimpleParameterList is compatible with this type + we need to create copies of our parameters, otherwise the values can be changed */ + SimpleParameterList spl = (SimpleParameterList) list; + int inParamCount = spl.getInParameterCount(); + if ((pos + inParamCount) > paramValues.length) { + throw new PSQLException( + GT.tr("Added parameters index out of range: {0}, number of columns: {1}.", + (pos + inParamCount), paramValues.length), + PSQLState.INVALID_PARAMETER_VALUE); + } + System.arraycopy(spl.getValues(), 0, this.paramValues, pos, inParamCount); + System.arraycopy(spl.getParamTypes(), 0, this.paramTypes, pos, inParamCount); + System.arraycopy(spl.getFlags(), 0, this.flags, pos, inParamCount); + System.arraycopy(spl.getEncoding(), 0, this.encoded, pos, 
inParamCount); + pos += inParamCount; + } + } + + /** + * Useful implementation of toString. + * @return String representation of the list values + */ + @Override + public String toString() { + StringBuilder ts = new StringBuilder("<["); + if (paramValues.length > 0) { + ts.append(toString(1, true)); + for (int c = 2; c <= paramValues.length; c++) { + ts.append(" ,").append(toString(c, true)); + } + } + ts.append("]>"); + return ts.toString(); + } + + private final Object[] paramValues; + private final int[] paramTypes; + private final byte[] flags; + private final byte[] [] encoded; + private final TypeTransferModeRegistry transferModeRegistry; + + /** + * Marker object representing NULL; this distinguishes "parameter never set" from "parameter set + * to null". + */ + private static final Object NULL_OBJECT = new Object(); + + private int pos; +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java new file mode 100644 index 0000000..d405f4b --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core.v3; + +import org.postgresql.core.Field; +import org.postgresql.core.NativeQuery; +import org.postgresql.core.Oid; +import org.postgresql.core.ParameterList; +import org.postgresql.core.Query; +import org.postgresql.core.SqlCommand; +import org.postgresql.jdbc.PgResultSet; + +import java.lang.ref.PhantomReference; +import java.nio.charset.StandardCharsets; +import java.util.BitSet; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * V3 Query implementation for a single-statement query. This also holds the state of any associated + * server-side named statement. 
We use a PhantomReference managed by the QueryExecutor to handle + * statement cleanup. + * + * @author Oliver Jowett (oliver@opencloud.com) + */ +class SimpleQuery implements Query { + private static final Logger LOGGER = Logger.getLogger(SimpleQuery.class.getName()); + + SimpleQuery(SimpleQuery src) { + this(src.nativeQuery, src.transferModeRegistry, src.sanitiserDisabled); + } + + SimpleQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry, + boolean sanitiserDisabled) { + this.nativeQuery = query; + this.transferModeRegistry = transferModeRegistry; + this.sanitiserDisabled = sanitiserDisabled; + } + + @Override + public ParameterList createParameterList() { + if (nativeQuery.bindPositions.length == 0) { + return NO_PARAMETERS; + } + + return new SimpleParameterList(getBindCount(), transferModeRegistry); + } + + @Override + public String toString(ParameterList parameters) { + return nativeQuery.toString(parameters); + } + + @Override + public String toString() { + return toString(null); + } + + @Override + public void close() { + unprepare(); + } + + @Override + public SimpleQuery [] getSubqueries() { + return null; + } + + /** + *

Return maximum size in bytes that each result row from this query may return. Mainly used for + * batches that return results.

+ * + *

Results are cached until/unless the query is re-described.

+ * + * @return Max size of result data in bytes according to returned fields, 0 if no results, -1 if + * result is unbounded. + * @throws IllegalStateException if the query is not described + */ + public int getMaxResultRowSize() { + if (cachedMaxResultRowSize != null) { + return cachedMaxResultRowSize; + } + if (!this.statementDescribed) { + throw new IllegalStateException( + "Cannot estimate result row size on a statement that is not described"); + } + int maxResultRowSize = 0; + if (fields != null) { + for (Field f : fields) { + final int fieldLength = f.getLength(); + if (fieldLength < 1 || fieldLength >= 65535) { + /* + * Field length unknown or large; we can't make any safe estimates about the result size, + * so we have to fall back to sending queries individually. + */ + maxResultRowSize = -1; + break; + } + maxResultRowSize += fieldLength; + } + } + cachedMaxResultRowSize = maxResultRowSize; + return maxResultRowSize; + } + + // + // Implementation guts + // + + @Override + public String getNativeSql() { + return nativeQuery.nativeSql; + } + + void setStatementName(String statementName, short deallocateEpoch) { + assert statementName != null : "statement name should not be null"; + this.statementName = statementName; + this.encodedStatementName = statementName.getBytes(StandardCharsets.UTF_8); + this.deallocateEpoch = deallocateEpoch; + } + + void setPrepareTypes(int[] paramTypes) { + // Remember which parameters were unspecified since the parameters will be overridden later by + // ParameterDescription message + for (int i = 0; i < paramTypes.length; i++) { + int paramType = paramTypes[i]; + if (paramType == Oid.UNSPECIFIED) { + if (this.unspecifiedParams == null) { + this.unspecifiedParams = new BitSet(); + } + this.unspecifiedParams.set(i); + } + } + + // paramTypes is changed by "describe statement" response, so we clone the array + // However, we can reuse array if there is one + if (this.preparedTypes == null) { + this.preparedTypes = 
paramTypes.clone(); + return; + } + System.arraycopy(paramTypes, 0, this.preparedTypes, 0, paramTypes.length); + } + + int [] getPrepareTypes() { + return preparedTypes; + } + + String getStatementName() { + return statementName; + } + + boolean isPreparedFor(int[] paramTypes, short deallocateEpoch) { + if (statementName == null || preparedTypes == null) { + return false; // Not prepared. + } + if (this.deallocateEpoch != deallocateEpoch) { + return false; + } + + assert paramTypes.length == preparedTypes.length + : String.format("paramTypes:%1$d preparedTypes:%2$d", paramTypes.length, + preparedTypes.length); + // Check for compatible types. + BitSet unspecified = this.unspecifiedParams; + for (int i = 0; i < paramTypes.length; i++) { + int paramType = paramTypes[i]; + // Either paramType should match prepared type + // Or paramType==UNSPECIFIED and the prepare type was UNSPECIFIED + + // Note: preparedTypes can be updated by "statement describe" + // 1) parse(name="S_01", sql="select ?::timestamp", types={UNSPECIFIED}) + // 2) statement describe: bind 1 type is TIMESTAMP + // 3) SimpleQuery.preparedTypes is updated to TIMESTAMP + // ... + // 4.1) bind(name="S_01", ..., types={TIMESTAMP}) -> OK (since preparedTypes is equal to TIMESTAMP) + // 4.2) bind(name="S_01", ..., types={UNSPECIFIED}) -> OK (since the query was initially parsed with UNSPECIFIED) + // 4.3) bind(name="S_01", ..., types={DATE}) -> KO, unprepare and parse required + + int preparedType = preparedTypes[i]; + if (paramType != preparedType + && (paramType != Oid.UNSPECIFIED + || unspecified == null + || !unspecified.get(i))) { + if (LOGGER.isLoggable(Level.FINER)) { + LOGGER.log(Level.FINER, + "Statement {0} does not match new parameter types. Will have to un-prepare it and parse once again." + + " To avoid performance issues, use the same data type for the same bind position. 
Bind index (1-based) is {1}," + + " preparedType was {2} (after describe {3}), current bind type is {4}", + new Object[]{statementName, i + 1, + Oid.toString(unspecified != null && unspecified.get(i) ? 0 : preparedType), + Oid.toString(preparedType), Oid.toString(paramType)}); + } + return false; + } + } + + return true; + } + + boolean hasUnresolvedTypes() { + if (preparedTypes == null) { + return true; + } + + return this.unspecifiedParams != null && !this.unspecifiedParams.isEmpty(); + } + + byte [] getEncodedStatementName() { + return encodedStatementName; + } + + /** + * Sets the fields that this query will return. + * + * @param fields The fields that this query will return. + */ + void setFields(Field [] fields) { + this.fields = fields; + this.resultSetColumnNameIndexMap = null; + this.cachedMaxResultRowSize = null; + this.needUpdateFieldFormats = fields != null; + this.hasBinaryFields = false; // just in case + } + + /** + * Returns the fields that this query will return. If the result set fields are not known returns + * null. + * + * @return the fields that this query will return. + */ + Field [] getFields() { + return fields; + } + + /** + * Returns true if current query needs field formats be adjusted as per connection configuration. + * Subsequent invocations would return {@code false}. 
The idea is to perform adjustments only + * once, not for each + * {@link QueryExecutorImpl#sendBind(SimpleQuery, SimpleParameterList, Portal, boolean)} + * + * @return true if current query needs field formats be adjusted as per connection configuration + */ + boolean needUpdateFieldFormats() { + if (needUpdateFieldFormats) { + needUpdateFieldFormats = false; + return true; + } + return false; + } + + public void resetNeedUpdateFieldFormats() { + needUpdateFieldFormats = fields != null; + } + + public boolean hasBinaryFields() { + return hasBinaryFields; + } + + public void setHasBinaryFields(boolean hasBinaryFields) { + this.hasBinaryFields = hasBinaryFields; + } + + // Have we sent a Describe Portal message for this query yet? + boolean isPortalDescribed() { + return portalDescribed; + } + + void setPortalDescribed(boolean portalDescribed) { + this.portalDescribed = portalDescribed; + this.cachedMaxResultRowSize = null; + } + + // Have we sent a Describe Statement message for this query yet? + // Note that we might not have need to, so this may always be false. 
+ @Override + public boolean isStatementDescribed() { + return statementDescribed; + } + + void setStatementDescribed(boolean statementDescribed) { + this.statementDescribed = statementDescribed; + this.cachedMaxResultRowSize = null; + } + + @Override + public boolean isEmpty() { + return getNativeSql().isEmpty(); + } + + void setCleanupRef(PhantomReference cleanupRef) { + PhantomReference oldCleanupRef = this.cleanupRef; + if (oldCleanupRef != null) { + oldCleanupRef.clear(); + oldCleanupRef.enqueue(); + } + this.cleanupRef = cleanupRef; + } + + void unprepare() { + PhantomReference cleanupRef = this.cleanupRef; + if (cleanupRef != null) { + cleanupRef.clear(); + cleanupRef.enqueue(); + this.cleanupRef = null; + } + if (this.unspecifiedParams != null) { + this.unspecifiedParams.clear(); + } + + statementName = null; + encodedStatementName = null; + fields = null; + this.resultSetColumnNameIndexMap = null; + portalDescribed = false; + statementDescribed = false; + cachedMaxResultRowSize = null; + } + + @Override + public int getBatchSize() { + return 1; + } + + NativeQuery getNativeQuery() { + return nativeQuery; + } + + public final int getBindCount() { + return nativeQuery.bindPositions.length * getBatchSize(); + } + + private Map resultSetColumnNameIndexMap; + + @Override + public Map getResultSetColumnNameIndexMap() { + Map columnPositions = this.resultSetColumnNameIndexMap; + if (columnPositions == null && fields != null) { + columnPositions = + PgResultSet.createColumnNameIndexMap(fields, sanitiserDisabled); + if (statementName != null) { + // Cache column positions for server-prepared statements only + this.resultSetColumnNameIndexMap = columnPositions; + } + } + return columnPositions; + } + + @Override + public SqlCommand getSqlCommand() { + return nativeQuery.getCommand(); + } + + private final NativeQuery nativeQuery; + + private final TypeTransferModeRegistry transferModeRegistry; + private String statementName; + private byte [] encodedStatementName; + 
/** + * The stored fields from previous execution or describe of a prepared statement. Always null for + * non-prepared statements. + */ + private Field [] fields; + private boolean needUpdateFieldFormats; + private boolean hasBinaryFields; + private boolean portalDescribed; + private boolean statementDescribed; + private final boolean sanitiserDisabled; + private PhantomReference cleanupRef; + private int [] preparedTypes; + private BitSet unspecifiedParams; + private short deallocateEpoch; + + private Integer cachedMaxResultRowSize; + + static final SimpleParameterList NO_PARAMETERS = new SimpleParameterList(0, null); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java b/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java new file mode 100644 index 0000000..c50570c --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core.v3; + +public interface TypeTransferModeRegistry { + /** + * Returns if given oid should be sent in binary format. + * @param oid type oid + * @return true if given oid should be sent in binary format + */ + boolean useBinaryForSend(int oid); + + /** + * Returns if given oid should be received in binary format. + * @param oid type oid + * @return true if given oid should be received in binary format + */ + boolean useBinaryForReceive(int oid); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java new file mode 100644 index 0000000..c49e0e0 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.core.v3; + +import org.postgresql.core.ParameterList; + +import java.sql.SQLException; + +/** + * Common interface for all V3 parameter list implementations. + * + * @author Oliver Jowett (oliver@opencloud.com) + */ +interface V3ParameterList extends ParameterList { + /** + * Ensure that all parameters in this list have been assigned values. Return silently if all is + * well, otherwise throw an appropriate exception. + * + * @throws SQLException if not all parameters are set. + */ + void checkAllParametersSet() throws SQLException; + + /** + * Convert any function output parameters to the correct type (void) and set an ignorable value + * for it. + */ + void convertFunctionOutParameters(); + + /** + * Return a list of the SimpleParameterList objects that make up this parameter list. If this + * object is already a SimpleParameterList, returns null (avoids an extra array construction in + * the common case). + * + * @return an array of single-statement parameter lists, or null if this object is + * already a single-statement parameter list. + */ + SimpleParameterList [] getSubparams(); + + /** + * Return the parameter type information. + * @return an array of {@link org.postgresql.core.Oid} type information + */ + int [] getParamTypes(); + + /** + * Return the flags for each parameter. + * @return an array of bytes used to store flags. + */ + byte [] getFlags(); + + /** + * Return the encoding for each parameter. + * @return nested byte array of bytes with encoding information. 
+ */ + byte [] [] getEncoding(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java b/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java new file mode 100644 index 0000000..83e1c92 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core.v3.adaptivefetch; + +import org.postgresql.PGProperty; +import org.postgresql.core.Query; + +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +/** + * The main purpose of this class is to handle adaptive fetching process. Adaptive fetching is used + * to compute fetch size to fully use size defined by maxResultBuffer. Computing is made by dividing + * maxResultBuffer size by max row result size noticed so far. Each query have separate adaptive + * fetch size computed, but same queries have it shared. If adaptive fetch is turned on, first fetch + * is going to be made with defaultRowFetchSize, next fetching of resultSet will be made with + * computed adaptive fetch size. If adaptive fetch is turned on during fetching, then first fetching + * made by ResultSet will be made with defaultRowFetchSize, next will use computed adaptive fetch + * size. Property adaptiveFetch need properties defaultRowFetchSize and maxResultBuffer to work. 
+ */ +public class AdaptiveFetchCache { + + private final Map adaptiveFetchInfoMap; + private boolean adaptiveFetch; + private final int minimumAdaptiveFetchSize; + private int maximumAdaptiveFetchSize = -1; + private long maximumResultBufferSize = -1; + + public AdaptiveFetchCache(long maximumResultBufferSize, Properties info) + throws SQLException { + this.adaptiveFetchInfoMap = new HashMap<>(); + + this.adaptiveFetch = PGProperty.ADAPTIVE_FETCH.getBoolean(info); + this.minimumAdaptiveFetchSize = PGProperty.ADAPTIVE_FETCH_MINIMUM.getInt(info); + this.maximumAdaptiveFetchSize = PGProperty.ADAPTIVE_FETCH_MAXIMUM.getInt(info); + + this.maximumResultBufferSize = maximumResultBufferSize; + } + + /** + * Add query to being cached and computing adaptive fetch size. + * + * @param adaptiveFetch state of adaptive fetch, which should be used during adding query + * @param query query to be cached + */ + public void addNewQuery(boolean adaptiveFetch, Query query) { + if (adaptiveFetch && maximumResultBufferSize != -1) { + String sql = query.getNativeSql().trim(); + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql); + if (adaptiveFetchCacheEntry == null) { + adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + } + adaptiveFetchCacheEntry.incrementCounter(); + + adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry); + } + } + + /** + * Update adaptive fetch size for given query. 
+ * + * @param adaptiveFetch state of adaptive fetch, which should be used during updating fetch + * size for query + * @param query query to be updated + * @param maximumRowSizeBytes max row size used during updating information about adaptive fetch + * size for given query + */ + public void updateQueryFetchSize(boolean adaptiveFetch, Query query, int maximumRowSizeBytes) { + if (adaptiveFetch && maximumResultBufferSize != -1) { + String sql = query.getNativeSql().trim(); + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql); + if (adaptiveFetchCacheEntry != null) { + int adaptiveMaximumRowSize = adaptiveFetchCacheEntry.getMaximumRowSizeBytes(); + if (adaptiveMaximumRowSize < maximumRowSizeBytes && maximumRowSizeBytes > 0) { + int newFetchSize = (int) (maximumResultBufferSize / maximumRowSizeBytes); + newFetchSize = adjustFetchSize(newFetchSize); + + adaptiveFetchCacheEntry.setMaximumRowSizeBytes(maximumRowSizeBytes); + adaptiveFetchCacheEntry.setSize(newFetchSize); + + adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry); + } + } + } + } + + /** + * Get adaptive fetch size for given query. + * + * @param adaptiveFetch state of adaptive fetch, which should be used during getting fetch size + * for query + * @param query query to which we want get adaptive fetch size + * @return adaptive fetch size for query or -1 if size doesn't exist/adaptive fetch state is false + */ + public int getFetchSizeForQuery(boolean adaptiveFetch, Query query) { + if (adaptiveFetch && maximumResultBufferSize != -1) { + String sql = query.getNativeSql().trim(); + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql); + if (adaptiveFetchCacheEntry != null) { + return adaptiveFetchCacheEntry.getSize(); + } + } + return -1; + } + + /** + * Remove query information from caching. 
+ * + * @param adaptiveFetch state of adaptive fetch, which should be used during removing fetch size + * for query + * @param query query to be removed from caching + */ + public void removeQuery(boolean adaptiveFetch, Query query) { + if (adaptiveFetch && maximumResultBufferSize != -1) { + String sql = query.getNativeSql().trim(); + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql); + if (adaptiveFetchCacheEntry != null) { + adaptiveFetchCacheEntry.decrementCounter(); + + if (adaptiveFetchCacheEntry.getCounter() < 1) { + adaptiveFetchInfoMap.remove(sql); + } else { + adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry); + } + } + } + } + + /** + * Set maximum and minimum constraints on given value. + * + * @param actualSize value which should be the computed fetch size + * @return value which meet the constraints + */ + private int adjustFetchSize(int actualSize) { + int size = adjustMaximumFetchSize(actualSize); + size = adjustMinimumFetchSize(size); + return size; + } + + /** + * Set minimum constraint on given value. + * + * @param actualSize value which should be the computed fetch size + * @return value which meet the minimum constraint + */ + private int adjustMinimumFetchSize(int actualSize) { + if (minimumAdaptiveFetchSize == 0) { + return actualSize; + } + if (minimumAdaptiveFetchSize > actualSize) { + return minimumAdaptiveFetchSize; + } else { + return actualSize; + } + } + + /** + * Set maximum constraint on given value. + * + * @param actualSize value which should be the computed fetch size + * @return value which meet the maximum constraint + */ + private int adjustMaximumFetchSize(int actualSize) { + if (maximumAdaptiveFetchSize == -1) { + return actualSize; + } + if (maximumAdaptiveFetchSize < actualSize) { + return maximumAdaptiveFetchSize; + } else { + return actualSize; + } + } + + /** + * Get state of adaptive fetch. 
/*
 * Copyright (c) 2020, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

/**
 * Mutable per-query record used by the adaptive fetch cache: the computed fetch size,
 * the largest row size observed so far, and a reference count of executions currently
 * sharing this entry.
 */
public class AdaptiveFetchCacheEntry {

  /** Adaptive fetch size computed for the query; -1 until one has been computed. */
  private int size = -1;

  /** Number of queries in execution that currently use this entry. */
  private int counter;

  /** Largest row size in bytes recorded for the query so far; -1 until known. */
  private int maximumRowSizeBytes = -1;

  /** Returns the cached adaptive fetch size, or -1 when none has been set. */
  public int getSize() {
    return size;
  }

  /** Stores a newly computed adaptive fetch size. */
  public void setSize(int size) {
    this.size = size;
  }

  /** Returns the current reference count. */
  public int getCounter() {
    return counter;
  }

  /** Overwrites the reference count. */
  public void setCounter(int counter) {
    this.counter = counter;
  }

  /** Increases the reference count by one. */
  public void incrementCounter() {
    counter++;
  }

  /** Decreases the reference count by one. */
  public void decrementCounter() {
    counter--;
  }

  /** Returns the largest row size in bytes seen so far, or -1 when unknown. */
  public int getMaximumRowSizeBytes() {
    return maximumRowSizeBytes;
  }

  /** Records the largest row size in bytes seen so far. */
  public void setMaximumRowSizeBytes(int maximumRowSizeBytes) {
    this.maximumRowSizeBytes = maximumRowSizeBytes;
  }
}
b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3PGReplicationStream.java @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core.v3.replication; + +import org.postgresql.copy.CopyDual; +import org.postgresql.replication.LogSequenceNumber; +import org.postgresql.replication.PGReplicationStream; +import org.postgresql.replication.ReplicationType; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.net.SocketTimeoutException; +import java.nio.ByteBuffer; +import java.sql.SQLException; +import java.util.Date; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +public class V3PGReplicationStream implements PGReplicationStream { + + private static final Logger LOGGER = Logger.getLogger(V3PGReplicationStream.class.getName()); + public static final long POSTGRES_EPOCH_2000_01_01 = 946684800000L; + private static final long NANOS_PER_MILLISECOND = 1000000L; + + private final CopyDual copyDual; + private final long updateInterval; + private final ReplicationType replicationType; + private long lastStatusUpdate; + private boolean closeFlag; + + private LogSequenceNumber lastServerLSN = LogSequenceNumber.INVALID_LSN; + /** + * Last receive LSN + payload size. 
+ */ + private volatile LogSequenceNumber lastReceiveLSN = LogSequenceNumber.INVALID_LSN; + private volatile LogSequenceNumber lastAppliedLSN = LogSequenceNumber.INVALID_LSN; + private volatile LogSequenceNumber lastFlushedLSN = LogSequenceNumber.INVALID_LSN; + private volatile LogSequenceNumber startOfLastMessageLSN = LogSequenceNumber.INVALID_LSN; + private volatile LogSequenceNumber explicitlyFlushedLSN = LogSequenceNumber.INVALID_LSN; + + /** + * @param copyDual bidirectional copy protocol + * @param startLSN the position in the WAL that we want to initiate replication from + * usually the currentLSN returned by calling pg_current_wal_lsn()for v10 + * above or pg_current_xlog_location() depending on the version of the + * server + * @param updateIntervalMs the number of millisecond between status packets sent back to the + * server. A value of zero disables the periodic status updates + * completely, although an update will still be sent when requested by the + * server, to avoid timeout disconnect. 
+ * @param replicationType LOGICAL or PHYSICAL + */ + public V3PGReplicationStream(CopyDual copyDual, LogSequenceNumber startLSN, long updateIntervalMs, + ReplicationType replicationType + ) { + this.copyDual = copyDual; + this.updateInterval = updateIntervalMs * NANOS_PER_MILLISECOND; + this.lastStatusUpdate = System.nanoTime() - (updateIntervalMs * NANOS_PER_MILLISECOND); + this.lastReceiveLSN = startLSN; + this.replicationType = replicationType; + } + + @Override + public ByteBuffer read() throws SQLException { + checkClose(); + + ByteBuffer payload = null; + while (payload == null && copyDual.isActive()) { + payload = readInternal(true); + } + + return payload; + } + + @Override + public ByteBuffer readPending() throws SQLException { + checkClose(); + return readInternal(false); + } + + @Override + public LogSequenceNumber getLastReceiveLSN() { + return lastReceiveLSN; + } + + @Override + public LogSequenceNumber getLastFlushedLSN() { + return lastFlushedLSN; + } + + @Override + public LogSequenceNumber getLastAppliedLSN() { + return lastAppliedLSN; + } + + @Override + public void setFlushedLSN(LogSequenceNumber flushed) { + this.lastFlushedLSN = flushed; + } + + @Override + public void setAppliedLSN(LogSequenceNumber applied) { + this.lastAppliedLSN = applied; + } + + @Override + public void forceUpdateStatus() throws SQLException { + checkClose(); + updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, true); + } + + @Override + public boolean isClosed() { + return closeFlag || !copyDual.isActive(); + } + + private ByteBuffer readInternal(boolean block) throws SQLException { + boolean updateStatusRequired = false; + while (copyDual.isActive()) { + + ByteBuffer buffer = receiveNextData(block); + + if (updateStatusRequired || isTimeUpdate()) { + timeUpdateStatus(); + } + + if (buffer == null) { + return null; + } + + int code = buffer.get(); + + switch (code) { + + case 'k': //KeepAlive message + updateStatusRequired = 
processKeepAliveMessage(buffer); + updateStatusRequired |= updateInterval == 0; + break; + + case 'w': //XLogData + return processXLogData(buffer); + + default: + throw new PSQLException( + GT.tr("Unexpected packet type during replication: {0}", Integer.toString(code)), + PSQLState.PROTOCOL_VIOLATION + ); + } + } + + return null; + } + + private ByteBuffer receiveNextData(boolean block) throws SQLException { + try { + byte[] message = copyDual.readFromCopy(block); + if (message != null) { + return ByteBuffer.wrap(message); + } else { + return null; + } + } catch (PSQLException e) { //todo maybe replace on thread sleep? + if (e.getCause() instanceof SocketTimeoutException) { + //signal for keep alive + return null; + } + + throw e; + } + } + + private boolean isTimeUpdate() { + /* a value of 0 disables automatic updates */ + if ( updateInterval == 0 ) { + return false; + } + long diff = System.nanoTime() - lastStatusUpdate; + return diff >= updateInterval; + } + + private void timeUpdateStatus() throws SQLException { + updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, false); + } + + private void updateStatusInternal( + LogSequenceNumber received, LogSequenceNumber flushed, LogSequenceNumber applied, + boolean replyRequired) + throws SQLException { + byte[] reply = prepareUpdateStatus(received, flushed, applied, replyRequired); + copyDual.writeToCopy(reply, 0, reply.length); + copyDual.flushCopy(); + + explicitlyFlushedLSN = flushed; + lastStatusUpdate = System.nanoTime(); + } + + private byte[] prepareUpdateStatus(LogSequenceNumber received, LogSequenceNumber flushed, + LogSequenceNumber applied, boolean replyRequired) { + ByteBuffer byteBuffer = ByteBuffer.allocate(1 + 8 + 8 + 8 + 8 + 1); + + long now = System.nanoTime() / NANOS_PER_MILLISECOND; + long systemClock = TimeUnit.MICROSECONDS.convert((now - POSTGRES_EPOCH_2000_01_01), + TimeUnit.MICROSECONDS); + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " FE=> 
StandbyStatusUpdate(received: {0}, flushed: {1}, applied: {2}, clock: {3})", + new Object[]{received.asString(), flushed.asString(), applied.asString(), new Date(now)}); + } + + byteBuffer.put((byte) 'r'); + byteBuffer.putLong(received.asLong()); + byteBuffer.putLong(flushed.asLong()); + byteBuffer.putLong(applied.asLong()); + byteBuffer.putLong(systemClock); + if (replyRequired) { + byteBuffer.put((byte) 1); + } else { + byteBuffer.put(received == LogSequenceNumber.INVALID_LSN ? (byte) 1 : (byte) 0); + } + + lastStatusUpdate = now; + return byteBuffer.array(); + } + + private boolean processKeepAliveMessage(ByteBuffer buffer) { + lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong()); + if (lastServerLSN.asLong() > lastReceiveLSN.asLong()) { + lastReceiveLSN = lastServerLSN; + } + // if the client has confirmed flush of last XLogData msg and KeepAlive shows ServerLSN is still + // advancing, we can safely advance FlushLSN to ServerLSN + if (explicitlyFlushedLSN.asLong() >= startOfLastMessageLSN.asLong() + && lastServerLSN.asLong() > explicitlyFlushedLSN.asLong() + && lastServerLSN.asLong() > lastFlushedLSN.asLong()) { + lastFlushedLSN = lastServerLSN; + } + + long lastServerClock = buffer.getLong(); + + boolean replyRequired = buffer.get() != 0; + + if (LOGGER.isLoggable(Level.FINEST)) { + Date clockTime = new Date( + TimeUnit.MILLISECONDS.convert(lastServerClock, TimeUnit.MICROSECONDS) + + POSTGRES_EPOCH_2000_01_01); + LOGGER.log(Level.FINEST, " <=BE Keepalive(lastServerWal: {0}, clock: {1} needReply: {2})", + new Object[]{lastServerLSN.asString(), clockTime, replyRequired}); + } + + return replyRequired; + } + + private ByteBuffer processXLogData(ByteBuffer buffer) { + long startLsn = buffer.getLong(); + startOfLastMessageLSN = LogSequenceNumber.valueOf(startLsn); + lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong()); + long systemClock = buffer.getLong(); + + if (replicationType == ReplicationType.LOGICAL) { + lastReceiveLSN = 
LogSequenceNumber.valueOf(startLsn); + } else if (replicationType == ReplicationType.PHYSICAL) { + int payloadSize = buffer.limit() - buffer.position(); + lastReceiveLSN = LogSequenceNumber.valueOf(startLsn + payloadSize); + } + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " <=BE XLogData(currWal: {0}, lastServerWal: {1}, clock: {2})", + new Object[]{lastReceiveLSN.asString(), lastServerLSN.asString(), systemClock}); + } + + return buffer.slice(); + } + + private void checkClose() throws PSQLException { + if (isClosed()) { + throw new PSQLException(GT.tr("This replication stream has been closed."), + PSQLState.CONNECTION_DOES_NOT_EXIST); + } + } + + @Override + public void close() throws SQLException { + if (isClosed()) { + return; + } + + LOGGER.log(Level.FINEST, " FE=> StopReplication"); + + copyDual.endCopy(); + + closeFlag = true; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java new file mode 100644 index 0000000..c522447 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core.v3.replication; + +import org.postgresql.copy.CopyDual; +import org.postgresql.core.PGStream; +import org.postgresql.core.QueryExecutor; +import org.postgresql.core.ReplicationProtocol; +import org.postgresql.replication.PGReplicationStream; +import org.postgresql.replication.ReplicationType; +import org.postgresql.replication.fluent.CommonOptions; +import org.postgresql.replication.fluent.logical.LogicalReplicationOptions; +import org.postgresql.replication.fluent.physical.PhysicalReplicationOptions; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.Properties; +import java.util.logging.Level; +import java.util.logging.Logger; + +public class V3ReplicationProtocol implements ReplicationProtocol { + + private static final Logger LOGGER = Logger.getLogger(V3ReplicationProtocol.class.getName()); + private final QueryExecutor queryExecutor; + private final PGStream pgStream; + + public V3ReplicationProtocol(QueryExecutor queryExecutor, PGStream pgStream) { + this.queryExecutor = queryExecutor; + this.pgStream = pgStream; + } + + @Override + public PGReplicationStream startLogical(LogicalReplicationOptions options) + throws SQLException { + + String query = createStartLogicalQuery(options); + return initializeReplication(query, options, ReplicationType.LOGICAL); + } + + @Override + public PGReplicationStream startPhysical(PhysicalReplicationOptions options) + throws SQLException { + + String query = createStartPhysicalQuery(options); + return initializeReplication(query, options, ReplicationType.PHYSICAL); + } + + private PGReplicationStream initializeReplication(String query, CommonOptions options, + ReplicationType replicationType) + throws SQLException { + LOGGER.log(Level.FINEST, " FE=> StartReplication(query: {0})", query); + + configureSocketTimeout(options); + CopyDual copyDual = 
(CopyDual) queryExecutor.startCopy(query, true); + + return new V3PGReplicationStream( + copyDual, + options.getStartLSNPosition(), + options.getStatusInterval(), + replicationType + ); + } + + /** + * START_REPLICATION [SLOT slot_name] [PHYSICAL] XXX/XXX. + */ + private String createStartPhysicalQuery(PhysicalReplicationOptions options) { + StringBuilder builder = new StringBuilder(); + builder.append("START_REPLICATION"); + + if (options.getSlotName() != null) { + builder.append(" SLOT ").append(options.getSlotName()); + } + + builder.append(" PHYSICAL ").append(options.getStartLSNPosition().asString()); + + return builder.toString(); + } + + /** + * START_REPLICATION SLOT slot_name LOGICAL XXX/XXX [ ( option_name [option_value] [, ... ] ) ] + */ + private String createStartLogicalQuery(LogicalReplicationOptions options) { + StringBuilder builder = new StringBuilder(); + builder.append("START_REPLICATION SLOT ") + .append(options.getSlotName()) + .append(" LOGICAL ") + .append(options.getStartLSNPosition().asString()); + + Properties slotOptions = options.getSlotOptions(); + if (slotOptions.isEmpty()) { + return builder.toString(); + } + + //todo replace on java 8 + builder.append(" ("); + boolean isFirst = true; + for (String name : slotOptions.stringPropertyNames()) { + if (isFirst) { + isFirst = false; + } else { + builder.append(", "); + } + builder.append('\"').append(name).append('\"').append(" ") + .append('\'').append(slotOptions.getProperty(name)).append('\''); + } + builder.append(")"); + + return builder.toString(); + } + + private void configureSocketTimeout(CommonOptions options) throws PSQLException { + if (options.getStatusInterval() == 0) { + return; + } + + try { + int previousTimeOut = pgStream.getSocket().getSoTimeout(); + + int minimalTimeOut; + if (previousTimeOut > 0) { + minimalTimeOut = Math.min(previousTimeOut, options.getStatusInterval()); + } else { + minimalTimeOut = options.getStatusInterval(); + } + + 
pgStream.getSocket().setSoTimeout(minimalTimeOut); + // Use blocking 1ms reads for `available()` checks + pgStream.setMinStreamAvailableCheckDelay(0); + } catch (IOException ioe) { + throw new PSQLException(GT.tr("The connection attempt failed."), + PSQLState.CONNECTION_UNABLE_TO_CONNECT, ioe); + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java new file mode 100644 index 0000000..44edf9a --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.ds; + +import org.postgresql.ds.common.BaseDataSource; +import org.postgresql.util.DriverInfo; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.sql.SQLException; + +import javax.sql.ConnectionPoolDataSource; +import javax.sql.PooledConnection; + +/** + * PostgreSQL implementation of ConnectionPoolDataSource. The app server or middleware vendor should + * provide a DataSource implementation that takes advantage of this ConnectionPoolDataSource. If + * not, you can use the PostgreSQL implementation known as PoolingDataSource, but that should only + * be used if your server or middleware vendor does not provide their own. Why? The server may want + * to reuse the same Connection across all EJBs requesting a Connection within the same Transaction, + * or provide other similar advanced features. + * + *

+ * In any case, in order to use this ConnectionPoolDataSource, you must set the property + * databaseName. The settings for serverName, portNumber, user, and password are optional. Note: + * these properties are declared in the superclass. + *

+ * + *

+ * This implementation supports JDK 1.3 and higher. + *

+ * + * @author Aaron Mulder (ammulder@chariotsolutions.com) + */ +@SuppressWarnings("serial") +public class PGConnectionPoolDataSource extends BaseDataSource + implements ConnectionPoolDataSource, Serializable { + private boolean defaultAutoCommit = true; + + /** + * Gets a description of this DataSource. + */ + @Override + public String getDescription() { + return "ConnectionPoolDataSource from " + DriverInfo.DRIVER_FULL_NAME; + } + + /** + * Gets a connection which may be pooled by the app server or middleware implementation of + * DataSource. + * + * @throws java.sql.SQLException Occurs when the physical database connection cannot be + * established. + */ + @Override + public PooledConnection getPooledConnection() throws SQLException { + return new PGPooledConnection(getConnection(), defaultAutoCommit); + } + + /** + * Gets a connection which may be pooled by the app server or middleware implementation of + * DataSource. + * + * @throws java.sql.SQLException Occurs when the physical database connection cannot be + * established. + */ + @Override + public PooledConnection getPooledConnection(String user, String password) throws SQLException { + return new PGPooledConnection(getConnection(user, password), defaultAutoCommit); + } + + /** + * Gets whether connections supplied by this pool will have autoCommit turned on by default. The + * default value is {@code true}, so that autoCommit will be turned on by default. + * + * @return true if connections supplied by this pool will have autoCommit + */ + public boolean isDefaultAutoCommit() { + return defaultAutoCommit; + } + + /** + * Sets whether connections supplied by this pool will have autoCommit turned on by default. The + * default value is {@code true}, so that autoCommit will be turned on by default. 
+ * + * @param defaultAutoCommit whether connections supplied by this pool will have autoCommit + */ + public void setDefaultAutoCommit(boolean defaultAutoCommit) { + this.defaultAutoCommit = defaultAutoCommit; + } + + private void writeObject(ObjectOutputStream out) throws IOException { + writeBaseObject(out); + out.writeBoolean(defaultAutoCommit); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + readBaseObject(in); + defaultAutoCommit = in.readBoolean(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java b/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java new file mode 100644 index 0000000..147e7bc --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java @@ -0,0 +1,463 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group. + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.ds; + +import org.postgresql.PGConnection; +import org.postgresql.PGStatement; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.LinkedList; +import java.util.List; + +import javax.sql.ConnectionEvent; +import javax.sql.ConnectionEventListener; +import javax.sql.PooledConnection; +import javax.sql.StatementEventListener; + +/** + * PostgreSQL implementation of the PooledConnection interface. This shouldn't be used directly, as + * the pooling client should just interact with the ConnectionPool instead. 
+ * + * @author Aaron Mulder (ammulder@chariotsolutions.com) + * @author Csaba Nagy (ncsaba@yahoo.com) + * @see org.postgresql.ds.PGConnectionPoolDataSource + */ +@SuppressWarnings("rawtypes") +public class PGPooledConnection implements PooledConnection { + private final List listeners = new LinkedList<>(); + private Connection con; + private ConnectionHandler last; + private final boolean autoCommit; + private final boolean isXA; + + /** + * Creates a new PooledConnection representing the specified physical connection. + * + * @param con connection + * @param autoCommit whether to autocommit + * @param isXA whether connection is a XA connection + */ + public PGPooledConnection(Connection con, boolean autoCommit, boolean isXA) { + this.con = con; + this.autoCommit = autoCommit; + this.isXA = isXA; + } + + public PGPooledConnection(Connection con, boolean autoCommit) { + this(con, autoCommit, false); + } + + /** + * Adds a listener for close or fatal error events on the connection handed out to a client. + */ + @Override + public void addConnectionEventListener(ConnectionEventListener connectionEventListener) { + listeners.add(connectionEventListener); + } + + /** + * Removes a listener for close or fatal error events on the connection handed out to a client. + */ + @Override + public void removeConnectionEventListener(ConnectionEventListener connectionEventListener) { + listeners.remove(connectionEventListener); + } + + /** + * Closes the physical database connection represented by this PooledConnection. If any client has + * a connection based on this PooledConnection, it is forcibly closed as well. 
+ */ + @Override + public void close() throws SQLException { + Connection con = this.con; + ConnectionHandler last = this.last; + if (last != null) { + last.close(); + if (con != null && !con.isClosed()) { + if (!con.getAutoCommit()) { + try { + con.rollback(); + } catch (SQLException ignored) { + } + } + } + } + if (con == null) { + return; + } + try { + con.close(); + } finally { + this.con = null; + } + } + + /** + * Gets a handle for a client to use. This is a wrapper around the physical connection, so the + * client can call close and it will just return the connection to the pool without really closing + * the physical connection. + * + *

+ * According to the JDBC 2.0 Optional Package spec (6.2.3), only one client may have an active + * handle to the connection at a time, so if there is a previous handle active when this is + * called, the previous one is forcibly closed and its work rolled back. + *

+ */ + @Override + public Connection getConnection() throws SQLException { + Connection con = this.con; + if (con == null) { + // Before throwing the exception, let's notify the registered listeners about the error + PSQLException sqlException = + new PSQLException(GT.tr("This PooledConnection has already been closed."), + PSQLState.CONNECTION_DOES_NOT_EXIST); + fireConnectionFatalError(sqlException); + throw sqlException; + } + // If any error occurs while opening a new connection, the listeners + // have to be notified. This gives a chance to connection pools to + // eliminate bad pooled connections. + try { + // Only one connection can be open at a time from this PooledConnection. See JDBC 2.0 Optional + // Package spec section 6.2.3 + ConnectionHandler last = this.last; + if (last != null) { + last.close(); + if (con != null) { + if (!con.getAutoCommit()) { + try { + con.rollback(); + } catch (SQLException ignored) { + } + } + con.clearWarnings(); + } + } + /* + * In XA-mode, autocommit is handled in PGXAConnection, because it depends on whether an + * XA-transaction is open or not + */ + if (!isXA && con != null) { + con.setAutoCommit(autoCommit); + } + } catch (SQLException sqlException) { + fireConnectionFatalError(sqlException); + throw (SQLException) sqlException.fillInStackTrace(); + } + ConnectionHandler handler = new ConnectionHandler(con); + last = handler; + + Connection proxyCon = (Connection) Proxy.newProxyInstance(getClass().getClassLoader(), + new Class[]{Connection.class, PGConnection.class}, handler); + handler.setProxy(proxyCon); + return proxyCon; + } + + /** + * Used to fire a connection closed event to all listeners. 
+ */ + void fireConnectionClosed() { + ConnectionEvent evt = null; + // Copy the listener list so the listener can remove itself during this method call + ConnectionEventListener[] local = + listeners.toArray(new ConnectionEventListener[0]); + for (ConnectionEventListener listener : local) { + if (evt == null) { + evt = createConnectionEvent(null); + } + listener.connectionClosed(evt); + } + } + + /** + * Used to fire a connection error event to all listeners. + */ + void fireConnectionFatalError(SQLException e) { + ConnectionEvent evt = null; + // Copy the listener list so the listener can remove itself during this method call + ConnectionEventListener[] local = + listeners.toArray(new ConnectionEventListener[0]); + for (ConnectionEventListener listener : local) { + if (evt == null) { + evt = createConnectionEvent(e); + } + listener.connectionErrorOccurred(evt); + } + } + + protected ConnectionEvent createConnectionEvent(SQLException e) { + return e == null ? new ConnectionEvent(this) : new ConnectionEvent(this, e); + } + + // Classes we consider fatal. + private static final String[] fatalClasses = { + "08", // connection error + "53", // insufficient resources + + // nb: not just "57" as that includes query cancel which is nonfatal + "57P01", // admin shutdown + "57P02", // crash shutdown + "57P03", // cannot connect now + + "58", // system error (backend) + "60", // system error (driver) + "99", // unexpected error + "F0", // configuration file error (backend) + "XX", // internal error (backend) + }; + + private static boolean isFatalState(String state) { + if (state == null) { + // no info, assume fatal + return true; + } + if (state.length() < 2) { + // no class info, assume fatal + return true; + } + + for (String fatalClass : fatalClasses) { + if (state.startsWith(fatalClass)) { + return true; // fatal + } + } + + return false; + } + + /** + * Fires a connection error event, but only if we think the exception is fatal. 
+ * + * @param e the SQLException to consider + */ + private void fireConnectionError(SQLException e) { + if (!isFatalState(e.getSQLState())) { + return; + } + + fireConnectionFatalError(e); + } + + /** + * Instead of declaring a class implementing Connection, which would have to be updated for every + * JDK rev, use a dynamic proxy to handle all calls through the Connection interface. This is the + * part that requires JDK 1.3 or higher, though JDK 1.2 could be supported with a 3rd-party proxy + * package. + */ + private class ConnectionHandler implements InvocationHandler { + private Connection con; + private Connection proxy; // the Connection the client is currently using, which is a proxy + private boolean automatic; + + ConnectionHandler(Connection con) { + this.con = con; + } + + @Override + @SuppressWarnings("throwing.nullable") + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + final String methodName = method.getName(); + // From Object + if (method.getDeclaringClass() == Object.class) { + if ("toString".equals(methodName)) { + return "Pooled connection wrapping physical connection " + con; + } + if ("equals".equals(methodName)) { + return proxy == args[0]; + } + if ("hashCode".equals(methodName)) { + return System.identityHashCode(proxy); + } + try { + return method.invoke(con, args); + } catch (InvocationTargetException e) { + // throwing.nullable + throw e.getTargetException(); + } + } + + // All the rest is from the Connection or PGConnection interface + Connection con = this.con; + if ("isClosed".equals(methodName)) { + return con == null || con.isClosed(); + } + if ("close".equals(methodName)) { + // we are already closed and a double close + // is not an error. 
+ if (con == null) { + return null; + } + + SQLException ex = null; + if (!con.isClosed()) { + if (!isXA && !con.getAutoCommit()) { + try { + con.rollback(); + } catch (SQLException e) { + ex = e; + } + } + con.clearWarnings(); + } + this.con = null; + this.proxy = null; + last = null; + fireConnectionClosed(); + if (ex != null) { + throw ex; + } + return null; + } + if (con == null || con.isClosed()) { + throw new PSQLException(automatic + ? GT.tr( + "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.") + : GT.tr("Connection has been closed."), PSQLState.CONNECTION_DOES_NOT_EXIST); + } + + // From here on in, we invoke via reflection, catch exceptions, + // and check if they're fatal before rethrowing. + try { + if ("createStatement".equals(methodName)) { + Statement st = (Statement) method.invoke(con, args); + return Proxy.newProxyInstance(getClass().getClassLoader(), + new Class[]{Statement.class, PGStatement.class}, + new StatementHandler(this, st)); + } else if ("prepareCall".equals(methodName)) { + Statement st = (Statement) method.invoke(con, args); + return Proxy.newProxyInstance(getClass().getClassLoader(), + new Class[]{CallableStatement.class, PGStatement.class}, + new StatementHandler(this, st)); + } else if ("prepareStatement".equals(methodName)) { + Statement st = (Statement) method.invoke(con, args); + return Proxy.newProxyInstance(getClass().getClassLoader(), + new Class[]{PreparedStatement.class, PGStatement.class}, + new StatementHandler(this, st)); + } else { + return method.invoke(con, args); + } + } catch (final InvocationTargetException ite) { + final Throwable te = ite.getTargetException(); + if (te instanceof SQLException) { + fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal + } + throw te; + } + } + + Connection getProxy() { + return proxy; + } + + void setProxy(Connection proxy) { + this.proxy = proxy; + } 
+ + public void close() { + if (con != null) { + automatic = true; + } + con = null; + proxy = null; + // No close event fired here: see JDBC 2.0 Optional Package spec section 6.3 + } + + public boolean isClosed() { + return con == null; + } + } + + /** + *
 + * <p>
Instead of declaring classes implementing Statement, PreparedStatement, and CallableStatement, + * which would have to be updated for every JDK rev, use a dynamic proxy to handle all calls + * through the Statement interfaces. This is the part that requires JDK 1.3 or higher, though JDK + * 1.2 could be supported with a 3rd-party proxy package.
 + * </p>
+ * + *
 + * <p>
The StatementHandler is required in order to return the proper Connection proxy for the + * getConnection method.
 + * </p>
+ */ + private class StatementHandler implements InvocationHandler { + private ConnectionHandler con; + private Statement st; + + StatementHandler(ConnectionHandler con, Statement st) { + this.con = con; + this.st = st; + } + + @Override + @SuppressWarnings("throwing.nullable") + public Object invoke(Object proxy, Method method, Object[] args) + throws Throwable { + final String methodName = method.getName(); + // From Object + if (method.getDeclaringClass() == Object.class) { + if ("toString".equals(methodName)) { + return "Pooled statement wrapping physical statement " + st; + } + if ("hashCode".equals(methodName)) { + return System.identityHashCode(proxy); + } + if ("equals".equals(methodName)) { + return proxy == args[0]; + } + return method.invoke(st, args); + } + + Statement st = this.st; + // All the rest is from the Statement interface + if ("isClosed".equals(methodName)) { + return st == null || st.isClosed(); + } + if ("close".equals(methodName)) { + if (st == null || st.isClosed()) { + return null; + } + con = null; + this.st = null; + st.close(); + return null; + } + if (st == null || st.isClosed()) { + throw new PSQLException(GT.tr("Statement has been closed."), PSQLState.OBJECT_NOT_IN_STATE); + } + if ("getConnection".equals(methodName)) { + return con.getProxy(); // the proxied connection, not a physical connection + } + + // Delegate the call to the proxied Statement. 
+ try { + return method.invoke(st, args); + } catch (final InvocationTargetException ite) { + final Throwable te = ite.getTargetException(); + if (te instanceof SQLException) { + fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal + } + throw te; + } + } + } + + @Override + public void removeStatementEventListener(StatementEventListener listener) { + } + + @Override + public void addStatementEventListener(StatementEventListener listener) { + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java new file mode 100644 index 0000000..4743bb9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.ds; + +import org.postgresql.ds.common.BaseDataSource; +import org.postgresql.jdbc.ResourceLock; +import org.postgresql.util.DriverInfo; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Stack; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; + +import javax.naming.NamingException; +import javax.naming.Reference; +import javax.naming.StringRefAddr; +import javax.sql.ConnectionEvent; +import javax.sql.ConnectionEventListener; +import javax.sql.DataSource; +import javax.sql.PooledConnection; + +/** + * DataSource which uses connection pooling. Don't use this if your + * server/middleware vendor provides a connection pooling implementation which interfaces with the + * PostgreSQL ConnectionPoolDataSource implementation! 
This class is provided as a + * convenience, but the JDBC Driver is really not supposed to handle the connection pooling + * algorithm. Instead, the server or middleware product is supposed to handle the mechanics of + * connection pooling, and use the PostgreSQL implementation of ConnectionPoolDataSource to provide + * the connections to pool. + * + *
 + * <p>
+ * If you're sure you want to use this, then you must set the properties dataSourceName, + * databaseName, user, and password (if required for the user). The settings for serverName, + * portNumber, initialConnections, and maxConnections are optional. Note that only connections + * for the default user will be pooled! Connections for other users will be normal non-pooled + * connections, and will not count against the maximum pool size limit. + *
 + * </p>
+ * + *
 + * <p>
+ * If you put this DataSource in JNDI, and access it from different JVMs (or otherwise load this + * class from different ClassLoaders), you'll end up with one pool per ClassLoader or VM. This is + * another area where a server-specific implementation may provide advanced features, such as using + * a single pool across all VMs in a cluster. + *
 + * </p>
+ * + *
 + * <p>
+ * This implementation supports JDK 1.5 and higher. + *
 + * </p>
+ * + * @author Aaron Mulder (ammulder@chariotsolutions.com) + * + * @deprecated Since 42.0.0, instead of this class you should use a fully featured connection pool + * like HikariCP, vibur-dbcp, commons-dbcp, c3p0, etc. + */ +@SuppressWarnings("try") +@Deprecated +public class PGPoolingDataSource extends BaseDataSource implements DataSource { + protected static ConcurrentMap dataSources = + new ConcurrentHashMap<>(); + + public static PGPoolingDataSource getDataSource(String name) { + return dataSources.get(name); + } + + // Additional Data Source properties + protected String dataSourceName; // Must be protected for subclasses to sync updates to it + private int initialConnections; + private int maxConnections; + // State variables + private boolean initialized; + private final Stack available = new Stack<>(); + private final Stack used = new Stack<>(); + private boolean isClosed; + private final ResourceLock lock = new ResourceLock(); + private final Condition lockCondition = lock.newCondition(); + private PGConnectionPoolDataSource source; + + /** + * Gets a description of this DataSource. + */ + @Override + public String getDescription() { + return "Pooling DataSource '" + dataSourceName + " from " + DriverInfo.DRIVER_FULL_NAME; + } + + /** + * Ensures the DataSource properties are not changed after the DataSource has been used. + * + * @throws IllegalStateException The Server Name cannot be changed after the DataSource has been + * used. + */ + @Override + public void setServerName(String serverName) { + if (initialized) { + throw new IllegalStateException( + "Cannot set Data Source properties after DataSource has been used"); + } + super.setServerName(serverName); + } + + /** + * Ensures the DataSource properties are not changed after the DataSource has been used. + * + * @throws IllegalStateException The Database Name cannot be changed after the DataSource has been + * used. 
+ */ + @Override + public void setDatabaseName(String databaseName) { + if (initialized) { + throw new IllegalStateException( + "Cannot set Data Source properties after DataSource has been used"); + } + super.setDatabaseName(databaseName); + } + + /** + * Ensures the DataSource properties are not changed after the DataSource has been used. + * + * @throws IllegalStateException The User cannot be changed after the DataSource has been used. + */ + @Override + public void setUser(String user) { + if (initialized) { + throw new IllegalStateException( + "Cannot set Data Source properties after DataSource has been used"); + } + super.setUser(user); + } + + /** + * Ensures the DataSource properties are not changed after the DataSource has been used. + * + * @throws IllegalStateException The Password cannot be changed after the DataSource has been + * used. + */ + @Override + public void setPassword(String password) { + if (initialized) { + throw new IllegalStateException( + "Cannot set Data Source properties after DataSource has been used"); + } + super.setPassword(password); + } + + /** + * Ensures the DataSource properties are not changed after the DataSource has been used. + * + * @throws IllegalStateException The Port Number cannot be changed after the DataSource has been + * used. + */ + @Override + public void setPortNumber(int portNumber) { + if (initialized) { + throw new IllegalStateException( + "Cannot set Data Source properties after DataSource has been used"); + } + super.setPortNumber(portNumber); + } + + /** + * Gets the number of connections that will be created when this DataSource is initialized. If you + * do not call initialize explicitly, it will be initialized the first time a connection is drawn + * from it. 
+ * + * @return number of connections that will be created when this DataSource is initialized + */ + public int getInitialConnections() { + return initialConnections; + } + + /** + * Sets the number of connections that will be created when this DataSource is initialized. If you + * do not call initialize explicitly, it will be initialized the first time a connection is drawn + * from it. + * + * @param initialConnections number of initial connections + * @throws IllegalStateException The Initial Connections cannot be changed after the DataSource + * has been used. + */ + public void setInitialConnections(int initialConnections) { + if (initialized) { + throw new IllegalStateException( + "Cannot set Data Source properties after DataSource has been used"); + } + this.initialConnections = initialConnections; + } + + /** + * Gets the maximum number of connections that the pool will allow. If a request comes in and this + * many connections are in use, the request will block until a connection is available. Note that + * connections for a user other than the default user will not be pooled and don't count against + * this limit. + * + * @return The maximum number of pooled connection allowed, or 0 for no maximum. + */ + public int getMaxConnections() { + return maxConnections; + } + + /** + * Sets the maximum number of connections that the pool will allow. If a request comes in and this + * many connections are in use, the request will block until a connection is available. Note that + * connections for a user other than the default user will not be pooled and don't count against + * this limit. + * + * @param maxConnections The maximum number of pooled connection to allow, or 0 for no maximum. + * @throws IllegalStateException The Maximum Connections cannot be changed after the DataSource + * has been used. 
+ */ + public void setMaxConnections(int maxConnections) { + if (initialized) { + throw new IllegalStateException( + "Cannot set Data Source properties after DataSource has been used"); + } + this.maxConnections = maxConnections; + } + + /** + * Gets the name of this DataSource. This uniquely identifies the DataSource. You cannot use more + * than one DataSource in the same VM with the same name. + * + * @return name of this DataSource + */ + public String getDataSourceName() { + return dataSourceName; + } + + /** + * Sets the name of this DataSource. This is required, and uniquely identifies the DataSource. You + * cannot create or use more than one DataSource in the same VM with the same name. + * + * @param dataSourceName datasource name + * @throws IllegalStateException The Data Source Name cannot be changed after the DataSource has + * been used. + * @throws IllegalArgumentException Another PoolingDataSource with the same dataSourceName already + * exists. + */ + public void setDataSourceName(String dataSourceName) { + if (initialized) { + throw new IllegalStateException( + "Cannot set Data Source properties after DataSource has been used"); + } + if (this.dataSourceName != null && dataSourceName != null + && dataSourceName.equals(this.dataSourceName)) { + return; + } + PGPoolingDataSource previous = dataSources.putIfAbsent(dataSourceName, this); + if (previous != null) { + throw new IllegalArgumentException( + "DataSource with name '" + dataSourceName + "' already exists!"); + } + if (this.dataSourceName != null) { + dataSources.remove(this.dataSourceName); + } + this.dataSourceName = dataSourceName; + } + + /** + * Initializes this DataSource. If the initialConnections is greater than zero, that number of + * connections will be created. After this method is called, the DataSource properties cannot be + * changed. If you do not call this explicitly, it will be called the first time you get a + * connection from the DataSource. 
+ * + * @throws SQLException Occurs when the initialConnections is greater than zero, but the + * DataSource is not able to create enough physical connections. + */ + public void initialize() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + PGConnectionPoolDataSource source = createConnectionPool(); + this.source = source; + try { + source.initializeFrom(this); + } catch (Exception e) { + throw new PSQLException(GT.tr("Failed to setup DataSource."), PSQLState.UNEXPECTED_ERROR, + e); + } + + while (available.size() < initialConnections) { + available.push(source.getPooledConnection()); + } + + initialized = true; + } + } + + protected boolean isInitialized() { + return initialized; + } + + /** + * Creates the appropriate ConnectionPool to use for this DataSource. + * + * @return appropriate ConnectionPool to use for this DataSource + */ + protected PGConnectionPoolDataSource createConnectionPool() { + return new PGConnectionPoolDataSource(); + } + + /** + * Gets a non-pooled connection, unless the user and password are the same as the default + * values for this connection pool. + * + * @return A pooled connection. + * @throws SQLException Occurs when no pooled connection is available, and a new physical + * connection cannot be created. + */ + @Override + public Connection getConnection(String user, String password) + throws SQLException { + // If this is for the default user/password, use a pooled connection + if (user == null || (user.equals(getUser()) && ((password == null && getPassword() == null) + || (password != null && password.equals(getPassword()))))) { + return getConnection(); + } + // Otherwise, use a non-pooled connection + if (!initialized) { + initialize(); + } + return super.getConnection(user, password); + } + + /** + * Gets a connection from the connection pool. + * + * @return A pooled connection. + * @throws SQLException Occurs when no pooled connection is available, and a new physical + * connection cannot be created. 
+ */ + @Override + public Connection getConnection() throws SQLException { + if (!initialized) { + initialize(); + } + return getPooledConnection(); + } + + /** + * Closes this DataSource, and all the pooled connections, whether in use or not. + */ + public void close() { + try (ResourceLock ignore = lock.obtain()) { + isClosed = true; + while (!available.isEmpty()) { + PooledConnection pci = available.pop(); + try { + pci.close(); + } catch (SQLException ignored) { + } + } + while (!used.isEmpty()) { + PooledConnection pci = used.pop(); + pci.removeConnectionEventListener(connectionEventListener); + try { + pci.close(); + } catch (SQLException ignored) { + } + } + } + removeStoredDataSource(); + } + + protected void removeStoredDataSource() { + dataSources.remove(dataSourceName); + } + + protected void addDataSource(String dataSourceName) { + dataSources.put(dataSourceName, this); + } + + /** + * Gets a connection from the pool. Will get an available one if present, or create a new one if + * under the max limit. Will block if all used and a new one would exceed the max. + */ + private Connection getPooledConnection() throws SQLException { + PooledConnection pc = null; + try (ResourceLock ignore = lock.obtain()) { + if (isClosed) { + throw new PSQLException(GT.tr("DataSource has been closed."), + PSQLState.CONNECTION_DOES_NOT_EXIST); + } + while (true) { + if (!available.isEmpty()) { + pc = available.pop(); + used.push(pc); + break; + } + if (maxConnections == 0 || used.size() < maxConnections) { + pc = source.getPooledConnection(); + used.push(pc); + break; + } else { + try { + // Wake up every second at a minimum + lockCondition.await(1000L, TimeUnit.MILLISECONDS); + } catch (InterruptedException ignored) { + } + } + } + } + pc.addConnectionEventListener(connectionEventListener); + return pc.getConnection(); + } + + /** + * Notified when a pooled connection is closed, or a fatal error occurs on a pooled connection. 
+ * This is the only way connections are marked as unused. + */ + private final ConnectionEventListener connectionEventListener = new ConnectionEventListener() { + @Override + public void connectionClosed(ConnectionEvent event) { + ((PooledConnection) event.getSource()).removeConnectionEventListener(this); + try (ResourceLock ignore = lock.obtain()) { + if (isClosed) { + return; // DataSource has been closed + } + boolean removed = used.remove(event.getSource()); + if (removed) { + available.push((PooledConnection) event.getSource()); + // There's now a new connection available + lockCondition.signal(); + } else { + // a connection error occurred + } + } + } + + /** + * This is only called for fatal errors, where the physical connection is useless afterward and + * should be removed from the pool. + */ + @Override + public void connectionErrorOccurred(ConnectionEvent event) { + ((PooledConnection) event.getSource()).removeConnectionEventListener(this); + try (ResourceLock ignore = lock.obtain()) { + if (isClosed) { + return; // DataSource has been closed + } + used.remove(event.getSource()); + // We're now at least 1 connection under the max + lockCondition.signal(); + } + } + }; + + /** + * Adds custom properties for this DataSource to the properties defined in the superclass. 
+ */ + @Override + public Reference getReference() throws NamingException { + Reference ref = super.getReference(); + ref.add(new StringRefAddr("dataSourceName", dataSourceName)); + if (initialConnections > 0) { + ref.add(new StringRefAddr("initialConnections", Integer.toString(initialConnections))); + } + if (maxConnections > 0) { + ref.add(new StringRefAddr("maxConnections", Integer.toString(maxConnections))); + } + return ref; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isAssignableFrom(getClass()); + } + + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isAssignableFrom(getClass())) { + return iface.cast(this); + } + throw new SQLException("Cannot unwrap to " + iface.getName()); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java new file mode 100644 index 0000000..f3865dc --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.ds; + +import org.postgresql.ds.common.BaseDataSource; +import org.postgresql.util.DriverInfo; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.sql.SQLException; + +import javax.sql.DataSource; + +/** + * Simple DataSource which does not perform connection pooling. In order to use the DataSource, you + * must set the property databaseName. The settings for serverName, portNumber, user, and password + * are optional. Note: these properties are declared in the superclass. 
+ * + * @author Aaron Mulder (ammulder@chariotsolutions.com) + */ +@SuppressWarnings("serial") +public class PGSimpleDataSource extends BaseDataSource implements DataSource, Serializable { + /** + * Gets a description of this DataSource. + */ + @Override + public String getDescription() { + return "Non-Pooling DataSource from " + DriverInfo.DRIVER_FULL_NAME; + } + + private void writeObject(ObjectOutputStream out) throws IOException { + writeBaseObject(out); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + readBaseObject(in); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isAssignableFrom(getClass()); + } + + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isAssignableFrom(getClass())) { + return iface.cast(this); + } + throw new SQLException("Cannot unwrap to " + iface.getName()); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java new file mode 100644 index 0000000..612c26e --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java @@ -0,0 +1,1848 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.ds.common; + +import org.postgresql.Driver; +import org.postgresql.PGProperty; +import org.postgresql.jdbc.AutoSave; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.util.ExpressionProperties; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.util.URLCoder; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.PrintWriter; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.Properties; +import java.util.logging.Level; +import java.util.logging.Logger; + +import javax.naming.NamingException; +import javax.naming.RefAddr; +import javax.naming.Reference; +import javax.naming.Referenceable; +import javax.naming.StringRefAddr; +import javax.sql.CommonDataSource; + +/** + * Base class for data sources and related classes. + * + * @author Aaron Mulder (ammulder@chariotsolutions.com) + */ + +public abstract class BaseDataSource implements CommonDataSource, Referenceable { + private static final Logger LOGGER = Logger.getLogger(BaseDataSource.class.getName()); + + // Standard properties, defined in the JDBC 2.0 Optional Package spec + private String[] serverNames = new String[]{"localhost"}; + private String databaseName = ""; + private String user; + private String password; + private int[] portNumbers = new int[]{0}; + + // Map for all other properties + private Properties properties = new Properties(); + + /* + * Ensure the driver is loaded as JDBC Driver might be invisible to Java's ServiceLoader. + * Usually, {@code Class.forName(...)} is not required as {@link DriverManager} detects JDBC drivers + * via {@code META-INF/services/java.sql.Driver} entries. 
However there might be cases when the driver + * is located at the application level classloader, thus it might be required to perform manual + * registration of the driver. + */ + static { + try { + Class.forName("org.postgresql.Driver"); + } catch (ClassNotFoundException e) { + throw new IllegalStateException( + "BaseDataSource is unable to load org.postgresql.Driver. Please check if you have proper PostgreSQL JDBC Driver jar on the classpath", + e); + } + } + + /** + * Gets a connection to the PostgreSQL database. The database is identified by the DataSource + * properties serverName, databaseName, and portNumber. The user to connect as is identified by + * the DataSource properties user and password. + * + * @return A valid database connection. + * @throws SQLException Occurs when the database connection cannot be established. + */ + public Connection getConnection() throws SQLException { + return getConnection(user, password); + } + + /** + * Gets a connection to the PostgreSQL database. The database is identified by the DataSource + * properties serverName, databaseName, and portNumber. The user to connect as is identified by + * the arguments user and password, which override the DataSource properties by the same name. + * + * @param user user + * @param password password + * @return A valid database connection. + * @throws SQLException Occurs when the database connection cannot be established. + */ + public Connection getConnection(String user, String password) + throws SQLException { + try { + Connection con = DriverManager.getConnection(getUrl(), user, password); + if (LOGGER.isLoggable(Level.FINE)) { + LOGGER.log(Level.FINE, "Created a {0} for {1} at {2}", + new Object[]{getDescription(), user, getUrl()}); + } + return con; + } catch (SQLException e) { + LOGGER.log(Level.FINE, "Failed to create a {0} for {1} at {2}: {3}", + new Object[]{getDescription(), user, getUrl(), e}); + throw e; + } + } + + /** + * This implementation don't use a LogWriter. 
+ */ + @Override + public PrintWriter getLogWriter() { + return null; + } + + /** + * This implementation don't use a LogWriter. + * + * @param printWriter Not used + */ + @Override + public void setLogWriter(PrintWriter printWriter) { + // NOOP + } + + /** + * Gets the name of the host the PostgreSQL database is running on. + * + * @return name of the host the PostgreSQL database is running on + * @deprecated use {@link #getServerNames()} + */ + @Deprecated + public String getServerName() { + return serverNames[0]; + } + + /** + * Gets the name of the host(s) the PostgreSQL database is running on. + * + * @return name of the host(s) the PostgreSQL database is running on + */ + public String[] getServerNames() { + return serverNames; + } + + /** + * Sets the name of the host the PostgreSQL database is running on. If this is changed, it will + * only affect future calls to getConnection. The default value is {@code localhost}. + * + * @param serverName name of the host the PostgreSQL database is running on + * @deprecated use {@link #setServerNames(String[])} + */ + @Deprecated + public void setServerName(String serverName) { + this.setServerNames(new String[]{serverName}); + } + + /** + * Sets the name of the host(s) the PostgreSQL database is running on. If this is changed, it will + * only affect future calls to getConnection. The default value is {@code localhost}. 
+ * + * @param serverNames name of the host(s) the PostgreSQL database is running on + */ + @SuppressWarnings("nullness") + public void setServerNames(String [] serverNames) { + if (serverNames == null || serverNames.length == 0) { + this.serverNames = new String[]{"localhost"}; + } else { + serverNames = serverNames.clone(); + for (int i = 0; i < serverNames.length; i++) { + String serverName = serverNames[i]; + if (serverName == null || "".equals(serverName)) { + serverNames[i] = "localhost"; + } + } + this.serverNames = serverNames; + } + } + + /** + * Gets the name of the PostgreSQL database, running on the server identified by the serverName + * property. + * + * @return name of the PostgreSQL database + */ + public String getDatabaseName() { + return databaseName; + } + + /** + * Sets the name of the PostgreSQL database, running on the server identified by the serverName + * property. If this is changed, it will only affect future calls to getConnection. + * + * @param databaseName name of the PostgreSQL database + */ + public void setDatabaseName(String databaseName) { + this.databaseName = databaseName; + } + + /** + * Gets a description of this DataSource-ish thing. Must be customized by subclasses. + * + * @return description of this DataSource-ish thing + */ + public abstract String getDescription(); + + /** + * Gets the user to connect as by default. If this is not specified, you must use the + * getConnection method which takes a user and password as parameters. + * + * @return user to connect as by default + */ + public String getUser() { + return user; + } + + /** + * Sets the user to connect as by default. If this is not specified, you must use the + * getConnection method which takes a user and password as parameters. If this is changed, it will + * only affect future calls to getConnection. 
+ * + * @param user user to connect as by default + */ + public void setUser(String user) { + this.user = user; + } + + /** + * Gets the password to connect with by default. If this is not specified but a password is needed + * to log in, you must use the getConnection method which takes a user and password as parameters. + * + * @return password to connect with by default + */ + public String getPassword() { + return password; + } + + /** + * Sets the password to connect with by default. If this is not specified but a password is needed + * to log in, you must use the getConnection method which takes a user and password as parameters. + * If this is changed, it will only affect future calls to getConnection. + * + * @param password password to connect with by default + */ + public void setPassword(String password) { + this.password = password; + } + + /** + * Gets the port which the PostgreSQL server is listening on for TCP/IP connections. + * + * @return The port, or 0 if the default port will be used. + * @deprecated use {@link #getPortNumbers()} + */ + @Deprecated + public int getPortNumber() { + if (portNumbers == null || portNumbers.length == 0) { + return 0; + } + return portNumbers[0]; + } + + /** + * Gets the port(s) which the PostgreSQL server is listening on for TCP/IP connections. + * + * @return The port(s), or 0 if the default port will be used. + */ + public int[] getPortNumbers() { + return portNumbers; + } + + /** + * Sets the port which the PostgreSQL server is listening on for TCP/IP connections. Be sure the + * -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0, + * the default port will be used. 
+ * + * @param portNumber port which the PostgreSQL server is listening on for TCP/IP + * @deprecated use {@link #setPortNumbers(int[])} + */ + @Deprecated + public void setPortNumber(int portNumber) { + setPortNumbers(new int[]{portNumber}); + } + + /** + * Sets the port(s) which the PostgreSQL server is listening on for TCP/IP connections. Be sure the + * -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0, + * the default port will be used. + * + * @param portNumbers port(s) which the PostgreSQL server is listening on for TCP/IP + */ + public void setPortNumbers(int [] portNumbers) { + if (portNumbers == null || portNumbers.length == 0) { + portNumbers = new int[]{0}; + } + this.portNumbers = Arrays.copyOf(portNumbers, portNumbers.length); + } + + /** + * @return command line options for this connection + */ + public String getOptions() { + return PGProperty.OPTIONS.getOrDefault(properties); + } + + /** + * Set command line options for this connection + * + * @param options string to set options to + */ + public void setOptions(String options) { + PGProperty.OPTIONS.set(properties, options); + } + + /** + * @return login timeout + * @see PGProperty#LOGIN_TIMEOUT + */ + @Override + public int getLoginTimeout() { + return PGProperty.LOGIN_TIMEOUT.getIntNoCheck(properties); + } + + /** + * @param loginTimeout login timeout + * @see PGProperty#LOGIN_TIMEOUT + */ + @Override + public void setLoginTimeout(int loginTimeout) { + PGProperty.LOGIN_TIMEOUT.set(properties, loginTimeout); + } + + /** + * @return connect timeout + * @see PGProperty#CONNECT_TIMEOUT + */ + public int getConnectTimeout() { + return PGProperty.CONNECT_TIMEOUT.getIntNoCheck(properties); + } + + /** + * @param connectTimeout connect timeout + * @see PGProperty#CONNECT_TIMEOUT + */ + public void setConnectTimeout(int connectTimeout) { + PGProperty.CONNECT_TIMEOUT.set(properties, connectTimeout); + } + + /** + * + * @return GSS ResponseTimeout + * @see 
PGProperty#GSS_RESPONSE_TIMEOUT + */ + public int getGssResponseTimeout() { + return PGProperty.GSS_RESPONSE_TIMEOUT.getIntNoCheck(properties); + } + + /** + * + * @param gssResponseTimeout gss response timeout + * @see PGProperty#GSS_RESPONSE_TIMEOUT + */ + public void setGssResponseTimeout(int gssResponseTimeout) { + PGProperty.GSS_RESPONSE_TIMEOUT.set(properties, gssResponseTimeout); + } + + /** + * + * @return SSL ResponseTimeout + * @see PGProperty#SSL_RESPONSE_TIMEOUT + */ + public int getSslResponseTimeout() { + return PGProperty.SSL_RESPONSE_TIMEOUT.getIntNoCheck(properties); + } + + /** + * + * @param sslResponseTimeout ssl response timeout + * @see PGProperty#SSL_RESPONSE_TIMEOUT + */ + public void setSslResponseTimeout(int sslResponseTimeout) { + PGProperty.SSL_RESPONSE_TIMEOUT.set(properties, sslResponseTimeout); + } + + /** + * @return protocol version + * @see PGProperty#PROTOCOL_VERSION + */ + public int getProtocolVersion() { + if (!PGProperty.PROTOCOL_VERSION.isPresent(properties)) { + return 0; + } else { + return PGProperty.PROTOCOL_VERSION.getIntNoCheck(properties); + } + } + + /** + * @param protocolVersion protocol version + * @see PGProperty#PROTOCOL_VERSION + */ + public void setProtocolVersion(int protocolVersion) { + if (protocolVersion == 0) { + PGProperty.PROTOCOL_VERSION.set(properties, null); + } else { + PGProperty.PROTOCOL_VERSION.set(properties, protocolVersion); + } + } + + /** + * @return quoteReturningIdentifiers + * @see PGProperty#QUOTE_RETURNING_IDENTIFIERS + */ + public boolean getQuoteReturningIdentifiers() { + return PGProperty.QUOTE_RETURNING_IDENTIFIERS.getBoolean(properties); + } + + /** + * @param quoteIdentifiers indicate whether to quote identifiers + * @see PGProperty#QUOTE_RETURNING_IDENTIFIERS + */ + public void setQuoteReturningIdentifiers(boolean quoteIdentifiers) { + PGProperty.QUOTE_RETURNING_IDENTIFIERS.set(properties, quoteIdentifiers); + } + + /** + * @return receive buffer size + * @see 
PGProperty#RECEIVE_BUFFER_SIZE + */ + public int getReceiveBufferSize() { + return PGProperty.RECEIVE_BUFFER_SIZE.getIntNoCheck(properties); + } + + /** + * @param nbytes receive buffer size + * @see PGProperty#RECEIVE_BUFFER_SIZE + */ + public void setReceiveBufferSize(int nbytes) { + PGProperty.RECEIVE_BUFFER_SIZE.set(properties, nbytes); + } + + /** + * @return send buffer size + * @see PGProperty#SEND_BUFFER_SIZE + */ + public int getSendBufferSize() { + return PGProperty.SEND_BUFFER_SIZE.getIntNoCheck(properties); + } + + /** + * @param nbytes send buffer size + * @see PGProperty#SEND_BUFFER_SIZE + */ + public void setSendBufferSize(int nbytes) { + PGProperty.SEND_BUFFER_SIZE.set(properties, nbytes); + } + + /** + * @param count prepare threshold + * @see PGProperty#PREPARE_THRESHOLD + */ + public void setPrepareThreshold(int count) { + PGProperty.PREPARE_THRESHOLD.set(properties, count); + } + + /** + * @return prepare threshold + * @see PGProperty#PREPARE_THRESHOLD + */ + public int getPrepareThreshold() { + return PGProperty.PREPARE_THRESHOLD.getIntNoCheck(properties); + } + + /** + * @return prepared statement cache size (number of statements per connection) + * @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES + */ + public int getPreparedStatementCacheQueries() { + return PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.getIntNoCheck(properties); + } + + /** + * @param cacheSize prepared statement cache size (number of statements per connection) + * @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES + */ + public void setPreparedStatementCacheQueries(int cacheSize) { + PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.set(properties, cacheSize); + } + + /** + * @return prepared statement cache size (number of megabytes per connection) + * @see PGProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB + */ + public int getPreparedStatementCacheSizeMiB() { + return PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.getIntNoCheck(properties); + } + + /** + * @param cacheSize statement 
cache size (number of megabytes per connection) + * @see PGProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB + */ + public void setPreparedStatementCacheSizeMiB(int cacheSize) { + PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.set(properties, cacheSize); + } + + /** + * @return database metadata cache fields size (number of fields cached per connection) + * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS + */ + public int getDatabaseMetadataCacheFields() { + return PGProperty.DATABASE_METADATA_CACHE_FIELDS.getIntNoCheck(properties); + } + + /** + * @param cacheSize database metadata cache fields size (number of fields cached per connection) + * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS + */ + public void setDatabaseMetadataCacheFields(int cacheSize) { + PGProperty.DATABASE_METADATA_CACHE_FIELDS.set(properties, cacheSize); + } + + /** + * @return database metadata cache fields size (number of megabytes per connection) + * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS_MIB + */ + public int getDatabaseMetadataCacheFieldsMiB() { + return PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.getIntNoCheck(properties); + } + + /** + * @param cacheSize database metadata cache fields size (number of megabytes per connection) + * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS_MIB + */ + public void setDatabaseMetadataCacheFieldsMiB(int cacheSize) { + PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.set(properties, cacheSize); + } + + /** + * @param fetchSize default fetch size + * @see PGProperty#DEFAULT_ROW_FETCH_SIZE + */ + public void setDefaultRowFetchSize(int fetchSize) { + PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, fetchSize); + } + + /** + * @return default fetch size + * @see PGProperty#DEFAULT_ROW_FETCH_SIZE + */ + public int getDefaultRowFetchSize() { + return PGProperty.DEFAULT_ROW_FETCH_SIZE.getIntNoCheck(properties); + } + + /** + * @param unknownLength unknown length + * @see PGProperty#UNKNOWN_LENGTH + */ + public void setUnknownLength(int unknownLength) { + 
PGProperty.UNKNOWN_LENGTH.set(properties, unknownLength); + } + + /** + * @return unknown length + * @see PGProperty#UNKNOWN_LENGTH + */ + public int getUnknownLength() { + return PGProperty.UNKNOWN_LENGTH.getIntNoCheck(properties); + } + + /** + * @param seconds socket timeout + * @see PGProperty#SOCKET_TIMEOUT + */ + public void setSocketTimeout(int seconds) { + PGProperty.SOCKET_TIMEOUT.set(properties, seconds); + } + + /** + * @return socket timeout + * @see PGProperty#SOCKET_TIMEOUT + */ + public int getSocketTimeout() { + return PGProperty.SOCKET_TIMEOUT.getIntNoCheck(properties); + } + + /** + * @param seconds timeout that is used for sending cancel command + * @see PGProperty#CANCEL_SIGNAL_TIMEOUT + */ + public void setCancelSignalTimeout(int seconds) { + PGProperty.CANCEL_SIGNAL_TIMEOUT.set(properties, seconds); + } + + /** + * @return timeout that is used for sending cancel command in seconds + * @see PGProperty#CANCEL_SIGNAL_TIMEOUT + */ + public int getCancelSignalTimeout() { + return PGProperty.CANCEL_SIGNAL_TIMEOUT.getIntNoCheck(properties); + } + + /** + * @param enabled if SSL is enabled + * @see PGProperty#SSL + */ + public void setSsl(boolean enabled) { + if (enabled) { + PGProperty.SSL.set(properties, true); + } else { + PGProperty.SSL.set(properties, false); + } + } + + /** + * @return true if SSL is enabled + * @see PGProperty#SSL + */ + public boolean getSsl() { + // "true" if "ssl" is set but empty + return PGProperty.SSL.getBoolean(properties) || "".equals(PGProperty.SSL.getOrDefault(properties)); + } + + /** + * @param classname SSL factory class name + * @see PGProperty#SSL_FACTORY + */ + public void setSslfactory(String classname) { + PGProperty.SSL_FACTORY.set(properties, classname); + } + + /** + * @return SSL factory class name + * @see PGProperty#SSL_FACTORY + */ + public String getSslfactory() { + return PGProperty.SSL_FACTORY.getOrDefault(properties); + } + + /** + * @return SSL mode + * @see PGProperty#SSL_MODE + */ + public String 
getSslMode() { + return PGProperty.SSL_MODE.getOrDefault(properties); + } + + /** + * @param mode SSL mode + * @see PGProperty#SSL_MODE + */ + public void setSslMode(String mode) { + PGProperty.SSL_MODE.set(properties, mode); + } + + /** + * @return SSL mode + * @see PGProperty#SSL_FACTORY_ARG + */ + @SuppressWarnings("deprecation") + public String getSslFactoryArg() { + return PGProperty.SSL_FACTORY_ARG.getOrDefault(properties); + } + + /** + * @param arg argument forwarded to SSL factory + * @see PGProperty#SSL_FACTORY_ARG + */ + @SuppressWarnings("deprecation") + public void setSslFactoryArg(String arg) { + PGProperty.SSL_FACTORY_ARG.set(properties, arg); + } + + /** + * @return argument forwarded to SSL factory + * @see PGProperty#SSL_HOSTNAME_VERIFIER + */ + public String getSslHostnameVerifier() { + return PGProperty.SSL_HOSTNAME_VERIFIER.getOrDefault(properties); + } + + /** + * @param className SSL hostname verifier + * @see PGProperty#SSL_HOSTNAME_VERIFIER + */ + public void setSslHostnameVerifier(String className) { + PGProperty.SSL_HOSTNAME_VERIFIER.set(properties, className); + } + + /** + * @return className SSL hostname verifier + * @see PGProperty#SSL_CERT + */ + public String getSslCert() { + return PGProperty.SSL_CERT.getOrDefault(properties); + } + + /** + * @param file SSL certificate + * @see PGProperty#SSL_CERT + */ + public void setSslCert(String file) { + PGProperty.SSL_CERT.set(properties, file); + } + + /** + * @return SSL certificate + * @see PGProperty#SSL_KEY + */ + public String getSslKey() { + return PGProperty.SSL_KEY.getOrDefault(properties); + } + + /** + * @param file SSL key + * @see PGProperty#SSL_KEY + */ + public void setSslKey(String file) { + PGProperty.SSL_KEY.set(properties, file); + } + + /** + * @return SSL root certificate + * @see PGProperty#SSL_ROOT_CERT + */ + public String getSslRootCert() { + return PGProperty.SSL_ROOT_CERT.getOrDefault(properties); + } + + /** + * @param file SSL root certificate + * @see 
PGProperty#SSL_ROOT_CERT + */ + public void setSslRootCert(String file) { + PGProperty.SSL_ROOT_CERT.set(properties, file); + } + + /** + * @return SSL password + * @see PGProperty#SSL_PASSWORD + */ + public String getSslPassword() { + return PGProperty.SSL_PASSWORD.getOrDefault(properties); + } + + /** + * @param password SSL password + * @see PGProperty#SSL_PASSWORD + */ + public void setSslPassword(String password) { + PGProperty.SSL_PASSWORD.set(properties, password); + } + + /** + * @return SSL password callback + * @see PGProperty#SSL_PASSWORD_CALLBACK + */ + public String getSslPasswordCallback() { + return PGProperty.SSL_PASSWORD_CALLBACK.getOrDefault(properties); + } + + /** + * @param className SSL password callback class name + * @see PGProperty#SSL_PASSWORD_CALLBACK + */ + public void setSslPasswordCallback(String className) { + PGProperty.SSL_PASSWORD_CALLBACK.set(properties, className); + } + + /** + * @param applicationName application name + * @see PGProperty#APPLICATION_NAME + */ + public void setApplicationName(String applicationName) { + PGProperty.APPLICATION_NAME.set(properties, applicationName); + } + + /** + * @return application name + * @see PGProperty#APPLICATION_NAME + */ + public String getApplicationName() { + return PGProperty.APPLICATION_NAME.getOrDefault(properties); + } + + /** + * @param targetServerType target server type + * @see PGProperty#TARGET_SERVER_TYPE + */ + public void setTargetServerType(String targetServerType) { + PGProperty.TARGET_SERVER_TYPE.set(properties, targetServerType); + } + + /** + * @return target server type + * @see PGProperty#TARGET_SERVER_TYPE + */ + public String getTargetServerType() { + return PGProperty.TARGET_SERVER_TYPE.getOrDefault(properties); + } + + /** + * @param loadBalanceHosts load balance hosts + * @see PGProperty#LOAD_BALANCE_HOSTS + */ + public void setLoadBalanceHosts(boolean loadBalanceHosts) { + PGProperty.LOAD_BALANCE_HOSTS.set(properties, loadBalanceHosts); + } + + /** + * @return 
load balance hosts + * @see PGProperty#LOAD_BALANCE_HOSTS + */ + public boolean getLoadBalanceHosts() { + return PGProperty.LOAD_BALANCE_HOSTS.isPresent(properties); + } + + /** + * @param hostRecheckSeconds host recheck seconds + * @see PGProperty#HOST_RECHECK_SECONDS + */ + public void setHostRecheckSeconds(int hostRecheckSeconds) { + PGProperty.HOST_RECHECK_SECONDS.set(properties, hostRecheckSeconds); + } + + /** + * @return host recheck seconds + * @see PGProperty#HOST_RECHECK_SECONDS + */ + public int getHostRecheckSeconds() { + return PGProperty.HOST_RECHECK_SECONDS.getIntNoCheck(properties); + } + + /** + * @param enabled if TCP keep alive should be enabled + * @see PGProperty#TCP_KEEP_ALIVE + */ + public void setTcpKeepAlive(boolean enabled) { + PGProperty.TCP_KEEP_ALIVE.set(properties, enabled); + } + + /** + * @return true if TCP keep alive is enabled + * @see PGProperty#TCP_KEEP_ALIVE + */ + public boolean getTcpKeepAlive() { + return PGProperty.TCP_KEEP_ALIVE.getBoolean(properties); + } + + /** + * @param enabled if TCP no delay should be enabled + * @see PGProperty#TCP_NO_DELAY + */ + public void setTcpNoDelay(boolean enabled) { + PGProperty.TCP_NO_DELAY.set(properties, enabled); + } + + /** + * @return true if TCP no delay is enabled + * @see PGProperty#TCP_NO_DELAY + */ + public boolean getTcpNoDelay() { + return PGProperty.TCP_NO_DELAY.getBoolean(properties); + } + + /** + * @param enabled if binary transfer should be enabled + * @see PGProperty#BINARY_TRANSFER + */ + public void setBinaryTransfer(boolean enabled) { + PGProperty.BINARY_TRANSFER.set(properties, enabled); + } + + /** + * @return true if binary transfer is enabled + * @see PGProperty#BINARY_TRANSFER + */ + public boolean getBinaryTransfer() { + return PGProperty.BINARY_TRANSFER.getBoolean(properties); + } + + /** + * @param oidList list of OIDs that are allowed to use binary transfer + * @see PGProperty#BINARY_TRANSFER_ENABLE + */ + public void setBinaryTransferEnable(String oidList) { 
+ PGProperty.BINARY_TRANSFER_ENABLE.set(properties, oidList); + } + + /** + * @return list of OIDs that are allowed to use binary transfer + * @see PGProperty#BINARY_TRANSFER_ENABLE + */ + public String getBinaryTransferEnable() { + return PGProperty.BINARY_TRANSFER_ENABLE.getOrDefault(properties); + } + + /** + * @param oidList list of OIDs that are not allowed to use binary transfer + * @see PGProperty#BINARY_TRANSFER_DISABLE + */ + public void setBinaryTransferDisable(String oidList) { + PGProperty.BINARY_TRANSFER_DISABLE.set(properties, oidList); + } + + /** + * @return list of OIDs that are not allowed to use binary transfer + * @see PGProperty#BINARY_TRANSFER_DISABLE + */ + public String getBinaryTransferDisable() { + return PGProperty.BINARY_TRANSFER_DISABLE.getOrDefault(properties); + } + + /** + * @return string type + * @see PGProperty#STRING_TYPE + */ + public String getStringType() { + return PGProperty.STRING_TYPE.getOrDefault(properties); + } + + /** + * @param stringType string type + * @see PGProperty#STRING_TYPE + */ + public void setStringType(String stringType) { + PGProperty.STRING_TYPE.set(properties, stringType); + } + + /** + * @return true if column sanitizer is disabled + * @see PGProperty#DISABLE_COLUMN_SANITISER + */ + public boolean isColumnSanitiserDisabled() { + return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties); + } + + /** + * @return true if column sanitizer is disabled + * @see PGProperty#DISABLE_COLUMN_SANITISER + */ + public boolean getDisableColumnSanitiser() { + return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties); + } + + /** + * @param disableColumnSanitiser if column sanitizer should be disabled + * @see PGProperty#DISABLE_COLUMN_SANITISER + */ + public void setDisableColumnSanitiser(boolean disableColumnSanitiser) { + PGProperty.DISABLE_COLUMN_SANITISER.set(properties, disableColumnSanitiser); + } + + /** + * @return current schema + * @see PGProperty#CURRENT_SCHEMA + */ + public String 
getCurrentSchema() { + return PGProperty.CURRENT_SCHEMA.getOrDefault(properties); + } + + /** + * @param currentSchema current schema + * @see PGProperty#CURRENT_SCHEMA + */ + public void setCurrentSchema(String currentSchema) { + PGProperty.CURRENT_SCHEMA.set(properties, currentSchema); + } + + /** + * @return true if connection is readonly + * @see PGProperty#READ_ONLY + */ + public boolean getReadOnly() { + return PGProperty.READ_ONLY.getBoolean(properties); + } + + /** + * @param readOnly if connection should be readonly + * @see PGProperty#READ_ONLY + */ + public void setReadOnly(boolean readOnly) { + PGProperty.READ_ONLY.set(properties, readOnly); + } + + /** + * @return The behavior when set read only + * @see PGProperty#READ_ONLY_MODE + */ + public String getReadOnlyMode() { + return PGProperty.READ_ONLY_MODE.getOrDefault(properties); + } + + /** + * @param mode the behavior when set read only + * @see PGProperty#READ_ONLY_MODE + */ + public void setReadOnlyMode(String mode) { + PGProperty.READ_ONLY_MODE.set(properties, mode); + } + + /** + * @return true if driver should log unclosed connections + * @see PGProperty#LOG_UNCLOSED_CONNECTIONS + */ + public boolean getLogUnclosedConnections() { + return PGProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(properties); + } + + /** + * @param enabled true if driver should log unclosed connections + * @see PGProperty#LOG_UNCLOSED_CONNECTIONS + */ + public void setLogUnclosedConnections(boolean enabled) { + PGProperty.LOG_UNCLOSED_CONNECTIONS.set(properties, enabled); + } + + /** + * @return true if driver should log include detail in server error messages + * @see PGProperty#LOG_SERVER_ERROR_DETAIL + */ + public boolean getLogServerErrorDetail() { + return PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(properties); + } + + /** + * @param enabled true if driver should include detail in server error messages + * @see PGProperty#LOG_SERVER_ERROR_DETAIL + */ + public void setLogServerErrorDetail(boolean enabled) { + 
PGProperty.LOG_SERVER_ERROR_DETAIL.set(properties, enabled); + } + + /** + * @return assumed minimal server version + * @see PGProperty#ASSUME_MIN_SERVER_VERSION + */ + public String getAssumeMinServerVersion() { + return PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(properties); + } + + /** + * @param minVersion assumed minimal server version + * @see PGProperty#ASSUME_MIN_SERVER_VERSION + */ + public void setAssumeMinServerVersion(String minVersion) { + PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, minVersion); + } + + /** + * This is important in pool-by-transaction scenarios in order to make sure that all the statements + * reaches the same connection that is being initialized. If set then we will group the startup + * parameters in a transaction + * @return whether to group startup parameters or not + * @see PGProperty#GROUP_STARTUP_PARAMETERS + */ + public boolean getGroupStartupParameters() { + return PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(properties); + } + + /** + * + * @param groupStartupParameters whether to group startup Parameters in a transaction or not + * @see PGProperty#GROUP_STARTUP_PARAMETERS + */ + public void setGroupStartupParameters(boolean groupStartupParameters) { + PGProperty.GROUP_STARTUP_PARAMETERS.set(properties, groupStartupParameters); + } + + /** + * @return JAAS application name + * @see PGProperty#JAAS_APPLICATION_NAME + */ + public String getJaasApplicationName() { + return PGProperty.JAAS_APPLICATION_NAME.getOrDefault(properties); + } + + /** + * @param name JAAS application name + * @see PGProperty#JAAS_APPLICATION_NAME + */ + public void setJaasApplicationName(String name) { + PGProperty.JAAS_APPLICATION_NAME.set(properties, name); + } + + /** + * @return true if perform JAAS login before GSS authentication + * @see PGProperty#JAAS_LOGIN + */ + public boolean getJaasLogin() { + return PGProperty.JAAS_LOGIN.getBoolean(properties); + } + + /** + * @param doLogin true if perform JAAS login before GSS 
authentication + * @see PGProperty#JAAS_LOGIN + */ + public void setJaasLogin(boolean doLogin) { + PGProperty.JAAS_LOGIN.set(properties, doLogin); + } + + /** + * @return Kerberos server name + * @see PGProperty#KERBEROS_SERVER_NAME + */ + public String getKerberosServerName() { + return PGProperty.KERBEROS_SERVER_NAME.getOrDefault(properties); + } + + /** + * @param serverName Kerberos server name + * @see PGProperty#KERBEROS_SERVER_NAME + */ + public void setKerberosServerName(String serverName) { + PGProperty.KERBEROS_SERVER_NAME.set(properties, serverName); + } + + /** + * @return true if use SPNEGO + * @see PGProperty#USE_SPNEGO + */ + public boolean getUseSpNego() { + return PGProperty.USE_SPNEGO.getBoolean(properties); + } + + /** + * @param use true if use SPNEGO + * @see PGProperty#USE_SPNEGO + */ + public void setUseSpNego(boolean use) { + PGProperty.USE_SPNEGO.set(properties, use); + } + + /** + * @return GSS mode: auto, sspi, or gssapi + * @see PGProperty#GSS_LIB + */ + public String getGssLib() { + return PGProperty.GSS_LIB.getOrDefault(properties); + } + + /** + * @param lib GSS mode: auto, sspi, or gssapi + * @see PGProperty#GSS_LIB + */ + public void setGssLib(String lib) { + PGProperty.GSS_LIB.set(properties, lib); + } + + /** + * + * @return GSS encryption mode: disable, prefer or require + */ + public String getGssEncMode() { + return PGProperty.GSS_ENC_MODE.getOrDefault(properties); + } + + /** + * + * @param mode encryption mode: disable, prefer or require + */ + public void setGssEncMode(String mode) { + PGProperty.GSS_ENC_MODE.set(properties, mode); + } + + /** + * @return SSPI service class + * @see PGProperty#SSPI_SERVICE_CLASS + */ + public String getSspiServiceClass() { + return PGProperty.SSPI_SERVICE_CLASS.getOrDefault(properties); + } + + /** + * @param serviceClass SSPI service class + * @see PGProperty#SSPI_SERVICE_CLASS + */ + public void setSspiServiceClass(String serviceClass) { + PGProperty.SSPI_SERVICE_CLASS.set(properties, 
serviceClass); + } + + /** + * @return if connection allows encoding changes + * @see PGProperty#ALLOW_ENCODING_CHANGES + */ + public boolean getAllowEncodingChanges() { + return PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(properties); + } + + /** + * @param allow if connection allows encoding changes + * @see PGProperty#ALLOW_ENCODING_CHANGES + */ + public void setAllowEncodingChanges(boolean allow) { + PGProperty.ALLOW_ENCODING_CHANGES.set(properties, allow); + } + + /** + * @return socket factory class name + * @see PGProperty#SOCKET_FACTORY + */ + public String getSocketFactory() { + return PGProperty.SOCKET_FACTORY.getOrDefault(properties); + } + + /** + * @param socketFactoryClassName socket factory class name + * @see PGProperty#SOCKET_FACTORY + */ + public void setSocketFactory(String socketFactoryClassName) { + PGProperty.SOCKET_FACTORY.set(properties, socketFactoryClassName); + } + + /** + * @return socket factory argument + * @see PGProperty#SOCKET_FACTORY_ARG + */ + @SuppressWarnings("deprecation") + public String getSocketFactoryArg() { + return PGProperty.SOCKET_FACTORY_ARG.getOrDefault(properties); + } + + /** + * @param socketFactoryArg socket factory argument + * @see PGProperty#SOCKET_FACTORY_ARG + */ + @SuppressWarnings("deprecation") + public void setSocketFactoryArg(String socketFactoryArg) { + PGProperty.SOCKET_FACTORY_ARG.set(properties, socketFactoryArg); + } + + /** + * @param replication set to 'database' for logical replication or 'true' for physical replication + * @see PGProperty#REPLICATION + */ + public void setReplication(String replication) { + PGProperty.REPLICATION.set(properties, replication); + } + + /** + * @return 'select', "callIfNoReturn', or 'call' + * @see PGProperty#ESCAPE_SYNTAX_CALL_MODE + */ + public String getEscapeSyntaxCallMode() { + return PGProperty.ESCAPE_SYNTAX_CALL_MODE.getOrDefault(properties); + } + + /** + * @param callMode the call mode to use for JDBC escape call syntax + * @see 
PGProperty#ESCAPE_SYNTAX_CALL_MODE + */ + public void setEscapeSyntaxCallMode(String callMode) { + PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(properties, callMode); + } + + /** + * @return null, 'database', or 'true + * @see PGProperty#REPLICATION + */ + public String getReplication() { + return PGProperty.REPLICATION.getOrDefault(properties); + } + + /** + * @return the localSocketAddress + * @see PGProperty#LOCAL_SOCKET_ADDRESS + */ + public String getLocalSocketAddress() { + return PGProperty.LOCAL_SOCKET_ADDRESS.getOrDefault(properties); + } + + /** + * @param localSocketAddress local address to bind client side to + * @see PGProperty#LOCAL_SOCKET_ADDRESS + */ + public void setLocalSocketAddress(String localSocketAddress) { + PGProperty.LOCAL_SOCKET_ADDRESS.set(properties, localSocketAddress); + } + + /** + * This property is no longer used by the driver and will be ignored. + * @return loggerLevel in properties + * @deprecated Configure via java.util.logging + */ + @Deprecated + public String getLoggerLevel() { + return PGProperty.LOGGER_LEVEL.getOrDefault(properties); + } + + /** + * This property is no longer used by the driver and will be ignored. + * @param loggerLevel loggerLevel to set, will be ignored + * @deprecated Configure via java.util.logging + */ + @Deprecated + public void setLoggerLevel(String loggerLevel) { + PGProperty.LOGGER_LEVEL.set(properties, loggerLevel); + } + + /** + * This property is no longer used by the driver and will be ignored. + * @return loggerFile in properties + * @deprecated Configure via java.util.logging + */ + @Deprecated + public String getLoggerFile() { + ExpressionProperties exprProps = new ExpressionProperties(properties, System.getProperties()); + return PGProperty.LOGGER_FILE.getOrDefault(exprProps); + } + + /** + * This property is no longer used by the driver and will be ignored. 
+ * @param loggerFile will be ignored + * @deprecated Configure via java.util.logging + */ + @Deprecated + public void setLoggerFile(String loggerFile) { + PGProperty.LOGGER_FILE.set(properties, loggerFile); + } + + /** + * Generates a {@link DriverManager} URL from the other properties supplied. + * + * @return {@link DriverManager} URL from the other properties supplied + */ + public String getUrl() { + StringBuilder url = new StringBuilder(100); + url.append("jdbc:postgresql://"); + for (int i = 0; i < serverNames.length; i++) { + if (i > 0) { + url.append(","); + } + url.append(serverNames[i]); + if (portNumbers != null) { + if (serverNames.length != portNumbers.length) { + throw new IllegalArgumentException( + String.format("Invalid argument: number of port %s entries must equal number of serverNames %s", + Arrays.toString(portNumbers), Arrays.toString(serverNames))); + } + if (portNumbers.length >= i && portNumbers[i] != 0) { + url.append(":").append(portNumbers[i]); + } + + } + } + url.append("/"); + if (databaseName != null) { + url.append(URLCoder.encode(databaseName)); + } + + StringBuilder query = new StringBuilder(100); + for (PGProperty property : PGProperty.values()) { + if (property.isPresent(properties)) { + if (query.length() != 0) { + query.append("&"); + } + query.append(property.getName()); + query.append("="); + String value = property.getOrDefault(properties); + query.append(URLCoder.encode(value)); + } + } + + if (query.length() > 0) { + url.append("?"); + url.append(query); + } + + return url.toString(); + } + + /** + * Generates a {@link DriverManager} URL from the other properties supplied. + * + * @return {@link DriverManager} URL from the other properties supplied + */ + public String getURL() { + return getUrl(); + } + + /** + * Sets properties from a {@link DriverManager} URL. 
   *
   * @param url properties to set
   */
  public void setUrl(String url) {

    Properties p = Driver.parseURL(url, null);

    if (p == null) {
      throw new IllegalArgumentException("URL invalid " + url);
    }
    for (PGProperty property : PGProperty.values()) {
      // Only adopt values from the URL for properties that have not already been
      // set explicitly on this data source: explicit setters win over the URL.
      if (!this.properties.containsKey(property.getName())) {
        setProperty(property, property.getOrDefault(p));
      }
    }
  }

  /**
   * Sets properties from a {@link DriverManager} URL.
   * Added to follow convention used in other DBMS.
   *
   * @param url properties to set
   */
  public void setURL(String url) {
    setUrl(url);
  }

  /**
   * @return the class name to use for the Authentication Plugin.
   *         This can be null in which case the default password authentication plugin will be used
   */
  public String getAuthenticationPluginClassName() {
    return PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(properties);
  }

  /**
   * @param className name of a class which implements {@link org.postgresql.plugin.AuthenticationPlugin}
   *        This class will be used to get the encoded bytes to be sent to the server as the
   *        password to authenticate the user.
   */
  public void setAuthenticationPluginClassName(String className) {
    PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.set(properties, className);
  }

  /**
   * Gets a connection property by its textual name.
   *
   * @param name name of the property to read
   * @return the current value (or default) of the property
   * @throws SQLException if the name does not denote a known driver property
   */
  public String getProperty(String name) throws SQLException {
    PGProperty pgProperty = PGProperty.forName(name);
    if (pgProperty != null) {
      return getProperty(pgProperty);
    } else {
      throw new PSQLException(GT.tr("Unsupported property name: {0}", name),
          PSQLState.INVALID_PARAMETER_VALUE);
    }
  }

  /**
   * Sets a connection property by its textual name.
   *
   * @param name name of the property to set
   * @param value new value for the property
   * @throws SQLException if the name does not denote a known driver property
   */
  public void setProperty(String name, String value) throws SQLException {
    PGProperty pgProperty = PGProperty.forName(name);
    if (pgProperty != null) {
      setProperty(pgProperty, value);
    } else {
      throw new PSQLException(GT.tr("Unsupported property name: {0}", name),
          PSQLState.INVALID_PARAMETER_VALUE);
    }
  }

  /**
   * Gets a connection property.
   *
   * @param property the property to read
   * @return the current value (or default) of the property
   */
  public String getProperty(PGProperty property) {
    return property.getOrDefault(properties);
  }

  /**
   * Sets a connection property. The well-known connection fields (host, port,
   * database, user, password) are routed to their dedicated setters so the
   * data source state stays consistent; everything else lands in {@code properties}.
   *
   * @param property the property to set
   * @param value new value for the property; {@code null} is silently ignored
   */
  public void setProperty(PGProperty property, String value) {
    if (value == null) {
      // TODO: this is not consistent with PGProperty.PROPERTY.set(prop, null)
      // PGProperty removes an entry for put(null) call, however here we just ignore null
      return;
    }
    switch (property) {
      case PG_HOST:
        setServerNames(value.split(","));
        break;
      case PG_PORT:
        String[] ps = value.split(",");
        int[] ports = new int[ps.length];
        for (int i = 0; i < ps.length; i++) {
          try {
            ports[i] = Integer.parseInt(ps[i]);
          } catch (NumberFormatException e) {
            // Unparsable ports fall back to 0, which means "use the default port".
            ports[i] = 0;
          }
        }
        setPortNumbers(ports);
        break;
      case PG_DBNAME:
        setDatabaseName(value);
        break;
      case USER:
        setUser(value);
        break;
      case PASSWORD:
        setPassword(value);
        break;
      default:
        properties.setProperty(property.getName(), value);
    }
  }

  /**
   * Generates a reference using the appropriate object factory.
   *
   * @return reference using the appropriate object factory
   */
  protected Reference createReference() {
    return new Reference(getClass().getName(), PGObjectFactory.class.getName(), null);
  }

  /**
   * Builds a JNDI {@link Reference} capturing the state of this data source:
   * server names, ports, database, credentials and every explicitly-set driver
   * property, so that {@link #setFromReference(Reference)} can reconstruct it.
   */
  @Override
  public Reference getReference() throws NamingException {
    Reference ref = createReference();
    // Multiple hosts are stored as a single comma-separated address.
    StringBuilder serverString = new StringBuilder();
    for (int i = 0; i < serverNames.length; i++) {
      if (i > 0) {
        serverString.append(",");
      }
      String serverName = serverNames[i];
      serverString.append(serverName);
    }
    ref.add(new StringRefAddr("serverName", serverString.toString()));

    // Ports use the same comma-separated encoding, positionally matching hosts.
    StringBuilder portString = new StringBuilder();
    for (int i = 0; i < portNumbers.length; i++) {
      if (i > 0) {
        portString.append(",");
      }
      int p = portNumbers[i];
      portString.append(Integer.toString(p));
    }
    ref.add(new StringRefAddr("portNumber", portString.toString()));
    ref.add(new StringRefAddr("databaseName", databaseName));
    if (user != null) {
      ref.add(new StringRefAddr("user", user));
    }
    if (password != null) {
      ref.add(new StringRefAddr("password", password));
    }

    // Only explicitly-present properties are stored; defaults are omitted.
    for (PGProperty property : PGProperty.values()) {
      if (property.isPresent(properties)) {
        String value = property.getOrDefault(properties);
        ref.add(new StringRefAddr(property.getName(), value));
      }
    }

    return ref;
  }

  /**
   * Restores the state of this data source from a JNDI {@link Reference}
   * previously produced by {@link #getReference()}.
   *
   * @param ref reference to read the configuration from
   */
  public void setFromReference(Reference ref) {
    databaseName = getReferenceProperty(ref, "databaseName");
    String portNumberString = getReferenceProperty(ref, "portNumber");
    if (portNumberString != null) {
      String[] ps = portNumberString.split(",");
      int[] ports = new int[ps.length];
      for (int i = 0; i < ps.length; i++) {
        try {
          ports[i] = Integer.parseInt(ps[i]);
        } catch (NumberFormatException e) {
          // Unparsable ports fall back to 0, which means "use the default port".
          ports[i] = 0;
        }
      }
      setPortNumbers(ports);
    } else {
      setPortNumbers(null);
    }
    // NOTE(review): assumes the "serverName" address is always present; a reference
    // without it would throw a NullPointerException on the split() below — confirm
    // all producers go through getReference().
    String serverName = getReferenceProperty(ref, "serverName");
    setServerNames(serverName.split(","));

    for (PGProperty property : PGProperty.values()) {
      setProperty(property, getReferenceProperty(ref, property.getName()));
    }
  }

  /** Reads a single string-valued address from the reference, or null if absent. */
  private static String getReferenceProperty(Reference ref, String propertyName) {
    RefAddr addr = ref.get(propertyName);
    if (addr == null) {
      return null;
    }
    return (String) addr.getContent();
  }

  /**
   * Serializes the base connection state. The write order is the wire contract
   * and must match {@link #readBaseObject(ObjectInputStream)} exactly.
   */
  protected void writeBaseObject(ObjectOutputStream out) throws IOException {
    out.writeObject(serverNames);
    out.writeObject(databaseName);
    out.writeObject(user);
    out.writeObject(password);
    out.writeObject(portNumbers);

    out.writeObject(properties);
  }

  /**
   * Restores the base connection state. The read order must match
   * {@link #writeBaseObject(ObjectOutputStream)} exactly.
   */
  protected void readBaseObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    serverNames = (String[]) in.readObject();
    databaseName = (String) in.readObject();
    user = (String) in.readObject();
    password = (String) in.readObject();
    portNumbers = (int[]) in.readObject();

    properties = (Properties) in.readObject();
  }

  /**
   * Copies the configuration of another data source into this one by
   * round-tripping the base state through Java serialization.
   *
   * @param source data source to copy from
   * @throws IOException on serialization failure
   * @throws ClassNotFoundException on deserialization failure
   */
  public void initializeFrom(BaseDataSource source) throws IOException, ClassNotFoundException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    ObjectOutputStream oos = new ObjectOutputStream(baos);
    source.writeBaseObject(oos);
    oos.close();
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    ObjectInputStream ois = new ObjectInputStream(bais);
    readBaseObject(ois);
  }

  /**
   * @return preferred query execution mode
   * @see PGProperty#PREFER_QUERY_MODE
   */
  public PreferQueryMode getPreferQueryMode() {
    return PreferQueryMode.of(PGProperty.PREFER_QUERY_MODE.getOrDefault(properties));
  }

  /**
   * @param preferQueryMode extended, simple, extendedForPrepared, or extendedCacheEverything
   * @see PGProperty#PREFER_QUERY_MODE
   */
  public void setPreferQueryMode(PreferQueryMode preferQueryMode) {
    PGProperty.PREFER_QUERY_MODE.set(properties, preferQueryMode.value());
  }

  /**
   * @return connection configuration regarding automatic per-query savepoints
   * @see PGProperty#AUTOSAVE
   */
  public AutoSave getAutosave() {
    return AutoSave.of(PGProperty.AUTOSAVE.getOrDefault(properties));
  }

  /**
   * @param autoSave connection configuration regarding automatic per-query savepoints
   * @see PGProperty#AUTOSAVE
   */
  public void setAutosave(AutoSave autoSave) {
    PGProperty.AUTOSAVE.set(properties, autoSave.value());
  }

  /**
   * see PGProperty#CLEANUP_SAVEPOINTS
   *
   * @return boolean indicating property set
   */
  public boolean getCleanupSavepoints() {
    return PGProperty.CLEANUP_SAVEPOINTS.getBoolean(properties);
  }

  /**
   * see PGProperty#CLEANUP_SAVEPOINTS
   *
   * @param cleanupSavepoints will cleanup savepoints after a successful transaction
   */
  public void setCleanupSavepoints(boolean cleanupSavepoints) {
    PGProperty.CLEANUP_SAVEPOINTS.set(properties, cleanupSavepoints);
  }

  /**
   * @return boolean indicating property is enabled or not.
   * @see PGProperty#REWRITE_BATCHED_INSERTS
   */
  public boolean getReWriteBatchedInserts() {
    return PGProperty.REWRITE_BATCHED_INSERTS.getBoolean(properties);
  }

  /**
   * @param reWrite boolean value to set the property in the properties collection
   * @see PGProperty#REWRITE_BATCHED_INSERTS
   */
  public void setReWriteBatchedInserts(boolean reWrite) {
    PGProperty.REWRITE_BATCHED_INSERTS.set(properties, reWrite);
  }

  /**
   * @return boolean indicating property is enabled or not.
   * @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
   */
  public boolean getHideUnprivilegedObjects() {
    return PGProperty.HIDE_UNPRIVILEGED_OBJECTS.getBoolean(properties);
  }

  /**
   * @param hideUnprivileged boolean value to set the property in the properties collection
   * @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
   */
  public void setHideUnprivilegedObjects(boolean hideUnprivileged) {
    PGProperty.HIDE_UNPRIVILEGED_OBJECTS.set(properties, hideUnprivileged);
  }

  /**
   * @return maximum size of the result buffer
   * @see PGProperty#MAX_RESULT_BUFFER
   */
  public String getMaxResultBuffer() {
    return PGProperty.MAX_RESULT_BUFFER.getOrDefault(properties);
  }

  /**
   * @param maxResultBuffer maximum size of the result buffer
   * @see PGProperty#MAX_RESULT_BUFFER
   */
  public void setMaxResultBuffer(String maxResultBuffer) {
    PGProperty.MAX_RESULT_BUFFER.set(properties, maxResultBuffer);
  }

  /**
   * @return whether adaptive fetch sizing is enabled
   * @see PGProperty#ADAPTIVE_FETCH
   */
  public boolean getAdaptiveFetch() {
    return PGProperty.ADAPTIVE_FETCH.getBoolean(properties);
  }

  /**
   * @param adaptiveFetch whether adaptive fetch sizing is enabled
   * @see PGProperty#ADAPTIVE_FETCH
   */
  public void setAdaptiveFetch(boolean adaptiveFetch) {
    PGProperty.ADAPTIVE_FETCH.set(properties, adaptiveFetch);
  }

  /**
   * @return upper bound for the adaptive fetch size
   * @see PGProperty#ADAPTIVE_FETCH_MAXIMUM
   */
  public int getAdaptiveFetchMaximum() {
    return PGProperty.ADAPTIVE_FETCH_MAXIMUM.getIntNoCheck(properties);
  }

  /**
   * @param adaptiveFetchMaximum upper bound for the adaptive fetch size
   * @see PGProperty#ADAPTIVE_FETCH_MAXIMUM
   */
  public void setAdaptiveFetchMaximum(int adaptiveFetchMaximum) {
    PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, adaptiveFetchMaximum);
  }

  /**
   * @return lower bound for the adaptive fetch size
   * @see PGProperty#ADAPTIVE_FETCH_MINIMUM
   */
  public int getAdaptiveFetchMinimum() {
    return PGProperty.ADAPTIVE_FETCH_MINIMUM.getIntNoCheck(properties);
  }

  /**
   * @param adaptiveFetchMinimum lower bound for the adaptive fetch size
   * @see PGProperty#ADAPTIVE_FETCH_MINIMUM
   */
  public void setAdaptiveFetchMinimum(int adaptiveFetchMinimum) {
    PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, adaptiveFetchMinimum);
  }

  /** @return the driver's root logger, as required by {@code javax.sql.CommonDataSource}. */
  @Override
  public Logger getParentLogger() {
    return Logger.getLogger("org.postgresql");
  }

  /**
   * @return class name of the XML factory factory
   * @see PGProperty#XML_FACTORY_FACTORY
   */
  public String getXmlFactoryFactory() {
    return PGProperty.XML_FACTORY_FACTORY.getOrDefault(properties);
  }

  /**
   * @param xmlFactoryFactory class name of the XML factory factory
   * @see PGProperty#XML_FACTORY_FACTORY
   */
  public void setXmlFactoryFactory(String xmlFactoryFactory) {
    PGProperty.XML_FACTORY_FACTORY.set(properties, xmlFactoryFactory);
  }

  /*
   * Alias methods below, these are to help with ease-of-use with other database tools / frameworks
   * which expect normal java bean getters / setters to exist for the property names.
   */

  public boolean isSsl() {
    return getSsl();
  }

  public String getSslfactoryarg() {
    return getSslFactoryArg();
  }

  public void setSslfactoryarg(final String arg) {
    setSslFactoryArg(arg);
  }

  public String getSslcert() {
    return getSslCert();
  }

  public void setSslcert(final String file) {
    setSslCert(file);
  }

  public String getSslmode() {
    return getSslMode();
  }

  public void setSslmode(final String mode) {
    setSslMode(mode);
  }

  public String getSslhostnameverifier() {
    return getSslHostnameVerifier();
  }

  public void setSslhostnameverifier(final String className) {
    setSslHostnameVerifier(className);
  }

  public String getSslkey() {
    return getSslKey();
  }

  public void setSslkey(final String file) {
    setSslKey(file);
  }

  public String getSslrootcert() {
    return getSslRootCert();
  }

  public void setSslrootcert(final String file) {
    setSslRootCert(file);
  }

  public String getSslpasswordcallback() {
    return getSslPasswordCallback();
  }

  public void setSslpasswordcallback(final String className) {
    setSslPasswordCallback(className);
  }

  public String getSslpassword() {
    return getSslPassword();
  }

  public void setSslpassword(final String sslpassword) {
    setSslPassword(sslpassword);
  }

  public int getRecvBufferSize() {
    return getReceiveBufferSize();
  }

  public void setRecvBufferSize(final int nbytes) {
    setReceiveBufferSize(nbytes);
  }

  public boolean isAllowEncodingChanges() {
    return getAllowEncodingChanges();
  }

  public boolean isLogUnclosedConnections() {
    return getLogUnclosedConnections();
  }

  public boolean isTcpKeepAlive() {
    return getTcpKeepAlive();
  }

  public boolean isReadOnly() {
    return getReadOnly();
  }

  public boolean isDisableColumnSanitiser() {
    return getDisableColumnSanitiser();
  }

  public boolean isLoadBalanceHosts() {
    return getLoadBalanceHosts();
  }

  public boolean isCleanupSavePoints() {
    return
getCleanupSavepoints(); + } + + public void setCleanupSavePoints(final boolean cleanupSavepoints) { + setCleanupSavepoints(cleanupSavepoints); + } + + public boolean isReWriteBatchedInserts() { + return getReWriteBatchedInserts(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java b/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java new file mode 100644 index 0000000..d02613a --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.ds.common; + +import org.postgresql.ds.PGConnectionPoolDataSource; +import org.postgresql.ds.PGPoolingDataSource; +import org.postgresql.ds.PGSimpleDataSource; + +import java.util.Hashtable; + +import javax.naming.Context; +import javax.naming.Name; +import javax.naming.RefAddr; +import javax.naming.Reference; +import javax.naming.spi.ObjectFactory; + +/** + * Returns a DataSource-ish thing based on a JNDI reference. In the case of a SimpleDataSource or + * ConnectionPool, a new instance is created each time, as there is no connection state to maintain. + * In the case of a PoolingDataSource, the same DataSource will be returned for every invocation + * within the same VM/ClassLoader, so that the state of the connections in the pool will be + * consistent. + * + * @author Aaron Mulder (ammulder@chariotsolutions.com) + */ +public class PGObjectFactory implements ObjectFactory { + /** + * Dereferences a PostgreSQL DataSource. Other types of references are ignored. 
+ */ + @Override + public Object getObjectInstance(Object obj, Name name, Context nameCtx, + Hashtable environment) throws Exception { + Reference ref = (Reference) obj; + String className = ref.getClassName(); + // Old names are here for those who still use them + if ("org.postgresql.ds.PGSimpleDataSource".equals(className) + || "org.postgresql.jdbc2.optional.SimpleDataSource".equals(className) + || "org.postgresql.jdbc3.Jdbc3SimpleDataSource".equals(className)) { + return loadSimpleDataSource(ref); + } else if ("org.postgresql.ds.PGConnectionPoolDataSource".equals(className) + || "org.postgresql.jdbc2.optional.ConnectionPool".equals(className) + || "org.postgresql.jdbc3.Jdbc3ConnectionPool".equals(className)) { + return loadConnectionPool(ref); + } else if ("org.postgresql.ds.PGPoolingDataSource".equals(className) + || "org.postgresql.jdbc2.optional.PoolingDataSource".equals(className) + || "org.postgresql.jdbc3.Jdbc3PoolingDataSource".equals(className)) { + return loadPoolingDataSource(ref); + } else { + return null; + } + } + + @SuppressWarnings("deprecation") + private Object loadPoolingDataSource(Reference ref) { + // If DataSource exists, return it + String name = getProperty(ref, "dataSourceName"); + PGPoolingDataSource pds = PGPoolingDataSource.getDataSource(name); + if (pds != null) { + return pds; + } + // Otherwise, create a new one + pds = new PGPoolingDataSource(); + pds.setDataSourceName(name); + loadBaseDataSource(pds, ref); + String min = getProperty(ref, "initialConnections"); + if (min != null) { + pds.setInitialConnections(Integer.parseInt(min)); + } + String max = getProperty(ref, "maxConnections"); + if (max != null) { + pds.setMaxConnections(Integer.parseInt(max)); + } + return pds; + } + + private Object loadSimpleDataSource(Reference ref) { + PGSimpleDataSource ds = new PGSimpleDataSource(); + return loadBaseDataSource(ds, ref); + } + + private Object loadConnectionPool(Reference ref) { + PGConnectionPoolDataSource cp = new 
PGConnectionPoolDataSource(); + return loadBaseDataSource(cp, ref); + } + + protected Object loadBaseDataSource(BaseDataSource ds, Reference ref) { + ds.setFromReference(ref); + + return ds; + } + + protected String getProperty(Reference ref, String s) { + RefAddr addr = ref.get(s); + if (addr == null) { + return null; + } + return (String) addr.getContent(); + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java b/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java new file mode 100644 index 0000000..92a8028 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java @@ -0,0 +1,321 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.fastpath; + +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ParameterList; +import org.postgresql.core.QueryExecutor; +import org.postgresql.util.ByteConverter; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Level; + +/** + *

This class implements the Fastpath api.

+ * + *

This is a means of executing functions embedded in the backend from within a java application.

+ * + *

It is based around the file src/interfaces/libpq/fe-exec.c

+ * + * @deprecated This API is somewhat obsolete, as one may achieve similar performance + * and greater functionality by setting up a prepared statement to define + * the function call. Then, executing the statement with binary transmission of parameters + * and results substitutes for a fast-path function call. + */ +@Deprecated +public class Fastpath { + // Java passes oids around as longs, but in the backend + // it's an unsigned int, so we use this to make the conversion + // of long -> signed int which the backend interprets as unsigned. + private static final long NUM_OIDS = 4294967296L; // 2^32 + + // This maps the functions names to their id's (possible unique just + // to a connection). + private final Map func = new HashMap<>(); + private final QueryExecutor executor; + private final BaseConnection connection; + + /** + * Initialises the fastpath system. + * + * @param conn BaseConnection to attach to + */ + public Fastpath(BaseConnection conn) { + this.connection = conn; + this.executor = conn.getQueryExecutor(); + } + + /** + * Send a function call to the PostgreSQL backend. + * + * @param fnId Function id + * @param resultType True if the result is a numeric (Integer or Long) + * @param args FastpathArguments to pass to fastpath + * @return null if no data, Integer if an integer result, Long if a long result, or byte[] + * otherwise + * @throws SQLException if a database-access error occurs. + * @deprecated please use {@link #fastpath(int, FastpathArg[])} + */ + @Deprecated + public Object fastpath(int fnId, boolean resultType, FastpathArg[] args) + throws SQLException { + // Run it. + byte[] returnValue = fastpath(fnId, args); + + // Interpret results. 
    if (!resultType || returnValue == null) {
      return returnValue;
    }

    // Numeric results are distinguished purely by wire length: 4 bytes -> int, 8 -> long.
    if (returnValue.length == 4) {
      return ByteConverter.int4(returnValue, 0);
    } else if (returnValue.length == 8) {
      return ByteConverter.int8(returnValue, 0);
    } else {
      throw new PSQLException(
          GT.tr("Fastpath call {0} - No result was returned and we expected a numeric.", fnId),
          PSQLState.NO_DATA);
    }
  }

  /**
   * Send a function call to the PostgreSQL backend.
   *
   * @param fnId Function id
   * @param args FastpathArguments to pass to fastpath
   * @return null if no data, byte[] otherwise
   * @throws SQLException if a database-access error occurs.
   */
  public byte [] fastpath(int fnId, FastpathArg[] args) throws SQLException {
    // Turn fastpath array into a parameter list.
    ParameterList params = executor.createFastpathParameters(args.length);
    for (int i = 0; i < args.length; i++) {
      // Fastpath parameter indexes are 1-based.
      args[i].populateParameter(params, i + 1);
    }

    // Run it.
    return executor.fastpathCall(fnId, params, connection.getAutoCommit());
  }

  /**
   * @param name Function name
   * @param resulttype True if the result is a numeric (Integer or Long)
   * @param args FastpathArguments to pass to fastpath
   * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
   *         otherwise
   * @throws SQLException if something goes wrong
   * @see #fastpath(int, FastpathArg[])
   * @see #fastpath(String, FastpathArg[])
   * @deprecated Use {@link #getData(String, FastpathArg[])} if you expect a binary result, or one
   *             of {@link #getInteger(String, FastpathArg[])} or
   *             {@link #getLong(String, FastpathArg[])} if you expect a numeric one
   */
  @Deprecated
  public Object fastpath(String name, boolean resulttype, FastpathArg[] args)
      throws SQLException {
    connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
    return fastpath(getID(name), resulttype, args);
  }

  /**
   * <p>Send a function call to the PostgreSQL backend by name.</p>
   *
   * <p>Note: the mapping for the procedure name to function id needs to exist, usually to an
   * earlier call to addfunction().</p>
   *
   * <p>This is the preferred method to call, as function id's can/may change between versions of
   * the backend.</p>
   *
   * <p>For an example of how this works, refer to org.postgresql.largeobject.LargeObject</p>
   *
   * @param name Function name
   * @param args FastpathArguments to pass to fastpath
   * @return null if no data, byte[] otherwise
   * @throws SQLException if name is unknown or if a database-access error occurs.
   * @see org.postgresql.largeobject.LargeObject
   */
  public byte [] fastpath(String name, FastpathArg[] args) throws SQLException {
    connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
    return fastpath(getID(name), args);
  }

  /**
   * This convenience method assumes that the return value is an integer.
   *
   * @param name Function name
   * @param args Function arguments
   * @return integer result
   * @throws SQLException if a database-access error occurs or no result
   */
  public int getInteger(String name, FastpathArg[] args) throws SQLException {
    byte[] returnValue = fastpath(name, args);
    if (returnValue == null) {
      throw new PSQLException(
          GT.tr("Fastpath call {0} - No result was returned and we expected an integer.", name),
          PSQLState.NO_DATA);
    }

    if (returnValue.length == 4) {
      return ByteConverter.int4(returnValue, 0);
    } else {
      throw new PSQLException(GT.tr(
          "Fastpath call {0} - No result was returned or wrong size while expecting an integer.",
          name), PSQLState.NO_DATA);
    }
  }

  /**
   * This convenience method assumes that the return value is a long (bigint).
   *
   * @param name Function name
   * @param args Function arguments
   * @return long result
   * @throws SQLException if a database-access error occurs or no result
   */
  public long getLong(String name, FastpathArg[] args) throws SQLException {
    byte[] returnValue = fastpath(name, args);
    if (returnValue == null) {
      throw new PSQLException(
          GT.tr("Fastpath call {0} - No result was returned and we expected a long.", name),
          PSQLState.NO_DATA);
    }
    if (returnValue.length == 8) {
      return ByteConverter.int8(returnValue, 0);

    } else {
      throw new PSQLException(
          GT.tr("Fastpath call {0} - No result was returned or wrong size while expecting a long.",
              name),
          PSQLState.NO_DATA);
    }
  }

  /**
   * This convenience method assumes that the return value is an oid.
   *
   * @param name Function name
   * @param args Function arguments
   * @return oid of the given call
   * @throws SQLException if a database-access error occurs or no result
   */
  public long getOID(String name, FastpathArg[] args) throws SQLException {
    long oid = getInteger(name, args);
    // The backend transmits the oid as a signed int4; negative values are mapped
    // back into the unsigned 32-bit oid range by adding 2^32.
    if (oid < 0) {
      oid += NUM_OIDS;
    }
    return oid;
  }

  /**
   * This convenience method assumes that the return value is not an Integer.
   *
   * @param name Function name
   * @param args Function arguments
   * @return byte[] array containing result
   * @throws SQLException if a database-access error occurs or no result
   */
  public byte [] getData(String name, FastpathArg[] args) throws SQLException {
    return fastpath(name, args);
  }

  /**
   * <p>This adds a function to our lookup table.</p>
   *
   * <p>User code should use the addFunctions method, which is based upon a query, rather than hard
   * coding the oid. The oid for a function is not guaranteed to remain static, even on different
   * servers of the same version.</p>
   *
   * @param name Function name
   * @param fnid Function id
   */
  public void addFunction(String name, int fnid) {
    func.put(name, fnid);
  }

  /**
   * <p>This takes a ResultSet containing two columns. Column 1 contains the function name, Column 2
   * the oid.</p>
   *
   * <p>It reads the entire ResultSet, loading the values into the function table.</p>
   *
   * <p>REMEMBER to close() the resultset after calling this!!</p>
   *
   * <p>Implementation note about function name lookups:</p>
   *
   * <p>PostgreSQL stores the function id's and their corresponding names in the pg_proc table. To
   * speed things up locally, instead of querying each function from that table when required, a
   * HashMap is used. Also, only the function's required are entered into this table, keeping
   * connection times as fast as possible.</p>
   *
   * <p>The org.postgresql.largeobject.LargeObject class performs a query upon it's startup, and
   * passes the returned ResultSet to the addFunctions() method here.</p>
   *
   * <p>Once this has been done, the LargeObject api refers to the functions by name.</p>
   *
   * <p>Don't think that manually converting them to the oid's will work. Ok, they will for now, but
   * they can change during development (there was some discussion about this for V7.0), so this is
   * implemented to prevent any unwarranted headaches in the future.</p>
   *
   * @param rs ResultSet
   * @throws SQLException if a database-access error occurs.
   * @see org.postgresql.largeobject.LargeObjectManager
   */
  public void addFunctions(ResultSet rs) throws SQLException {
    while (rs.next()) {
      func.put(rs.getString(1), rs.getInt(2));
    }
  }

  /**
   * <p>This returns the function id associated by its name.</p>
   *
   * <p>If addFunction() or addFunctions() have not been called for this name, then an SQLException
   * is thrown.</p>
   *
   * @param name Function name to lookup
   * @return Function ID for fastpath call
   * @throws SQLException is function is unknown.
   */
  public int getID(String name) throws SQLException {
    Integer id = func.get(name);

    // may be we could add a lookup to the database here, and store the result
    // in our lookup table, throwing the exception if that fails.
    // We must, however, ensure that if we do, any existing ResultSet is
    // unaffected, otherwise we could break user code.
    //
    // so, until we know we can do this (needs testing, on the TODO list)
    // for now, we throw the exception and do no lookups.
    if (id == null) {
      throw new PSQLException(GT.tr("The fastpath function {0} is unknown.", name),
          PSQLState.UNEXPECTED_ERROR);
    }

    return id;
  }

  /**
   * Creates a FastpathArg with an oid parameter. This is here instead of a constructor of
   * FastpathArg because the constructor can't tell the difference between an long that's really
   * int8 and a long thats an oid.
   *
   * @param oid input oid
   * @return FastpathArg with an oid parameter
   */
  public static FastpathArg createOIDArg(long oid) {
    // Inverse of getOID(): oids above Integer.MAX_VALUE are folded into the
    // negative int range so the backend reads them back as unsigned.
    if (oid > Integer.MAX_VALUE) {
      oid -= NUM_OIDS;
    }
    return new FastpathArg((int) oid);
  }

}

// ---- diff --git: new file pgjdbc/src/main/java/org/postgresql/fastpath/FastpathArg.java (index 0000000..a739a29) ----

/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.fastpath;

import org.postgresql.core.ParameterList;
import org.postgresql.util.ByteStreamWriter;

import java.sql.SQLException;

// Not a very clean mapping to the new QueryExecutor/ParameterList
// stuff, but it seems hard to support both v2 and v3 cleanly with
// the same model while retaining API compatibility. So I've just
// done it the ugly way..

/**
 * Each fastpath call requires an array of arguments, the number and type dependent on the function
 * being called.
 *
 * @deprecated This API is somewhat obsolete, as one may achieve similar performance
 *             and greater functionality by setting up a prepared statement to define
 *             the function call. Then, executing the statement with binary transmission of
 *             parameters and results substitutes for a fast-path function call.
 */
@Deprecated
public class FastpathArg {
  /**
   * Encoded byte value of argument.
   */
  private final byte [] bytes;
  // Offset and length of the argument's payload within bytes.
  private final int bytesStart;
  private final int bytesLength;

  /** Argument variant whose payload is streamed via a writer instead of a byte array. */
  static class ByteStreamWriterFastpathArg extends FastpathArg {
    private final ByteStreamWriter writer;

    ByteStreamWriterFastpathArg(ByteStreamWriter writer) {
      super(null, 0, 0);
      this.writer = writer;
    }

    @Override
    void populateParameter(ParameterList params, int index) throws SQLException {
      params.setBytea(index, writer);
    }
  }

  /**
   * Constructs an argument that consists of an integer value.
   *
   * @param value int value to set
   */
  public FastpathArg(int value) {
    // Big-endian (network order) encoding.
    bytes = new byte[4];
    bytes[3] = (byte) (value);
    bytes[2] = (byte) (value >> 8);
    bytes[1] = (byte) (value >> 16);
    bytes[0] = (byte) (value >> 24);
    bytesStart = 0;
    bytesLength = 4;
  }

  /**
   * Constructs an argument that consists of a long value.
   *
   * @param value long value to set
   */
  public FastpathArg(long value) {
    // Big-endian (network order) encoding.
    bytes = new byte[8];
    bytes[7] = (byte) (value);
    bytes[6] = (byte) (value >> 8);
    bytes[5] = (byte) (value >> 16);
    bytes[4] = (byte) (value >> 24);
    bytes[3] = (byte) (value >> 32);
    bytes[2] = (byte) (value >> 40);
    bytes[1] = (byte) (value >> 48);
    bytes[0] = (byte) (value >> 56);
    bytesStart = 0;
    bytesLength = 8;
  }

  /**
   * Constructs an argument that consists of an array of bytes.
   *
   * @param bytes array to store
   */
  public FastpathArg(byte[] bytes) {
    this(bytes, 0, bytes.length);
  }

  /**
   * Constructs an argument that consists of part of a byte array.
   *
   * @param buf source array
   * @param off offset within array
   * @param len length of data to include
   */
  public FastpathArg(byte [] buf, int off, int len) {
    this.bytes = buf;
    this.bytesStart = off;
    this.bytesLength = len;
  }

  /**
   * Constructs an argument that consists of a String.
   *
   * @param s String to store
   */
  public FastpathArg(String s) {
    // NOTE(review): getBytes() uses the platform default charset — confirm callers
    // expect that rather than a fixed encoding.
    this(s.getBytes());
  }

  /**
   * Wraps a {@link ByteStreamWriter} as a fastpath argument.
   *
   * @param writer source of the argument's bytes
   * @return a fastpath argument backed by the writer
   */
  public static FastpathArg of(ByteStreamWriter writer) {
    return new ByteStreamWriterFastpathArg(writer);
  }

  /** Copies this argument into the parameter list at the given 1-based index. */
  void populateParameter(ParameterList params, int index) throws SQLException {
    if (bytes == null) {
      params.setNull(index, 0);
    } else {
      params.setBytea(index, bytes, bytesStart, bytesLength);
    }
  }
}

// ---- diff --git: new file pgjdbc/src/main/java/org/postgresql/geometric/PGbox.java (index 0000000..7127a41) ----

/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.geometric;

import org.postgresql.util.GT;
import org.postgresql.util.PGBinaryObject;
import org.postgresql.util.PGobject;
import org.postgresql.util.PGtokenizer;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;

import java.io.Serializable;
import java.sql.SQLException;

/**
 * This represents the box datatype within org.postgresql.
 */
@SuppressWarnings("serial")
public class PGbox extends PGobject implements PGBinaryObject, Serializable, Cloneable {
  /**
   * These are the two points.
   */
  public PGpoint [] point;

  /**
   * @param x1 first x coordinate
   * @param y1 first y coordinate
   * @param x2 second x coordinate
   * @param y2 second y coordinate
   */
  public PGbox(double x1, double y1, double x2, double y2) {
    this(new PGpoint(x1, y1), new PGpoint(x2, y2));
  }

  /**
   * @param p1 first point
   * @param p2 second point
   */
  public PGbox(PGpoint p1, PGpoint p2) {
    this();
    this.point = new PGpoint[]{p1, p2};
  }

  /**
   * @param s Box definition in PostgreSQL syntax
   * @throws SQLException if definition is invalid
   */
  public PGbox(String s) throws SQLException {
    this();
    setValue(s);
  }

  /**
   * Required constructor.
   */
  public PGbox() {
    type = "box";
  }

  /**
   * This method sets the value of this object. It should be overridden, but still called by
   * subclasses.
   *
   * @param value a string representation of the value of the object
   * @throws SQLException thrown if value is invalid for this type
   */
  @Override
  public void setValue(String value) throws SQLException {
    if (value == null) {
      this.point = null;
      return;
    }
    PGtokenizer t = new PGtokenizer(value, ',');
    // A box is exactly two comma-separated points.
    if (t.getSize() != 2) {
      throw new PSQLException(
          GT.tr("Conversion to type {0} failed: {1}.", type, value),
          PSQLState.DATA_TYPE_MISMATCH);
    }

    PGpoint[] point = this.point;
    if (point == null) {
      this.point = point = new PGpoint[2];
    }
    point[0] = new PGpoint(t.getToken(0));
    point[1] = new PGpoint(t.getToken(1));
  }

  /**
   * @param b Definition of this point in PostgreSQL's binary syntax
   * @param offset offset within {@code b} at which the box encoding starts
   */
  @Override
  public void setByteValue(byte[] b, int offset) {
    PGpoint[] point = this.point;
    if (point == null) {
      this.point = point = new PGpoint[2];
    }
    point[0] = new PGpoint();
    point[0].setByteValue(b, offset);
    point[1] = new PGpoint();
    // The second point immediately follows the first in the binary encoding.
    point[1].setByteValue(b, offset + point[0].lengthInBytes());
    this.point = point;
  }

  /**
   * Boxes compare equal when they describe the same rectangle, regardless of
   * which pair of opposite corners was used or their order.
   *
   * @param obj Object to compare with
   * @return true if the two boxes are identical
   */
  @Override
  public boolean equals(Object obj) {
    if (obj instanceof PGbox) {
      PGbox p = (PGbox) obj;

      // Same points.
      PGpoint[] point = this.point;
      PGpoint[] pPoint = p.point;
      if (point == null) {
        return pPoint == null;
      } else if (pPoint == null) {
        return false;
      }

      if (pPoint[0].equals(point[0]) && pPoint[1].equals(point[1])) {
        return true;
      }

      // Points swapped.
      if (pPoint[0].equals(point[1]) && pPoint[1].equals(point[0])) {
        return true;
      }

      // Using the opposite two points of the box:
      // (x1,y1),(x2,y2) -> (x1,y2),(x2,y1)
      if (pPoint[0].x == point[0].x && pPoint[0].y == point[1].y
          && pPoint[1].x == point[1].x && pPoint[1].y == point[0].y) {
        return true;
      }

      // Using the opposite two points of the box, and the points are swapped
      // (x1,y1),(x2,y2) -> (x2,y1),(x1,y2)
      if (pPoint[0].x == point[1].x && pPoint[0].y == point[0].y
          && pPoint[1].x == point[0].x && pPoint[1].y == point[1].y) {
        return true;
      }
    }

    return false;
  }

  @Override
  public int hashCode() {
    // This relies on the behaviour of point's hashcode being an exclusive-OR of
    // its X and Y components; we end up with an exclusive-OR of the two X and
    // two Y components, which is equal whenever equals() would return true
    // since xor is commutative.
    PGpoint[] point = this.point;
    return point == null ? 0 : point[0].hashCode() ^ point[1].hashCode();
  }

  /** Deep clone: the point array and each contained point are copied. */
  @Override
  public Object clone() throws CloneNotSupportedException {
    PGbox newPGbox = (PGbox) super.clone();
    if (newPGbox.point != null) {
      newPGbox.point = newPGbox.point.clone();
      for (int i = 0; i < newPGbox.point.length; i++) {
        if (newPGbox.point[i] != null) {
          newPGbox.point[i] = (PGpoint) newPGbox.point[i].clone();
        }
      }
    }
    return newPGbox;
  }

  /**
   * @return the PGbox in the syntax expected by org.postgresql
   */
  @Override
  public String getValue() {
    PGpoint[] point = this.point;
    return point == null ?
null : point[0].toString() + "," + point[1].toString(); + } + + @Override + public int lengthInBytes() { + PGpoint[] point = this.point; + if (point == null) { + return 0; + } + return point[0].lengthInBytes() + point[1].lengthInBytes(); + } + + @Override + public void toBytes(byte[] bytes, int offset) { + PGpoint[] point = this.point; + point[0].toBytes(bytes, offset); + point[1].toBytes(bytes, offset + point[0].lengthInBytes()); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java new file mode 100644 index 0000000..995023a --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.geometric; + +import org.postgresql.util.GT; +import org.postgresql.util.PGobject; +import org.postgresql.util.PGtokenizer; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.Serializable; +import java.sql.SQLException; + +/** + * This represents org.postgresql's circle datatype, consisting of a point and a radius. + */ +@SuppressWarnings("serial") +public class PGcircle extends PGobject implements Serializable, Cloneable { + /** + * This is the center point. + */ + public PGpoint center; + + /** + * This is the radius. + */ + public double radius; + + /** + * @param x coordinate of center + * @param y coordinate of center + * @param r radius of circle + */ + public PGcircle(double x, double y, double r) { + this(new PGpoint(x, y), r); + } + + /** + * @param c PGpoint describing the circle's center + * @param r radius of circle + */ + public PGcircle(PGpoint c, double r) { + this(); + this.center = c; + this.radius = r; + } + + /** + * @param s definition of the circle in PostgreSQL's syntax. 
+ * @throws SQLException on conversion failure + */ + public PGcircle(String s) throws SQLException { + this(); + setValue(s); + } + + /** + * This constructor is used by the driver. + */ + public PGcircle() { + type = "circle"; + } + + /** + * @param s definition of the circle in PostgreSQL's syntax. + * @throws SQLException on conversion failure + */ + @Override + public void setValue(String s) throws SQLException { + if (s == null) { + center = null; + return; + } + PGtokenizer t = new PGtokenizer(PGtokenizer.removeAngle(s), ','); + if (t.getSize() != 2) { + throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s), + PSQLState.DATA_TYPE_MISMATCH); + } + + try { + center = new PGpoint(t.getToken(0)); + radius = Double.parseDouble(t.getToken(1)); + } catch (NumberFormatException e) { + throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s), + PSQLState.DATA_TYPE_MISMATCH, e); + } + } + + /** + * @param obj Object to compare with + * @return true if the two circles are identical + */ + @Override + public boolean equals(Object obj) { + if (obj instanceof PGcircle) { + PGcircle p = (PGcircle) obj; + PGpoint center = this.center; + PGpoint pCenter = p.center; + if (center == null) { + return pCenter == null; + } else if (pCenter == null) { + return false; + } + + return p.radius == radius && equals(pCenter, center); + } + return false; + } + + @Override + public int hashCode() { + if (center == null) { + return 0; + } + long bits = Double.doubleToLongBits(radius); + int v = (int) (bits ^ (bits >>> 32)); + v = v * 31 + center.hashCode(); + return v; + } + + @Override + public Object clone() throws CloneNotSupportedException { + PGcircle newPGcircle = (PGcircle) super.clone(); + if (newPGcircle.center != null) { + newPGcircle.center = (PGpoint) newPGcircle.center.clone(); + } + return newPGcircle; + } + + /** + * @return the PGcircle in the syntax expected by org.postgresql + */ + @Override + public String getValue() { + 
return center == null ? null : "<" + center + "," + radius + ">"; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java new file mode 100644 index 0000000..9ee8ffb --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.geometric; + +import org.postgresql.util.GT; +import org.postgresql.util.PGobject; +import org.postgresql.util.PGtokenizer; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.Serializable; +import java.sql.SQLException; + +/** + * This implements a line represented by the linear equation Ax + By + C = 0. + **/ +@SuppressWarnings("serial") +public class PGline extends PGobject implements Serializable, Cloneable { + + /** + * Coefficient of x. + */ + public double a; + + /** + * Coefficient of y. + */ + public double b; + + /** + * Constant. + */ + public double c; + + private boolean isNull; + + /** + * @param a coefficient of x + * @param b coefficient of y + * @param c constant + */ + public PGline(double a, double b, double c) { + this(); + this.a = a; + this.b = b; + this.c = c; + } + + /** + * @param x1 coordinate for first point on the line + * @param y1 coordinate for first point on the line + * @param x2 coordinate for second point on the line + * @param y2 coordinate for second point on the line + */ + public PGline(double x1, double y1, double x2, double y2) { + this(); + setValue(x1, y1, x2, y2); + } + + /** + * @param p1 first point on the line + * @param p2 second point on the line + */ + public PGline(PGpoint p1, PGpoint p2) { + this(); + setValue(p1, p2); + } + + /** + * @param lseg Line segment which calls on this line. 
+ */ + public PGline(PGlseg lseg) { + this(); + if (lseg == null) { + isNull = true; + return; + } + PGpoint[] point = lseg.point; + if (point == null) { + isNull = true; + return; + } + setValue(point[0], point[1]); + } + + private void setValue(PGpoint p1, PGpoint p2) { + if (p1 == null || p2 == null) { + isNull = true; + } else { + setValue(p1.x, p1.y, p2.x, p2.y); + } + } + + private void setValue(double x1, double y1, double x2, double y2) { + if (x1 == x2) { + a = -1; + b = 0; + } else { + a = (y2 - y1) / (x2 - x1); + b = -1; + } + c = y1 - a * x1; + } + + /** + * @param s definition of the line in PostgreSQL's syntax. + * @throws SQLException on conversion failure + */ + public PGline(String s) throws SQLException { + this(); + setValue(s); + } + + /** + * required by the driver. + */ + public PGline() { + type = "line"; + } + + /** + * @param s Definition of the line in PostgreSQL's syntax + * @throws SQLException on conversion failure + */ + @Override + public void setValue(String s) throws SQLException { + isNull = s == null; + if (s == null) { + return; + } + if (s.trim().startsWith("{")) { + PGtokenizer t = new PGtokenizer(PGtokenizer.removeCurlyBrace(s), ','); + if (t.getSize() != 3) { + throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s), + PSQLState.DATA_TYPE_MISMATCH); + } + a = Double.parseDouble(t.getToken(0)); + b = Double.parseDouble(t.getToken(1)); + c = Double.parseDouble(t.getToken(2)); + } else if (s.trim().startsWith("[")) { + PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ','); + if (t.getSize() != 2) { + throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s), + PSQLState.DATA_TYPE_MISMATCH); + } + PGpoint point1 = new PGpoint(t.getToken(0)); + PGpoint point2 = new PGpoint(t.getToken(1)); + a = point2.x - point1.x; + b = point2.y - point1.y; + c = point1.y; + } + } + + /** + * @param obj Object to compare with + * @return true if the two lines are identical + */ + @Override + 
public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + if (!super.equals(obj)) { + return false; + } + + PGline pGline = (PGline) obj; + if (isNull) { + return pGline.isNull; + } else if (pGline.isNull) { + return false; + } + + return Double.compare(pGline.a, a) == 0 + && Double.compare(pGline.b, b) == 0 + && Double.compare(pGline.c, c) == 0; + } + + @Override + public int hashCode() { + if (isNull) { + return 0; + } + int result = super.hashCode(); + long temp; + temp = Double.doubleToLongBits(a); + result = 31 * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(b); + result = 31 * result + (int) (temp ^ (temp >>> 32)); + temp = Double.doubleToLongBits(c); + result = 31 * result + (int) (temp ^ (temp >>> 32)); + return result; + } + + /** + * @return the PGline in the syntax expected by org.postgresql + */ + @Override + public String getValue() { + return isNull ? null : "{" + a + "," + b + "," + c + "}"; + } + + @Override + public Object clone() throws CloneNotSupportedException { + // squid:S2157 "Cloneables" should implement "clone + return super.clone(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java new file mode 100644 index 0000000..da1c158 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.geometric; + +import org.postgresql.util.GT; +import org.postgresql.util.PGobject; +import org.postgresql.util.PGtokenizer; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.Serializable; +import java.sql.SQLException; + +/** + * This implements a lseg (line segment) consisting of two points. 
+ */ +@SuppressWarnings("serial") +public class PGlseg extends PGobject implements Serializable, Cloneable { + /** + * These are the two points. + */ + public PGpoint [] point; + + /** + * @param x1 coordinate for first point + * @param y1 coordinate for first point + * @param x2 coordinate for second point + * @param y2 coordinate for second point + */ + public PGlseg(double x1, double y1, double x2, double y2) { + this(new PGpoint(x1, y1), new PGpoint(x2, y2)); + } + + /** + * @param p1 first point + * @param p2 second point + */ + public PGlseg(PGpoint p1, PGpoint p2) { + this(); + point = new PGpoint[]{p1, p2}; + } + + /** + * @param s definition of the line segment in PostgreSQL's syntax. + * @throws SQLException on conversion failure + */ + public PGlseg(String s) throws SQLException { + this(); + setValue(s); + } + + /** + * required by the driver. + */ + public PGlseg() { + type = "lseg"; + } + + /** + * @param s Definition of the line segment in PostgreSQL's syntax + * @throws SQLException on conversion failure + */ + @Override + public void setValue(String s) throws SQLException { + if (s == null) { + point = null; + return; + } + PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ','); + if (t.getSize() != 2) { + throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s), + PSQLState.DATA_TYPE_MISMATCH); + } + + PGpoint[] point = this.point; + if (point == null) { + this.point = point = new PGpoint[2]; + } + point[0] = new PGpoint(t.getToken(0)); + point[1] = new PGpoint(t.getToken(1)); + } + + /** + * @param obj Object to compare with + * @return true if the two line segments are identical + */ + @Override + public boolean equals(Object obj) { + if (obj instanceof PGlseg) { + PGlseg p = (PGlseg) obj; + PGpoint[] point = this.point; + PGpoint[] pPoint = p.point; + if (point == null) { + return pPoint == null; + } else if (pPoint == null) { + return false; + } + return (pPoint[0].equals(point[0]) && pPoint[1].equals(point[1])) + 
|| (pPoint[0].equals(point[1]) && pPoint[1].equals(point[0])); + } + return false; + } + + @Override + public int hashCode() { + PGpoint[] point = this.point; + if (point == null) { + return 0; + } + return point[0].hashCode() ^ point[1].hashCode(); + } + + @Override + public Object clone() throws CloneNotSupportedException { + PGlseg newPGlseg = (PGlseg) super.clone(); + if (newPGlseg.point != null) { + newPGlseg.point = newPGlseg.point.clone(); + for (int i = 0; i < newPGlseg.point.length; i++) { + if (newPGlseg.point[i] != null) { + newPGlseg.point[i] = (PGpoint) newPGlseg.point[i].clone(); + } + } + } + return newPGlseg; + } + + /** + * @return the PGlseg in the syntax expected by org.postgresql + */ + @Override + public String getValue() { + PGpoint[] point = this.point; + if (point == null) { + return null; + } + return "[" + point[0] + "," + point[1] + "]"; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java new file mode 100644 index 0000000..807ee86 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.geometric; + +import org.postgresql.util.GT; +import org.postgresql.util.PGobject; +import org.postgresql.util.PGtokenizer; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.Serializable; +import java.sql.SQLException; + +/** + * This implements a path (a multiple segmented line, which may be closed). + */ +@SuppressWarnings("serial") +public class PGpath extends PGobject implements Serializable, Cloneable { + /** + * True if the path is open, false if closed. + */ + public boolean open; + + /** + * The points defining this path. 
+ */ + public PGpoint [] points; + + /** + * @param points the PGpoints that define the path + * @param open True if the path is open, false if closed + */ + public PGpath(PGpoint [] points, boolean open) { + this(); + this.points = points; + this.open = open; + } + + /** + * Required by the driver. + */ + public PGpath() { + type = "path"; + } + + /** + * @param s definition of the path in PostgreSQL's syntax. + * @throws SQLException on conversion failure + */ + public PGpath(String s) throws SQLException { + this(); + setValue(s); + } + + /** + * @param s Definition of the path in PostgreSQL's syntax + * @throws SQLException on conversion failure + */ + @Override + public void setValue(String s) throws SQLException { + if (s == null) { + points = null; + return; + } + // First test to see if were open + if (s.startsWith("[") && s.endsWith("]")) { + open = true; + s = PGtokenizer.removeBox(s); + } else if (s.startsWith("(") && s.endsWith(")")) { + open = false; + s = PGtokenizer.removePara(s); + } else { + throw new PSQLException(GT.tr("Cannot tell if path is open or closed: {0}.", s), + PSQLState.DATA_TYPE_MISMATCH); + } + + PGtokenizer t = new PGtokenizer(s, ','); + int npoints = t.getSize(); + PGpoint[] points = new PGpoint[npoints]; + this.points = points; + for (int p = 0; p < npoints; p++) { + points[p] = new PGpoint(t.getToken(p)); + } + } + + /** + * @param obj Object to compare with + * @return true if the two paths are identical + */ + @Override + public boolean equals(Object obj) { + if (obj instanceof PGpath) { + PGpath p = (PGpath) obj; + + PGpoint[] points = this.points; + PGpoint[] pPoints = p.points; + if (points == null) { + return pPoints == null; + } else if (pPoints == null) { + return false; + } + + if (p.open != open) { + return false; + } + + if (pPoints.length != points.length) { + return false; + } + + for (int i = 0; i < points.length; i++) { + if (!points[i].equals(pPoints[i])) { + return false; + } + } + + return true; + } + return 
false; + } + + @Override + public int hashCode() { + PGpoint[] points = this.points; + if (points == null) { + return 0; + } + // XXX not very good.. + int hash = open ? 1231 : 1237; + for (int i = 0; i < points.length && i < 5; i++) { + hash = hash * 31 + points[i].hashCode(); + } + return hash; + } + + @Override + public Object clone() throws CloneNotSupportedException { + PGpath newPGpath = (PGpath) super.clone(); + if (newPGpath.points != null) { + PGpoint[] newPoints = newPGpath.points.clone(); + newPGpath.points = newPoints; + for (int i = 0; i < newPGpath.points.length; i++) { + newPoints[i] = (PGpoint) newPGpath.points[i].clone(); + } + } + return newPGpath; + } + + /** + * This returns the path in the syntax expected by org.postgresql. + * @return the value of this object + */ + @Override + public String getValue() { + PGpoint[] points = this.points; + if (points == null) { + return null; + } + StringBuilder b = new StringBuilder(open ? "[" : "("); + + for (int p = 0; p < points.length; p++) { + if (p > 0) { + b.append(","); + } + b.append(points[p].toString()); + } + b.append(open ? "]" : ")"); + + return b.toString(); + } + + public boolean isOpen() { + return open && points != null; + } + + public boolean isClosed() { + return !open && points != null; + } + + public void closePath() { + open = false; + } + + public void openPath() { + open = true; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java new file mode 100644 index 0000000..7744a30 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.geometric; + +import org.postgresql.util.ByteConverter; +import org.postgresql.util.GT; +import org.postgresql.util.PGBinaryObject; +import org.postgresql.util.PGobject; +import org.postgresql.util.PGtokenizer; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.Serializable; +import java.sql.SQLException; + +/** + *

It maps to the point datatype in org.postgresql.

+ * + *

This implements a version of java.awt.Point, except it uses double to represent the coordinates.

+ */ +@SuppressWarnings("serial") +public class PGpoint extends PGobject implements PGBinaryObject, Serializable, Cloneable { + /** + * The X coordinate of the point. + */ + public double x; + + /** + * The Y coordinate of the point. + */ + public double y; + + /** + * True if the point represents {@code null::point}. + */ + public boolean isNull; + + /** + * @param x coordinate + * @param y coordinate + */ + public PGpoint(double x, double y) { + this(); + this.x = x; + this.y = y; + } + + /** + * This is called mainly from the other geometric types, when a point is embedded within their + * definition. + * + * @param value Definition of this point in PostgreSQL's syntax + * @throws SQLException if something goes wrong + */ + public PGpoint(String value) throws SQLException { + this(); + setValue(value); + } + + /** + * Required by the driver. + */ + public PGpoint() { + type = "point"; + } + + /** + * @param s Definition of this point in PostgreSQL's syntax + * @throws SQLException on conversion failure + */ + @Override + public void setValue(String s) throws SQLException { + isNull = s == null; + if (s == null) { + return; + } + PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ','); + try { + x = Double.parseDouble(t.getToken(0)); + y = Double.parseDouble(t.getToken(1)); + } catch (NumberFormatException e) { + throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s), + PSQLState.DATA_TYPE_MISMATCH, e); + } + } + + /** + * @param b Definition of this point in PostgreSQL's binary syntax + */ + @Override + public void setByteValue(byte[] b, int offset) { + this.isNull = false; + x = ByteConverter.float8(b, offset); + y = ByteConverter.float8(b, offset + 8); + } + + /** + * @param obj Object to compare with + * @return true if the two points are identical + */ + @Override + public boolean equals(Object obj) { + if (obj instanceof PGpoint) { + PGpoint p = (PGpoint) obj; + if (isNull) { + return p.isNull; + } else if (p.isNull) { + 
return false; + } + return x == p.x && y == p.y; + } + return false; + } + + @Override + public int hashCode() { + if (isNull) { + return 0; + } + long v1 = Double.doubleToLongBits(x); + long v2 = Double.doubleToLongBits(y); + return (int) (v1 ^ v2 ^ (v1 >>> 32) ^ (v2 >>> 32)); + } + + /** + * @return the PGpoint in the syntax expected by org.postgresql + */ + @Override + public String getValue() { + return isNull ? null : "(" + x + "," + y + ")"; + } + + @Override + public int lengthInBytes() { + return isNull ? 0 : 16; + } + + /** + * Populate the byte array with PGpoint in the binary syntax expected by org.postgresql. + */ + @Override + public void toBytes(byte[] b, int offset) { + if (isNull) { + return; + } + ByteConverter.float8(b, offset, x); + ByteConverter.float8(b, offset + 8, y); + } + + /** + * Translate the point by the supplied amount. + * + * @param x integer amount to add on the x axis + * @param y integer amount to add on the y axis + */ + public void translate(int x, int y) { + translate((double) x, (double) y); + } + + /** + * Translate the point by the supplied amount. + * + * @param x double amount to add on the x axis + * @param y double amount to add on the y axis + */ + public void translate(double x, double y) { + this.isNull = false; + this.x += x; + this.y += y; + } + + /** + * Moves the point to the supplied coordinates. + * + * @param x integer coordinate + * @param y integer coordinate + */ + public void move(int x, int y) { + setLocation(x, y); + } + + /** + * Moves the point to the supplied coordinates. + * + * @param x double coordinate + * @param y double coordinate + */ + public void move(double x, double y) { + this.isNull = false; + this.x = x; + this.y = y; + } + + /** + * Moves the point to the supplied coordinates. refer to java.awt.Point for description of this. 
+ * + * @param x integer coordinate + * @param y integer coordinate + */ + public void setLocation(int x, int y) { + move((double) x, (double) y); + } + + @Override + public Object clone() throws CloneNotSupportedException { + // squid:S2157 "Cloneables" should implement "clone + return super.clone(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java new file mode 100644 index 0000000..3a0d7b9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.geometric; + +import org.postgresql.util.PGobject; +import org.postgresql.util.PGtokenizer; + +import java.io.Serializable; +import java.sql.SQLException; + +/** + * This implements the polygon datatype within PostgreSQL. + */ +@SuppressWarnings("serial") +public class PGpolygon extends PGobject implements Serializable, Cloneable { + /** + * The points defining the polygon. + */ + public PGpoint [] points; + + /** + * Creates a polygon using an array of PGpoints. + * + * @param points the points defining the polygon + */ + public PGpolygon(PGpoint[] points) { + this(); + this.points = points; + } + + /** + * @param s definition of the polygon in PostgreSQL's syntax. + * @throws SQLException on conversion failure + */ + public PGpolygon(String s) throws SQLException { + this(); + setValue(s); + } + + /** + * Required by the driver. 
+ */ + public PGpolygon() { + type = "polygon"; + } + + /** + * @param s Definition of the polygon in PostgreSQL's syntax + * @throws SQLException on conversion failure + */ + @Override + public void setValue(String s) throws SQLException { + if (s == null) { + points = null; + return; + } + PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ','); + int npoints = t.getSize(); + PGpoint[] points = this.points; + if (points == null || points.length != npoints) { + this.points = points = new PGpoint[npoints]; + } + for (int p = 0; p < npoints; p++) { + points[p] = new PGpoint(t.getToken(p)); + } + } + + /** + * @param obj Object to compare with + * @return true if the two polygons are identical + */ + @Override + public boolean equals(Object obj) { + if (obj instanceof PGpolygon) { + PGpolygon p = (PGpolygon) obj; + + PGpoint[] points = this.points; + PGpoint[] pPoints = p.points; + if (points == null) { + return pPoints == null; + } else if (pPoints == null) { + return false; + } + + if (pPoints.length != points.length) { + return false; + } + + for (int i = 0; i < points.length; i++) { + if (!points[i].equals(pPoints[i])) { + return false; + } + } + + return true; + } + return false; + } + + @Override + public int hashCode() { + int hash = 0; + PGpoint[] points = this.points; + if (points == null) { + return hash; + } + for (int i = 0; i < points.length && i < 5; i++) { + hash = hash * 31 + points[i].hashCode(); + } + return hash; + } + + @Override + public Object clone() throws CloneNotSupportedException { + PGpolygon newPGpolygon = (PGpolygon) super.clone(); + if (newPGpolygon.points != null) { + PGpoint[] newPoints = newPGpolygon.points.clone(); + newPGpolygon.points = newPoints; + for (int i = 0; i < newPGpolygon.points.length; i++) { + if (newPGpolygon.points[i] != null) { + newPoints[i] = (PGpoint) newPGpolygon.points[i].clone(); + } + } + } + return newPGpolygon; + } + + /** + * @return the PGpolygon in the syntax expected by org.postgresql + */ + 
@Override + public String getValue() { + PGpoint[] points = this.points; + if (points == null) { + return null; + } + StringBuilder b = new StringBuilder(); + b.append("("); + for (int p = 0; p < points.length; p++) { + if (p > 0) { + b.append(","); + } + b.append(points[p].toString()); + } + b.append(")"); + return b.toString(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java b/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java new file mode 100644 index 0000000..9ec36fe --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2008, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.gss; + +import java.io.IOException; + +import javax.security.auth.callback.Callback; +import javax.security.auth.callback.CallbackHandler; +import javax.security.auth.callback.NameCallback; +import javax.security.auth.callback.PasswordCallback; +import javax.security.auth.callback.TextOutputCallback; +import javax.security.auth.callback.UnsupportedCallbackException; + +/* + provide a more or less redundant callback handler +*/ + +class GSSCallbackHandler implements CallbackHandler { + + private final String user; + private final char [] password; + + GSSCallbackHandler(String user, char [] password) { + this.user = user; + this.password = password; + } + + @Override + public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { + for (Callback callback : callbacks) { + if (callback instanceof TextOutputCallback) { + TextOutputCallback toc = (TextOutputCallback) callback; + switch (toc.getMessageType()) { + case TextOutputCallback.INFORMATION: + System.out.println("INFO: " + toc.getMessage()); + break; + case TextOutputCallback.ERROR: + System.out.println("ERROR: " + toc.getMessage()); + break; + case TextOutputCallback.WARNING: + System.out.println("WARNING: " 
+ toc.getMessage());
            break;
          default:
            throw new IOException("Unsupported message type: " + toc.getMessageType());
        }
      } else if (callback instanceof NameCallback) {
        NameCallback nc = (NameCallback) callback;
        nc.setName(user);
      } else if (callback instanceof PasswordCallback) {
        PasswordCallback pc = (PasswordCallback) callback;
        if (password == null) {
          throw new IOException("No cached kerberos ticket found and no password supplied.");
        }
        pc.setPassword(password);
      } else {
        throw new UnsupportedCallbackException(callback, "Unrecognized Callback");
      }
    }
  }
}

/*
 * Copyright (c) 2008, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.gss;

import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.MessageProp;

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

/**
 * InputStream that decrypts a GSSAPI-wrapped ("gssenc") byte stream.
 *
 * <p>The wire format is a sequence of tokens, each preceded by a 4-byte big-endian
 * length. Each token is unwrapped via {@link GSSContext#unwrap} and the decrypted
 * bytes are buffered until the caller has consumed them.</p>
 *
 * <p>Not thread-safe; intended to be used by a single connection thread.</p>
 */
public class GSSInputStream extends InputStream {
  private final GSSContext gssContext;
  private final MessageProp messageProp;
  private final InputStream wrapped;
  // Decrypted bytes of the current token that have not yet been handed to the caller.
  byte [] unencrypted;
  int unencryptedPos;
  int unencryptedLength;

  public GSSInputStream(InputStream wrapped, GSSContext gssContext, MessageProp messageProp) {
    this.wrapped = wrapped;
    this.gssContext = gssContext;
    this.messageProp = messageProp;
  }

  @Override
  public int read() throws IOException {
    // Bug fix: the previous implementation unconditionally returned 0, which violates
    // the InputStream contract (must return -1 at EOF, otherwise a byte value 0-255).
    byte[] b = new byte[1];
    int n = read(b, 0, 1);
    return n <= 0 ? -1 : b[0] & 0xFF;
  }

  /**
   * Reads up to {@code len} decrypted bytes into {@code buffer} starting at {@code pos}.
   *
   * @return the number of bytes copied, or -1 on a clean EOF between tokens
   * @throws IOException on truncated input or a GSS unwrap failure
   */
  @Override
  public int read(byte [] buffer, int pos, int len) throws IOException {
    if (len == 0) {
      return 0;
    }
    if (unencryptedLength == 0) {
      // Refill: read the 4-byte big-endian length header of the next token.
      // Bug fix: a single wrapped.read() may return fewer bytes than requested,
      // and EOF must be reported as -1, not 0.
      byte[] int4Buf = new byte[4];
      int headerRead = wrapped.readNBytes(int4Buf, 0, 4);
      if (headerRead == 0) {
        return -1; // clean EOF between tokens
      }
      if (headerRead < 4) {
        throw new EOFException("Premature EOF while reading GSS packet length");
      }
      int encryptedLength = (int4Buf[0] & 0xFF) << 24 | (int4Buf[1] & 0xFF) << 16
          | (int4Buf[2] & 0xFF) << 8 | int4Buf[3] & 0xFF;

      byte[] encryptedBuffer = new byte[encryptedLength];
      if (wrapped.readNBytes(encryptedBuffer, 0, encryptedLength) < encryptedLength) {
        throw new EOFException("Premature EOF while reading GSS packet payload");
      }

      try {
        unencrypted = gssContext.unwrap(encryptedBuffer, 0, encryptedLength, messageProp);
      } catch (GSSException e) {
        throw new IOException(e);
      }
      unencryptedPos = 0;
      unencryptedLength = unencrypted.length;
    }
    int copyLength = Math.min(len, unencryptedLength);
    System.arraycopy(unencrypted, unencryptedPos, buffer, pos, copyLength);
    unencryptedPos += copyLength;
    unencryptedLength -= copyLength;
    return copyLength;
  }
}

/*
 * Copyright (c) 2020, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */
/*
 * Copyright (c) 2020, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.gss;

import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.MessageProp;

import java.io.IOException;
import java.io.OutputStream;

/**
 * OutputStream that GSSAPI-wraps ("gssenc") outgoing data.
 *
 * <p>Plaintext is accumulated in an internal buffer; on {@link #flush()} the buffer is
 * encrypted with {@link GSSContext#wrap} and sent as a 4-byte big-endian length header
 * followed by the encrypted token.</p>
 *
 * <p>Not thread-safe; intended to be used by a single connection thread.</p>
 */
public class GSSOutputStream extends OutputStream {
  private final GSSContext gssContext;
  private final MessageProp messageProp;
  // Plaintext staging buffer; [0, index) holds bytes not yet wrapped and sent.
  private final byte[] buffer;
  private final byte[] int4Buf = new byte[4];
  private int index;
  private final OutputStream wrapped;

  public GSSOutputStream(OutputStream out, GSSContext gssContext, MessageProp messageProp, int bufferSize) {
    wrapped = out;
    this.gssContext = gssContext;
    this.messageProp = messageProp;
    buffer = new byte[bufferSize];
  }

  @Override
  public void write(int b) throws IOException {
    buffer[index++] = (byte) b;
    if (index >= buffer.length) {
      flush();
    }
  }

  @Override
  public void write(byte[] buf) throws IOException {
    write(buf, 0, buf.length);
  }

  @Override
  public void write(byte[] b, int pos, int len) throws IOException {
    while (len > 0) {
      int roomToWrite = buffer.length - index;
      if (len < roomToWrite) {
        // Remainder fits in the staging buffer; keep it for a later flush.
        System.arraycopy(b, pos, buffer, index, len);
        index += len;
        len = 0;
      } else {
        System.arraycopy(b, pos, buffer, index, roomToWrite);
        index += roomToWrite;
        // Bug fix: advance the source offset. Previously pos was never incremented,
        // so every buffer-sized chunk after the first re-sent the first roomToWrite
        // bytes of b (silent data corruption on writes larger than the buffer).
        pos += roomToWrite;
        len -= roomToWrite;
        // Buffer is full: wrap and send it now so the loop can continue copying.
        flush();
      }
    }
  }

  /**
   * Encrypts and sends the buffered plaintext as one token (length header + ciphertext).
   *
   * @throws IOException on a GSS wrap failure or an I/O error on the wrapped stream
   */
  @Override
  public void flush() throws IOException {
    try {
      byte[] token = gssContext.wrap(buffer, 0, index, messageProp);
      sendInteger4Raw(token.length);
      wrapped.write(token, 0, token.length);
      index = 0;
    } catch (GSSException ex) {
      throw new IOException(ex);
    }
    wrapped.flush();
  }

  /** Writes a 4-byte big-endian integer directly to the wrapped stream. */
  private void sendInteger4Raw(int val) throws IOException {
    int4Buf[0] = (byte) (val >>> 24);
    int4Buf[1] = (byte) (val >>> 16);
    int4Buf[2] = (byte) (val >>> 8);
    int4Buf[3] = (byte) (val);
    wrapped.write(int4Buf);
  }

}
/*
 * Copyright (c) 2008, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.gss;

import org.postgresql.core.PGStream;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.postgresql.util.ServerErrorMessage;

import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;

import java.io.IOException;
import java.security.Principal;
import java.security.PrivilegedAction;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.security.auth.Subject;

/**
 * Runs the GSSAPI authentication handshake against the backend.
 *
 * <p>Implemented as both {@link PrivilegedAction} and {@link Callable} so it can be
 * executed under a JAAS {@link Subject} via {@code Subject.doAs} (Java &lt; 18) or
 * {@code Subject.callAs} (Java 18+). Instead of throwing, {@link #run()} returns the
 * failure as an {@link Exception}, or {@code null} on success; the caller inspects
 * the returned value.</p>
 */
class GssAction implements PrivilegedAction<Exception>, Callable<Exception> {

  private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName());
  private final PGStream pgStream;
  private final String host;
  private final String kerberosServerName;
  private final String user;
  private final boolean useSpnego;
  private final Subject subject;
  private final boolean logServerErrorDetail;

  GssAction(PGStream pgStream, Subject subject, String host, String user,
      String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail) {
    this.pgStream = pgStream;
    this.subject = subject;
    this.host = host;
    this.user = user;
    this.kerberosServerName = kerberosServerName;
    this.useSpnego = useSpnego;
    this.logServerErrorDetail = logServerErrorDetail;
  }

  /**
   * @param manager GSS manager to inspect
   * @return true if the SPNEGO pseudo-mechanism (OID 1.3.6.1.5.5.2) is available
   * @throws GSSException if the OID cannot be constructed
   */
  private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException {
    Oid spnego = new Oid("1.3.6.1.5.5.2");

    for (Oid mech : manager.getMechs()) {
      if (mech.equals(spnego)) {
        return true;
      }
    }

    return false;
  }

  /**
   * Performs the token-exchange loop with the server.
   *
   * @return null on success, otherwise the exception describing the failure
   */
  @Override
  public Exception run() {
    try {
      GSSManager manager = GSSManager.getInstance();
      GSSCredential clientCreds = null;
      Oid[] desiredMechs = new Oid[1];

      // Try to get a credential from the Subject first (e.g. a cached Kerberos ticket).
      GSSCredential gssCredential = null;
      if (subject != null) {
        Set<GSSCredential> gssCreds = subject.getPrivateCredentials(GSSCredential.class);
        if (gssCreds != null && !gssCreds.isEmpty()) {
          gssCredential = gssCreds.iterator().next();
        }
      }

      // If no credential was found in the Subject, create one.
      if (gssCredential == null) {
        if (useSpnego && hasSpnegoSupport(manager)) {
          desiredMechs[0] = new Oid("1.3.6.1.5.5.2"); // SPNEGO
        } else {
          desiredMechs[0] = new Oid("1.2.840.113554.1.2.2"); // Kerberos v5
        }
        // Prefer the Subject's principal name over the connection user name.
        String principalName = this.user;
        if (subject != null) {
          Set<Principal> principals = subject.getPrincipals();
          Iterator<Principal> principalIterator = principals.iterator();

          Principal principal = null;
          if (principalIterator.hasNext()) {
            principal = principalIterator.next();
            principalName = principal.getName();
          }
        }

        GSSName clientName = manager.createName(principalName, GSSName.NT_USER_NAME);
        clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs,
            GSSCredential.INITIATE_ONLY);
      } else {
        desiredMechs[0] = new Oid("1.2.840.113554.1.2.2"); // Kerberos v5
        clientCreds = gssCredential;
      }

      GSSName serverName =
          manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE);

      GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds,
          GSSContext.DEFAULT_LIFETIME);
      secContext.requestMutualAuth(true);

      byte[] inToken = new byte[0];
      byte[] outToken = null;

      boolean established = false;
      while (!established) {
        outToken = secContext.initSecContext(inToken, 0, inToken.length);

        if (outToken != null) {
          LOGGER.log(Level.FINEST, " FE=> Password(GSS Authentication Token)");

          // 'p' (password) message: length word (includes itself) + opaque GSS token.
          pgStream.sendChar('p');
          pgStream.sendInteger4(4 + outToken.length);
          pgStream.send(outToken);
          pgStream.flush();
        }

        if (!secContext.isEstablished()) {
          int response = pgStream.receiveChar();
          switch (response) {
            case 'E':
              // ErrorResponse from the backend: the handshake failed.
              int elen = pgStream.receiveInteger4();
              ServerErrorMessage errorMsg
                  = new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));

              LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg);

              return new PSQLException(errorMsg, logServerErrorDetail);
            case 'R':
              // AuthenticationGSSContinue: next token from the server.
              LOGGER.log(Level.FINEST, " <=BE AuthenticationGSSContinue");
              int len = pgStream.receiveInteger4();
              int type = pgStream.receiveInteger4();
              // should check type = 8 (AuthenticationGSSContinue)
              inToken = pgStream.receive(len - 8);
              break;
            default:
              // Unknown/unexpected message type.
              return new PSQLException(GT.tr("Protocol error. Session setup failed."),
                  PSQLState.CONNECTION_UNABLE_TO_CONNECT);
          }
        } else {
          established = true;
        }
      }

    } catch (IOException e) {
      return e;
    } catch (GSSException gsse) {
      return new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
          gsse);
    }
    return null;
  }

  @Override
  public Exception call() throws Exception {
    return run();
  }
}

/*
 * Copyright (c) 2020, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */
+ */ + +package org.postgresql.gss; + +import org.postgresql.core.PGStream; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import org.ietf.jgss.GSSContext; +import org.ietf.jgss.GSSCredential; +import org.ietf.jgss.GSSException; +import org.ietf.jgss.GSSManager; +import org.ietf.jgss.GSSName; +import org.ietf.jgss.Oid; + +import java.io.IOException; +import java.security.Principal; +import java.security.PrivilegedAction; +import java.util.Iterator; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.logging.Level; +import java.util.logging.Logger; + +import javax.security.auth.Subject; + +public class GssEncAction implements PrivilegedAction, Callable { + private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName()); + private final PGStream pgStream; + private final String host; + private final String user; + private final String kerberosServerName; + private final boolean useSpnego; + private final Subject subject; + private final boolean logServerErrorDetail; + + public GssEncAction(PGStream pgStream, Subject subject, + String host, String user, + String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail) { + this.pgStream = pgStream; + this.subject = subject; + this.host = host; + this.user = user; + this.kerberosServerName = kerberosServerName; + this.useSpnego = useSpnego; + this.logServerErrorDetail = logServerErrorDetail; + } + + private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException { + Oid spnego = new Oid("1.3.6.1.5.5.2"); + Oid[] mechs = manager.getMechs(); + + for (Oid mech : mechs) { + if (mech.equals(spnego)) { + return true; + } + } + + return false; + } + + @Override + public Exception run() { + try { + GSSManager manager = GSSManager.getInstance(); + GSSCredential clientCreds = null; + Oid[] desiredMechs = new Oid[1]; + + //Try to get credential from subject first. 
+ GSSCredential gssCredential = null; + if (subject != null) { + Set gssCreds = subject.getPrivateCredentials(GSSCredential.class); + if (gssCreds != null && !gssCreds.isEmpty()) { + gssCredential = gssCreds.iterator().next(); + } + } + + //If failed to get credential from subject, + //then call createCredential to create one. + if (gssCredential == null) { + if (useSpnego && hasSpnegoSupport(manager)) { + desiredMechs[0] = new Oid("1.3.6.1.5.5.2"); + } else { + desiredMechs[0] = new Oid("1.2.840.113554.1.2.2"); + } + String principalName = this.user; + if (subject != null) { + Set principals = subject.getPrincipals(); + Iterator principalIterator = principals.iterator(); + + Principal principal = null; + if (principalIterator.hasNext()) { + principal = principalIterator.next(); + principalName = principal.getName(); + } + } + + GSSName clientName = manager.createName(principalName, GSSName.NT_USER_NAME); + clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs, + GSSCredential.INITIATE_ONLY); + } else { + desiredMechs[0] = new Oid("1.2.840.113554.1.2.2"); + clientCreds = gssCredential; + } + GSSName serverName = + manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE); + + GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds, + GSSContext.DEFAULT_LIFETIME); + secContext.requestMutualAuth(true); + secContext.requestConf(true); + secContext.requestInteg(true); + + byte[] inToken = new byte[0]; + byte[] outToken = null; + + boolean established = false; + while (!established) { + outToken = secContext.initSecContext(inToken, 0, inToken.length); + + if (outToken != null) { + LOGGER.log(Level.FINEST, " FE=> Password(GSS Authentication Token)"); + + pgStream.sendInteger4(outToken.length); + pgStream.send(outToken); + pgStream.flush(); + } + + if (!secContext.isEstablished()) { + int len = pgStream.receiveInteger4(); + // should check type = 8 + inToken = pgStream.receive(len); + } else { + 
established = true; + pgStream.setSecContext(secContext); + } + } + + } catch (IOException e) { + return e; + } catch (GSSException gsse) { + return new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE, + gsse); + } + + return null; + } + + @Override + public Exception call() throws Exception { + return run(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java b/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java new file mode 100644 index 0000000..a548275 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2008, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.gss; + +import org.postgresql.PGProperty; +import org.postgresql.core.PGStream; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import org.ietf.jgss.GSSCredential; + +import java.io.IOException; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.security.PrivilegedAction; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.logging.Level; +import java.util.logging.Logger; + +import javax.security.auth.Subject; +import javax.security.auth.login.LoginContext; + +public class MakeGSS { + private static final Logger LOGGER = Logger.getLogger(MakeGSS.class.getName()); + private static final MethodHandle SUBJECT_CURRENT; + private static final MethodHandle ACCESS_CONTROLLER_GET_CONTEXT; + private static final MethodHandle SUBJECT_GET_SUBJECT; + // Java <18 + private static final MethodHandle SUBJECT_DO_AS; + // Java 18+, see https://bugs.openjdk.org/browse/JDK-8267108 + private static final MethodHandle SUBJECT_CALL_AS; + + static { + MethodHandle subjectCurrent = null; + try { + subjectCurrent = MethodHandles.lookup() + .findStatic(Subject.class, 
"current", MethodType.methodType(Subject.class)); + } catch (NoSuchMethodException | IllegalAccessException ignore) { + // E.g. pre Java 18 + } + SUBJECT_CURRENT = subjectCurrent; + + MethodHandle accessControllerGetContext = null; + MethodHandle subjectGetSubject = null; + + try { + Class accessControllerClass = Class.forName("java.security.AccessController"); + Class accessControlContextClass = + Class.forName("java.security.AccessControlContext"); + accessControllerGetContext = MethodHandles.lookup() + .findStatic(accessControllerClass, "getContext", + MethodType.methodType(accessControlContextClass)); + subjectGetSubject = MethodHandles.lookup() + .findStatic(Subject.class, "getSubject", + MethodType.methodType(Subject.class, accessControlContextClass)); + } catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ignore) { + // E.g. pre Java 18+ + } + + ACCESS_CONTROLLER_GET_CONTEXT = accessControllerGetContext; + SUBJECT_GET_SUBJECT = subjectGetSubject; + + MethodHandle subjectDoAs = null; + try { + subjectDoAs = MethodHandles.lookup().findStatic(Subject.class, "doAs", + MethodType.methodType(Object.class, Subject.class, PrivilegedAction.class)); + } catch (NoSuchMethodException | IllegalAccessException ignore) { + } + SUBJECT_DO_AS = subjectDoAs; + + MethodHandle subjectCallAs = null; + try { + subjectCallAs = MethodHandles.lookup().findStatic(Subject.class, "callAs", + MethodType.methodType(Object.class, Subject.class, Callable.class)); + } catch (NoSuchMethodException | IllegalAccessException ignore) { + } + SUBJECT_CALL_AS = subjectCallAs; + } + + /** + * Use {@code Subject.current()} in Java 18+, and + * {@code Subject.getSubject(AccessController.getContext())} in Java before 18. 
+ * @return current Subject or null + */ + @SuppressWarnings("deprecation") + private static Subject getCurrentSubject() { + try { + if (SUBJECT_CURRENT != null) { + return (Subject) SUBJECT_CURRENT.invokeExact(); + } + if (SUBJECT_GET_SUBJECT == null || ACCESS_CONTROLLER_GET_CONTEXT == null) { + return null; + } + return (Subject) SUBJECT_GET_SUBJECT.invoke( + ACCESS_CONTROLLER_GET_CONTEXT.invoke() + ); + } catch (Throwable e) { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } + if (e instanceof Error) { + throw (Error) e; + } + throw new RuntimeException(e); + } + } + + public static void authenticate(boolean encrypted, + PGStream pgStream, String host, String user, char [] password, + String jaasApplicationName, String kerberosServerName, + boolean useSpnego, boolean jaasLogin, + boolean logServerErrorDetail) + throws IOException, PSQLException { + LOGGER.log(Level.FINEST, " <=BE AuthenticationReqGSS"); + + if (jaasApplicationName == null) { + jaasApplicationName = PGProperty.JAAS_APPLICATION_NAME.getDefaultValue(); + } + if (kerberosServerName == null) { + kerberosServerName = "postgres"; + } + + Exception result; + try { + boolean performAuthentication = jaasLogin; + + //Check if we can get credential from subject to avoid login. 
+ Subject sub = getCurrentSubject(); + if (sub != null) { + Set gssCreds = sub.getPrivateCredentials(GSSCredential.class); + if (gssCreds != null && !gssCreds.isEmpty()) { + performAuthentication = false; + } + } + if (performAuthentication) { + LoginContext lc = new LoginContext(jaasApplicationName, new GSSCallbackHandler(user, password)); + lc.login(); + sub = lc.getSubject(); + } + + PrivilegedAction action; + if ( encrypted ) { + action = new GssEncAction(pgStream, sub, host, user, + kerberosServerName, useSpnego, logServerErrorDetail); + } else { + action = new GssAction(pgStream, sub, host, user, + kerberosServerName, useSpnego, logServerErrorDetail); + } + @SuppressWarnings({"cast.unsafe", "assignment"}) + Subject subject = sub; + if (SUBJECT_DO_AS != null) { + result = (Exception) SUBJECT_DO_AS.invoke(subject, action); + } else if (SUBJECT_CALL_AS != null) { + result = (Exception) SUBJECT_CALL_AS.invoke(subject, action); + } else { + throw new PSQLException( + GT.tr("Neither Subject.doAs (Java before 18) nor Subject.callAs (Java 18+) method found"), + PSQLState.OBJECT_NOT_IN_STATE); + } + } catch (Throwable e) { + throw new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE, e); + } + + if (result instanceof IOException) { + throw (IOException) result; + } else if (result instanceof PSQLException) { + throw (PSQLException) result; + } else if (result != null) { + throw new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE, + result); + } + + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java new file mode 100644 index 0000000..b0303e3 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.hostchooser; + +import org.postgresql.util.HostSpec; + +/** + * Candidate host to be connected. + */ +public class CandidateHost { + public final HostSpec hostSpec; + public final HostRequirement targetServerType; + + public CandidateHost(HostSpec hostSpec, HostRequirement targetServerType) { + this.hostSpec = hostSpec; + this.targetServerType = targetServerType; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java new file mode 100644 index 0000000..d8a93b7 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2014, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.hostchooser; + +import org.postgresql.jdbc.ResourceLock; +import org.postgresql.util.HostSpec; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Keeps track of HostSpec targets in a global map. + */ +@SuppressWarnings("try") +public class GlobalHostStatusTracker { + private static final Map hostStatusMap = + new HashMap<>(); + private static final ResourceLock lock = new ResourceLock(); + + /** + * Store the actual observed host status. + * + * @param hostSpec The host whose status is known. + * @param hostStatus Latest known status for the host. 
+ */ + public static void reportHostStatus(HostSpec hostSpec, HostStatus hostStatus) { + long now = System.nanoTime() / 1000000; + try (ResourceLock ignore = lock.obtain()) { + HostSpecStatus hostSpecStatus = hostStatusMap.get(hostSpec); + if (hostSpecStatus == null) { + hostSpecStatus = new HostSpecStatus(hostSpec); + hostStatusMap.put(hostSpec, hostSpecStatus); + } + hostSpecStatus.status = hostStatus; + hostSpecStatus.lastUpdated = now; + } + } + + /** + * Returns a list of candidate hosts that have the required targetServerType. + * + * @param hostSpecs The potential list of hosts. + * @param targetServerType The required target server type. + * @param hostRecheckMillis How stale information is allowed. + * @return candidate hosts to connect to. + */ + static List getCandidateHosts(HostSpec[] hostSpecs, + HostRequirement targetServerType, long hostRecheckMillis) { + List candidates = new ArrayList<>(hostSpecs.length); + long latestAllowedUpdate = System.nanoTime() / 1000000 - hostRecheckMillis; + try (ResourceLock ignore = lock.obtain()) { + for (HostSpec hostSpec : hostSpecs) { + HostSpecStatus hostInfo = hostStatusMap.get(hostSpec); + // candidates are nodes we do not know about and the nodes with correct type + if (hostInfo == null + || hostInfo.lastUpdated < latestAllowedUpdate + || targetServerType.allowConnectingTo(hostInfo.status)) { + candidates.add(hostSpec); + } + } + } + return candidates; + } + + static class HostSpecStatus { + final HostSpec host; + HostStatus status; + long lastUpdated; + + HostSpecStatus(HostSpec host) { + this.host = host; + } + + @Override + public String toString() { + return host.toString() + '=' + status; + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java new file mode 100644 index 0000000..a506b7b --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java @@ -0,0 +1,21 @@ +/* + * Copyright (c) 
2014, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.hostchooser; + +import java.util.Iterator; + +/** + * Lists connections in preferred order. + */ +public interface HostChooser extends Iterable { + /** + * Lists connection hosts in preferred order. + * + * @return connection hosts in preferred order. + */ + @Override + Iterator iterator(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java new file mode 100644 index 0000000..4099fa0 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2014, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.hostchooser; + +import org.postgresql.util.HostSpec; + +import java.util.Properties; + +/** + * Chooses a {@link HostChooser} instance based on the number of hosts and properties. + */ +public class HostChooserFactory { + + public static HostChooser createHostChooser(HostSpec[] hostSpecs, + HostRequirement targetServerType, Properties info) { + if (hostSpecs.length == 1) { + return new SingleHostChooser(hostSpecs[0], targetServerType); + } + return new MultiHostChooser(hostSpecs, targetServerType, info); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java new file mode 100644 index 0000000..666bb9f --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2014, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.hostchooser; + +/** + * Describes the required server type. 
+ */ +public enum HostRequirement { + any { + @Override + public boolean allowConnectingTo(HostStatus status) { + return status != HostStatus.ConnectFail; + } + }, + /** + * @deprecated we no longer use the terms master or slave in the driver, or the PostgreSQL + * project. + */ + @Deprecated + master { + @Override + public boolean allowConnectingTo(HostStatus status) { + return primary.allowConnectingTo(status); + } + }, + primary { + @Override + public boolean allowConnectingTo(HostStatus status) { + return status == HostStatus.Primary || status == HostStatus.ConnectOK; + } + }, + secondary { + @Override + public boolean allowConnectingTo(HostStatus status) { + return status == HostStatus.Secondary || status == HostStatus.ConnectOK; + } + }, + preferSecondary { + @Override + public boolean allowConnectingTo(HostStatus status) { + return status != HostStatus.ConnectFail; + } + }, + preferPrimary { + @Override + public boolean allowConnectingTo(HostStatus status) { + return status != HostStatus.ConnectFail; + } + }; + + public abstract boolean allowConnectingTo(HostStatus status); + + /** + *

The PostgreSQL project has decided not to use the term "slave" to refer to alternate
 * servers; "secondary" or "standby" is preferred, and we have arbitrarily chosen
 * "secondary". As of Jan 2018, in order not to break existing code, we accept both
 * "slave" and "secondary" as names for alternate servers.</p>
 *
 * <p>The current policy is to keep accepting "slave" silently but not to document it
 * (nor "preferSlave").</p>
 *
 * <p>As of Jul 2018, the use of the word "master" is silently deprecated as well.</p>

+ * + * @param targetServerType the value of {@code targetServerType} connection property + * @return HostRequirement + */ + + public static HostRequirement getTargetServerType(String targetServerType) { + + String allowSlave = targetServerType.replace("lave", "econdary").replace("master", "primary"); + return valueOf(allowSlave); + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java new file mode 100644 index 0000000..d303e8d --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2014, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.hostchooser; + +/** + * Known state of a server. + */ +public enum HostStatus { + ConnectFail, + ConnectOK, + Primary, + Secondary +} diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java new file mode 100644 index 0000000..953417f --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2014, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.hostchooser; + +import static java.util.Collections.shuffle; + +import org.postgresql.PGProperty; +import org.postgresql.util.HostSpec; +import org.postgresql.util.PSQLException; + +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Properties; + +/** + * HostChooser that keeps track of known host statuses. 
+ */ +class MultiHostChooser implements HostChooser { + private final HostSpec[] hostSpecs; + private final HostRequirement targetServerType; + private int hostRecheckTime; + private boolean loadBalance; + + MultiHostChooser(HostSpec[] hostSpecs, HostRequirement targetServerType, + Properties info) { + this.hostSpecs = hostSpecs; + this.targetServerType = targetServerType; + try { + hostRecheckTime = PGProperty.HOST_RECHECK_SECONDS.getInt(info) * 1000; + loadBalance = PGProperty.LOAD_BALANCE_HOSTS.getBoolean(info); + } catch (PSQLException e) { + throw new RuntimeException(e); + } + } + + @Override + public Iterator iterator() { + Iterator res = candidateIterator(); + if (!res.hasNext()) { + // In case all the candidate hosts are unavailable or do not match, try all the hosts just in case + List allHosts = Arrays.asList(hostSpecs); + if (loadBalance) { + allHosts = new ArrayList<>(allHosts); + shuffle(allHosts); + } + res = withReqStatus(targetServerType, allHosts).iterator(); + } + return res; + } + + private Iterator candidateIterator() { + if ( targetServerType != HostRequirement.preferSecondary + && targetServerType != HostRequirement.preferPrimary ) { + return getCandidateHosts(targetServerType).iterator(); + } + + HostRequirement preferredServerType = + targetServerType == HostRequirement.preferSecondary + ? 
HostRequirement.secondary + : HostRequirement.primary; + + // preferSecondary tries to find secondary hosts first + // Note: sort does not work here since there are "unknown" hosts, + // and that "unknown" might turn out to be master, so we should discard that + // if other secondaries exist + // Same logic as the above works for preferPrimary if we replace "secondary" + // with "primary" and vice versa + List preferred = getCandidateHosts(preferredServerType); + List any = getCandidateHosts(HostRequirement.any); + + if ( !preferred.isEmpty() && !any.isEmpty() + && preferred.get(preferred.size() - 1).hostSpec.equals(any.get(0).hostSpec)) { + // When the last preferred host's hostspec is the same as the first in "any" list, there's no need + // to attempt to connect it as "preferred" + // Note: this is only an optimization + preferred = rtrim(1, preferred); + } + return append(preferred, any).iterator(); + } + + private List getCandidateHosts(HostRequirement hostRequirement) { + List candidates = + GlobalHostStatusTracker.getCandidateHosts(hostSpecs, hostRequirement, hostRecheckTime); + if (loadBalance) { + shuffle(candidates); + } + return withReqStatus(hostRequirement, candidates); + } + + private List withReqStatus(final HostRequirement requirement, final List hosts) { + return new AbstractList() { + @Override + public CandidateHost get(int index) { + return new CandidateHost(hosts.get(index), requirement); + } + + @Override + public int size() { + return hosts.size(); + } + }; + } + + private List append(final List a, final List b) { + return new AbstractList() { + @Override + public T get(int index) { + return index < a.size() ? 
a.get(index) : b.get(index - a.size()); + } + + @Override + public int size() { + return a.size() + b.size(); + } + }; + } + + private List rtrim(final int size, final List a) { + return new AbstractList() { + @Override + public T get(int index) { + return a.get(index); + } + + @Override + public int size() { + return Math.max(0, a.size() - size); + } + }; + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java new file mode 100644 index 0000000..e79e834 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2014, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.hostchooser; + +import org.postgresql.util.HostSpec; + +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; + +/** + * Host chooser that returns the single host. + */ +class SingleHostChooser implements HostChooser { + private final Collection candidateHost; + + SingleHostChooser(HostSpec hostSpec, HostRequirement targetServerType) { + this.candidateHost = Collections.singletonList(new CandidateHost(hostSpec, targetServerType)); + } + + @Override + public Iterator iterator() { + return candidateHost.iterator(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java b/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java new file mode 100644 index 0000000..d50ebf4 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2005, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.largeobject.LargeObject; +import org.postgresql.largeobject.LargeObjectManager; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.InputStream; +import java.io.OutputStream; +import java.sql.Blob; +import java.sql.SQLException; +import java.util.ArrayList; + +/** + * This class holds all of the methods common to both Blobs and Clobs. + * + * @author Michael Barker + */ +@SuppressWarnings("try") +public abstract class AbstractBlobClob { + protected BaseConnection conn; + + private LargeObject currentLo; + private boolean currentLoIsWriteable; + private final boolean support64bit; + + /** + * We create separate LargeObjects for methods that use streams so they won't interfere with each + * other. + */ + private ArrayList subLOs = new ArrayList(); + + protected final ResourceLock lock = new ResourceLock(); + private final long oid; + + public AbstractBlobClob(BaseConnection conn, long oid) throws SQLException { + this.conn = conn; + this.oid = oid; + this.currentLoIsWriteable = false; + + support64bit = conn.haveMinimumServerVersion(90300); + } + + public void free() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (currentLo != null) { + currentLo.close(); + currentLo = null; + currentLoIsWriteable = false; + } + if (subLOs != null) { + for (LargeObject subLO : subLOs) { + subLO.close(); + } + } + subLOs = null; + } + } + + /** + * For Blobs this should be in bytes while for Clobs it should be in characters. Since we really + * haven't figured out how to handle character sets for Clobs the current implementation uses + * bytes for both Blobs and Clobs. 
+ * + * @param len maximum length + * @throws SQLException if operation fails + */ + public void truncate(long len) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + if (!conn.haveMinimumServerVersion(ServerVersion.v8_3)) { + throw new PSQLException( + GT.tr("Truncation of large objects is only implemented in 8.3 and later servers."), + PSQLState.NOT_IMPLEMENTED); + } + + if (len < 0) { + throw new PSQLException(GT.tr("Cannot truncate LOB to a negative length."), + PSQLState.INVALID_PARAMETER_VALUE); + } + if (len > Integer.MAX_VALUE) { + if (support64bit) { + getLo(true).truncate64(len); + } else { + throw new PSQLException(GT.tr("PostgreSQL LOBs can only index to: {0}", Integer.MAX_VALUE), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else { + getLo(true).truncate((int) len); + } + } + } + + public long length() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + if (support64bit) { + return getLo(false).size64(); + } else { + return getLo(false).size(); + } + } + } + + public byte[] getBytes(long pos, int length) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + assertPosition(pos); + getLo(false).seek((int) (pos - 1), LargeObject.SEEK_SET); + return getLo(false).read(length); + } + } + + public InputStream getBinaryStream() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + LargeObject subLO = getLo(false).copy(); + addSubLO(subLO); + subLO.seek(0, LargeObject.SEEK_SET); + return subLO.getInputStream(); + } + } + + public OutputStream setBinaryStream(long pos) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + assertPosition(pos); + LargeObject subLO = getLo(true).copy(); + addSubLO(subLO); + subLO.seek((int) (pos - 1)); + return subLO.getOutputStream(); + } + } + + /** + * Iterate over the buffer looking for the specified pattern. 
+ * + * @param pattern A pattern of bytes to search the blob for + * @param start The position to start reading from + * @return position of the specified pattern + * @throws SQLException if something wrong happens + */ + public long position(byte[] pattern, long start) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + assertPosition(start, pattern.length); + + int position = 1; + int patternIdx = 0; + long result = -1; + int tmpPosition = 1; + + for (LOIterator i = new LOIterator(start - 1); i.hasNext(); position++) { + byte b = i.next(); + if (b == pattern[patternIdx]) { + if (patternIdx == 0) { + tmpPosition = position; + } + patternIdx++; + if (patternIdx == pattern.length) { + result = tmpPosition; + break; + } + } else { + patternIdx = 0; + } + } + + return result; + } + } + + /** + * Iterates over a large object returning byte values. Will buffer the data from the large object. + */ + private class LOIterator { + private static final int BUFFER_SIZE = 8096; + private final byte[] buffer = new byte[BUFFER_SIZE]; + private int idx = BUFFER_SIZE; + private int numBytes = BUFFER_SIZE; + + LOIterator(long start) throws SQLException { + getLo(false).seek((int) start); + } + + public boolean hasNext() throws SQLException { + boolean result; + if (idx < numBytes) { + result = true; + } else { + numBytes = getLo(false).read(buffer, 0, BUFFER_SIZE); + idx = 0; + result = numBytes > 0; + } + return result; + } + + private byte next() { + return buffer[idx++]; + } + } + + /** + * This is simply passing the byte value of the pattern Blob. 
+ * + * @param pattern search pattern + * @param start start position + * @return position of given pattern + * @throws SQLException if something goes wrong + */ + public long position(Blob pattern, long start) throws SQLException { + return position(pattern.getBytes(1, (int) pattern.length()), start); + } + + /** + * Throws an exception if the pos value exceeds the max value by which the large object API can + * index. + * + * @param pos Position to write at. + * @throws SQLException if something goes wrong + */ + protected void assertPosition(long pos) throws SQLException { + assertPosition(pos, 0); + } + + /** + * Throws an exception if the pos value exceeds the max value by which the large object API can + * index. + * + * @param pos Position to write at. + * @param len number of bytes to write. + * @throws SQLException if something goes wrong + */ + protected void assertPosition(long pos, long len) throws SQLException { + checkFreed(); + if (pos < 1) { + throw new PSQLException(GT.tr("LOB positioning offsets start at 1."), + PSQLState.INVALID_PARAMETER_VALUE); + } + if (pos + len - 1 > Integer.MAX_VALUE) { + throw new PSQLException(GT.tr("PostgreSQL LOBs can only index to: {0}", Integer.MAX_VALUE), + PSQLState.INVALID_PARAMETER_VALUE); + } + } + + /** + * Checks that this LOB hasn't been free()d already. + * + * @throws SQLException if LOB has been freed. + */ + protected void checkFreed() throws SQLException { + if (subLOs == null) { + throw new PSQLException(GT.tr("free() was called on this LOB previously"), + PSQLState.OBJECT_NOT_IN_STATE); + } + } + + protected LargeObject getLo(boolean forWrite) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + LargeObject currentLo = this.currentLo; + if (currentLo != null) { + if (forWrite && !currentLoIsWriteable) { + // Reopen the stream in read-write, at the same pos. 
+ int currentPos = currentLo.tell(); + + LargeObjectManager lom = conn.getLargeObjectAPI(); + LargeObject newLo = lom.open(oid, LargeObjectManager.READWRITE); + subLOs.add(currentLo); + this.currentLo = currentLo = newLo; + + if (currentPos != 0) { + currentLo.seek(currentPos); + } + } + + return currentLo; + } + LargeObjectManager lom = conn.getLargeObjectAPI(); + this.currentLo = currentLo = + lom.open(oid, forWrite ? LargeObjectManager.READWRITE : LargeObjectManager.READ); + currentLoIsWriteable = forWrite; + return currentLo; + } + } + + protected void addSubLO(LargeObject subLO) { + subLOs.add(subLO); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java new file mode 100644 index 0000000..9bee44d --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java @@ -0,0 +1,804 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.Driver; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.Oid; +import org.postgresql.core.Parser; +import org.postgresql.jdbc2.ArrayAssistant; +import org.postgresql.jdbc2.ArrayAssistantRegistry; +import org.postgresql.util.GT; +import org.postgresql.util.PGbytea; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.IOException; +import java.lang.reflect.Array; +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.charset.StandardCharsets; +import java.sql.Date; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Utility for decoding arrays. + * + *

+ * See {@code ArrayEncoding} for description of the binary format of arrays. + *

+ * + * @author Brett Okken + */ +public final class ArrayDecoding { + + public ArrayDecoding() { + } + + /** + * Array list implementation specific for storing PG array elements. If + * {@link PgArrayList#dimensionsCount} is {@code 1}, the contents will be + * {@link String}. For all larger dimensionsCount, the values will be + * {@link PgArrayList} instances. + */ + @SuppressWarnings("serial") + public static final class PgArrayList extends ArrayList { + + /** + * How many dimensions. + */ + int dimensionsCount = 1; + + public PgArrayList() { + } + + } + + private interface ArrayDecoder { + + A createArray(int size); + + Object[] createMultiDimensionalArray(int[] sizes); + + boolean supportBinary(); + + void populateFromBinary(A array, int index, int count, ByteBuffer bytes, BaseConnection connection) + throws SQLException; + + void populateFromString(A array, List strings, BaseConnection connection) throws SQLException; + } + + private abstract static class AbstractObjectStringArrayDecoder implements ArrayDecoder { + final Class baseClazz; + + AbstractObjectStringArrayDecoder(Class baseClazz) { + this.baseClazz = baseClazz; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean supportBinary() { + return false; + } + + @SuppressWarnings("unchecked") + @Override + public A createArray(int size) { + return (A) Array.newInstance(baseClazz, size); + } + + /** + * {@inheritDoc} + */ + @Override + public Object[] createMultiDimensionalArray(int[] sizes) { + return (Object[]) Array.newInstance(baseClazz, sizes); + } + + @Override + public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void populateFromString(A arr, List strings, BaseConnection connection) throws SQLException { + final Object[] array = (Object[]) arr; + + for (int i = 0, j = strings.size(); i < j; i++) { + final String 
stringVal = strings.get(i); + array[i] = stringVal != null ? parseValue(stringVal, connection) : null; + } + } + + abstract Object parseValue(String stringVal, BaseConnection connection) throws SQLException; + } + + private abstract static class AbstractObjectArrayDecoder extends AbstractObjectStringArrayDecoder { + + AbstractObjectArrayDecoder(Class baseClazz) { + super(baseClazz); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean supportBinary() { + return true; + } + + @Override + public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection) + throws SQLException { + final Object[] array = (Object[]) arr; + + // skip through to the requested index + for (int i = 0; i < index; i++) { + final int length = bytes.getInt(); + if (length > 0) { + bytes.position(bytes.position() + length); + } + } + + for (int i = 0; i < count; i++) { + final int length = bytes.getInt(); + if (length != -1) { + array[i] = parseValue(length, bytes, connection); + } else { + // explicitly set to null for reader's clarity + array[i] = null; + } + } + } + + abstract Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException; + } + + private static final ArrayDecoder LONG_OBJ_ARRAY = new AbstractObjectArrayDecoder(Long.class) { + + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) { + return bytes.getLong(); + } + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return PgResultSet.toLong(stringVal); + } + }; + + private static final ArrayDecoder INT4_UNSIGNED_OBJ_ARRAY = new AbstractObjectArrayDecoder( + Long.class) { + + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) { + return bytes.getInt() & 0xFFFFFFFFL; + } + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return PgResultSet.toLong(stringVal); + } + }; + + private 
static final ArrayDecoder INTEGER_OBJ_ARRAY = new AbstractObjectArrayDecoder( + Integer.class) { + + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) { + return bytes.getInt(); + } + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return PgResultSet.toInt(stringVal); + } + }; + + private static final ArrayDecoder SHORT_OBJ_ARRAY = new AbstractObjectArrayDecoder(Short.class) { + + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) { + return bytes.getShort(); + } + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return PgResultSet.toShort(stringVal); + } + }; + + private static final ArrayDecoder DOUBLE_OBJ_ARRAY = new AbstractObjectArrayDecoder( + Double.class) { + + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) { + return bytes.getDouble(); + } + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return PgResultSet.toDouble(stringVal); + } + }; + + private static final ArrayDecoder FLOAT_OBJ_ARRAY = new AbstractObjectArrayDecoder(Float.class) { + + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) { + return bytes.getFloat(); + } + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return PgResultSet.toFloat(stringVal); + } + }; + + private static final ArrayDecoder BOOLEAN_OBJ_ARRAY = new AbstractObjectArrayDecoder( + Boolean.class) { + + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) { + return bytes.get() == 1; + } + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return BooleanTypeUtil.fromString(stringVal); + } + }; + + private static final ArrayDecoder STRING_ARRAY = new AbstractObjectArrayDecoder<>(String.class) { 
+ + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException { + assert bytes.hasArray(); + final byte[] byteArray = bytes.array(); + final int offset = bytes.arrayOffset() + bytes.position(); + + String val; + try { + val = connection.getEncoding().decode(byteArray, offset, length); + } catch (IOException e) { + throw new PSQLException(GT.tr( + "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."), + PSQLState.DATA_ERROR, e); + } + bytes.position(bytes.position() + length); + return val; + } + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return stringVal; + } + }; + + private static final ArrayDecoder BYTE_ARRAY_ARRAY = new AbstractObjectArrayDecoder( + byte[].class) { + + /** + * {@inheritDoc} + */ + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException { + final byte[] array = new byte[length]; + bytes.get(array); + return array; + } + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return PGbytea.toBytes(stringVal.getBytes(StandardCharsets.US_ASCII)); + } + }; + + private static final ArrayDecoder BIG_DECIMAL_STRING_DECODER = new AbstractObjectStringArrayDecoder( + BigDecimal.class) { + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return PgResultSet.toBigDecimal(stringVal); + } + }; + + private static final ArrayDecoder STRING_ONLY_DECODER = new AbstractObjectStringArrayDecoder( + String.class) { + + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return stringVal; + } + }; + + private static final ArrayDecoder DATE_DECODER = new 
AbstractObjectStringArrayDecoder( + Date.class) { + + @SuppressWarnings("deprecation") + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return connection.getTimestampUtils().toDate(null, stringVal); + } + }; + + private static final ArrayDecoder TIME_DECODER = new AbstractObjectStringArrayDecoder( + Time.class) { + + @SuppressWarnings("deprecation") + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return connection.getTimestampUtils().toTime(null, stringVal); + } + }; + + private static final ArrayDecoder TIMESTAMP_DECODER = new AbstractObjectStringArrayDecoder( + Timestamp.class) { + + @SuppressWarnings("deprecation") + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return connection.getTimestampUtils().toTimestamp(null, stringVal); + } + }; + + /** + * Maps from base type oid to {@link ArrayDecoder} capable of processing + * entries. 
+ */ + @SuppressWarnings("rawtypes") + private static final Map OID_TO_DECODER = new HashMap<>( + (int) (21 / .75) + 1); + + static { + OID_TO_DECODER.put(Oid.OID, INT4_UNSIGNED_OBJ_ARRAY); + OID_TO_DECODER.put(Oid.INT8, LONG_OBJ_ARRAY); + OID_TO_DECODER.put(Oid.INT4, INTEGER_OBJ_ARRAY); + OID_TO_DECODER.put(Oid.INT2, SHORT_OBJ_ARRAY); + OID_TO_DECODER.put(Oid.MONEY, DOUBLE_OBJ_ARRAY); + OID_TO_DECODER.put(Oid.FLOAT8, DOUBLE_OBJ_ARRAY); + OID_TO_DECODER.put(Oid.FLOAT4, FLOAT_OBJ_ARRAY); + OID_TO_DECODER.put(Oid.TEXT, STRING_ARRAY); + OID_TO_DECODER.put(Oid.VARCHAR, STRING_ARRAY); + // 42.2.x decodes jsonb array as String rather than PGobject + OID_TO_DECODER.put(Oid.JSONB, STRING_ONLY_DECODER); + OID_TO_DECODER.put(Oid.BIT, BOOLEAN_OBJ_ARRAY); + OID_TO_DECODER.put(Oid.BOOL, BOOLEAN_OBJ_ARRAY); + OID_TO_DECODER.put(Oid.BYTEA, BYTE_ARRAY_ARRAY); + OID_TO_DECODER.put(Oid.NUMERIC, BIG_DECIMAL_STRING_DECODER); + OID_TO_DECODER.put(Oid.BPCHAR, STRING_ONLY_DECODER); + OID_TO_DECODER.put(Oid.CHAR, STRING_ONLY_DECODER); + OID_TO_DECODER.put(Oid.JSON, STRING_ONLY_DECODER); + OID_TO_DECODER.put(Oid.DATE, DATE_DECODER); + OID_TO_DECODER.put(Oid.TIME, TIME_DECODER); + OID_TO_DECODER.put(Oid.TIMETZ, TIME_DECODER); + OID_TO_DECODER.put(Oid.TIMESTAMP, TIMESTAMP_DECODER); + OID_TO_DECODER.put(Oid.TIMESTAMPTZ, TIMESTAMP_DECODER); + } + + @SuppressWarnings("rawtypes") + private static final class ArrayAssistantObjectArrayDecoder extends AbstractObjectArrayDecoder { + private final ArrayAssistant arrayAssistant; + + @SuppressWarnings("unchecked") + ArrayAssistantObjectArrayDecoder(ArrayAssistant arrayAssistant) { + super(arrayAssistant.baseType()); + this.arrayAssistant = arrayAssistant; + } + + /** + * {@inheritDoc} + */ + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException { + + assert bytes.hasArray(); + final byte[] byteArray = bytes.array(); + final int offset = bytes.arrayOffset() + bytes.position(); + + final Object val = 
arrayAssistant.buildElement(byteArray, offset, length); + + bytes.position(bytes.position() + length); + return val; + } + + /** + * {@inheritDoc} + */ + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return arrayAssistant.buildElement(stringVal); + } + } + + private static final class MappedTypeObjectArrayDecoder extends AbstractObjectArrayDecoder { + + private final String typeName; + + MappedTypeObjectArrayDecoder(String baseTypeName) { + super(Object.class); + this.typeName = baseTypeName; + } + + /** + * {@inheritDoc} + */ + @Override + Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException { + final byte[] copy = new byte[length]; + bytes.get(copy); + return connection.getObject(typeName, null, copy); + } + + /** + * {@inheritDoc} + */ + @Override + Object parseValue(String stringVal, BaseConnection connection) throws SQLException { + return connection.getObject(typeName, stringVal, null); + } + } + + @SuppressWarnings("unchecked") + private static ArrayDecoder getDecoder(int oid, BaseConnection connection) throws SQLException { + final Integer key = oid; + @SuppressWarnings("rawtypes") + final ArrayDecoder decoder = OID_TO_DECODER.get(key); + if (decoder != null) { + return decoder; + } + + final ArrayAssistant assistant = ArrayAssistantRegistry.getAssistant(oid); + + if (assistant != null) { + return new ArrayAssistantObjectArrayDecoder(assistant); + } + + final String typeName = connection.getTypeInfo().getPGType(oid); + if (typeName == null) { + throw Driver.notImplemented(PgArray.class, "readArray(data,oid)"); + } + + // 42.2.x should return enums as strings + int type = connection.getTypeInfo().getSQLType(typeName); + if (type == Types.CHAR || type == Types.VARCHAR) { + return (ArrayDecoder) STRING_ONLY_DECODER; + } + return (ArrayDecoder) new MappedTypeObjectArrayDecoder(typeName); + } + + /** + * Reads binary representation of array into object model. 
+ * + * @param index + * 1 based index of where to start on outermost array. + * @param count + * The number of items to return from outermost array (beginning at + * index). + * @param bytes + * The binary representation of the array. + * @param connection + * The connection the bytes were retrieved from. + * @return The parsed array. + * @throws SQLException + * For failures encountered during parsing. + */ + @SuppressWarnings("unchecked") + public static Object readBinaryArray(int index, int count, byte[] bytes, BaseConnection connection) + throws SQLException { + final ByteBuffer buffer = ByteBuffer.wrap(bytes); + buffer.order(ByteOrder.BIG_ENDIAN); + final int dimensions = buffer.getInt(); + final boolean hasNulls = buffer.getInt() != 0; + final int elementOid = buffer.getInt(); + + @SuppressWarnings("rawtypes") + final ArrayDecoder decoder = getDecoder(elementOid, connection); + + if (!decoder.supportBinary()) { + throw Driver.notImplemented(PgArray.class, "readBinaryArray(data,oid)"); + } + + if (dimensions == 0) { + return decoder.createArray(0); + } + + final int adjustedSkipIndex = index > 0 ? 
index - 1 : 0; + + // optimize for single dimension array + if (dimensions == 1) { + int length = buffer.getInt(); + buffer.position(buffer.position() + 4); + if (count > 0) { + length = Math.min(length, count); + } + final Object array = decoder.createArray(length); + decoder.populateFromBinary(array, adjustedSkipIndex, length, buffer, connection); + return array; + } + + final int[] dimensionLengths = new int[dimensions]; + for (int i = 0; i < dimensions; i++) { + dimensionLengths[i] = buffer.getInt(); + buffer.position(buffer.position() + 4); + } + + if (count > 0) { + dimensionLengths[0] = Math.min(count, dimensionLengths[0]); + } + + final Object[] array = decoder.createMultiDimensionalArray(dimensionLengths); + + // TODO: in certain circumstances (no nulls, fixed size data types) + // if adjustedSkipIndex is > 0, we could advance through the buffer rather than + // parse our way through throwing away the results + + storeValues(array, decoder, buffer, adjustedSkipIndex, dimensionLengths, 0, connection); + + return array; + } + + @SuppressWarnings("unchecked") + private static void storeValues(A[] array, ArrayDecoder decoder, ByteBuffer bytes, + int skip, int[] dimensionLengths, int dim, BaseConnection connection) throws SQLException { + assert dim <= dimensionLengths.length - 2; + + for (int i = 0; i < skip; i++) { + if (dim == dimensionLengths.length - 2) { + decoder.populateFromBinary(array[0], 0, dimensionLengths[dim + 1], bytes, connection); + } else { + storeValues((A[]) array[0], decoder, bytes, 0, dimensionLengths, dim + 1, connection); + } + } + + for (int i = 0; i < dimensionLengths[dim]; i++) { + if (dim == dimensionLengths.length - 2) { + decoder.populateFromBinary(array[i], 0, dimensionLengths[dim + 1], bytes, connection); + } else { + storeValues((A[]) array[i], decoder, bytes, 0, dimensionLengths, dim + 1, connection); + } + } + } + + /** + * Parses the string representation of an array into a {@link PgArrayList}. 
   *
   * @param fieldString
   *          The array value to parse.
   * @param delim
   *          The delimiter character appropriate for the data type.
   * @return A {@link PgArrayList} representing the parsed fieldString.
   */
  static PgArrayList buildArrayList(String fieldString, char delim) {

    final PgArrayList arrayList = new PgArrayList();

    if (fieldString == null) {
      return arrayList;
    }

    // Single pass character state machine over the server's text representation.
    // State: insideString (between double quotes), buffer (current element text),
    // dims (stack of open arrays, innermost last).
    final char[] chars = fieldString.toCharArray();
    StringBuilder buffer = null;
    boolean insideString = false;

    // needed for checking if NULL value occurred
    boolean wasInsideString = false;

    // array dimension arrays (acts as a stack of currently-open subarrays)
    final List<PgArrayList> dims = new ArrayList<>();

    // currently processed array
    PgArrayList curArray = arrayList;

    // Starting with 8.0 non-standard (beginning index
    // isn't 1) bounds the dimensions are returned in the
    // data formatted like so "[0:3]={0,1,2,3,4}".
    // Older versions simply do not return the bounds.
    //
    // Right now we ignore these bounds, but we could
    // consider allowing these index values to be used
    // even though the JDBC spec says 1 is the first
    // index. I'm not sure what a client would like
    // to see, so we just retain the old behavior.
    int startOffset = 0;
    {
      if (chars[0] == '[') {
        // skip the "[lo:hi]=" bounds prefix entirely
        while (chars[startOffset] != '=') {
          startOffset++;
        }
        startOffset++; // skip =
      }
    }

    for (int i = startOffset; i < chars.length; i++) {

      // escape character that we need to skip (the escaped char is consumed as data)
      if (chars[i] == '\\') {
        i++;
      } else if (!insideString && chars[i] == '{') {
        // subarray start
        if (dims.isEmpty()) {
          dims.add(arrayList);
        } else {
          PgArrayList a = new PgArrayList();
          PgArrayList p = dims.get(dims.size() - 1);
          p.add(a);
          dims.add(a);
        }
        curArray = dims.get(dims.size() - 1);

        // number of dimensions: count immediately-following '{' (ignoring whitespace)
        {
          for (int t = i + 1; t < chars.length; t++) {
            if (Character.isWhitespace(chars[t])) {
              continue;
            } else if (chars[t] == '{') {
              curArray.dimensionsCount++;
            } else {
              break;
            }
          }
        }

        buffer = new StringBuilder();
        continue;
      } else if (chars[i] == '"') {
        // quoted element
        insideString = !insideString;
        wasInsideString = true;
        continue;
      } else if (!insideString && Parser.isArrayWhiteSpace(chars[i])) {
        // white space
        continue;
      } else if ((!insideString && (chars[i] == delim || chars[i] == '}')) || i == chars.length - 1) {
        // array end or element end
        // when character that is a part of array element
        if (chars[i] != '"' && chars[i] != '}' && chars[i] != delim && buffer != null) {
          buffer.append(chars[i]);
        }

        String b = buffer == null ? null : buffer.toString();

        // add element to current array; an unquoted bare NULL token means SQL NULL,
        // while a quoted "NULL" is the literal string
        if (b != null && (!b.isEmpty() || wasInsideString)) {
          curArray.add(!wasInsideString && "NULL".equals(b) ? null : b);
        }

        wasInsideString = false;
        buffer = new StringBuilder();

        // when end of an array
        if (chars[i] == '}') {
          dims.remove(dims.size() - 1);

          // when multi-dimension
          if (!dims.isEmpty()) {
            curArray = dims.get(dims.size() - 1);
          }

          buffer = null;
        }

        continue;
      }

      if (buffer != null) {
        buffer.append(chars[i]);
      }
    }

    return arrayList;
  }

  /**
   * Reads {@code String} representation of array into object model.
   *
   * @param index
   *          1 based index of where to start on outermost array.
   * @param count
   *          The number of items to return from outermost array (beginning at
   *          index).
   * @param oid
   *          The oid of the base type of the array.
   * @param list
   *          The {@code #buildArrayList(String, char) processed} string
   *          representation of an array.
   * @param connection
   *          The connection the bytes were retrieved from.
   * @return The parsed array.
   * @throws SQLException
   *           For failures encountered during parsing.
   */
  @SuppressWarnings({"unchecked", "rawtypes"})
  public static Object readStringArray(int index, int count, int oid, PgArrayList list, BaseConnection connection)
      throws SQLException {

    final ArrayDecoder decoder = getDecoder(oid, connection);

    final int dims = list.dimensionsCount;

    if (dims == 0) {
      return decoder.createArray(0);
    }

    // determine whether only a sub-range of the outermost dimension was requested
    boolean sublist = false;

    int adjustedSkipIndex = 0;
    if (index > 1) {
      sublist = true;
      adjustedSkipIndex = index - 1;
    }

    int adjustedCount = list.size();
    if (count > 0 && count != adjustedCount) {
      sublist = true;
      adjustedCount = Math.min(adjustedCount, count);
    }

    final List adjustedList = sublist ? list.subList(adjustedSkipIndex, adjustedSkipIndex + adjustedCount) : list;

    if (dims == 1) {
      int length = adjustedList.size();
      if (count > 0) {
        length = Math.min(length, count);
      }
      final Object array = decoder.createArray(length);
      decoder.populateFromString(array, adjustedList, connection);
      return array;
    }

    // dimensions length array (to be used with
    // java.lang.reflect.Array.newInstance(Class, int[]))
    final int[] dimensionLengths = new int[dims];
    dimensionLengths[0] = adjustedCount;
    {
      // inner dimension lengths are taken from the first element at each depth
      List tmpList = (List) adjustedList.get(0);
      for (int i = 1; i < dims; i++) {
        // TODO: tmpList always non-null?
        dimensionLengths[i] = tmpList.size();
        if (i != dims - 1) {
          tmpList = (List) tmpList.get(0);
        }
      }
    }

    final Object[] array = decoder.createMultiDimensionalArray(dimensionLengths);

    storeStringValues(array, decoder, adjustedList, dimensionLengths, 0, connection);

    return array;
  }

  /**
   * Recursively populates a multi-dimensional array from the nested
   * {@code List} model produced by {@code buildArrayList}. The second-to-last
   * dimension delegates to the decoder, which fills the final dimension.
   */
  @SuppressWarnings({"unchecked", "rawtypes"})
  private static <A> void storeStringValues(A[] array, ArrayDecoder<A> decoder, List list, int[] dimensionLengths,
      int dim, BaseConnection connection) throws SQLException {
    assert dim <= dimensionLengths.length - 2;

    for (int i = 0; i < dimensionLengths[dim]; i++) {
      Object element = list.get(i);
      if (dim == dimensionLengths.length - 2) {
        decoder.populateFromString(array[i], (List) element, connection);
      } else {
        storeStringValues((A[]) array[i], decoder, (List) element, dimensionLengths, dim + 1, connection);
      }
    }
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayEncoding.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayEncoding.java
new file mode 100644
index 0000000..1647c9a
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayEncoding.java
@@ -0,0 +1,1437 @@
+/*
+ * Copyright (c) 2020, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.BaseConnection; +import org.postgresql.core.Encoding; +import org.postgresql.core.Oid; +import org.postgresql.util.ByteConverter; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.lang.reflect.Array; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.util.HashMap; +import java.util.Map; + +/** + * Utility for using arrays in requests. + * + *

+ * Binary format: + *

    + *
+ * <ul>
+ *   <li>4 bytes with number of dimensions</li>
+ *   <li>4 bytes, boolean indicating nulls present or not</li>
+ *   <li>4 bytes type oid</li>
+ *   <li>8 bytes describing the length of each dimension (repeated for each dimension)
+ *     <ul>
+ *       <li>4 bytes for length</li>
+ *       <li>4 bytes for lower bound on length to check for overflow (it appears this value can always be 0)</li>
+ *     </ul>
+ *   </li>
+ *   <li>data in depth first element order corresponding number and length of dimensions
+ *     <ul>
+ *       <li>4 bytes describing length of element, {@code 0xFFFFFFFF} ({@code -1}) means {@code null}</li>
+ *       <li>binary representation of element (iff not {@code null}).</li>
+ *     </ul>
+ *   </li>
+ * </ul>

+ * + * @author Brett Okken + */ +public final class ArrayEncoding { + + public ArrayEncoding() { + } + + public interface ArrayEncoder
{ + + /** + * The default array type oid supported by this instance. + * + * @return The default array type oid supported by this instance. + */ + int getDefaultArrayTypeOid(); + + /** + * Creates {@code String} representation of the array. + * + * @param delim + * The character to use to delimit between elements. + * @param array + * The array to represent as a {@code String}. + * @return {@code String} representation of the array. + */ + String toArrayString(char delim, A array); + + /** + * Indicates if an array can be encoded in binary form to array oid. + * + * @param oid + * The array oid to see check for binary support. + * @return Indication of whether + * {@link #toBinaryRepresentation(BaseConnection, Object, int)} is + * supported for oid. + */ + boolean supportBinaryRepresentation(int oid); + + /** + * Creates binary representation of the array. + * + * @param connection + * The connection the binary representation will be used on. Attributes + * from the connection might impact how values are translated to + * binary. + * @param array + * The array to binary encode. Must not be {@code null}, but may + * contain {@code null} elements. + * @param oid + * The array type oid to use. Calls to + * {@link #supportBinaryRepresentation(int)} must have returned + * {@code true}. + * @return The binary representation of array. + * @throws SQLFeatureNotSupportedException + * If {@link #supportBinaryRepresentation(int)} is false for + * oid. + */ + byte[] toBinaryRepresentation(BaseConnection connection, A array, int oid) + throws SQLException, SQLFeatureNotSupportedException; + + /** + * Append {@code String} representation of array to sb. + * + * @param sb + * The {@link StringBuilder} to append to. + * @param delim + * The delimiter between elements. + * @param array + * The array to represent. Will not be {@code null}, but may contain + * {@code null} elements. 
+ */ + void appendArray(StringBuilder sb, char delim, A array); + } + + /** + * Base class to implement {@link ArrayEncoding.ArrayEncoder} and provide + * multi-dimensional support. + * + * @param + * Base array type supported. + */ + private abstract static class AbstractArrayEncoder + implements ArrayEncoder { + + private final int oid; + + final int arrayOid; + + /** + * + * @param oid + * The default/primary base oid type. + * @param arrayOid + * The default/primary array oid type. + */ + AbstractArrayEncoder(int oid, int arrayOid) { + this.oid = oid; + this.arrayOid = arrayOid; + } + + /** + * + * @param arrayOid + * The array oid to get base oid type for. + * @return The base oid type for the given array oid type given to + * {@link #toBinaryRepresentation(BaseConnection, Object, int)}. + */ + int getTypeOID(@SuppressWarnings("unused") int arrayOid) { + return oid; + } + + /** + * By default returns the arrayOid this instance was instantiated with. + */ + @Override + public int getDefaultArrayTypeOid() { + return arrayOid; + } + + /** + * Counts the number of {@code null} elements in array. + * + * @param array + * The array to count {@code null} elements in. + * @return The number of {@code null} elements in array. + */ + int countNulls(A array) { + int nulls = 0; + final int arrayLength = Array.getLength(array); + for (int i = 0; i < arrayLength; i++) { + if (Array.get(array, i) == null) { + ++nulls; + } + } + return nulls; + } + + /** + * Creates {@code byte[]} of just the raw data (no metadata). + * + * @param connection + * The connection the binary representation will be used on. + * @param array + * The array to create binary representation of. Will not be + * {@code null}, but may contain {@code null} elements. + * @return {@code byte[]} of just the raw data (no metadata). + * @throws SQLFeatureNotSupportedException + * If {@link #supportBinaryRepresentation(int)} is false for + * oid. 
+ */ + abstract byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, A array) + throws SQLException, SQLFeatureNotSupportedException; + + /** + * {@inheritDoc} + */ + @Override + public String toArrayString(char delim, A array) { + final StringBuilder sb = new StringBuilder(1024); + appendArray(sb, delim, array); + return sb.toString(); + } + + /** + * By default returns {@code true} if oid matches the arrayOid + * this instance was instantiated with. + */ + @Override + public boolean supportBinaryRepresentation(int oid) { + return oid == arrayOid; + } + } + + /** + * Base class to provide support for {@code Number} based arrays. + * + * @param + * The base type of array. + */ + private abstract static class NumberArrayEncoder extends AbstractArrayEncoder { + + private final int fieldSize; + + /** + * + * @param fieldSize + * The fixed size to represent each value in binary. + * @param oid + * The base type oid. + * @param arrayOid + * The array type oid. + */ + NumberArrayEncoder(int fieldSize, int oid, int arrayOid) { + super(oid, arrayOid); + this.fieldSize = fieldSize; + } + + /** + * {@inheritDoc} + */ + @Override + final int countNulls(N[] array) { + int count = 0; + for (int i = 0; i < array.length; i++) { + if (array[i] == null) { + ++count; + } + } + return count; + } + + /** + * {@inheritDoc} + */ + @Override + public final byte[] toBinaryRepresentation(BaseConnection connection, N[] array, int oid) + throws SQLException, SQLFeatureNotSupportedException { + assert oid == this.arrayOid; + + final int nullCount = countNulls(array); + + final byte[] bytes = writeBytes(array, nullCount, 20); + + // 1 dimension + ByteConverter.int4(bytes, 0, 1); + // no null + ByteConverter.int4(bytes, 4, nullCount == 0 ? 
0 : 1); + // oid + ByteConverter.int4(bytes, 8, getTypeOID(oid)); + // length + ByteConverter.int4(bytes, 12, array.length); + // postgresql uses 1 base by default + ByteConverter.int4(bytes, 16, 1); + + return bytes; + } + + /** + * {@inheritDoc} + */ + @Override + final byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, N[] array) + throws SQLException, SQLFeatureNotSupportedException { + + final int nullCount = countNulls(array); + + return writeBytes(array, nullCount, 0); + } + + private byte[] writeBytes(final N[] array, final int nullCount, final int offset) { + final int length = offset + (4 * array.length) + (fieldSize * (array.length - nullCount)); + final byte[] bytes = new byte[length]; + + int idx = offset; + for (int i = 0; i < array.length; i++) { + if (array[i] == null) { + ByteConverter.int4(bytes, idx, -1); + idx += 4; + } else { + ByteConverter.int4(bytes, idx, fieldSize); + idx += 4; + write(array[i], bytes, idx); + idx += fieldSize; + } + } + + return bytes; + } + + /** + * Write single value (number) to bytes beginning at + * offset. + * + * @param number + * The value to write to bytes. This will never be {@code null}. + * @param bytes + * The {@code byte[]} to write to. + * @param offset + * The offset into bytes to write the number value. + */ + protected abstract void write(N number, byte[] bytes, int offset); + + /** + * {@inheritDoc} + */ + @Override + public final void appendArray(StringBuilder sb, char delim, N[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i != 0) { + sb.append(delim); + } + if (array[i] == null) { + sb.append('N').append('U').append('L').append('L'); + } else { + sb.append('"'); + sb.append(array[i].toString()); + sb.append('"'); + } + } + sb.append('}'); + } + } + + /** + * Base support for primitive arrays. + * + * @param + * The primitive array to support. 
+ */ + private abstract static class FixedSizePrimitiveArrayEncoder + extends AbstractArrayEncoder { + + private final int fieldSize; + + FixedSizePrimitiveArrayEncoder(int fieldSize, int oid, int arrayOid) { + super(oid, arrayOid); + this.fieldSize = fieldSize; + } + + /** + * {@inheritDoc} + * + *

+ * Always returns {@code 0}. + *

+ */ + @Override + final int countNulls(A array) { + return 0; + } + + /** + * {@inheritDoc} + */ + @Override + public final byte[] toBinaryRepresentation(BaseConnection connection, A array, int oid) + throws SQLException, SQLFeatureNotSupportedException { + assert oid == arrayOid; + + final int arrayLength = Array.getLength(array); + final int length = 20 + ((fieldSize + 4) * arrayLength); + final byte[] bytes = new byte[length]; + + // 1 dimension + ByteConverter.int4(bytes, 0, 1); + // no null + ByteConverter.int4(bytes, 4, 0); + // oid + ByteConverter.int4(bytes, 8, getTypeOID(oid)); + // length + ByteConverter.int4(bytes, 12, arrayLength); + // postgresql uses 1 base by default + ByteConverter.int4(bytes, 16, 1); + + write(array, bytes, 20); + + return bytes; + } + + /** + * {@inheritDoc} + */ + @Override + final byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, A array) + throws SQLException, SQLFeatureNotSupportedException { + final int length = (fieldSize + 4) * Array.getLength(array); + final byte[] bytes = new byte[length]; + + write(array, bytes, 0); + return bytes; + } + + /** + * Write the entire contents of array to bytes starting at + * offset without metadata describing type or length. + * + * @param array + * The array to write. + * @param bytes + * The {@code byte[]} to write to. + * @param offset + * The offset into bytes to start writing. 
+ */ + protected abstract void write(A array, byte[] bytes, int offset); + } + + private static final AbstractArrayEncoder LONG_ARRAY = new FixedSizePrimitiveArrayEncoder(8, Oid.INT8, + Oid.INT8_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + public void appendArray(StringBuilder sb, char delim, long[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i > 0) { + sb.append(delim); + } + sb.append(array[i]); + } + sb.append('}'); + } + + /** + * {@inheritDoc} + */ + @Override + protected void write(long[] array, byte[] bytes, int offset) { + int idx = offset; + for (int i = 0; i < array.length; i++) { + bytes[idx + 3] = 8; + ByteConverter.int8(bytes, idx + 4, array[i]); + idx += 12; + } + } + }; + + private static final AbstractArrayEncoder LONG_OBJ_ARRAY = new NumberArrayEncoder(8, Oid.INT8, + Oid.INT8_ARRAY) { + + @Override + protected void write(Long number, byte[] bytes, int offset) { + ByteConverter.int8(bytes, offset, number.longValue()); + } + }; + + private static final AbstractArrayEncoder INT_ARRAY = new FixedSizePrimitiveArrayEncoder(4, Oid.INT4, + Oid.INT4_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + public void appendArray(StringBuilder sb, char delim, int[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i > 0) { + sb.append(delim); + } + sb.append(array[i]); + } + sb.append('}'); + } + + /** + * {@inheritDoc} + */ + @Override + protected void write(int[] array, byte[] bytes, int offset) { + int idx = offset; + for (int i = 0; i < array.length; i++) { + bytes[idx + 3] = 4; + ByteConverter.int4(bytes, idx + 4, array[i]); + idx += 8; + } + } + }; + + private static final AbstractArrayEncoder INT_OBJ_ARRAY = new NumberArrayEncoder(4, Oid.INT4, + Oid.INT4_ARRAY) { + + @Override + protected void write(Integer number, byte[] bytes, int offset) { + ByteConverter.int4(bytes, offset, number.intValue()); + } + }; + + private static final AbstractArrayEncoder SHORT_ARRAY = new 
FixedSizePrimitiveArrayEncoder(2, + Oid.INT2, Oid.INT2_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + public void appendArray(StringBuilder sb, char delim, short[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i > 0) { + sb.append(delim); + } + sb.append(array[i]); + } + sb.append('}'); + } + + /** + * {@inheritDoc} + */ + @Override + protected void write(short[] array, byte[] bytes, int offset) { + int idx = offset; + for (int i = 0; i < array.length; i++) { + bytes[idx + 3] = 2; + ByteConverter.int2(bytes, idx + 4, array[i]); + idx += 6; + } + } + }; + + private static final AbstractArrayEncoder SHORT_OBJ_ARRAY = new NumberArrayEncoder(2, Oid.INT2, + Oid.INT2_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + protected void write(Short number, byte[] bytes, int offset) { + ByteConverter.int2(bytes, offset, number.shortValue()); + } + }; + + private static final AbstractArrayEncoder DOUBLE_ARRAY = new FixedSizePrimitiveArrayEncoder(8, + Oid.FLOAT8, Oid.FLOAT8_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + public void appendArray(StringBuilder sb, char delim, double[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i > 0) { + sb.append(delim); + } + // use quotes to account for any issues with scientific notation + sb.append('"'); + sb.append(array[i]); + sb.append('"'); + } + sb.append('}'); + } + + /** + * {@inheritDoc} + */ + @Override + protected void write(double[] array, byte[] bytes, int offset) { + int idx = offset; + for (int i = 0; i < array.length; i++) { + bytes[idx + 3] = 8; + ByteConverter.float8(bytes, idx + 4, array[i]); + idx += 12; + } + } + }; + + private static final AbstractArrayEncoder DOUBLE_OBJ_ARRAY = new NumberArrayEncoder(8, Oid.FLOAT8, + Oid.FLOAT8_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + protected void write(Double number, byte[] bytes, int offset) { + ByteConverter.float8(bytes, offset, number.doubleValue()); + } + }; + + private static final 
AbstractArrayEncoder FLOAT_ARRAY = new FixedSizePrimitiveArrayEncoder(4, + Oid.FLOAT4, Oid.FLOAT4_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + public void appendArray(StringBuilder sb, char delim, float[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i > 0) { + sb.append(delim); + } + // use quotes to account for any issues with scientific notation + sb.append('"'); + sb.append(array[i]); + sb.append('"'); + } + sb.append('}'); + } + + /** + * {@inheritDoc} + */ + @Override + protected void write(float[] array, byte[] bytes, int offset) { + int idx = offset; + for (int i = 0; i < array.length; i++) { + bytes[idx + 3] = 4; + ByteConverter.float4(bytes, idx + 4, array[i]); + idx += 8; + } + } + }; + + private static final AbstractArrayEncoder FLOAT_OBJ_ARRAY = new NumberArrayEncoder(4, Oid.FLOAT4, + Oid.FLOAT4_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + protected void write(Float number, byte[] bytes, int offset) { + ByteConverter.float4(bytes, offset, number.floatValue()); + } + }; + + private static final AbstractArrayEncoder BOOLEAN_ARRAY = new FixedSizePrimitiveArrayEncoder(1, + Oid.BOOL, Oid.BOOL_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + public void appendArray(StringBuilder sb, char delim, boolean[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i > 0) { + sb.append(delim); + } + sb.append(array[i] ? 
'1' : '0'); + } + sb.append('}'); + } + + /** + * {@inheritDoc} + */ + @Override + protected void write(boolean[] array, byte[] bytes, int offset) { + int idx = offset; + for (int i = 0; i < array.length; i++) { + bytes[idx + 3] = 1; + ByteConverter.bool(bytes, idx + 4, array[i]); + idx += 5; + } + } + }; + + private static final AbstractArrayEncoder BOOLEAN_OBJ_ARRAY = new AbstractArrayEncoder(Oid.BOOL, + Oid.BOOL_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + public byte[] toBinaryRepresentation(BaseConnection connection, Boolean[] array, int oid) + throws SQLException, SQLFeatureNotSupportedException { + assert oid == arrayOid; + + final int nullCount = countNulls(array); + + final byte[] bytes = writeBytes(array, nullCount, 20); + + // 1 dimension + ByteConverter.int4(bytes, 0, 1); + // no null + ByteConverter.int4(bytes, 4, nullCount == 0 ? 0 : 1); + // oid + ByteConverter.int4(bytes, 8, getTypeOID(oid)); + // length + ByteConverter.int4(bytes, 12, array.length); + // postgresql uses 1 base by default + ByteConverter.int4(bytes, 16, 1); + + return bytes; + } + + private byte[] writeBytes(final Boolean[] array, final int nullCount, final int offset) { + final int length = offset + (4 * array.length) + (array.length - nullCount); + final byte[] bytes = new byte[length]; + + int idx = offset; + for (int i = 0; i < array.length; i++) { + if (array[i] == null) { + ByteConverter.int4(bytes, idx, -1); + idx += 4; + } else { + ByteConverter.int4(bytes, idx, 1); + idx += 4; + write(array[i], bytes, idx); + ++idx; + } + } + + return bytes; + } + + private void write(Boolean bool, byte[] bytes, int idx) { + ByteConverter.bool(bytes, idx, bool.booleanValue()); + } + + /** + * {@inheritDoc} + */ + @Override + byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, Boolean[] array) + throws SQLException, SQLFeatureNotSupportedException { + final int nullCount = countNulls(array); + return writeBytes(array, nullCount, 0); + } + + /** + * {@inheritDoc} 
+ */ + @Override + public void appendArray(StringBuilder sb, char delim, Boolean[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i != 0) { + sb.append(delim); + } + if (array[i] == null) { + sb.append('N').append('U').append('L').append('L'); + } else { + sb.append(array[i].booleanValue() ? '1' : '0'); + } + } + sb.append('}'); + } + }; + + private static final AbstractArrayEncoder STRING_ARRAY = new AbstractArrayEncoder(Oid.VARCHAR, + Oid.VARCHAR_ARRAY) { + + /** + * {@inheritDoc} + */ + @Override + int countNulls(String[] array) { + int count = 0; + for (int i = 0; i < array.length; i++) { + if (array[i] == null) { + ++count; + } + } + return count; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean supportBinaryRepresentation(int oid) { + return oid == Oid.VARCHAR_ARRAY || oid == Oid.TEXT_ARRAY; + } + + /** + * {@inheritDoc} + */ + @Override + int getTypeOID(int arrayOid) { + if (arrayOid == Oid.VARCHAR_ARRAY) { + return Oid.VARCHAR; + } + + if (arrayOid == Oid.TEXT_ARRAY) { + return Oid.TEXT; + } + + // this should not be possible based on supportBinaryRepresentation returning + // false for all other types + throw new IllegalStateException("Invalid array oid: " + arrayOid); + } + + /** + * {@inheritDoc} + */ + @Override + public void appendArray(StringBuilder sb, char delim, String[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i > 0) { + sb.append(delim); + } + if (array[i] == null) { + sb.append('N').append('U').append('L').append('L'); + } else { + PgArray.escapeArrayElement(sb, array[i]); + } + } + sb.append('}'); + } + + /** + * {@inheritDoc} + */ + @Override + public byte[] toBinaryRepresentation(BaseConnection connection, String[] array, int oid) throws SQLException { + final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.min(1024, (array.length * 32) + 20)); + + assert supportBinaryRepresentation(oid); + + final byte[] buffer = new byte[4]; + + try { + // 1 dimension 
+ ByteConverter.int4(buffer, 0, 1); + baos.write(buffer); + // null + ByteConverter.int4(buffer, 0, countNulls(array) > 0 ? 1 : 0); + baos.write(buffer); + // oid + ByteConverter.int4(buffer, 0, getTypeOID(oid)); + baos.write(buffer); + // length + ByteConverter.int4(buffer, 0, array.length); + baos.write(buffer); + + // postgresql uses 1 base by default + ByteConverter.int4(buffer, 0, 1); + baos.write(buffer); + + final Encoding encoding = connection.getEncoding(); + for (int i = 0; i < array.length; i++) { + final String string = array[i]; + if (string != null) { + final byte[] encoded; + try { + encoded = encoding.encode(string); + } catch (IOException e) { + throw new PSQLException(GT.tr("Unable to translate data into the desired encoding."), + PSQLState.DATA_ERROR, e); + } + ByteConverter.int4(buffer, 0, encoded.length); + baos.write(buffer); + baos.write(encoded); + } else { + ByteConverter.int4(buffer, 0, -1); + baos.write(buffer); + } + } + + return baos.toByteArray(); + } catch (IOException e) { + // this IO exception is from writing to baos, which will never throw an + // IOException + throw new java.lang.AssertionError(e); + } + } + + /** + * {@inheritDoc} + */ + @Override + byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, String[] array) + throws SQLException, SQLFeatureNotSupportedException { + try { + final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.min(1024, (array.length * 32) + 20)); + final byte[] buffer = new byte[4]; + final Encoding encoding = connection.getEncoding(); + for (int i = 0; i < array.length; i++) { + final String string = array[i]; + if (string != null) { + final byte[] encoded; + try { + encoded = encoding.encode(string); + } catch (IOException e) { + throw new PSQLException(GT.tr("Unable to translate data into the desired encoding."), + PSQLState.DATA_ERROR, e); + } + ByteConverter.int4(buffer, 0, encoded.length); + baos.write(buffer); + baos.write(encoded); + } else { + 
ByteConverter.int4(buffer, 0, -1); + baos.write(buffer); + } + } + + return baos.toByteArray(); + } catch (IOException e) { + // this IO exception is from writing to baos, which will never throw an + // IOException + throw new java.lang.AssertionError(e); + } + } + }; + + private static final AbstractArrayEncoder BYTEA_ARRAY = new AbstractArrayEncoder(Oid.BYTEA, + Oid.BYTEA_ARRAY) { + + /** + * The possible characters to use for representing hex binary data. + */ + private final char[] hexDigits = new char[]{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', + 'e', 'f'}; + + /** + * {@inheritDoc} + */ + @Override + public byte[] toBinaryRepresentation(BaseConnection connection, byte[][] array, int oid) + throws SQLException, SQLFeatureNotSupportedException { + + assert oid == arrayOid; + + int length = 20; + for (int i = 0; i < array.length; i++) { + length += 4; + if (array[i] != null) { + length += array[i].length; + } + } + final byte[] bytes = new byte[length]; + + // 1 dimension + ByteConverter.int4(bytes, 0, 1); + // no null + ByteConverter.int4(bytes, 4, 0); + // oid + ByteConverter.int4(bytes, 8, getTypeOID(oid)); + // length + ByteConverter.int4(bytes, 12, array.length); + // postgresql uses 1 base by default + ByteConverter.int4(bytes, 16, 1); + + write(array, bytes, 20); + + return bytes; + } + + /** + * {@inheritDoc} + */ + @Override + byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, byte[][] array) + throws SQLException, SQLFeatureNotSupportedException { + int length = 0; + for (int i = 0; i < array.length; i++) { + length += 4; + if (array[i] != null) { + length += array[i].length; + } + } + final byte[] bytes = new byte[length]; + + write(array, bytes, 0); + return bytes; + } + + /** + * {@inheritDoc} + */ + @Override + int countNulls(byte[][] array) { + int nulls = 0; + for (int i = 0; i < array.length; i++) { + if (array[i] == null) { + ++nulls; + } + } + return nulls; + } + + private void write(byte[][] 
array, byte[] bytes, int offset) { + int idx = offset; + for (int i = 0; i < array.length; i++) { + if (array[i] != null) { + ByteConverter.int4(bytes, idx, array[i].length); + idx += 4; + System.arraycopy(array[i], 0, bytes, idx, array[i].length); + idx += array[i].length; + } else { + ByteConverter.int4(bytes, idx, -1); + idx += 4; + } + } + } + + /** + * {@inheritDoc} + */ + @Override + public void appendArray(StringBuilder sb, char delim, byte[][] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i > 0) { + sb.append(delim); + } + + if (array[i] != null) { + sb.append("\"\\\\x"); + for (int j = 0; j < array[i].length; j++) { + byte b = array[i][j]; + + // get the value for the left 4 bits (drop sign) + sb.append(hexDigits[(b & 0xF0) >>> 4]); + // get the value for the right 4 bits + sb.append(hexDigits[b & 0x0F]); + } + sb.append('"'); + } else { + sb.append("NULL"); + } + } + sb.append('}'); + } + }; + + private static final AbstractArrayEncoder OBJECT_ARRAY = new AbstractArrayEncoder(0, 0) { + + @Override + public int getDefaultArrayTypeOid() { + return 0; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean supportBinaryRepresentation(int oid) { + return false; + } + + @Override + public byte[] toBinaryRepresentation(BaseConnection connection, Object[] array, int oid) + throws SQLException, SQLFeatureNotSupportedException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, Object[] array) + throws SQLException, SQLFeatureNotSupportedException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void appendArray(StringBuilder sb, char delim, Object[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i > 0) { + sb.append(delim); + } + if (array[i] == null) { + sb.append('N').append('U').append('L').append('L'); + } else if (array[i].getClass().isArray()) { + if (array[i] instanceof 
byte[]) { + throw new UnsupportedOperationException("byte[] nested inside Object[]"); + } + try { + getArrayEncoder(array[i]).appendArray(sb, delim, array[i]); + } catch (PSQLException e) { + // this should never happen + throw new IllegalStateException(e); + } + } else { + PgArray.escapeArrayElement(sb, array[i].toString()); + } + } + sb.append('}'); + } + }; + + @SuppressWarnings("rawtypes") + private static final Map ARRAY_CLASS_TO_ENCODER = new HashMap<>( + (int) (14 / .75) + 1); + + static { + ARRAY_CLASS_TO_ENCODER.put(long.class, LONG_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(Long.class, LONG_OBJ_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(int.class, INT_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(Integer.class, INT_OBJ_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(short.class, SHORT_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(Short.class, SHORT_OBJ_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(double.class, DOUBLE_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(Double.class, DOUBLE_OBJ_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(float.class, FLOAT_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(Float.class, FLOAT_OBJ_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(boolean.class, BOOLEAN_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(Boolean.class, BOOLEAN_OBJ_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(byte[].class, BYTEA_ARRAY); + ARRAY_CLASS_TO_ENCODER.put(String.class, STRING_ARRAY); + } + + /** + * Returns support for encoding array. + * + * @param array + * The array to encode. Must not be {@code null}. + * @return An instance capable of encoding array as a {@code String} at + * minimum. Some types may support binary encoding. + * @throws PSQLException + * if array is not a supported type. + * @see ArrayEncoding.ArrayEncoder#supportBinaryRepresentation(int) + */ + @SuppressWarnings({"unchecked", "rawtypes"}) + public static
ArrayEncoder getArrayEncoder(A array) throws PSQLException { + final Class arrayClazz = array.getClass(); + Class subClazz = arrayClazz.getComponentType(); + if (subClazz == null) { + throw new PSQLException(GT.tr("Invalid elements {0}", array), PSQLState.INVALID_PARAMETER_TYPE); + } + AbstractArrayEncoder support = ARRAY_CLASS_TO_ENCODER.get(subClazz); + if (support != null) { + return support; + } + Class subSubClazz = subClazz.getComponentType(); + if (subSubClazz == null) { + if (Object.class.isAssignableFrom(subClazz)) { + return (ArrayEncoder) OBJECT_ARRAY; + } + throw new PSQLException(GT.tr("Invalid elements {0}", array), PSQLState.INVALID_PARAMETER_TYPE); + } + + subClazz = subSubClazz; + int dimensions = 2; + while (subClazz != null) { + support = ARRAY_CLASS_TO_ENCODER.get(subClazz); + if (support != null) { + if (dimensions == 2) { + return new TwoDimensionPrimitiveArrayEncoder(support); + } + return new RecursiveArrayEncoder(support, dimensions); + } + subSubClazz = subClazz.getComponentType(); + if (subSubClazz == null) { + if (Object.class.isAssignableFrom(subClazz)) { + if (dimensions == 2) { + return new TwoDimensionPrimitiveArrayEncoder(OBJECT_ARRAY); + } + return new RecursiveArrayEncoder(OBJECT_ARRAY, dimensions); + } + } + ++dimensions; + subClazz = subSubClazz; + } + + throw new PSQLException(GT.tr("Invalid elements {0}", array), PSQLState.INVALID_PARAMETER_TYPE); + } + + /** + * Wraps an {@link AbstractArrayEncoder} implementation and provides optimized + * support for 2 dimensions. + */ + private static final class TwoDimensionPrimitiveArrayEncoder implements ArrayEncoder { + private final AbstractArrayEncoder support; + + /** + * @param support + * The instance providing support for the base array type. 
+ */ + TwoDimensionPrimitiveArrayEncoder(AbstractArrayEncoder support) { + super(); + this.support = support; + } + + /** + * {@inheritDoc} + */ + @Override + public int getDefaultArrayTypeOid() { + return support.getDefaultArrayTypeOid(); + } + + /** + * {@inheritDoc} + */ + @Override + public String toArrayString(char delim, A[] array) { + final StringBuilder sb = new StringBuilder(1024); + appendArray(sb, delim, array); + return sb.toString(); + } + + /** + * {@inheritDoc} + */ + @Override + public void appendArray(StringBuilder sb, char delim, A[] array) { + sb.append('{'); + for (int i = 0; i < array.length; i++) { + if (i > 0) { + sb.append(delim); + } + support.appendArray(sb, delim, array[i]); + } + sb.append('}'); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean supportBinaryRepresentation(int oid) { + return support.supportBinaryRepresentation(oid); + } + + /** + * {@inheritDoc} 4 bytes - dimension 4 bytes - oid 4 bytes - ? 8*d bytes - + * dimension length + */ + @Override + public byte[] toBinaryRepresentation(BaseConnection connection, A[] array, int oid) + throws SQLException, SQLFeatureNotSupportedException { + final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.min(1024, (array.length * 32) + 20)); + final byte[] buffer = new byte[4]; + + boolean hasNulls = false; + for (int i = 0; !hasNulls && i < array.length; i++) { + if (support.countNulls(array[i]) > 0) { + hasNulls = true; + } + } + + try { + // 2 dimension + ByteConverter.int4(buffer, 0, 2); + baos.write(buffer); + // nulls + ByteConverter.int4(buffer, 0, hasNulls ? 1 : 0); + baos.write(buffer); + // oid + ByteConverter.int4(buffer, 0, support.getTypeOID(oid)); + baos.write(buffer); + + // length + ByteConverter.int4(buffer, 0, array.length); + baos.write(buffer); + // postgres defaults to 1 based lower bound + ByteConverter.int4(buffer, 0, 1); + baos.write(buffer); + + ByteConverter.int4(buffer, 0, array.length > 0 ? 
Array.getLength(array[0]) : 0); + baos.write(buffer); + // postgresql uses 1 base by default + ByteConverter.int4(buffer, 0, 1); + baos.write(buffer); + + for (int i = 0; i < array.length; i++) { + baos.write(support.toSingleDimensionBinaryRepresentation(connection, array[i])); + } + + return baos.toByteArray(); + + } catch (IOException e) { + // this IO exception is from writing to baos, which will never throw an + // IOException + throw new java.lang.AssertionError(e); + } + } + } + + /** + * Wraps an {@link AbstractArrayEncoder} implementation and provides support for + * 2 or more dimensions using recursion. + */ + @SuppressWarnings({"unchecked", "rawtypes"}) + private static final class RecursiveArrayEncoder implements ArrayEncoder { + + private final AbstractArrayEncoder support; + private final int dimensions; + + /** + * @param support + * The instance providing support for the base array type. + */ + RecursiveArrayEncoder(AbstractArrayEncoder support, int dimensions) { + super(); + this.support = support; + this.dimensions = dimensions; + assert dimensions >= 2; + } + + /** + * {@inheritDoc} + */ + @Override + public int getDefaultArrayTypeOid() { + return support.getDefaultArrayTypeOid(); + } + + /** + * {@inheritDoc} + */ + @Override + public String toArrayString(char delim, Object array) { + final StringBuilder sb = new StringBuilder(2048); + arrayString(sb, array, delim, dimensions); + return sb.toString(); + } + + /** + * {@inheritDoc} + */ + @Override + public void appendArray(StringBuilder sb, char delim, Object array) { + arrayString(sb, array, delim, dimensions); + } + + private void arrayString(StringBuilder sb, Object array, char delim, int depth) { + + if (depth > 1) { + sb.append('{'); + for (int i = 0, j = Array.getLength(array); i < j; i++) { + if (i > 0) { + sb.append(delim); + } + arrayString(sb, Array.get(array, i), delim, depth - 1); + } + sb.append('}'); + } else { + support.appendArray(sb, delim, array); + } + } + + /** + * 
{@inheritDoc} + */ + @Override + public boolean supportBinaryRepresentation(int oid) { + return support.supportBinaryRepresentation(oid); + } + + private boolean hasNulls(Object array, int depth) { + if (depth > 1) { + for (int i = 0, j = Array.getLength(array); i < j; i++) { + if (hasNulls(Array.get(array, i), depth - 1)) { + return true; + } + } + return false; + } + + return support.countNulls(array) > 0; + } + + /** + * {@inheritDoc} + */ + @Override + public byte[] toBinaryRepresentation(BaseConnection connection, Object array, int oid) + throws SQLException, SQLFeatureNotSupportedException { + + final boolean hasNulls = hasNulls(array, dimensions); + + final ByteArrayOutputStream baos = new ByteArrayOutputStream(1024 * dimensions); + final byte[] buffer = new byte[4]; + + try { + // dimensions + ByteConverter.int4(buffer, 0, dimensions); + baos.write(buffer); + // nulls + ByteConverter.int4(buffer, 0, hasNulls ? 1 : 0); + baos.write(buffer); + // oid + ByteConverter.int4(buffer, 0, support.getTypeOID(oid)); + baos.write(buffer); + + // length + ByteConverter.int4(buffer, 0, Array.getLength(array)); + baos.write(buffer); + // postgresql uses 1 base by default + ByteConverter.int4(buffer, 0, 1); + baos.write(buffer); + + writeArray(connection, buffer, baos, array, dimensions, true); + + return baos.toByteArray(); + + } catch (IOException e) { + // this IO exception is from writing to baos, which will never throw an + // IOException + throw new java.lang.AssertionError(e); + } + } + + private void writeArray(BaseConnection connection, byte[] buffer, ByteArrayOutputStream baos, + Object array, int depth, boolean first) throws IOException, SQLException { + final int length = Array.getLength(array); + + if (first) { + ByteConverter.int4(buffer, 0, length > 0 ? 
Array.getLength(Array.get(array, 0)) : 0); + baos.write(buffer); + // postgresql uses 1 base by default + ByteConverter.int4(buffer, 0, 1); + baos.write(buffer); + } + + for (int i = 0; i < length; i++) { + final Object subArray = Array.get(array, i); + if (depth > 2) { + writeArray(connection, buffer, baos, subArray, depth - 1, i == 0); + } else { + baos.write(support.toSingleDimensionBinaryRepresentation(connection, subArray)); + } + } + } + + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/AutoSave.java b/pgjdbc/src/main/java/org/postgresql/jdbc/AutoSave.java new file mode 100644 index 0000000..f4588c7 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/AutoSave.java @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2005, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import java.util.Locale; + +public enum AutoSave { + NEVER, + ALWAYS, + CONSERVATIVE; + + private final String value; + + AutoSave() { + value = this.name().toLowerCase(Locale.ROOT); + } + + public String value() { + return value; + } + + public static AutoSave of(String value) { + return valueOf(value.toUpperCase(Locale.ROOT)); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/BatchResultHandler.java b/pgjdbc/src/main/java/org/postgresql/jdbc/BatchResultHandler.java new file mode 100644 index 0000000..1d98932 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/BatchResultHandler.java @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Field; +import org.postgresql.core.ParameterList; +import org.postgresql.core.Query; +import org.postgresql.core.ResultCursor; +import org.postgresql.core.ResultHandlerBase; +import org.postgresql.core.Tuple; +import org.postgresql.core.v3.BatchedQuery; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.BatchUpdateException; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Internal class, it is not a part of public API. + */ +public class BatchResultHandler extends ResultHandlerBase { + + private final PgStatement pgStatement; + private int resultIndex; + + private final Query[] queries; + private final long[] longUpdateCounts; + private final ParameterList [] parameterLists; + private final boolean expectGeneratedKeys; + private PgResultSet generatedKeys; + private int committedRows; // 0 means no rows committed. 1 means row 0 was committed, and so on + private final List> allGeneratedRows; + private List latestGeneratedRows; + private PgResultSet latestGeneratedKeysRs; + + BatchResultHandler(PgStatement pgStatement, Query[] queries, + ParameterList [] parameterLists, + boolean expectGeneratedKeys) { + this.pgStatement = pgStatement; + this.queries = queries; + this.parameterLists = parameterLists; + this.longUpdateCounts = new long[queries.length]; + this.expectGeneratedKeys = expectGeneratedKeys; + this.allGeneratedRows = !expectGeneratedKeys ? 
null : new ArrayList>(); + } + + @Override + public void handleResultRows(Query fromQuery, Field[] fields, List tuples, + ResultCursor cursor) { + // If SELECT, then handleCommandStatus call would just be missing + resultIndex++; + if (!expectGeneratedKeys) { + // No rows expected -> just ignore rows + return; + } + if (generatedKeys == null) { + try { + // If SELECT, the resulting ResultSet is not valid + // Thus it is up to handleCommandStatus to decide if resultSet is good enough + latestGeneratedKeysRs = (PgResultSet) pgStatement.createResultSet(fromQuery, fields, + new ArrayList<>(), cursor); + } catch (SQLException e) { + handleError(e); + } + } + latestGeneratedRows = tuples; + } + + @Override + public void handleCommandStatus(String status, long updateCount, long insertOID) { + List latestGeneratedRows = this.latestGeneratedRows; + if (latestGeneratedRows != null) { + // We have DML. Decrease resultIndex that was just increased in handleResultRows + resultIndex--; + // If exception thrown, no need to collect generated keys + // Note: some generated keys might be secured in generatedKeys + if (updateCount > 0 && (getException() == null || isAutoCommit())) { + List> allGeneratedRows = this.allGeneratedRows; + allGeneratedRows.add(latestGeneratedRows); + if (generatedKeys == null) { + generatedKeys = latestGeneratedKeysRs; + } + } + this.latestGeneratedRows = null; + } + + if (resultIndex >= queries.length) { + handleError(new PSQLException(GT.tr("Too many update results were returned."), + PSQLState.TOO_MANY_RESULTS)); + return; + } + latestGeneratedKeysRs = null; + + longUpdateCounts[resultIndex++] = updateCount; + } + + private boolean isAutoCommit() { + try { + return pgStatement.getConnection().getAutoCommit(); + } catch (SQLException e) { + assert false : "pgStatement.getConnection().getAutoCommit() should not throw"; + return false; + } + } + + @Override + public void secureProgress() { + if (isAutoCommit()) { + committedRows = resultIndex; + 
updateGeneratedKeys(); + } + } + + private void updateGeneratedKeys() { + List> allGeneratedRows = this.allGeneratedRows; + if (allGeneratedRows == null || allGeneratedRows.isEmpty()) { + return; + } + PgResultSet generatedKeys = this.generatedKeys; + for (List rows : allGeneratedRows) { + generatedKeys.addRows(rows); + } + allGeneratedRows.clear(); + } + + @Override + public void handleWarning(SQLWarning warning) { + pgStatement.addWarning(warning); + } + + @Override + public void handleError(SQLException newError) { + if (getException() == null) { + Arrays.fill(longUpdateCounts, committedRows, longUpdateCounts.length, Statement.EXECUTE_FAILED); + if (allGeneratedRows != null) { + allGeneratedRows.clear(); + } + + String queryString = ""; + if (pgStatement.getPGConnection().getLogServerErrorDetail()) { + if (resultIndex < queries.length) { + queryString = queries[resultIndex].toString( + parameterLists == null ? null : parameterLists[resultIndex]); + } + } + + BatchUpdateException batchException; + batchException = new BatchUpdateException( + GT.tr("Batch entry {0} {1} was aborted: {2} Call getNextException to see other errors in the batch.", + resultIndex, queryString, newError.getMessage()), + newError.getSQLState(), 0, uncompressLongUpdateCount(), newError); + + super.handleError(batchException); + } + resultIndex++; + + super.handleError(newError); + } + + @Override + public void handleCompletion() throws SQLException { + updateGeneratedKeys(); + SQLException batchException = getException(); + if (batchException != null) { + if (isAutoCommit()) { + // Re-create batch exception since rows after exception might indeed succeed. 
+ BatchUpdateException newException; + newException = new BatchUpdateException( + batchException.getMessage(), + batchException.getSQLState(), 0, + uncompressLongUpdateCount(), + batchException.getCause() + ); + + SQLException next = batchException.getNextException(); + if (next != null) { + newException.setNextException(next); + } + batchException = newException; + } + throw batchException; + } + } + + public ResultSet getGeneratedKeys() { + return generatedKeys; + } + + private int[] uncompressUpdateCount() { + long[] original = uncompressLongUpdateCount(); + int[] copy = new int[original.length]; + for (int i = 0; i < original.length; i++) { + copy[i] = original[i] > Integer.MAX_VALUE ? Statement.SUCCESS_NO_INFO : (int) original[i]; + } + return copy; + } + + public int[] getUpdateCount() { + return uncompressUpdateCount(); + } + + private long[] uncompressLongUpdateCount() { + if (!(queries[0] instanceof BatchedQuery)) { + return longUpdateCounts; + } + int totalRows = 0; + boolean hasRewrites = false; + for (Query query : queries) { + int batchSize = query.getBatchSize(); + totalRows += batchSize; + hasRewrites |= batchSize > 1; + } + if (!hasRewrites) { + return longUpdateCounts; + } + + /* In this situation there is a batch that has been rewritten. Substitute + * the running total returned by the database with a status code to + * indicate successful completion for each row the driver client added + * to the batch. 
+ */ + long[] newUpdateCounts = new long[totalRows]; + int offset = 0; + for (int i = 0; i < queries.length; i++) { + Query query = queries[i]; + int batchSize = query.getBatchSize(); + long superBatchResult = longUpdateCounts[i]; + if (batchSize == 1) { + newUpdateCounts[offset++] = superBatchResult; + continue; + } + if (superBatchResult > 0) { + // If some rows inserted, we do not really know how did they spread over individual + // statements + superBatchResult = Statement.SUCCESS_NO_INFO; + } + Arrays.fill(newUpdateCounts, offset, offset + batchSize, superBatchResult); + offset += batchSize; + } + return newUpdateCounts; + } + + public long[] getLargeUpdateCount() { + return uncompressLongUpdateCount(); + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/BooleanTypeUtil.java b/pgjdbc/src/main/java/org/postgresql/jdbc/BooleanTypeUtil.java new file mode 100644 index 0000000..b7b2225 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/BooleanTypeUtil.java @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + *
<p>Helper class to handle boolean type of PostgreSQL.</p> + * + * <p>Based on values accepted by the PostgreSQL server: + * https://www.postgresql.org/docs/current/static/datatype-boolean.html</p>
+ */ +class BooleanTypeUtil { + + private static final Logger LOGGER = Logger.getLogger(BooleanTypeUtil.class.getName()); + + private BooleanTypeUtil() { + } + + /** + * Cast an Object value to the corresponding boolean value. + * + * @param in Object to cast into boolean + * @return boolean value corresponding to the cast of the object + * @throws PSQLException PSQLState.CANNOT_COERCE + */ + static boolean castToBoolean(final Object in) throws PSQLException { + if (LOGGER.isLoggable(Level.FINE)) { + LOGGER.log(Level.FINE, "Cast to boolean: \"{0}\"", String.valueOf(in)); + } + if (in instanceof Boolean) { + return (Boolean) in; + } + if (in instanceof String) { + return fromString((String) in); + } + if (in instanceof Character) { + return fromCharacter((Character) in); + } + if (in instanceof Number) { + return fromNumber((Number) in); + } + throw new PSQLException("Cannot cast to boolean", PSQLState.CANNOT_COERCE); + } + + static boolean fromString(final String strval) throws PSQLException { + // Leading or trailing whitespace is ignored, and case does not matter. 
+ final String val = strval.trim(); + if ("1".equals(val) || "true".equalsIgnoreCase(val) + || "t".equalsIgnoreCase(val) || "yes".equalsIgnoreCase(val) + || "y".equalsIgnoreCase(val) || "on".equalsIgnoreCase(val)) { + return true; + } + if ("0".equals(val) || "false".equalsIgnoreCase(val) + || "f".equalsIgnoreCase(val) || "no".equalsIgnoreCase(val) + || "n".equalsIgnoreCase(val) || "off".equalsIgnoreCase(val)) { + return false; + } + throw cannotCoerceException(strval); + } + + private static boolean fromCharacter(final Character charval) throws PSQLException { + if ('1' == charval || 't' == charval || 'T' == charval + || 'y' == charval || 'Y' == charval) { + return true; + } + if ('0' == charval || 'f' == charval || 'F' == charval + || 'n' == charval || 'N' == charval) { + return false; + } + throw cannotCoerceException(charval); + } + + private static boolean fromNumber(final Number numval) throws PSQLException { + // Handles BigDecimal, Byte, Short, Integer, Long Float, Double + // based on the widening primitive conversions. + final double value = numval.doubleValue(); + if (value == 1.0d) { + return true; + } + if (value == 0.0d) { + return false; + } + throw cannotCoerceException(numval); + } + + private static PSQLException cannotCoerceException(final Object value) { + if (LOGGER.isLoggable(Level.FINE)) { + LOGGER.log(Level.FINE, "Cannot cast to boolean: \"{0}\"", String.valueOf(value)); + } + return new PSQLException(GT.tr("Cannot cast to boolean: \"{0}\"", String.valueOf(value)), + PSQLState.CANNOT_COERCE); + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/CallableBatchResultHandler.java b/pgjdbc/src/main/java/org/postgresql/jdbc/CallableBatchResultHandler.java new file mode 100644 index 0000000..6ec60ca --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/CallableBatchResultHandler.java @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Field; +import org.postgresql.core.ParameterList; +import org.postgresql.core.Query; +import org.postgresql.core.ResultCursor; +import org.postgresql.core.Tuple; + +import java.util.List; + +class CallableBatchResultHandler extends BatchResultHandler { + CallableBatchResultHandler(PgStatement statement, Query[] queries, + ParameterList[] parameterLists) { + super(statement, queries, parameterLists, false); + } + + @Override + public void handleResultRows(Query fromQuery, Field[] fields, List tuples, + ResultCursor cursor) { + /* ignore */ + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/EscapeSyntaxCallMode.java b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapeSyntaxCallMode.java new file mode 100644 index 0000000..00200b1 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapeSyntaxCallMode.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2019, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +/** + *
<p>Specifies whether a SELECT/CALL statement is used for the underlying SQL for JDBC escape call syntax: 'select' means to + * always use SELECT, 'callIfNoReturn' means to use CALL if there is no return parameter (otherwise use SELECT), and 'call' means + * to always use CALL.</p>
+ * + * @see org.postgresql.PGProperty#ESCAPE_SYNTAX_CALL_MODE + */ +public enum EscapeSyntaxCallMode { + SELECT("select"), + CALL_IF_NO_RETURN("callIfNoReturn"), + CALL("call"); + + private final String value; + + EscapeSyntaxCallMode(String value) { + this.value = value; + } + + public static EscapeSyntaxCallMode of(String mode) { + for (EscapeSyntaxCallMode escapeSyntaxCallMode : values()) { + if (escapeSyntaxCallMode.value.equals(mode)) { + return escapeSyntaxCallMode; + } + } + return SELECT; + } + + public String value() { + return value; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions.java b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions.java new file mode 100644 index 0000000..9729315 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions.java @@ -0,0 +1,750 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.lang.reflect.Method; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * This class stores supported escaped function. 
+ * + * @author Xavier Poinsard + * @deprecated see {@link EscapedFunctions2} + */ +@Deprecated +public class EscapedFunctions { + // numeric functions names + public static final String ABS = "abs"; + public static final String ACOS = "acos"; + public static final String ASIN = "asin"; + public static final String ATAN = "atan"; + public static final String ATAN2 = "atan2"; + public static final String CEILING = "ceiling"; + public static final String COS = "cos"; + public static final String COT = "cot"; + public static final String DEGREES = "degrees"; + public static final String EXP = "exp"; + public static final String FLOOR = "floor"; + public static final String LOG = "log"; + public static final String LOG10 = "log10"; + public static final String MOD = "mod"; + public static final String PI = "pi"; + public static final String POWER = "power"; + public static final String RADIANS = "radians"; + public static final String ROUND = "round"; + public static final String SIGN = "sign"; + public static final String SIN = "sin"; + public static final String SQRT = "sqrt"; + public static final String TAN = "tan"; + public static final String TRUNCATE = "truncate"; + + // string function names + public static final String ASCII = "ascii"; + public static final String CHAR = "char"; + public static final String CONCAT = "concat"; + public static final String INSERT = "insert"; // change arguments order + public static final String LCASE = "lcase"; + public static final String LEFT = "left"; + public static final String LENGTH = "length"; + public static final String LOCATE = "locate"; // the 3 args version duplicate args + public static final String LTRIM = "ltrim"; + public static final String REPEAT = "repeat"; + public static final String REPLACE = "replace"; + public static final String RIGHT = "right"; // duplicate args + public static final String RTRIM = "rtrim"; + public static final String SPACE = "space"; + public static final String SUBSTRING = 
"substring"; + public static final String UCASE = "ucase"; + // soundex is implemented on the server side by + // the contrib/fuzzystrmatch module. We provide a translation + // for this in the driver, but since we don't want to bother with run + // time detection of this module's installation we don't report this + // method as supported in DatabaseMetaData. + // difference is currently unsupported entirely. + + // date time function names + public static final String CURDATE = "curdate"; + public static final String CURTIME = "curtime"; + public static final String DAYNAME = "dayname"; + public static final String DAYOFMONTH = "dayofmonth"; + public static final String DAYOFWEEK = "dayofweek"; + public static final String DAYOFYEAR = "dayofyear"; + public static final String HOUR = "hour"; + public static final String MINUTE = "minute"; + public static final String MONTH = "month"; + public static final String MONTHNAME = "monthname"; + public static final String NOW = "now"; + public static final String QUARTER = "quarter"; + public static final String SECOND = "second"; + public static final String WEEK = "week"; + public static final String YEAR = "year"; + // for timestampadd and timestampdiff the fractional part of second is not supported + // by the backend + // timestampdiff is very partially supported + public static final String TIMESTAMPADD = "timestampadd"; + public static final String TIMESTAMPDIFF = "timestampdiff"; + + // constants for timestampadd and timestampdiff + public static final String SQL_TSI_ROOT = "SQL_TSI_"; + public static final String SQL_TSI_DAY = "DAY"; + public static final String SQL_TSI_FRAC_SECOND = "FRAC_SECOND"; + public static final String SQL_TSI_HOUR = "HOUR"; + public static final String SQL_TSI_MINUTE = "MINUTE"; + public static final String SQL_TSI_MONTH = "MONTH"; + public static final String SQL_TSI_QUARTER = "QUARTER"; + public static final String SQL_TSI_SECOND = "SECOND"; + public static final String SQL_TSI_WEEK = 
"WEEK"; + public static final String SQL_TSI_YEAR = "YEAR"; + + // system functions + public static final String DATABASE = "database"; + public static final String IFNULL = "ifnull"; + public static final String USER = "user"; + + /** + * storage for functions implementations. + */ + private static final Map functionMap = createFunctionMap(); + + public EscapedFunctions() { + } + + private static Map createFunctionMap() { + Method[] arrayMeths = EscapedFunctions.class.getDeclaredMethods(); + Map functionMap = new HashMap<>(arrayMeths.length * 2); + for (Method meth : arrayMeths) { + if (meth.getName().startsWith("sql")) { + functionMap.put(meth.getName().toLowerCase(Locale.US), meth); + } + } + return functionMap; + } + + /** + * get Method object implementing the given function. + * + * @param functionName name of the searched function + * @return a Method object or null if not found + */ + public static Method getFunction(String functionName) { + return functionMap.get("sql" + functionName.toLowerCase(Locale.US)); + } + + // ** numeric functions translations ** + + /** + * ceiling to ceil translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlceiling(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("ceil(", "ceiling", parsedArgs); + } + + /** + * log to ln translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqllog(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("ln(", "log", parsedArgs); + } + + /** + * log10 to log translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqllog10(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("log(", "log10", parsedArgs); + } + + /** + * power to pow translation. 
+ * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlpower(List parsedArgs) throws SQLException { + return twoArgumentsFunctionCall("pow(", "power", parsedArgs); + } + + /** + * truncate to trunc translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqltruncate(List parsedArgs) throws SQLException { + return twoArgumentsFunctionCall("trunc(", "truncate", parsedArgs); + } + + // ** string functions translations ** + + /** + * char to chr translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlchar(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("chr(", "char", parsedArgs); + } + + /** + * concat translation. + * + * @param parsedArgs arguments + * @return sql call + */ + public static String sqlconcat(List parsedArgs) { + StringBuilder buf = new StringBuilder(); + buf.append('('); + for (int iArg = 0; iArg < parsedArgs.size(); iArg++) { + buf.append(parsedArgs.get(iArg)); + if (iArg != (parsedArgs.size() - 1)) { + buf.append(" || "); + } + } + return buf.append(')').toString(); + } + + /** + * insert to overlay translation. 
+ * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlinsert(List parsedArgs) throws SQLException { + if (parsedArgs.size() != 4) { + throw new PSQLException(GT.tr("{0} function takes four and only four argument.", "insert"), + PSQLState.SYNTAX_ERROR); + } + StringBuilder buf = new StringBuilder(); + buf.append("overlay("); + buf.append(parsedArgs.get(0)).append(" placing ").append(parsedArgs.get(3)); + buf.append(" from ").append(parsedArgs.get(1)).append(" for ").append(parsedArgs.get(2)); + return buf.append(')').toString(); + } + + /** + * lcase to lower translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqllcase(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("lower(", "lcase", parsedArgs); + } + + /** + * left to substring translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlleft(List parsedArgs) throws SQLException { + if (parsedArgs.size() != 2) { + throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "left"), + PSQLState.SYNTAX_ERROR); + } + StringBuilder buf = new StringBuilder(); + buf.append("substring("); + buf.append(parsedArgs.get(0)).append(" for ").append(parsedArgs.get(1)); + return buf.append(')').toString(); + } + + /** + * length translation. 
+ * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqllength(List parsedArgs) throws SQLException { + if (parsedArgs.size() != 1) { + throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "length"), + PSQLState.SYNTAX_ERROR); + } + StringBuilder buf = new StringBuilder(); + buf.append("length(trim(trailing from "); + buf.append(parsedArgs.get(0)); + return buf.append("))").toString(); + } + + /** + * locate translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqllocate(List parsedArgs) throws SQLException { + if (parsedArgs.size() == 2) { + return "position(" + parsedArgs.get(0) + " in " + parsedArgs.get(1) + ")"; + } else if (parsedArgs.size() == 3) { + String tmp = "position(" + parsedArgs.get(0) + " in substring(" + parsedArgs.get(1) + " from " + + parsedArgs.get(2) + "))"; + return "(" + parsedArgs.get(2) + "*sign(" + tmp + ")+" + tmp + ")"; + } else { + throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "locate"), + PSQLState.SYNTAX_ERROR); + } + } + + /** + * ltrim translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlltrim(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("trim(leading from ", "ltrim", parsedArgs); + } + + /** + * right to substring translation. 
+ * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlright(List parsedArgs) throws SQLException { + if (parsedArgs.size() != 2) { + throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "right"), + PSQLState.SYNTAX_ERROR); + } + StringBuilder buf = new StringBuilder(); + buf.append("substring("); + buf.append(parsedArgs.get(0)) + .append(" from (length(") + .append(parsedArgs.get(0)) + .append(")+1-") + .append(parsedArgs.get(1)); + return buf.append("))").toString(); + } + + /** + * rtrim translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlrtrim(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("trim(trailing from ", "rtrim", parsedArgs); + } + + /** + * space translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlspace(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("repeat(' ',", "space", parsedArgs); + } + + /** + * substring to substr translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlsubstring(List parsedArgs) throws SQLException { + if (parsedArgs.size() == 2) { + return "substr(" + parsedArgs.get(0) + "," + parsedArgs.get(1) + ")"; + } else if (parsedArgs.size() == 3) { + return "substr(" + parsedArgs.get(0) + "," + parsedArgs.get(1) + "," + parsedArgs.get(2) + + ")"; + } else { + throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "substring"), + PSQLState.SYNTAX_ERROR); + } + } + + /** + * ucase to upper translation. 
+ * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlucase(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("upper(", "ucase", parsedArgs); + } + + /** + * curdate to current_date translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlcurdate(List parsedArgs) throws SQLException { + if (!parsedArgs.isEmpty()) { + throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "curdate"), + PSQLState.SYNTAX_ERROR); + } + return "current_date"; + } + + /** + * curtime to current_time translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlcurtime(List parsedArgs) throws SQLException { + if (!parsedArgs.isEmpty()) { + throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "curtime"), + PSQLState.SYNTAX_ERROR); + } + return "current_time"; + } + + /** + * dayname translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqldayname(List parsedArgs) throws SQLException { + if (parsedArgs.size() != 1) { + throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayname"), + PSQLState.SYNTAX_ERROR); + } + return "to_char(" + parsedArgs.get(0) + ",'Day')"; + } + + /** + * dayofmonth translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqldayofmonth(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("extract(day from ", "dayofmonth", parsedArgs); + } + + /** + * dayofweek translation adding 1 to postgresql function since we expect values from 1 to 7. 
+ * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqldayofweek(List parsedArgs) throws SQLException { + if (parsedArgs.size() != 1) { + throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayofweek"), + PSQLState.SYNTAX_ERROR); + } + return "extract(dow from " + parsedArgs.get(0) + ")+1"; + } + + /** + * dayofyear translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqldayofyear(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("extract(doy from ", "dayofyear", parsedArgs); + } + + /** + * hour translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlhour(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("extract(hour from ", "hour", parsedArgs); + } + + /** + * minute translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlminute(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("extract(minute from ", "minute", parsedArgs); + } + + /** + * month translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlmonth(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("extract(month from ", "month", parsedArgs); + } + + /** + * monthname translation. 
+ * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlmonthname(List parsedArgs) throws SQLException { + if (parsedArgs.size() != 1) { + throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "monthname"), + PSQLState.SYNTAX_ERROR); + } + return "to_char(" + parsedArgs.get(0) + ",'Month')"; + } + + /** + * quarter translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlquarter(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("extract(quarter from ", "quarter", parsedArgs); + } + + /** + * second translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlsecond(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("extract(second from ", "second", parsedArgs); + } + + /** + * week translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlweek(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("extract(week from ", "week", parsedArgs); + } + + /** + * year translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlyear(List parsedArgs) throws SQLException { + return singleArgumentFunctionCall("extract(year from ", "year", parsedArgs); + } + + /** + * time stamp add. 
+ * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + @SuppressWarnings("TypeParameterExplicitlyExtendsObject") + public static String sqltimestampadd(List parsedArgs) throws SQLException { + if (parsedArgs.size() != 3) { + throw new PSQLException( + GT.tr("{0} function takes three and only three arguments.", "timestampadd"), + PSQLState.SYNTAX_ERROR); + } + String interval = EscapedFunctions.constantToInterval(parsedArgs.get(0).toString(), + parsedArgs.get(1).toString()); + StringBuilder buf = new StringBuilder(); + buf.append("(").append(interval).append("+"); + buf.append(parsedArgs.get(2)).append(")"); + return buf.toString(); + } + + private static String constantToInterval(String type, String value) throws SQLException { + if (!type.startsWith(SQL_TSI_ROOT)) { + throw new PSQLException(GT.tr("Interval {0} not yet implemented", type), + PSQLState.SYNTAX_ERROR); + } + String shortType = type.substring(SQL_TSI_ROOT.length()); + if (SQL_TSI_DAY.equalsIgnoreCase(shortType)) { + return "CAST(" + value + " || ' day' as interval)"; + } else if (SQL_TSI_SECOND.equalsIgnoreCase(shortType)) { + return "CAST(" + value + " || ' second' as interval)"; + } else if (SQL_TSI_HOUR.equalsIgnoreCase(shortType)) { + return "CAST(" + value + " || ' hour' as interval)"; + } else if (SQL_TSI_MINUTE.equalsIgnoreCase(shortType)) { + return "CAST(" + value + " || ' minute' as interval)"; + } else if (SQL_TSI_MONTH.equalsIgnoreCase(shortType)) { + return "CAST(" + value + " || ' month' as interval)"; + } else if (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) { + return "CAST((" + value + "::int * 3) || ' month' as interval)"; + } else if (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) { + return "CAST(" + value + " || ' week' as interval)"; + } else if (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) { + return "CAST(" + value + " || ' year' as interval)"; + } else if (SQL_TSI_FRAC_SECOND.equalsIgnoreCase(shortType)) { + throw new 
PSQLException(GT.tr("Interval {0} not yet implemented", "SQL_TSI_FRAC_SECOND"), + PSQLState.SYNTAX_ERROR); + } else { + throw new PSQLException(GT.tr("Interval {0} not yet implemented", type), + PSQLState.SYNTAX_ERROR); + } + } + + /** + * time stamp diff. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + @SuppressWarnings("TypeParameterExplicitlyExtendsObject") + public static String sqltimestampdiff(List parsedArgs) throws SQLException { + if (parsedArgs.size() != 3) { + throw new PSQLException( + GT.tr("{0} function takes three and only three arguments.", "timestampdiff"), + PSQLState.SYNTAX_ERROR); + } + String datePart = EscapedFunctions.constantToDatePart(parsedArgs.get(0).toString()); + StringBuilder buf = new StringBuilder(); + buf.append("extract( ") + .append(datePart) + .append(" from (") + .append(parsedArgs.get(2)) + .append("-") + .append(parsedArgs.get(1)) + .append("))"); + return buf.toString(); + } + + private static String constantToDatePart(String type) throws SQLException { + if (!type.startsWith(SQL_TSI_ROOT)) { + throw new PSQLException(GT.tr("Interval {0} not yet implemented", type), + PSQLState.SYNTAX_ERROR); + } + String shortType = type.substring(SQL_TSI_ROOT.length()); + if (SQL_TSI_DAY.equalsIgnoreCase(shortType)) { + return "day"; + } else if (SQL_TSI_SECOND.equalsIgnoreCase(shortType)) { + return "second"; + } else if (SQL_TSI_HOUR.equalsIgnoreCase(shortType)) { + return "hour"; + } else if (SQL_TSI_MINUTE.equalsIgnoreCase(shortType)) { + return "minute"; + } else if (SQL_TSI_FRAC_SECOND.equalsIgnoreCase(shortType)) { + throw new PSQLException(GT.tr("Interval {0} not yet implemented", "SQL_TSI_FRAC_SECOND"), + PSQLState.SYNTAX_ERROR); + } else { + throw new PSQLException(GT.tr("Interval {0} not yet implemented", type), + PSQLState.SYNTAX_ERROR); + } + // See http://archives.postgresql.org/pgsql-jdbc/2006-03/msg00096.php + /* + * else if 
(SQL_TSI_MONTH.equalsIgnoreCase(shortType)) return "month"; else if + * (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) return "quarter"; else if + * (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) return "week"; else if + * (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) return "year"; + */ + } + + /** + * database translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqldatabase(List parsedArgs) throws SQLException { + if (!parsedArgs.isEmpty()) { + throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "database"), + PSQLState.SYNTAX_ERROR); + } + return "current_database()"; + } + + /** + * ifnull translation. + * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqlifnull(List parsedArgs) throws SQLException { + return twoArgumentsFunctionCall("coalesce(", "ifnull", parsedArgs); + } + + /** + * user translation. 
+ * + * @param parsedArgs arguments + * @return sql call + * @throws SQLException if something wrong happens + */ + public static String sqluser(List parsedArgs) throws SQLException { + if (!parsedArgs.isEmpty()) { + throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "user"), + PSQLState.SYNTAX_ERROR); + } + return "user"; + } + + private static String singleArgumentFunctionCall(String call, String functionName, + List parsedArgs) throws PSQLException { + if (parsedArgs.size() != 1) { + throw new PSQLException(GT.tr("{0} function takes one and only one argument.", functionName), + PSQLState.SYNTAX_ERROR); + } + StringBuilder buf = new StringBuilder(); + buf.append(call); + buf.append(parsedArgs.get(0)); + return buf.append(')').toString(); + } + + private static String twoArgumentsFunctionCall(String call, String functionName, + List parsedArgs) throws PSQLException { + if (parsedArgs.size() != 2) { + throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", functionName), + PSQLState.SYNTAX_ERROR); + } + StringBuilder buf = new StringBuilder(); + buf.append(call); + buf.append(parsedArgs.get(0)).append(',').append(parsedArgs.get(1)); + return buf.append(')').toString(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions2.java b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions2.java new file mode 100644 index 0000000..aa8a748 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions2.java @@ -0,0 +1,705 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.lang.reflect.Method; +import java.sql.SQLException; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * This class stores supported escaped function. + * Note: this is a pgjdbc-internal class, so it is not supposed to be used outside of the driver. + */ +public final class EscapedFunctions2 { + // constants for timestampadd and timestampdiff + private static final String SQL_TSI_ROOT = "SQL_TSI_"; + private static final String SQL_TSI_DAY = "SQL_TSI_DAY"; + private static final String SQL_TSI_FRAC_SECOND = "SQL_TSI_FRAC_SECOND"; + private static final String SQL_TSI_HOUR = "SQL_TSI_HOUR"; + private static final String SQL_TSI_MINUTE = "SQL_TSI_MINUTE"; + private static final String SQL_TSI_MONTH = "SQL_TSI_MONTH"; + private static final String SQL_TSI_QUARTER = "SQL_TSI_QUARTER"; + private static final String SQL_TSI_SECOND = "SQL_TSI_SECOND"; + private static final String SQL_TSI_WEEK = "SQL_TSI_WEEK"; + private static final String SQL_TSI_YEAR = "SQL_TSI_YEAR"; + + /** + * storage for functions implementations + */ + private static final ConcurrentMap FUNCTION_MAP = createFunctionMap("sql"); + + public EscapedFunctions2() { + } + + private static ConcurrentMap createFunctionMap(String prefix) { + Method[] methods = EscapedFunctions2.class.getMethods(); + ConcurrentMap functionMap = new ConcurrentHashMap<>(methods.length * 2); + for (Method method : methods) { + if (method.getName().startsWith(prefix)) { + functionMap.put(method.getName().substring(prefix.length()).toLowerCase(Locale.US), method); + } + } + return functionMap; + } + + /** + * get Method object implementing the given function + * + * @param functionName name of the searched function + * @return a Method object or null if not found + 
*/ + public static Method getFunction(String functionName) { + Method method = FUNCTION_MAP.get(functionName); + if (method != null) { + return method; + } + //FIXME: this probably should not use the US locale + String nameLower = functionName.toLowerCase(Locale.US); + if (nameLower.equals(functionName)) { + // Input name was in lower case, the function is not there + return null; + } + method = FUNCTION_MAP.get(nameLower); + if (method != null && FUNCTION_MAP.size() < 1000) { + // Avoid OutOfMemoryError in case input function names are randomized + // The number of methods is finite, however the number of upper-lower case combinations + // is quite a few (e.g. substr, Substr, sUbstr, SUbstr, etc). + FUNCTION_MAP.putIfAbsent(functionName, method); + } + return method; + } + + // ** numeric functions translations ** + + /** + * ceiling to ceil translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlceiling(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "ceil(", "ceiling", parsedArgs); + } + + /** + * log to ln translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqllog(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "ln(", "log", parsedArgs); + } + + /** + * log10 to log translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqllog10(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "log(", "log10", parsedArgs); + } + + /** + * power to pow translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void 
sqlpower(StringBuilder buf, List parsedArgs) throws SQLException { + twoArgumentsFunctionCall(buf, "pow(", "power", parsedArgs); + } + + /** + * truncate to trunc translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqltruncate(StringBuilder buf, List parsedArgs) throws SQLException { + twoArgumentsFunctionCall(buf, "trunc(", "truncate", parsedArgs); + } + + // ** string functions translations ** + + /** + * char to chr translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlchar(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "chr(", "char", parsedArgs); + } + + /** + * concat translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + */ + public static void sqlconcat(StringBuilder buf, List parsedArgs) { + appendCall(buf, "(", "||", ")", parsedArgs); + } + + /** + * insert to overlay translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlinsert(StringBuilder buf, List parsedArgs) throws SQLException { + if (parsedArgs.size() != 4) { + throw new PSQLException(GT.tr("{0} function takes four and only four argument.", "insert"), + PSQLState.SYNTAX_ERROR); + } + buf.append("overlay("); + buf.append(parsedArgs.get(0)).append(" placing ").append(parsedArgs.get(3)); + buf.append(" from ").append(parsedArgs.get(1)).append(" for ").append(parsedArgs.get(2)); + buf.append(')'); + } + + /** + * lcase to lower translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqllcase(StringBuilder buf, List parsedArgs) throws SQLException { + 
singleArgumentFunctionCall(buf, "lower(", "lcase", parsedArgs); + } + + /** + * left to substring translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlleft(StringBuilder buf, List parsedArgs) throws SQLException { + if (parsedArgs.size() != 2) { + throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "left"), + PSQLState.SYNTAX_ERROR); + } + appendCall(buf, "substring(", " for ", ")", parsedArgs); + } + + /** + * length translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqllength(StringBuilder buf, List parsedArgs) throws SQLException { + if (parsedArgs.size() != 1) { + throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "length"), + PSQLState.SYNTAX_ERROR); + } + appendCall(buf, "length(trim(trailing from ", "", "))", parsedArgs); + } + + /** + * locate translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqllocate(StringBuilder buf, List parsedArgs) throws SQLException { + if (parsedArgs.size() == 2) { + appendCall(buf, "position(", " in ", ")", parsedArgs); + } else if (parsedArgs.size() == 3) { + String tmp = "position(" + parsedArgs.get(0) + " in substring(" + parsedArgs.get(1) + " from " + + parsedArgs.get(2) + "))"; + buf.append("(") + .append(parsedArgs.get(2)) + .append("*sign(") + .append(tmp) + .append(")+") + .append(tmp) + .append(")"); + } else { + throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "locate"), + PSQLState.SYNTAX_ERROR); + } + } + + /** + * ltrim translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void 
sqlltrim(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "trim(leading from ", "ltrim", parsedArgs); + } + + /** + * right to substring translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlright(StringBuilder buf, List parsedArgs) throws SQLException { + if (parsedArgs.size() != 2) { + throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "right"), + PSQLState.SYNTAX_ERROR); + } + buf.append("substring("); + buf.append(parsedArgs.get(0)) + .append(" from (length(") + .append(parsedArgs.get(0)) + .append(")+1-") + .append(parsedArgs.get(1)); + buf.append("))"); + } + + /** + * rtrim translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlrtrim(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "trim(trailing from ", "rtrim", parsedArgs); + } + + /** + * space translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlspace(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "repeat(' ',", "space", parsedArgs); + } + + /** + * substring to substr translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlsubstring(StringBuilder buf, List parsedArgs) throws SQLException { + int argSize = parsedArgs.size(); + if (argSize != 2 && argSize != 3) { + throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "substring"), + PSQLState.SYNTAX_ERROR); + } + appendCall(buf, "substr(", ",", ")", parsedArgs); + } + + /** + * ucase to upper translation + * + * @param buf 
The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlucase(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "upper(", "ucase", parsedArgs); + } + + /** + * curdate to current_date translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlcurdate(StringBuilder buf, List parsedArgs) throws SQLException { + zeroArgumentFunctionCall(buf, "current_date", "curdate", parsedArgs); + } + + /** + * curtime to current_time translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlcurtime(StringBuilder buf, List parsedArgs) throws SQLException { + zeroArgumentFunctionCall(buf, "current_time", "curtime", parsedArgs); + } + + /** + * dayname translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqldayname(StringBuilder buf, List parsedArgs) throws SQLException { + if (parsedArgs.size() != 1) { + throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayname"), + PSQLState.SYNTAX_ERROR); + } + appendCall(buf, "to_char(", ",", ",'Day')", parsedArgs); + } + + /** + * dayofmonth translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqldayofmonth(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "extract(day from ", "dayofmonth", parsedArgs); + } + + /** + * dayofweek translation adding 1 to postgresql function since we expect values from 1 to 7 + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if 
something wrong happens + */ + public static void sqldayofweek(StringBuilder buf, List parsedArgs) throws SQLException { + if (parsedArgs.size() != 1) { + throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayofweek"), + PSQLState.SYNTAX_ERROR); + } + appendCall(buf, "extract(dow from ", ",", ")+1", parsedArgs); + } + + /** + * dayofyear translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqldayofyear(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "extract(doy from ", "dayofyear", parsedArgs); + } + + /** + * hour translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlhour(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "extract(hour from ", "hour", parsedArgs); + } + + /** + * minute translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlminute(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "extract(minute from ", "minute", parsedArgs); + } + + /** + * month translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlmonth(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "extract(month from ", "month", parsedArgs); + } + + /** + * monthname translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlmonthname(StringBuilder buf, List parsedArgs) throws SQLException { + if (parsedArgs.size() != 1) { + throw 
new PSQLException(GT.tr("{0} function takes one and only one argument.", "monthname"), + PSQLState.SYNTAX_ERROR); + } + appendCall(buf, "to_char(", ",", ",'Month')", parsedArgs); + } + + /** + * quarter translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlquarter(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "extract(quarter from ", "quarter", parsedArgs); + } + + /** + * second translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlsecond(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "extract(second from ", "second", parsedArgs); + } + + /** + * week translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlweek(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "extract(week from ", "week", parsedArgs); + } + + /** + * year translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlyear(StringBuilder buf, List parsedArgs) throws SQLException { + singleArgumentFunctionCall(buf, "extract(year from ", "year", parsedArgs); + } + + /** + * time stamp add + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqltimestampadd(StringBuilder buf, List parsedArgs) throws SQLException { + if (parsedArgs.size() != 3) { + throw new PSQLException( + GT.tr("{0} function takes three and only three arguments.", "timestampadd"), + PSQLState.SYNTAX_ERROR); + } + buf.append('('); + appendInterval(buf, 
parsedArgs.get(0).toString(), parsedArgs.get(1).toString()); + buf.append('+').append(parsedArgs.get(2)).append(')'); + } + + private static void appendInterval(StringBuilder buf, String type, String value) throws SQLException { + if (!isTsi(type)) { + throw new PSQLException(GT.tr("Interval {0} not yet implemented", type), + PSQLState.SYNTAX_ERROR); + } + if (appendSingleIntervalCast(buf, SQL_TSI_DAY, type, value, "day") + || appendSingleIntervalCast(buf, SQL_TSI_SECOND, type, value, "second") + || appendSingleIntervalCast(buf, SQL_TSI_HOUR, type, value, "hour") + || appendSingleIntervalCast(buf, SQL_TSI_MINUTE, type, value, "minute") + || appendSingleIntervalCast(buf, SQL_TSI_MONTH, type, value, "month") + || appendSingleIntervalCast(buf, SQL_TSI_WEEK, type, value, "week") + || appendSingleIntervalCast(buf, SQL_TSI_YEAR, type, value, "year") + ) { + return; + } + if (areSameTsi(SQL_TSI_QUARTER, type)) { + buf.append("CAST((").append(value).append("::int * 3) || ' month' as interval)"); + return; + } + throw new PSQLException(GT.tr("Interval {0} not yet implemented", type), + PSQLState.NOT_IMPLEMENTED); + } + + private static boolean appendSingleIntervalCast(StringBuilder buf, String cmp, String type, String value, String pgType) { + if (!areSameTsi(type, cmp)) { + return false; + } + buf.ensureCapacity(buf.length() + 5 + 4 + 14 + value.length() + pgType.length()); + buf.append("CAST(").append(value).append("||' ").append(pgType).append("' as interval)"); + return true; + } + + /** + * Compares two TSI intervals. 
It is + * @param a first interval to compare + * @param b second interval to compare + * @return true when both intervals are equal (case insensitive) + */ + private static boolean areSameTsi(String a, String b) { + return a.length() == b.length() && b.length() > SQL_TSI_ROOT.length() + && a.regionMatches(true, SQL_TSI_ROOT.length(), b, SQL_TSI_ROOT.length(), b.length() - SQL_TSI_ROOT.length()); + } + + /** + * Checks if given input starts with {@link #SQL_TSI_ROOT} + * @param interval input string + * @return true if interval.startsWithIgnoreCase(SQL_TSI_ROOT) + */ + private static boolean isTsi(String interval) { + return interval.regionMatches(true, 0, SQL_TSI_ROOT, 0, SQL_TSI_ROOT.length()); + } + + /** + * time stamp diff + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqltimestampdiff(StringBuilder buf, List parsedArgs) throws SQLException { + if (parsedArgs.size() != 3) { + throw new PSQLException( + GT.tr("{0} function takes three and only three arguments.", "timestampdiff"), + PSQLState.SYNTAX_ERROR); + } + buf.append("extract( ") + .append(constantToDatePart(buf, parsedArgs.get(0).toString())) + .append(" from (") + .append(parsedArgs.get(2)) + .append("-") + .append(parsedArgs.get(1)) + .append("))"); + } + + private static String constantToDatePart(StringBuilder buf, String type) throws SQLException { + if (!isTsi(type)) { + throw new PSQLException(GT.tr("Interval {0} not yet implemented", type), + PSQLState.SYNTAX_ERROR); + } + if (areSameTsi(SQL_TSI_DAY, type)) { + return "day"; + } else if (areSameTsi(SQL_TSI_SECOND, type)) { + return "second"; + } else if (areSameTsi(SQL_TSI_HOUR, type)) { + return "hour"; + } else if (areSameTsi(SQL_TSI_MINUTE, type)) { + return "minute"; + } else { + throw new PSQLException(GT.tr("Interval {0} not yet implemented", type), + PSQLState.SYNTAX_ERROR); + } + // See 
http://archives.postgresql.org/pgsql-jdbc/2006-03/msg00096.php + /* + * else if (SQL_TSI_MONTH.equalsIgnoreCase(shortType)) return "month"; else if + * (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) return "quarter"; else if + * (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) return "week"; else if + * (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) return "year"; + */ + } + + /** + * database translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqldatabase(StringBuilder buf, List parsedArgs) throws SQLException { + zeroArgumentFunctionCall(buf, "current_database()", "database", parsedArgs); + } + + /** + * ifnull translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqlifnull(StringBuilder buf, List parsedArgs) throws SQLException { + twoArgumentsFunctionCall(buf, "coalesce(", "ifnull", parsedArgs); + } + + /** + * user translation + * + * @param buf The buffer to append into + * @param parsedArgs arguments + * @throws SQLException if something wrong happens + */ + public static void sqluser(StringBuilder buf, List parsedArgs) throws SQLException { + zeroArgumentFunctionCall(buf, "user", "user", parsedArgs); + } + + private static void zeroArgumentFunctionCall(StringBuilder buf, String call, String functionName, + List parsedArgs) throws PSQLException { + if (!parsedArgs.isEmpty()) { + throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", functionName), + PSQLState.SYNTAX_ERROR); + } + buf.append(call); + } + + private static void singleArgumentFunctionCall(StringBuilder buf, String call, String functionName, + List parsedArgs) throws PSQLException { + if (parsedArgs.size() != 1) { + throw new PSQLException(GT.tr("{0} function takes one and only one argument.", functionName), + PSQLState.SYNTAX_ERROR); + } + CharSequence arg0 = 
parsedArgs.get(0); + buf.ensureCapacity(buf.length() + call.length() + arg0.length() + 1); + buf.append(call).append(arg0).append(')'); + } + + private static void twoArgumentsFunctionCall(StringBuilder buf, String call, String functionName, + List parsedArgs) throws PSQLException { + if (parsedArgs.size() != 2) { + throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", functionName), + PSQLState.SYNTAX_ERROR); + } + appendCall(buf, call, ",", ")", parsedArgs); + } + + /** + * Appends {@code begin arg0 separator arg1 separator end} sequence to the input {@link StringBuilder} + * @param sb destination StringBuilder + * @param begin begin string + * @param separator separator string + * @param end end string + * @param args arguments + */ + public static void appendCall(StringBuilder sb, String begin, String separator, + String end, List args) { + int size = begin.length(); + // Typically just-in-time compiler would eliminate Iterator in case foreach is used, + // however the code below uses indexed iteration to keep the code independent from + // various JIT implementations (== avoid Iterator allocations even for not-so-smart JITs) + // see https://bugs.openjdk.java.net/browse/JDK-8166840 + // see http://2016.jpoint.ru/talks/cheremin/ (video and slides) + int numberOfArguments = args.size(); + for (int i = 0; i < numberOfArguments; i++) { + size += args.get(i).length(); + } + size += separator.length() * (numberOfArguments - 1); + sb.ensureCapacity(sb.length() + size + 1); + sb.append(begin); + for (int i = 0; i < numberOfArguments; i++) { + if (i > 0) { + sb.append(separator); + } + sb.append(args.get(i)); + } + sb.append(end); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/FieldMetadata.java b/pgjdbc/src/main/java/org/postgresql/jdbc/FieldMetadata.java new file mode 100644 index 0000000..dac04f7 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/FieldMetadata.java @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2016, 
PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.util.CanEstimateSize; + +/** + * This is an internal class to hold field metadata info like table name, column name, etc. + * This class is not meant to be used outside of pgjdbc. + */ +public class FieldMetadata implements CanEstimateSize { + public static class Key { + final int tableOid; + final int positionInTable; + + Key(int tableOid, int positionInTable) { + this.positionInTable = positionInTable; + this.tableOid = tableOid; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Key key = (Key) o; + + if (tableOid != key.tableOid) { + return false; + } + return positionInTable == key.positionInTable; + } + + @Override + public int hashCode() { + int result = tableOid; + result = 31 * result + positionInTable; + return result; + } + + @Override + public String toString() { + return "Key{" + + "tableOid=" + tableOid + + ", positionInTable=" + positionInTable + + '}'; + } + } + + final String columnName; + final String tableName; + final String schemaName; + final int nullable; + final boolean autoIncrement; + + public FieldMetadata(String columnName) { + this(columnName, "", "", PgResultSetMetaData.columnNullableUnknown, false); + } + + FieldMetadata(String columnName, String tableName, String schemaName, int nullable, + boolean autoIncrement) { + this.columnName = columnName; + this.tableName = tableName; + this.schemaName = schemaName; + this.nullable = nullable; + this.autoIncrement = autoIncrement; + } + + @Override + public long getSize() { + return columnName.length() * 2 + + tableName.length() * 2 + + schemaName.length() * 2 + + 4L + + 1L; + } + + @Override + public String toString() { + return "FieldMetadata{" + + "columnName='" + columnName + '\'' + + ", tableName='" + tableName + 
'\'' + + ", schemaName='" + schemaName + '\'' + + ", nullable=" + nullable + + ", autoIncrement=" + autoIncrement + + '}'; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/GSSEncMode.java b/pgjdbc/src/main/java/org/postgresql/jdbc/GSSEncMode.java new file mode 100644 index 0000000..ec755ba --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/GSSEncMode.java @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.PGProperty; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.util.Properties; + +public enum GSSEncMode { + + /** + * Do not use encrypted connections. + */ + DISABLE("disable"), + + /** + * Start with non-encrypted connection, then try encrypted one. + */ + ALLOW("allow"), + + /** + * Start with encrypted connection, fallback to non-encrypted (default). + */ + PREFER("prefer"), + + /** + * Ensure connection is encrypted. 
+ */ + REQUIRE("require"); + + private static final GSSEncMode[] VALUES = values(); + + public final String value; + + GSSEncMode(String value) { + this.value = value; + } + + public boolean requireEncryption() { + return this.compareTo(REQUIRE) >= 0; + } + + public static GSSEncMode of(Properties info) throws PSQLException { + String gssEncMode = PGProperty.GSS_ENC_MODE.getOrDefault(info); + // If gssEncMode is not set, fallback to allow + if (gssEncMode == null) { + return ALLOW; + } + + for (GSSEncMode mode : VALUES) { + if (mode.value.equalsIgnoreCase(gssEncMode)) { + return mode; + } + } + throw new PSQLException(GT.tr("Invalid gssEncMode value: {0}", gssEncMode), + PSQLState.CONNECTION_UNABLE_TO_CONNECT); + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLSavepoint.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLSavepoint.java new file mode 100644 index 0000000..f4d3949 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLSavepoint.java @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Utils; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.SQLException; +import java.sql.Savepoint; + +public class PSQLSavepoint implements Savepoint { + + private boolean isValid; + private final boolean isNamed; + private int id; + private String name; + + public PSQLSavepoint(int id) { + this.isValid = true; + this.isNamed = false; + this.id = id; + } + + public PSQLSavepoint(String name) { + this.isValid = true; + this.isNamed = true; + this.name = name; + } + + @Override + public int getSavepointId() throws SQLException { + if (!isValid) { + throw new PSQLException(GT.tr("Cannot reference a savepoint after it has been released."), + PSQLState.INVALID_SAVEPOINT_SPECIFICATION); + } + + if (isNamed) { + throw new PSQLException(GT.tr("Cannot retrieve the id of a named savepoint."), + PSQLState.WRONG_OBJECT_TYPE); + } + + return id; + } + + @Override + public String getSavepointName() throws SQLException { + if (!isValid) { + throw new PSQLException(GT.tr("Cannot reference a savepoint after it has been released."), + PSQLState.INVALID_SAVEPOINT_SPECIFICATION); + } + + if (!isNamed || name == null) { + throw new PSQLException(GT.tr("Cannot retrieve the name of an unnamed savepoint."), + PSQLState.WRONG_OBJECT_TYPE); + } + + return name; + } + + public void invalidate() { + isValid = false; + } + + public String getPGName() throws SQLException { + if (!isValid) { + throw new PSQLException(GT.tr("Cannot reference a savepoint after it has been released."), + PSQLState.INVALID_SAVEPOINT_SPECIFICATION); + } + + if (isNamed && name != null) { + // We need to quote and escape the name in case it + // contains spaces/quotes/etc. 
+ // + return Utils.escapeIdentifier(null, name).toString(); + } + + return "JDBC_SAVEPOINT_" + id; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLWarningWrapper.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLWarningWrapper.java new file mode 100644 index 0000000..f93b92d --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLWarningWrapper.java @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import java.sql.SQLWarning; + +/** + * Wrapper class for SQLWarnings that provides an optimisation to add + * new warnings to the tail of the SQLWarning singly linked list, avoiding Θ(n) insertion time + * of calling #setNextWarning on the head. By encapsulating this into a single object it allows + * users(ie PgStatement) to atomically set and clear the warning chain. + */ +public class PSQLWarningWrapper { + + private final SQLWarning firstWarning; + private SQLWarning lastWarning; + + public PSQLWarningWrapper(SQLWarning warning) { + firstWarning = warning; + lastWarning = warning; + } + + void addWarning(SQLWarning sqlWarning) { + lastWarning.setNextWarning(sqlWarning); + lastWarning = sqlWarning; + } + + SQLWarning getFirstWarning() { + return firstWarning; + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgArray.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgArray.java new file mode 100644 index 0000000..a7eeed5 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgArray.java @@ -0,0 +1,500 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.Driver; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.BaseStatement; +import org.postgresql.core.Field; +import org.postgresql.core.Oid; +import org.postgresql.core.Tuple; +import org.postgresql.jdbc.ArrayDecoding.PgArrayList; +import org.postgresql.jdbc2.ArrayAssistantRegistry; +import org.postgresql.util.ByteConverter; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.Array; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + *

Array is used to collect one column of query result data.

+ * + *

Read a field of type Array into either a natively-typed Java array object or a ResultSet. + * Accessor methods provide the ability to capture array slices.

+ * + *

Other than the constructor all methods are direct implementations of those specified for + * java.sql.Array. Please refer to the javadoc for java.sql.Array for detailed descriptions of the + * functionality and parameters of the methods of this class.

+ * + * @see ResultSet#getArray + */ +public class PgArray implements Array { + + static { + ArrayAssistantRegistry.register(Oid.UUID, new UUIDArrayAssistant()); + ArrayAssistantRegistry.register(Oid.UUID_ARRAY, new UUIDArrayAssistant()); + } + + /** + * A database connection. + */ + protected BaseConnection connection; + + /** + * The OID of this field. + */ + private final int oid; + + /** + * Field value as String. + */ + protected String fieldString; + + /** + * Value of field as {@link PgArrayList}. Will be initialized only once within + * {@link #buildArrayList(String)}. + */ + protected ArrayDecoding.PgArrayList arrayList; + + protected byte [] fieldBytes; + + private final ResourceLock lock = new ResourceLock(); + + private PgArray(BaseConnection connection, int oid) throws SQLException { + this.connection = connection; + this.oid = oid; + } + + /** + * Create a new Array. + * + * @param connection a database connection + * @param oid the oid of the array datatype + * @param fieldString the array data in string form + * @throws SQLException if something wrong happens + */ + public PgArray(BaseConnection connection, int oid, String fieldString) + throws SQLException { + this(connection, oid); + this.fieldString = fieldString; + } + + /** + * Create a new Array. 
+ * + * @param connection a database connection + * @param oid the oid of the array datatype + * @param fieldBytes the array data in byte form + * @throws SQLException if something wrong happens + */ + public PgArray(BaseConnection connection, int oid, byte [] fieldBytes) + throws SQLException { + this(connection, oid); + this.fieldBytes = fieldBytes; + } + + private BaseConnection getConnection() { + return connection; + } + + @Override + public Object getArray() throws SQLException { + return getArrayImpl(1, 0, null); + } + + @Override + public Object getArray(long index, int count) throws SQLException { + return getArrayImpl(index, count, null); + } + + public Object getArrayImpl(Map> map) throws SQLException { + return getArrayImpl(1, 0, map); + } + + @Override + public Object getArray(Map> map) throws SQLException { + return getArrayImpl(map); + } + + @Override + public Object getArray(long index, int count, Map> map) + throws SQLException { + return getArrayImpl(index, count, map); + } + + public Object getArrayImpl(long index, int count, Map> map) + throws SQLException { + + // for now maps aren't supported. 
+ if (map != null && !map.isEmpty()) { + throw Driver.notImplemented(this.getClass(), "getArrayImpl(long,int,Map)"); + } + + // array index is out of range + if (index < 1) { + throw new PSQLException(GT.tr("The array index is out of range: {0}", index), + PSQLState.DATA_ERROR); + } + + if (fieldBytes != null) { + return readBinaryArray(fieldBytes, (int) index, count); + } + + if (fieldString == null) { + return null; + } + + final PgArrayList arrayList = buildArrayList(fieldString); + + if (count == 0) { + count = arrayList.size(); + } + + // array index out of range + if ((index - 1) + count > arrayList.size()) { + throw new PSQLException( + GT.tr("The array index is out of range: {0}, number of elements: {1}.", + index + count, (long) arrayList.size()), + PSQLState.DATA_ERROR); + } + + return buildArray(arrayList, (int) index, count); + } + + private Object readBinaryArray(byte[] fieldBytes, int index, int count) throws SQLException { + return ArrayDecoding.readBinaryArray(index, count, fieldBytes, getConnection()); + } + + private ResultSet readBinaryResultSet(byte[] fieldBytes, int index, int count) + throws SQLException { + int dimensions = ByteConverter.int4(fieldBytes, 0); + // int flags = ByteConverter.int4(fieldBytes, 4); // bit 0: 0=no-nulls, 1=has-nulls + int elementOid = ByteConverter.int4(fieldBytes, 8); + int pos = 12; + int[] dims = new int[dimensions]; + for (int d = 0; d < dimensions; d++) { + dims[d] = ByteConverter.int4(fieldBytes, pos); + pos += 4; + /* int lbound = ByteConverter.int4(fieldBytes, pos); */ + pos += 4; + } + if (count > 0 && dimensions > 0) { + dims[0] = Math.min(count, dims[0]); + } + List rows = new ArrayList<>(); + Field[] fields = new Field[2]; + + storeValues(fieldBytes, rows, fields, elementOid, dims, pos, 0, index); + + BaseStatement stat = (BaseStatement) getConnection() + .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + return stat.createDriverResultSet(fields, rows); + } + + private int 
storeValues(byte[] fieldBytes, List rows, Field[] fields, int elementOid, + final int[] dims, + int pos, final int thisDimension, int index) throws SQLException { + // handle an empty array + if (dims.length == 0) { + fields[0] = new Field("INDEX", Oid.INT4); + fields[0].setFormat(Field.BINARY_FORMAT); + fields[1] = new Field("VALUE", elementOid); + fields[1].setFormat(Field.BINARY_FORMAT); + for (int i = 1; i < index; i++) { + int len = ByteConverter.int4(fieldBytes, pos); + pos += 4; + if (len != -1) { + pos += len; + } + } + } else if (thisDimension == dims.length - 1) { + fields[0] = new Field("INDEX", Oid.INT4); + fields[0].setFormat(Field.BINARY_FORMAT); + fields[1] = new Field("VALUE", elementOid); + fields[1].setFormat(Field.BINARY_FORMAT); + for (int i = 1; i < index; i++) { + int len = ByteConverter.int4(fieldBytes, pos); + pos += 4; + if (len != -1) { + pos += len; + } + } + for (int i = 0; i < dims[thisDimension]; i++) { + byte[][] rowData = new byte[2][]; + rowData[0] = new byte[4]; + ByteConverter.int4(rowData[0], 0, i + index); + rows.add(new Tuple(rowData)); + int len = ByteConverter.int4(fieldBytes, pos); + pos += 4; + if (len == -1) { + continue; + } + rowData[1] = new byte[len]; + System.arraycopy(fieldBytes, pos, rowData[1], 0, rowData[1].length); + pos += len; + } + } else { + fields[0] = new Field("INDEX", Oid.INT4); + fields[0].setFormat(Field.BINARY_FORMAT); + fields[1] = new Field("VALUE", oid); + fields[1].setFormat(Field.BINARY_FORMAT); + int nextDimension = thisDimension + 1; + int dimensionsLeft = dims.length - nextDimension; + for (int i = 1; i < index; i++) { + pos = calcRemainingDataLength(fieldBytes, dims, pos, elementOid, nextDimension); + } + for (int i = 0; i < dims[thisDimension]; i++) { + byte[][] rowData = new byte[2][]; + rowData[0] = new byte[4]; + ByteConverter.int4(rowData[0], 0, i + index); + rows.add(new Tuple(rowData)); + int dataEndPos = calcRemainingDataLength(fieldBytes, dims, pos, elementOid, nextDimension); + int 
dataLength = dataEndPos - pos; + rowData[1] = new byte[12 + 8 * dimensionsLeft + dataLength]; + ByteConverter.int4(rowData[1], 0, dimensionsLeft); + System.arraycopy(fieldBytes, 4, rowData[1], 4, 8); + System.arraycopy(fieldBytes, 12 + nextDimension * 8, rowData[1], 12, dimensionsLeft * 8); + System.arraycopy(fieldBytes, pos, rowData[1], 12 + dimensionsLeft * 8, dataLength); + pos = dataEndPos; + } + } + return pos; + } + + private int calcRemainingDataLength(byte[] fieldBytes, + int[] dims, int pos, int elementOid, int thisDimension) { + if (thisDimension == dims.length - 1) { + for (int i = 0; i < dims[thisDimension]; i++) { + int len = ByteConverter.int4(fieldBytes, pos); + pos += 4; + if (len == -1) { + continue; + } + pos += len; + } + } else { + pos = calcRemainingDataLength(fieldBytes, dims, elementOid, pos, thisDimension + 1); + } + return pos; + } + + /** + * Build {@link ArrayList} from field's string input. As a result of this method + * {@link #arrayList} is build. Method can be called many times in order to make sure that array + * list is ready to use, however {@link #arrayList} will be set only once during first call. + */ + @SuppressWarnings("try") + private PgArrayList buildArrayList(String fieldString) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (arrayList == null) { + arrayList = ArrayDecoding.buildArrayList(fieldString, getConnection().getTypeInfo().getArrayDelimiter(oid)); + } + return arrayList; + } + } + + /** + * Convert {@link ArrayList} to array. 
+ * + * @param input list to be converted into array + */ + private Object buildArray(ArrayDecoding.PgArrayList input, int index, int count) throws SQLException { + final BaseConnection connection = getConnection(); + return ArrayDecoding.readStringArray(index, count, connection.getTypeInfo().getPGArrayElement(oid), input, connection); + } + + @Override + public int getBaseType() throws SQLException { + return getConnection().getTypeInfo().getSQLType(getBaseTypeName()); + } + + @Override + public String getBaseTypeName() throws SQLException { + int elementOID = getConnection().getTypeInfo().getPGArrayElement(oid); + return getConnection().getTypeInfo().getPGType(elementOID); + } + + @Override + public ResultSet getResultSet() throws SQLException { + return getResultSetImpl(1, 0, null); + } + + @Override + public ResultSet getResultSet(long index, int count) throws SQLException { + return getResultSetImpl(index, count, null); + } + + @Override + public ResultSet getResultSet(Map> map) throws SQLException { + return getResultSetImpl(map); + } + + @Override + public ResultSet getResultSet(long index, int count, Map> map) + throws SQLException { + return getResultSetImpl(index, count, map); + } + + public ResultSet getResultSetImpl(Map> map) throws SQLException { + return getResultSetImpl(1, 0, map); + } + + public ResultSet getResultSetImpl(long index, int count, Map> map) + throws SQLException { + + // for now maps aren't supported. 
+ if (map != null && !map.isEmpty()) { + throw Driver.notImplemented(this.getClass(), "getResultSetImpl(long,int,Map)"); + } + + // array index is out of range + if (index < 1) { + throw new PSQLException(GT.tr("The array index is out of range: {0}", index), + PSQLState.DATA_ERROR); + } + + if (fieldBytes != null) { + return readBinaryResultSet(fieldBytes, (int) index, count); + } + + final PgArrayList arrayList = buildArrayList(fieldString); + + if (count == 0) { + count = arrayList.size(); + } + + // array index out of range + if ((--index) + count > arrayList.size()) { + throw new PSQLException( + GT.tr("The array index is out of range: {0}, number of elements: {1}.", + index + count, (long) arrayList.size()), + PSQLState.DATA_ERROR); + } + + List rows = new ArrayList<>(); + + Field[] fields = new Field[2]; + + // one dimensional array + if (arrayList.dimensionsCount <= 1) { + // array element type + final int baseOid = getConnection().getTypeInfo().getPGArrayElement(oid); + fields[0] = new Field("INDEX", Oid.INT4); + fields[1] = new Field("VALUE", baseOid); + + for (int i = 0; i < count; i++) { + int offset = (int) index + i; + byte[] [] t = new byte[2][0]; + String v = (String) arrayList.get(offset); + t[0] = getConnection().encodeString(Integer.toString(offset + 1)); + t[1] = v == null ? null : getConnection().encodeString(v); + rows.add(new Tuple(t)); + } + } else { + // when multi-dimensional + fields[0] = new Field("INDEX", Oid.INT4); + fields[1] = new Field("VALUE", oid); + for (int i = 0; i < count; i++) { + int offset = (int) index + i; + byte[] [] t = new byte[2][0]; + Object v = arrayList.get(offset); + + t[0] = getConnection().encodeString(Integer.toString(offset + 1)); + t[1] = v == null ? 
null : getConnection().encodeString(toString((ArrayDecoding.PgArrayList) v)); + rows.add(new Tuple(t)); + } + } + + BaseStatement stat = (BaseStatement) getConnection() + .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + return stat.createDriverResultSet(fields, rows); + } + + @Override + @SuppressWarnings({"rawtypes", "unchecked"}) + public String toString() { + if (fieldString == null && fieldBytes != null) { + try { + Object array = readBinaryArray(fieldBytes, 1, 0); + + final ArrayEncoding.ArrayEncoder arraySupport = ArrayEncoding.getArrayEncoder(array); + assert arraySupport != null; + fieldString = arraySupport.toArrayString(connection.getTypeInfo().getArrayDelimiter(oid), array); + } catch (SQLException e) { + fieldString = "NULL"; // punt + } + } + return fieldString; + } + + /** + * Convert array list to PG String representation (e.g. {0,1,2}). + */ + private String toString(ArrayDecoding.PgArrayList list) throws SQLException { + if (list == null) { + return "NULL"; + } + + StringBuilder b = new StringBuilder().append('{'); + + char delim = getConnection().getTypeInfo().getArrayDelimiter(oid); + + for (int i = 0; i < list.size(); i++) { + Object v = list.get(i); + + if (i > 0) { + b.append(delim); + } + + if (v == null) { + b.append("NULL"); + } else if (v instanceof ArrayDecoding.PgArrayList) { + b.append(toString((ArrayDecoding.PgArrayList) v)); + } else { + escapeArrayElement(b, (String) v); + } + } + + b.append('}'); + + return b.toString(); + } + + public static void escapeArrayElement(StringBuilder b, String s) { + b.append('"'); + for (int j = 0; j < s.length(); j++) { + char c = s.charAt(j); + if (c == '"' || c == '\\') { + b.append('\\'); + } + + b.append(c); + } + b.append('"'); + } + + public boolean isBinary() { + return fieldBytes != null; + } + + public byte [] toBytes() { + return fieldBytes; + } + + @Override + public void free() throws SQLException { + connection = null; + fieldString = null; + fieldBytes = 
null; + arrayList = null; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgBlob.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgBlob.java new file mode 100644 index 0000000..c7800af --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgBlob.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.BaseConnection; +import org.postgresql.largeobject.LargeObject; + +import java.io.InputStream; +import java.sql.Blob; +import java.sql.SQLException; + +@SuppressWarnings("try") +public class PgBlob extends AbstractBlobClob implements Blob { + + public PgBlob(BaseConnection conn, long oid) throws SQLException { + super(conn, oid); + } + + @Override + public InputStream getBinaryStream(long pos, long length) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + LargeObject subLO = getLo(false).copy(); + addSubLO(subLO); + if (pos > Integer.MAX_VALUE) { + subLO.seek64(pos - 1, LargeObject.SEEK_SET); + } else { + subLO.seek((int) pos - 1, LargeObject.SEEK_SET); + } + return subLO.getInputStream(length); + } + } + + @Override + public int setBytes(long pos, byte[] bytes) throws SQLException { + return setBytes(pos, bytes, 0, bytes.length); + } + + @Override + public int setBytes(long pos, byte[] bytes, int offset, int len) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + assertPosition(pos); + getLo(true).seek((int) (pos - 1)); + getLo(true).write(bytes, offset, len); + return len; + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgCallableStatement.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgCallableStatement.java new file mode 100644 index 0000000..55b7222 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgCallableStatement.java @@ -0,0 +1,1008 @@ +/* + * Copyright (c) 2004, PostgreSQL Global 
Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.Driver; +import org.postgresql.core.ParameterList; +import org.postgresql.core.Query; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLType; +import java.sql.SQLXML; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.Calendar; +import java.util.Map; + +@SuppressWarnings("try") +class PgCallableStatement extends PgPreparedStatement implements CallableStatement { + // Used by the callablestatement style methods + private final boolean isFunction; + // functionReturnType contains the user supplied value to check + // testReturn contains a modified version to make it easier to + // check the getXXX methods.. 
+ private int [] functionReturnType; + private int [] testReturn; + // returnTypeSet is true when a proper call to registerOutParameter has been made + private boolean returnTypeSet; + protected Object [] callResult; + private int lastIndex; + + PgCallableStatement(PgConnection connection, String sql, int rsType, int rsConcurrency, + int rsHoldability) throws SQLException { + super(connection, connection.borrowCallableQuery(sql), rsType, rsConcurrency, rsHoldability); + this.isFunction = preparedQuery.isFunction; + + if (this.isFunction) { + int inParamCount = this.preparedParameters.getInParameterCount() + 1; + this.testReturn = new int[inParamCount]; + this.functionReturnType = new int[inParamCount]; + } + } + + @Override + public int executeUpdate() throws SQLException { + if (isFunction) { + executeWithFlags(0); + return 0; + } + return super.executeUpdate(); + } + + @Override + public Object getObject(int i, Map> map) + throws SQLException { + return getObjectImpl(i, map); + } + + @Override + public Object getObject(String s, Map> map) throws SQLException { + return getObjectImpl(s, map); + } + + @Override + public boolean executeWithFlags(int flags) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + boolean hasResultSet = super.executeWithFlags(flags); + int[] functionReturnType = this.functionReturnType; + if (!isFunction || !returnTypeSet || functionReturnType == null) { + return hasResultSet; + } + + // If we are executing and there are out parameters + // callable statement function set the return data + if (!hasResultSet) { + throw new PSQLException(GT.tr("A CallableStatement was executed with nothing returned."), + PSQLState.NO_DATA); + } + + ResultSet rs = getResultSet(); + if (!rs.next()) { + throw new PSQLException(GT.tr("A CallableStatement was executed with nothing returned."), + PSQLState.NO_DATA); + } + + // figure out how many columns + int cols = rs.getMetaData().getColumnCount(); + + int outParameterCount = 
preparedParameters.getOutParameterCount(); + + if (cols != outParameterCount) { + throw new PSQLException( + GT.tr("A CallableStatement was executed with an invalid number of parameters"), + PSQLState.SYNTAX_ERROR); + } + + // reset last result fetched (for wasNull) + lastIndex = 0; + + // allocate enough space for all possible parameters without regard to in/out + Object[] callResult = new Object[preparedParameters.getParameterCount() + 1]; + this.callResult = callResult; + + // move them into the result set + for (int i = 0, j = 0; i < cols; i++, j++) { + // find the next out parameter, the assumption is that the functionReturnType + // array will be initialized with 0 and only out parameters will have values + // other than 0. 0 is the value for java.sql.Types.NULL, which should not + // conflict + while (j < functionReturnType.length && functionReturnType[j] == 0) { + j++; + } + + callResult[j] = rs.getObject(i + 1); + int columnType = rs.getMetaData().getColumnType(i + 1); + + if (columnType != functionReturnType[j]) { + // this is here for the sole purpose of passing the cts + if (columnType == Types.DOUBLE && functionReturnType[j] == Types.REAL) { + // return it as a float + Object result = callResult[j]; + if (result != null) { + callResult[j] = ((Double) result).floatValue(); + } + } else if (columnType == Types.REF_CURSOR && functionReturnType[j] == Types.OTHER) { + // For backwards compatibility reasons we support that ref cursors can be + // registered with both Types.OTHER and Types.REF_CURSOR so we allow + // this specific mismatch + } else { + throw new PSQLException(GT.tr( + "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.", + i + 1, "java.sql.Types=" + columnType, "java.sql.Types=" + functionReturnType[j]), + PSQLState.DATA_TYPE_MISMATCH); + } + } + + } + rs.close(); + result = null; + } + return false; + } + + /** + * {@inheritDoc} + * + *

Before executing a stored procedure call you must explicitly call registerOutParameter to + * register the java.sql.Type of each out parameter.

+ * + *

Note: When reading the value of an out parameter, you must use the getXXX method whose Java + * type XXX corresponds to the parameter's registered SQL type.

+ * + *

ONLY 1 RETURN PARAMETER if {?= call ..} syntax is used

+ * + * @param parameterIndex the first parameter is 1, the second is 2,... + * @param sqlType SQL type code defined by java.sql.Types; for parameters of type Numeric or + * Decimal use the version of registerOutParameter that accepts a scale value + * @throws SQLException if a database-access error occurs. + */ + @Override + public void registerOutParameter(int parameterIndex, int sqlType) + throws SQLException { + checkClosed(); + switch (sqlType) { + case Types.TINYINT: + // we don't have a TINYINT type use SMALLINT + sqlType = Types.SMALLINT; + break; + case Types.LONGVARCHAR: + sqlType = Types.VARCHAR; + break; + case Types.DECIMAL: + sqlType = Types.NUMERIC; + break; + case Types.FLOAT: + // float is the same as double + sqlType = Types.DOUBLE; + break; + case Types.VARBINARY: + case Types.LONGVARBINARY: + sqlType = Types.BINARY; + break; + case Types.BOOLEAN: + sqlType = Types.BIT; + break; + default: + break; + } + int[] functionReturnType = this.functionReturnType; + int[] testReturn = this.testReturn; + if (!isFunction || functionReturnType == null || testReturn == null) { + throw new PSQLException( + GT.tr( + "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."), + PSQLState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL); + } + + preparedParameters.registerOutParameter(parameterIndex, sqlType); + // functionReturnType contains the user supplied value to check + // testReturn contains a modified version to make it easier to + // check the getXXX methods.. 
+ functionReturnType[parameterIndex - 1] = sqlType; + testReturn[parameterIndex - 1] = sqlType; + + if (functionReturnType[parameterIndex - 1] == Types.CHAR + || functionReturnType[parameterIndex - 1] == Types.LONGVARCHAR) { + testReturn[parameterIndex - 1] = Types.VARCHAR; + } else if (functionReturnType[parameterIndex - 1] == Types.FLOAT) { + testReturn[parameterIndex - 1] = Types.REAL; // changes to streamline later error checking + } + returnTypeSet = true; + } + + @Override + public boolean wasNull() throws SQLException { + if (lastIndex == 0 || callResult == null) { + throw new PSQLException(GT.tr("wasNull cannot be call before fetching a result."), + PSQLState.OBJECT_NOT_IN_STATE); + } + + // check to see if the last access threw an exception + return callResult[lastIndex - 1] == null; + } + + @Override + public String getString(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.VARCHAR, "String"); + return (String) result; + } + + @Override + public boolean getBoolean(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.BIT, "Boolean"); + if (result == null) { + return false; + } + return BooleanTypeUtil.castToBoolean(result); + } + + @Override + public byte getByte(int parameterIndex) throws SQLException { + // fake tiny int with smallint + Object result = checkIndex(parameterIndex, Types.SMALLINT, "Byte"); + + if (result == null) { + return 0; + } + + return ((Integer) result).byteValue(); + + } + + @Override + public short getShort(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.SMALLINT, "Short"); + if (result == null) { + return 0; + } + return ((Integer) result).shortValue(); + } + + @Override + public int getInt(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.INTEGER, "Int"); + if (result == null) { + return 0; + } + + return (Integer) result; + } + + @Override + public long 
getLong(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.BIGINT, "Long"); + if (result == null) { + return 0; + } + + return (Long) result; + } + + @Override + public float getFloat(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.REAL, "Float"); + if (result == null) { + return 0; + } + + return (Float) result; + } + + @Override + public double getDouble(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.DOUBLE, "Double"); + if (result == null) { + return 0; + } + + return (Double) result; + } + + @Override + @SuppressWarnings("deprecation") + public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException { + Object result = checkIndex(parameterIndex, Types.NUMERIC, "BigDecimal"); + return (BigDecimal) result; + } + + @Override + public byte [] getBytes(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.VARBINARY, Types.BINARY, "Bytes"); + return (byte []) result; + } + + @Override + public Date getDate(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.DATE, "Date"); + return (Date) result; + } + + @Override + public Time getTime(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.TIME, "Time"); + return (Time) result; + } + + @Override + public Timestamp getTimestamp(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.TIMESTAMP, "Timestamp"); + return (Timestamp) result; + } + + @Override + public Object getObject(int parameterIndex) throws SQLException { + return getCallResult(parameterIndex); + } + + /** + * helperfunction for the getXXX calls to check isFunction and index == 1 Compare BOTH type fields + * against the return type. 
+ * + * @param parameterIndex parameter index (1-based) + * @param type1 type 1 + * @param type2 type 2 + * @param getName getter name + * @throws SQLException if something goes wrong + */ + protected Object checkIndex(int parameterIndex, int type1, int type2, String getName) + throws SQLException { + Object result = getCallResult(parameterIndex); + int testReturn = this.testReturn != null ? this.testReturn[parameterIndex - 1] : -1; + if (type1 != testReturn && type2 != testReturn) { + throw new PSQLException( + GT.tr("Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.", + "java.sql.Types=" + testReturn, getName, + "java.sql.Types=" + type1), + PSQLState.MOST_SPECIFIC_TYPE_DOES_NOT_MATCH); + } + return result; + } + + /** + * Helper function for the getXXX calls to check isFunction and index == 1. + * + * @param parameterIndex parameter index (1-based) + * @param type type + * @param getName getter name + * @throws SQLException if given index is not valid + */ + protected Object checkIndex(int parameterIndex, + int type, String getName) throws SQLException { + Object result = getCallResult(parameterIndex); + int testReturn = this.testReturn != null ? 
this.testReturn[parameterIndex - 1] : -1; + if (type != testReturn) { + throw new PSQLException( + GT.tr("Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.", + "java.sql.Types=" + testReturn, getName, + "java.sql.Types=" + type), + PSQLState.MOST_SPECIFIC_TYPE_DOES_NOT_MATCH); + } + return result; + } + + private Object getCallResult(int parameterIndex) throws SQLException { + checkClosed(); + + if (!isFunction) { + throw new PSQLException( + GT.tr( + "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."), + PSQLState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL); + } + + if (!returnTypeSet) { + throw new PSQLException(GT.tr("No function outputs were registered."), + PSQLState.OBJECT_NOT_IN_STATE); + } + + Object [] callResult = this.callResult; + if (callResult == null) { + throw new PSQLException( + GT.tr("Results cannot be retrieved from a CallableStatement before it is executed."), + PSQLState.NO_DATA); + } + + lastIndex = parameterIndex; + return callResult[parameterIndex - 1]; + } + + @Override + protected BatchResultHandler createBatchHandler(Query[] queries, + ParameterList[] parameterLists) { + return new CallableBatchResultHandler(this, queries, parameterLists); + } + + @Override + public Array getArray(int i) throws SQLException { + Object result = checkIndex(i, Types.ARRAY, "Array"); + return (Array) result; + } + + @Override + public BigDecimal getBigDecimal(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.NUMERIC, "BigDecimal"); + return (BigDecimal) result; + } + + @Override + public Blob getBlob(int i) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getBlob(int)"); + } + + @Override + public Clob getClob(int i) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getClob(int)"); + } + + public Object getObjectImpl(int i, Map> map) throws SQLException { + if (map == null || map.isEmpty()) { + return 
getObject(i); + } + throw Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)"); + } + + @Override + public Ref getRef(int i) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getRef(int)"); + } + + @Override + public Date getDate(int i, Calendar cal) throws SQLException { + Object result = checkIndex(i, Types.DATE, "Date"); + + if (result == null) { + return null; + } + + String value = result.toString(); + return getTimestampUtils().toDate(cal, value); + } + + @Override + public Time getTime(int i, Calendar cal) throws SQLException { + Object result = checkIndex(i, Types.TIME, "Time"); + + if (result == null) { + return null; + } + + String value = result.toString(); + return getTimestampUtils().toTime(cal, value); + } + + @Override + public Timestamp getTimestamp(int i, Calendar cal) throws SQLException { + Object result = checkIndex(i, Types.TIMESTAMP, "Timestamp"); + + if (result == null) { + return null; + } + + String value = result.toString(); + return getTimestampUtils().toTimestamp(cal, value); + } + + @Override + public void registerOutParameter(int parameterIndex, int sqlType, String typeName) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "registerOutParameter(int,int,String)"); + } + + @Override + public void setObject(String parameterName, Object x, SQLType targetSqlType, + int scaleOrLength) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setObject"); + } + + @Override + public void setObject(String parameterName, Object x, SQLType targetSqlType) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "setObject"); + } + + @Override + public void registerOutParameter(int parameterIndex, SQLType sqlType) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "registerOutParameter"); + } + + @Override + public void registerOutParameter(int parameterIndex, SQLType sqlType, int scale) + throws SQLException { + throw 
Driver.notImplemented(this.getClass(), "registerOutParameter"); + } + + @Override + public void registerOutParameter(int parameterIndex, SQLType sqlType, String typeName) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "registerOutParameter"); + } + + @Override + public void registerOutParameter(String parameterName, SQLType sqlType) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "registerOutParameter"); + } + + @Override + public void registerOutParameter(String parameterName, SQLType sqlType, int scale) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "registerOutParameter"); + } + + @Override + public void registerOutParameter(String parameterName, SQLType sqlType, String typeName) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "registerOutParameter"); + } + + @Override + public RowId getRowId(int parameterIndex) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getRowId(int)"); + } + + @Override + public RowId getRowId(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getRowId(String)"); + } + + @Override + public void setRowId(String parameterName, RowId x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setRowId(String, RowId)"); + } + + @Override + public void setNString(String parameterName, String value) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setNString(String, String)"); + } + + @Override + public void setNCharacterStream(String parameterName, Reader value, long length) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "setNCharacterStream(String, Reader, long)"); + } + + @Override + public void setNCharacterStream(String parameterName, Reader value) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setNCharacterStream(String, Reader)"); + } + + @Override + public void setCharacterStream(String parameterName, 
Reader value, long length) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "setCharacterStream(String, Reader, long)"); + } + + @Override + public void setCharacterStream(String parameterName, Reader value) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setCharacterStream(String, Reader)"); + } + + @Override + public void setBinaryStream(String parameterName, InputStream value, long length) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "setBinaryStream(String, InputStream, long)"); + } + + @Override + public void setBinaryStream(String parameterName, InputStream value) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setBinaryStream(String, InputStream)"); + } + + @Override + public void setAsciiStream(String parameterName, InputStream value, long length) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "setAsciiStream(String, InputStream, long)"); + } + + @Override + public void setAsciiStream(String parameterName, InputStream value) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setAsciiStream(String, InputStream)"); + } + + @Override + public void setNClob(String parameterName, NClob value) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setNClob(String, NClob)"); + } + + @Override + public void setClob(String parameterName, Reader reader, long length) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setClob(String, Reader, long)"); + } + + @Override + public void setClob(String parameterName, Reader reader) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setClob(String, Reader)"); + } + + @Override + public void setBlob(String parameterName, InputStream inputStream, long length) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "setBlob(String, InputStream, long)"); + } + + @Override + public void setBlob(String parameterName, InputStream 
inputStream) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setBlob(String, InputStream)"); + } + + @Override + public void setBlob(String parameterName, Blob x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setBlob(String, Blob)"); + } + + @Override + public void setClob(String parameterName, Clob x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setClob(String, Clob)"); + } + + @Override + public void setNClob(String parameterName, Reader reader, long length) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setNClob(String, Reader, long)"); + } + + @Override + public void setNClob(String parameterName, Reader reader) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setNClob(String, Reader)"); + } + + @Override + public NClob getNClob(int parameterIndex) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getNClob(int)"); + } + + @Override + public NClob getNClob(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getNClob(String)"); + } + + @Override + public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setSQLXML(String, SQLXML)"); + } + + @Override + public SQLXML getSQLXML(int parameterIndex) throws SQLException { + Object result = checkIndex(parameterIndex, Types.SQLXML, "SQLXML"); + return (SQLXML) result; + } + + @Override + public SQLXML getSQLXML(String parameterIndex) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getSQLXML(String)"); + } + + @Override + public String getNString(int parameterIndex) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getNString(int)"); + } + + @Override + public String getNString(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getNString(String)"); + } + + @Override + public Reader 
getNCharacterStream(int parameterIndex) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getNCharacterStream(int)"); + } + + @Override + public Reader getNCharacterStream(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getNCharacterStream(String)"); + } + + @Override + public Reader getCharacterStream(int parameterIndex) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getCharacterStream(int)"); + } + + @Override + public Reader getCharacterStream(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getCharacterStream(String)"); + } + + @Override + public T getObject(int parameterIndex, Class type) + throws SQLException { + if (type == ResultSet.class) { + return type.cast(getObject(parameterIndex)); + } + throw new PSQLException(GT.tr("Unsupported type conversion to {1}.", type), + PSQLState.INVALID_PARAMETER_VALUE); + } + + @Override + public T getObject(String parameterName, Class type) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getObject(String, Class)"); + } + + @Override + public void registerOutParameter(String parameterName, int sqlType) throws SQLException { + throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int)"); + } + + @Override + public void registerOutParameter(String parameterName, int sqlType, int scale) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int,int)"); + } + + @Override + public void registerOutParameter(String parameterName, int sqlType, String typeName) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int,String)"); + } + + @Override + public URL getURL(int parameterIndex) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getURL(String)"); + } + + @Override + public void setURL(String parameterName, URL val) throws SQLException { + throw 
Driver.notImplemented(this.getClass(), "setURL(String,URL)"); + } + + @Override + public void setNull(String parameterName, int sqlType) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setNull(String,int)"); + } + + @Override + public void setBoolean(String parameterName, boolean x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setBoolean(String,boolean)"); + } + + @Override + public void setByte(String parameterName, byte x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setByte(String,byte)"); + } + + @Override + public void setShort(String parameterName, short x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setShort(String,short)"); + } + + @Override + public void setInt(String parameterName, int x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setInt(String,int)"); + } + + @Override + public void setLong(String parameterName, long x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setLong(String,long)"); + } + + @Override + public void setFloat(String parameterName, float x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setFloat(String,float)"); + } + + @Override + public void setDouble(String parameterName, double x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setDouble(String,double)"); + } + + @Override + public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setBigDecimal(String,BigDecimal)"); + } + + @Override + public void setString(String parameterName, String x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setString(String,String)"); + } + + @Override + public void setBytes(String parameterName, byte [] x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setBytes(String,byte)"); + } + + @Override + public void setDate(String parameterName, Date 
x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setDate(String,Date)"); + } + + @Override + public void setTime(String parameterName, Time x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setTime(String,Time)"); + } + + @Override + public void setTimestamp(String parameterName, Timestamp x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setTimestamp(String,Timestamp)"); + } + + @Override + public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setAsciiStream(String,InputStream,int)"); + } + + @Override + public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setBinaryStream(String,InputStream,int)"); + } + + @Override + public void setObject(String parameterName, Object x, int targetSqlType, int scale) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "setObject(String,Object,int,int)"); + } + + @Override + public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setObject(String,Object,int)"); + } + + @Override + public void setObject(String parameterName, Object x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setObject(String,Object)"); + } + + @Override + public void setCharacterStream(String parameterName, Reader reader, int length) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "setCharacterStream(String,Reader,int)"); + } + + @Override + public void setDate(String parameterName, Date x, Calendar cal) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setDate(String,Date,Calendar)"); + } + + @Override + public void setTime(String parameterName, Time x, Calendar cal) throws SQLException { + throw Driver.notImplemented(this.getClass(), 
"setTime(String,Time,Calendar)"); + } + + @Override + public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setTimestamp(String,Timestamp,Calendar)"); + } + + @Override + public void setNull(String parameterName, int sqlType, String typeName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setNull(String,int,String)"); + } + + @Override + public String getString(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getString(String)"); + } + + @Override + public boolean getBoolean(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getBoolean(String)"); + } + + @Override + public byte getByte(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getByte(String)"); + } + + @Override + public short getShort(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getShort(String)"); + } + + @Override + public int getInt(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getInt(String)"); + } + + @Override + public long getLong(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getLong(String)"); + } + + @Override + public float getFloat(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getFloat(String)"); + } + + @Override + public double getDouble(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getDouble(String)"); + } + + @Override + public byte [] getBytes(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getBytes(String)"); + } + + @Override + public Date getDate(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getDate(String)"); + } + + @Override + public 
Time getTime(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getTime(String)"); + } + + @Override + public Timestamp getTimestamp(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getTimestamp(String)"); + } + + @Override + public Object getObject(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getObject(String)"); + } + + @Override + public BigDecimal getBigDecimal(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getBigDecimal(String)"); + } + + public Object getObjectImpl(String parameterName, Map> map) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getObject(String,Map)"); + } + + @Override + public Ref getRef(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getRef(String)"); + } + + @Override + public Blob getBlob(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getBlob(String)"); + } + + @Override + public Clob getClob(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getClob(String)"); + } + + @Override + public Array getArray(String parameterName) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getArray(String)"); + } + + @Override + public Date getDate(String parameterName, Calendar cal) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getDate(String,Calendar)"); + } + + @Override + public Time getTime(String parameterName, Calendar cal) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getTime(String,Calendar)"); + } + + @Override + public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException { + throw Driver.notImplemented(this.getClass(), "getTimestamp(String,Calendar)"); + } + + @Override + public URL getURL(String parameterName) throws SQLException 
{ + throw Driver.notImplemented(this.getClass(), "getURL(String)"); + } + + @Override + public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException { + // ignore scale for now + registerOutParameter(parameterIndex, sqlType); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgClob.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgClob.java new file mode 100644 index 0000000..b4784e6 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgClob.java @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.Driver; +import org.postgresql.core.BaseConnection; +import org.postgresql.largeobject.LargeObject; + +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.Reader; +import java.io.Writer; +import java.nio.charset.Charset; +import java.sql.Clob; +import java.sql.SQLException; + +@SuppressWarnings("try") +public class PgClob extends AbstractBlobClob implements Clob { + + public PgClob(BaseConnection conn, long oid) throws SQLException { + super(conn, oid); + } + + @Override + public Reader getCharacterStream(long pos, long length) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + throw Driver.notImplemented(this.getClass(), "getCharacterStream(long, long)"); + } + } + + @Override + public int setString(long pos, String str) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + throw Driver.notImplemented(this.getClass(), "setString(long,str)"); + } + } + + @Override + public int setString(long pos, String str, int offset, int len) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + throw Driver.notImplemented(this.getClass(), "setString(long,String,int,int)"); + } + } + + @Override + public OutputStream 
setAsciiStream(long pos) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + throw Driver.notImplemented(this.getClass(), "setAsciiStream(long)"); + } + } + + @Override + public Writer setCharacterStream(long pos) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + throw Driver.notImplemented(this.getClass(), "setCharacterStream(long)"); + } + } + + @Override + public InputStream getAsciiStream() throws SQLException { + return getBinaryStream(); + } + + @Override + public Reader getCharacterStream() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + Charset connectionCharset = Charset.forName(conn.getEncoding().name()); + return new InputStreamReader(getBinaryStream(), connectionCharset); + } + } + + @Override + public String getSubString(long i, int j) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + assertPosition(i, j); + LargeObject lo = getLo(false); + lo.seek((int) i - 1); + return new String(lo.read(j)); + } + } + + /** + * For now, this is not implemented. + */ + @Override + public long position(String pattern, long start) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + throw Driver.notImplemented(this.getClass(), "position(String,long)"); + } + } + + /** + * This should be simply passing the byte value of the pattern Blob. 
+ */ + @Override + public long position(Clob pattern, long start) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + throw Driver.notImplemented(this.getClass(), "position(Clob,start)"); + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnection.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnection.java new file mode 100644 index 0000000..1b9de33 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnection.java @@ -0,0 +1,1964 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.Driver; +import org.postgresql.PGNotification; +import org.postgresql.PGProperty; +import org.postgresql.copy.CopyManager; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.BaseStatement; +import org.postgresql.core.CachedQuery; +import org.postgresql.core.ConnectionFactory; +import org.postgresql.core.Encoding; +import org.postgresql.core.Oid; +import org.postgresql.core.Query; +import org.postgresql.core.QueryExecutor; +import org.postgresql.core.ReplicationProtocol; +import org.postgresql.core.ResultHandlerBase; +import org.postgresql.core.ServerVersion; +import org.postgresql.core.SqlCommand; +import org.postgresql.core.TransactionState; +import org.postgresql.core.TypeInfo; +import org.postgresql.core.Utils; +import org.postgresql.core.Version; +import org.postgresql.fastpath.Fastpath; +import org.postgresql.geometric.PGbox; +import org.postgresql.geometric.PGcircle; +import org.postgresql.geometric.PGline; +import org.postgresql.geometric.PGlseg; +import org.postgresql.geometric.PGpath; +import org.postgresql.geometric.PGpoint; +import org.postgresql.geometric.PGpolygon; +import org.postgresql.largeobject.LargeObjectManager; +import org.postgresql.replication.PGReplicationConnection; +import org.postgresql.replication.PGReplicationConnectionImpl; 
+import org.postgresql.util.DriverInfo; +import org.postgresql.util.GT; +import org.postgresql.util.HostSpec; +import org.postgresql.util.LazyCleaner; +import org.postgresql.util.LruCache; +import org.postgresql.util.PGBinaryObject; +import org.postgresql.util.PGInterval; +import org.postgresql.util.PGmoney; +import org.postgresql.util.PGobject; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.xml.DefaultPGXmlFactoryFactory; +import org.postgresql.xml.LegacyInsecurePGXmlFactoryFactory; +import org.postgresql.xml.PGXmlFactoryFactory; + +import java.io.IOException; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.security.Permission; +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.ClientInfoStatus; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.SQLPermission; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Struct; +import java.sql.Types; +import java.util.Arrays; +import java.util.Collections; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Properties; +import java.util.Set; +import java.util.StringTokenizer; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.Executor; +import java.util.concurrent.locks.Condition; +import java.util.logging.Level; +import java.util.logging.Logger; + +@SuppressWarnings("try") +public class PgConnection implements BaseConnection { + + private static final Logger LOGGER = 
Logger.getLogger(PgConnection.class.getName()); + private static final Set SUPPORTED_BINARY_OIDS = getSupportedBinaryOids(); + private static final SQLPermission SQL_PERMISSION_ABORT = new SQLPermission("callAbort"); + private static final SQLPermission SQL_PERMISSION_NETWORK_TIMEOUT = new SQLPermission("setNetworkTimeout"); + + private static final MethodHandle SYSTEM_GET_SECURITY_MANAGER; + private static final MethodHandle SECURITY_MANAGER_CHECK_PERMISSION; + + static { + MethodHandle systemGetSecurityManagerHandle = null; + MethodHandle securityManagerCheckPermission = null; + try { + Class securityManagerClass = Class.forName("java.lang.SecurityManager"); + systemGetSecurityManagerHandle = + MethodHandles.lookup().findStatic(System.class, "getSecurityManager", + MethodType.methodType(securityManagerClass)); + securityManagerCheckPermission = + MethodHandles.lookup().findVirtual(securityManagerClass, "checkPermission", + MethodType.methodType(void.class, Permission.class)); + } catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ignore) { + } + SYSTEM_GET_SECURITY_MANAGER = systemGetSecurityManagerHandle; + SECURITY_MANAGER_CHECK_PERMISSION = securityManagerCheckPermission; + } + + private enum ReadOnlyBehavior { + ignore, + transaction, + always + } + + private final ResourceLock lock = new ResourceLock(); + private final Condition lockCondition = lock.newCondition(); + + // + // Data initialized on construction: + // + private final Properties clientInfo; + + /* URL we were created via */ + private final String creatingURL; + + private final ReadOnlyBehavior readOnlyBehavior; + + private Throwable openStackTrace; + + /** + * This field keeps finalize action alive, so its .finalize() method is called only + * when the connection itself becomes unreachable. + * Moving .finalize() to a different object allows JVM to release all the other objects + * referenced in PgConnection early. 
+ */ + private final PgConnectionCleaningAction finalizeAction; + private final Object leakHandle = new Object(); + + /* Actual network handler */ + private final QueryExecutor queryExecutor; + + /* Query that runs COMMIT */ + private final Query commitQuery; + /* Query that runs ROLLBACK */ + private final Query rollbackQuery; + + private final CachedQuery setSessionReadOnly; + + private final CachedQuery setSessionNotReadOnly; + + private final TypeInfo typeCache; + + private boolean disableColumnSanitiser; + + // Default statement prepare threshold. + protected int prepareThreshold; + + /** + * Default fetch size for statement. + * + * @see PGProperty#DEFAULT_ROW_FETCH_SIZE + */ + protected int defaultFetchSize; + + // Default forcebinary option. + protected boolean forcebinary; + + /** + * Oids for which binary transfer should be disabled. + */ + private final Set binaryDisabledOids; + + private int rsHoldability = ResultSet.CLOSE_CURSORS_AT_COMMIT; + private int savepointId; + // Connection's autocommit state. + private boolean autoCommit = true; + // Connection's readonly state. + private boolean readOnly; + // Filter out database objects for which the current user has no privileges granted from the DatabaseMetaData + private final boolean hideUnprivilegedObjects ; + // Whether to include error details in logging and exceptions + private final boolean logServerErrorDetail; + // Bind String to UNSPECIFIED or VARCHAR? + private final boolean bindStringAsVarchar; + + // Current warnings; there might be more on queryExecutor too. + private SQLWarning firstWarning; + + /** + * Replication protocol in current version postgresql(10devel) supports a limited number of + * commands. 
+ */ + private final boolean replicationConnection; + + private final LruCache fieldMetadataCache; + + private final String xmlFactoryFactoryClass; + private PGXmlFactoryFactory xmlFactoryFactory; + private final LazyCleaner.Cleanable cleanable; + + final CachedQuery borrowQuery(String sql) throws SQLException { + return queryExecutor.borrowQuery(sql); + } + + final CachedQuery borrowCallableQuery(String sql) throws SQLException { + return queryExecutor.borrowCallableQuery(sql); + } + + private CachedQuery borrowReturningQuery(String sql, String [] columnNames) + throws SQLException { + return queryExecutor.borrowReturningQuery(sql, columnNames); + } + + @Override + public CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized, + String... columnNames) + throws SQLException { + return queryExecutor.createQuery(sql, escapeProcessing, isParameterized, columnNames); + } + + void releaseQuery(CachedQuery cachedQuery) { + queryExecutor.releaseQuery(cachedQuery); + } + + @Override + public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) { + queryExecutor.setFlushCacheOnDeallocate(flushCacheOnDeallocate); + LOGGER.log(Level.FINE, " setFlushCacheOnDeallocate = {0}", flushCacheOnDeallocate); + } + + // + // Ctor. 
+ // + @SuppressWarnings("this-escape") + public PgConnection(HostSpec[] hostSpecs, + Properties info, + String url) throws SQLException { + // Print out the driver version number + LOGGER.log(Level.FINE, DriverInfo.DRIVER_FULL_NAME); + + this.creatingURL = url; + + this.readOnlyBehavior = getReadOnlyBehavior(PGProperty.READ_ONLY_MODE.getOrDefault(info)); + + setDefaultFetchSize(PGProperty.DEFAULT_ROW_FETCH_SIZE.getInt(info)); + + setPrepareThreshold(PGProperty.PREPARE_THRESHOLD.getInt(info)); + if (prepareThreshold == -1) { + setForceBinary(true); + } + + // Now make the initial connection and set up local state + this.queryExecutor = ConnectionFactory.openConnection(hostSpecs, info); + + // WARNING for unsupported servers (8.1 and lower are not supported) + if (LOGGER.isLoggable(Level.WARNING) && !haveMinimumServerVersion(ServerVersion.v8_2)) { + LOGGER.log(Level.WARNING, "Unsupported Server Version: {0}", queryExecutor.getServerVersion()); + } + + setSessionReadOnly = createQuery("SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY", false, true); + setSessionNotReadOnly = createQuery("SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE", false, true); + + // Set read-only early if requested + if (PGProperty.READ_ONLY.getBoolean(info)) { + setReadOnly(true); + } + + this.hideUnprivilegedObjects = PGProperty.HIDE_UNPRIVILEGED_OBJECTS.getBoolean(info); + + // get oids that support binary transfer + Set binaryOids = getBinaryEnabledOids(info); + // get oids that should be disabled from transfer + binaryDisabledOids = getBinaryDisabledOids(info); + // if there are any, remove them from the enabled ones + if (!binaryDisabledOids.isEmpty()) { + binaryOids.removeAll(binaryDisabledOids); + } + + // split for receive and send for better control + Set useBinarySendForOids = new HashSet<>(binaryOids); + + Set useBinaryReceiveForOids = new HashSet<>(binaryOids); + + /* + * Does not pass unit tests because unit tests expect setDate to have millisecond accuracy + * whereas 
the binary transfer only supports date accuracy. + */ + useBinarySendForOids.remove(Oid.DATE); + + queryExecutor.setBinaryReceiveOids(useBinaryReceiveForOids); + queryExecutor.setBinarySendOids(useBinarySendForOids); + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " types using binary send = {0}", oidsToString(useBinarySendForOids)); + LOGGER.log(Level.FINEST, " types using binary receive = {0}", oidsToString(useBinaryReceiveForOids)); + LOGGER.log(Level.FINEST, " integer date/time = {0}", queryExecutor.getIntegerDateTimes()); + } + + // + // String -> text or unknown? + // + + String stringType = PGProperty.STRING_TYPE.getOrDefault(info); + if (stringType != null) { + if ("unspecified".equalsIgnoreCase(stringType)) { + bindStringAsVarchar = false; + } else if ("varchar".equalsIgnoreCase(stringType)) { + bindStringAsVarchar = true; + } else { + throw new PSQLException( + GT.tr("Unsupported value for stringtype parameter: {0}", stringType), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else { + bindStringAsVarchar = true; + } + + // Initialize timestamp stuff + timestampUtils = new TimestampUtils(!queryExecutor.getIntegerDateTimes(), + new QueryExecutorTimeZoneProvider(queryExecutor)); + + // Initialize common queries. 
+ // isParameterized==true so full parse is performed and the engine knows the query + // is not a compound query with ; inside, so it could use parse/bind/exec messages + commitQuery = createQuery("COMMIT", false, true).query; + rollbackQuery = createQuery("ROLLBACK", false, true).query; + + int unknownLength = PGProperty.UNKNOWN_LENGTH.getInt(info); + + // Initialize object handling + TypeInfo typeCache = createTypeInfo(this, unknownLength); + this.typeCache = typeCache; + initObjectTypes(info); + + if (PGProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(info)) { + openStackTrace = new Throwable("Connection was created at this point:"); + } + finalizeAction = new PgConnectionCleaningAction(lock, openStackTrace, queryExecutor.getCloseAction()); + this.logServerErrorDetail = PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info); + this.disableColumnSanitiser = PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(info); + + if (haveMinimumServerVersion(ServerVersion.v8_3)) { + typeCache.addCoreType("uuid", Oid.UUID, Types.OTHER, "java.util.UUID", Oid.UUID_ARRAY); + typeCache.addCoreType("xml", Oid.XML, Types.SQLXML, "java.sql.SQLXML", Oid.XML_ARRAY); + } + + this.clientInfo = new Properties(); + if (haveMinimumServerVersion(ServerVersion.v9_0)) { + String appName = PGProperty.APPLICATION_NAME.getOrDefault(info); + if (appName == null) { + appName = ""; + } + this.clientInfo.put("ApplicationName", appName); + } + + fieldMetadataCache = new LruCache<>( + Math.max(0, PGProperty.DATABASE_METADATA_CACHE_FIELDS.getInt(info)), + Math.max(0, PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.getInt(info) * 1024L * 1024L), + false); + + replicationConnection = PGProperty.REPLICATION.getOrDefault(info) != null; + + xmlFactoryFactoryClass = PGProperty.XML_FACTORY_FACTORY.getOrDefault(info); + cleanable = LazyCleaner.getInstance().register(leakHandle, finalizeAction); + } + + private static ReadOnlyBehavior getReadOnlyBehavior(String property) { + if (property == null) { + return 
ReadOnlyBehavior.transaction; + } + try { + return ReadOnlyBehavior.valueOf(property); + } catch (IllegalArgumentException e) { + try { + return ReadOnlyBehavior.valueOf(property.toLowerCase(Locale.US)); + } catch (IllegalArgumentException e2) { + return ReadOnlyBehavior.transaction; + } + } + } + + private static Set getSupportedBinaryOids() { + return new HashSet<>(Arrays.asList( + Oid.BYTEA, + Oid.INT2, + Oid.INT4, + Oid.INT8, + Oid.FLOAT4, + Oid.FLOAT8, + Oid.NUMERIC, + Oid.TIME, + Oid.DATE, + Oid.TIMETZ, + Oid.TIMESTAMP, + Oid.TIMESTAMPTZ, + Oid.BYTEA_ARRAY, + Oid.INT2_ARRAY, + Oid.INT4_ARRAY, + Oid.INT8_ARRAY, + Oid.OID_ARRAY, + Oid.FLOAT4_ARRAY, + Oid.FLOAT8_ARRAY, + Oid.VARCHAR_ARRAY, + Oid.TEXT_ARRAY, + Oid.POINT, + Oid.BOX, + Oid.UUID)); + } + + /** + * Gets all oids for which binary transfer can be enabled. + * + * @param info properties + * @return oids for which binary transfer can be enabled + * @throws PSQLException if any oid is not valid + */ + private static Set getBinaryEnabledOids(Properties info) throws PSQLException { + // check if binary transfer should be enabled for built-in types + boolean binaryTransfer = PGProperty.BINARY_TRANSFER.getBoolean(info); + // get formats that currently have binary protocol support + Set binaryOids = new HashSet<>(32); + if (binaryTransfer) { + binaryOids.addAll(SUPPORTED_BINARY_OIDS); + } + // add all oids which are enabled for binary transfer by the creator of the connection + String oids = PGProperty.BINARY_TRANSFER_ENABLE.getOrDefault(info); + if (oids != null) { + binaryOids.addAll(getOidSet(oids)); + } + return binaryOids; + } + + /** + * Gets all oids for which binary transfer should be disabled. 
+ * + * @param info properties + * @return oids for which binary transfer should be disabled + * @throws PSQLException if any oid is not valid + */ + private static Set getBinaryDisabledOids(Properties info) + throws PSQLException { + // check for oids that should explicitly be disabled + String oids = PGProperty.BINARY_TRANSFER_DISABLE.getOrDefault(info); + if (oids == null) { + return Collections.emptySet(); + } + return getOidSet(oids); + } + + private static Set getOidSet(String oidList) throws PSQLException { + if (oidList.isEmpty()) { + return Collections.emptySet(); + } + Set oids = new HashSet<>(); + StringTokenizer tokenizer = new StringTokenizer(oidList, ","); + while (tokenizer.hasMoreTokens()) { + String oid = tokenizer.nextToken(); + oids.add(Oid.valueOf(oid)); + } + return oids; + } + + private String oidsToString(Set oids) { + StringBuilder sb = new StringBuilder(); + for (Integer oid : oids) { + sb.append(Oid.toString(oid)); + sb.append(','); + } + if (sb.length() > 0) { + sb.setLength(sb.length() - 1); + } else { + sb.append(" "); + } + return sb.toString(); + } + + private final TimestampUtils timestampUtils; + + @Deprecated + @Override + public TimestampUtils getTimestampUtils() { + return timestampUtils; + } + + /** + * The current type mappings. + */ + protected Map> typemap = new HashMap<>(); + + /** + * Obtain the connection lock and return it. Callers must use try-with-resources to ensure that + * unlock() is performed on the lock. + */ + final ResourceLock obtainLock() { + return lock.obtain(); + } + + /** + * Return the lock condition for this connection. + */ + final Condition lockCondition() { + return lockCondition; + } + + @Override + public Statement createStatement() throws SQLException { + // We now follow the spec and default to TYPE_FORWARD_ONLY. 
+ return createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + } + + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + return prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + } + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + return prepareCall(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + } + + @Override + public Map> getTypeMap() throws SQLException { + checkClosed(); + return typemap; + } + + @Override + public QueryExecutor getQueryExecutor() { + return queryExecutor; + } + + @Override + public ReplicationProtocol getReplicationProtocol() { + return queryExecutor.getReplicationProtocol(); + } + + /** + * This adds a warning to the warning chain. + * + * @param warn warning to add + */ + public void addWarning(SQLWarning warn) { + // Add the warning to the chain + if (firstWarning != null) { + firstWarning.setNextWarning(warn); + } else { + firstWarning = warn; + } + + } + + @Override + public ResultSet execSQLQuery(String s) throws SQLException { + return execSQLQuery(s, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + } + + @Override + public ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency) + throws SQLException { + BaseStatement stat = (BaseStatement) createStatement(resultSetType, resultSetConcurrency); + boolean hasResultSet = stat.executeWithFlags(s, QueryExecutor.QUERY_SUPPRESS_BEGIN); + + while (!hasResultSet && stat.getUpdateCount() != -1) { + hasResultSet = stat.getMoreResults(); + } + + if (!hasResultSet) { + throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA); + } + + // Transfer warnings to the connection, since the user never + // has a chance to see the statement itself. 
+ SQLWarning warnings = stat.getWarnings(); + if (warnings != null) { + addWarning(warnings); + } + + return stat.getResultSet(); + } + + @Override + public void execSQLUpdate(String s) throws SQLException { + try (BaseStatement stmt = (BaseStatement) createStatement()) { + if (stmt.executeWithFlags(s, QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS + | QueryExecutor.QUERY_SUPPRESS_BEGIN)) { + throw new PSQLException(GT.tr("A result was returned when none was expected."), + PSQLState.TOO_MANY_RESULTS); + } + + // Transfer warnings to the connection, since the user never + // has a chance to see the statement itself. + SQLWarning warnings = stmt.getWarnings(); + if (warnings != null) { + addWarning(warnings); + } + } + } + + void execSQLUpdate(CachedQuery query) throws SQLException { + try (BaseStatement stmt = (BaseStatement) createStatement()) { + if (stmt.executeWithFlags(query, QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS + | QueryExecutor.QUERY_SUPPRESS_BEGIN)) { + throw new PSQLException(GT.tr("A result was returned when none was expected."), + PSQLState.TOO_MANY_RESULTS); + } + + // Transfer warnings to the connection, since the user never + // has a chance to see the statement itself. + SQLWarning warnings = stmt.getWarnings(); + if (warnings != null) { + addWarning(warnings); + } + } + } + + /** + *

In SQL, a result table can be retrieved through a cursor that is named. The current row of a + * result can be updated or deleted using a positioned update/delete statement that references the + * cursor name.

+ * + *

We do not support positioned update/delete, so this is a no-op.

+ * + * @param cursor the cursor name + * @throws SQLException if a database access error occurs + */ + public void setCursorName(String cursor) throws SQLException { + checkClosed(); + // No-op. + } + + /** + * getCursorName gets the cursor name. + * + * @return the current cursor name + * @throws SQLException if a database access error occurs + */ + public String getCursorName() throws SQLException { + checkClosed(); + return null; + } + + /** + *

We are required to bring back certain information by the DatabaseMetaData class. These + * functions do that.

+ * + *

Method getURL() brings back the URL (good job we saved it)

+ * + * @return the url + * @throws SQLException just in case... + */ + public String getURL() throws SQLException { + return creatingURL; + } + + /** + * Method getUserName() brings back the User Name (again, we saved it). + * + * @return the user name + * @throws SQLException just in case... + */ + public String getUserName() throws SQLException { + return queryExecutor.getUser(); + } + + @SuppressWarnings("deprecation") + @Override + public Fastpath getFastpathAPI() throws SQLException { + checkClosed(); + if (fastpath == null) { + fastpath = new Fastpath(this); + } + return fastpath; + } + + // This holds a reference to the Fastpath API if already open + @SuppressWarnings("deprecation") + private Fastpath fastpath; + + @Override + public LargeObjectManager getLargeObjectAPI() throws SQLException { + checkClosed(); + if (largeobject == null) { + largeobject = new LargeObjectManager(this); + } + return largeobject; + } + + // This holds a reference to the LargeObject API if already open + private LargeObjectManager largeobject; + + /* + * This method is used internally to return an object based around org.postgresql's more unique + * data types. + * + *

It uses an internal HashMap to get the handling class. If the type is not supported, then an + * instance of org.postgresql.util.PGobject is returned. + * + * You can use the getValue() or setValue() methods to handle the returned object. Custom objects + * can have their own methods. + * + * @return PGobject for this type, and set to value + * + * @exception SQLException if value is not correct for this type + */ + @Override + public Object getObject(String type, String value, byte [] byteValue) + throws SQLException { + if (typemap != null) { + Class c = typemap.get(type); + if (c != null) { + // Handle the type (requires SQLInput & SQLOutput classes to be implemented) + throw new PSQLException(GT.tr("Custom type maps are not supported."), + PSQLState.NOT_IMPLEMENTED); + } + } + + PGobject obj = null; + + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, "Constructing object from type={0} value=<{1}>", new Object[]{type, value}); + } + + try { + Class klass = typeCache.getPGobject(type); + + // If className is not null, then try to instantiate it, + // It must be basetype PGobject + + // This is used to implement the org.postgresql unique types (like lseg, + // point, etc). + + if (klass != null) { + obj = klass.getDeclaredConstructor().newInstance(); + obj.setType(type); + if (byteValue != null && obj instanceof PGBinaryObject) { + PGBinaryObject binObj = (PGBinaryObject) obj; + binObj.setByteValue(byteValue, 0); + } else { + obj.setValue(value); + } + } else { + // If className is null, then the type is unknown. + // so return a PGobject with the type set, and the value set + obj = new PGobject(); + obj.setType(type); + obj.setValue(value); + } + + return obj; + } catch (SQLException sx) { + // rethrow the exception. 
Done because we capture any others next + throw sx; + } catch (Exception ex) { + throw new PSQLException(GT.tr("Failed to create object for: {0}.", type), + PSQLState.CONNECTION_FAILURE, ex); + } + } + + protected TypeInfo createTypeInfo(BaseConnection conn, int unknownLength) { + return new TypeInfoCache(conn, unknownLength); + } + + @Override + public TypeInfo getTypeInfo() { + return typeCache; + } + + @Deprecated + @Override + public void addDataType(String type, String name) { + try { + addDataType(type, Class.forName(name).asSubclass(PGobject.class)); + } catch (Exception e) { + throw new RuntimeException("Cannot register new type " + type, e); + } + } + + @Override + public void addDataType(String type, Class klass) throws SQLException { + checkClosed(); + // first add the data type to the type cache + typeCache.addDataType(type, klass); + // then check if this type supports binary transfer + if (PGBinaryObject.class.isAssignableFrom(klass) && getPreferQueryMode() != PreferQueryMode.SIMPLE) { + // try to get an oid for this type (will return 0 if the type does not exist in the database) + int oid = typeCache.getPGType(type); + // check if oid is there and if it is not disabled for binary transfer + if (oid > 0 && !binaryDisabledOids.contains(oid)) { + // allow using binary transfer for receiving and sending of this type + queryExecutor.addBinaryReceiveOid(oid); + queryExecutor.addBinarySendOid(oid); + } + } + } + + // This initialises the objectTypes hash map + private void initObjectTypes(Properties info) throws SQLException { + // Add in the types that come packaged with the driver. + // These can be overridden later if desired. 
+ addDataType("box", PGbox.class); + addDataType("circle", PGcircle.class); + addDataType("line", PGline.class); + addDataType("lseg", PGlseg.class); + addDataType("path", PGpath.class); + addDataType("point", PGpoint.class); + addDataType("polygon", PGpolygon.class); + addDataType("money", PGmoney.class); + addDataType("interval", PGInterval.class); + + Enumeration e = info.propertyNames(); + while (e.hasMoreElements()) { + String propertyName = (String) e.nextElement(); + if (propertyName != null && propertyName.startsWith("datatype.")) { + String typeName = propertyName.substring(9); + String className = info.getProperty(propertyName); + Class klass; + + try { + klass = Class.forName(className); + } catch (ClassNotFoundException cnfe) { + throw new PSQLException( + GT.tr("Unable to load the class {0} responsible for the datatype {1}", + className, typeName), + PSQLState.SYSTEM_ERROR, cnfe); + } + + addDataType(typeName, klass.asSubclass(PGobject.class)); + } + } + } + + /** + * Note: even though {@code Statement} is automatically closed when it is garbage + * collected, it is better to close it explicitly to lower resource consumption. + * The spec says that calling close on a closed connection is a no-op. + * {@inheritDoc} + */ + @Override + public void close() throws SQLException { + if (queryExecutor == null) { + // This might happen in case constructor throws an exception (e.g. host being not available). 
+ // When that happens the connection is still registered in the finalizer queue, so it gets finalized + return; + } + openStackTrace = null; + try { + cleanable.clean(); + } catch (IOException e) { + throw new PSQLException( + GT.tr("Unable to close connection properly"), + PSQLState.UNKNOWN_STATE, e); + } + } + + @Override + public String nativeSQL(String sql) throws SQLException { + checkClosed(); + CachedQuery cachedQuery = queryExecutor.createQuery(sql, false, true); + + return cachedQuery.query.getNativeSql(); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + SQLWarning newWarnings = queryExecutor.getWarnings(); // NB: also clears them. + if (firstWarning == null) { + firstWarning = newWarnings; + } else if (newWarnings != null) { + firstWarning.setNextWarning(newWarnings); // Chain them on. + } + + return firstWarning; + } + } + + @Override + public void clearWarnings() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + queryExecutor.getWarnings(); // Clear and discard. + firstWarning = null; + } + } + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + checkClosed(); + if (queryExecutor.getTransactionState() != TransactionState.IDLE) { + throw new PSQLException( + GT.tr("Cannot change transaction read-only property in the middle of a transaction."), + PSQLState.ACTIVE_SQL_TRANSACTION); + } + + if (readOnly != this.readOnly && autoCommit && this.readOnlyBehavior == ReadOnlyBehavior.always) { + execSQLUpdate(readOnly ? 
setSessionReadOnly : setSessionNotReadOnly); + } + + this.readOnly = readOnly; + LOGGER.log(Level.FINE, " setReadOnly = {0}", readOnly); + } + + @Override + public boolean isReadOnly() throws SQLException { + checkClosed(); + return readOnly; + } + + @Override + public boolean hintReadOnly() { + return readOnly && readOnlyBehavior != ReadOnlyBehavior.ignore; + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + checkClosed(); + + if (this.autoCommit == autoCommit) { + return; + } + + if (!this.autoCommit) { + commit(); + } + + // if the connection is read only, we need to make sure session settings are + // correct when autocommit status changed + if (this.readOnly && readOnlyBehavior == ReadOnlyBehavior.always) { + // if we are turning on autocommit, we need to set session + // to read only + if (autoCommit) { + this.autoCommit = true; + execSQLUpdate(setSessionReadOnly); + } else { + // if we are turning auto commit off, we need to + // disable session + execSQLUpdate(setSessionNotReadOnly); + } + } + + this.autoCommit = autoCommit; + LOGGER.log(Level.FINE, " setAutoCommit = {0}", autoCommit); + } + + @Override + public boolean getAutoCommit() throws SQLException { + checkClosed(); + return this.autoCommit; + } + + private void executeTransactionCommand(Query query) throws SQLException { + int flags = QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS + | QueryExecutor.QUERY_SUPPRESS_BEGIN; + if (prepareThreshold == 0) { + flags |= QueryExecutor.QUERY_ONESHOT; + } + + try { + getQueryExecutor().execute(query, null, new TransactionCommandHandler(), 0, 0, flags); + } catch (SQLException e) { + // Don't retry composite queries as it might get partially executed + if (query.getSubqueries() != null || !queryExecutor.willHealOnRetry(e)) { + throw e; + } + query.close(); + // retry + getQueryExecutor().execute(query, null, new TransactionCommandHandler(), 0, 0, flags); + } + } + + @Override + public void commit() throws 
SQLException { + checkClosed(); + + if (autoCommit) { + throw new PSQLException(GT.tr("Cannot commit when autoCommit is enabled."), + PSQLState.NO_ACTIVE_SQL_TRANSACTION); + } + + if (queryExecutor.getTransactionState() != TransactionState.IDLE) { + executeTransactionCommand(commitQuery); + } + } + + protected void checkClosed() throws SQLException { + if (isClosed()) { + throw new PSQLException(GT.tr("This connection has been closed."), + PSQLState.CONNECTION_DOES_NOT_EXIST); + } + } + + @Override + public void rollback() throws SQLException { + checkClosed(); + + if (autoCommit) { + throw new PSQLException(GT.tr("Cannot rollback when autoCommit is enabled."), + PSQLState.NO_ACTIVE_SQL_TRANSACTION); + } + + if (queryExecutor.getTransactionState() != TransactionState.IDLE) { + executeTransactionCommand(rollbackQuery); + } else { + // just log for debugging + LOGGER.log(Level.FINE, "Rollback requested but no transaction in progress"); + } + } + + @Override + public TransactionState getTransactionState() { + return queryExecutor.getTransactionState(); + } + + @Override + public int getTransactionIsolation() throws SQLException { + checkClosed(); + + String level = null; + final ResultSet rs = execSQLQuery("SHOW TRANSACTION ISOLATION LEVEL"); // nb: no BEGIN triggered + if (rs.next()) { + level = rs.getString(1); + } + rs.close(); + + // TODO revisit: throw exception instead of silently eating the error in unknown cases? + if (level == null) { + return Connection.TRANSACTION_READ_COMMITTED; // Best guess. 
+ } + + level = level.toUpperCase(Locale.US); + if ("READ COMMITTED".equals(level)) { + return Connection.TRANSACTION_READ_COMMITTED; + } + if ("READ UNCOMMITTED".equals(level)) { + return Connection.TRANSACTION_READ_UNCOMMITTED; + } + if ("REPEATABLE READ".equals(level)) { + return Connection.TRANSACTION_REPEATABLE_READ; + } + if ("SERIALIZABLE".equals(level)) { + return Connection.TRANSACTION_SERIALIZABLE; + } + + return Connection.TRANSACTION_READ_COMMITTED; // Best guess. + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + checkClosed(); + + if (queryExecutor.getTransactionState() != TransactionState.IDLE) { + throw new PSQLException( + GT.tr("Cannot change transaction isolation level in the middle of a transaction."), + PSQLState.ACTIVE_SQL_TRANSACTION); + } + + String isolationLevelName = getIsolationLevelName(level); + if (isolationLevelName == null) { + throw new PSQLException(GT.tr("Transaction isolation level {0} not supported.", level), + PSQLState.NOT_IMPLEMENTED); + } + + String isolationLevelSQL = + "SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL " + isolationLevelName; + execSQLUpdate(isolationLevelSQL); // nb: no BEGIN triggered + LOGGER.log(Level.FINE, " setTransactionIsolation = {0}", isolationLevelName); + } + + protected String getIsolationLevelName(int level) { + switch (level) { + case Connection.TRANSACTION_READ_COMMITTED: + return "READ COMMITTED"; + case Connection.TRANSACTION_SERIALIZABLE: + return "SERIALIZABLE"; + case Connection.TRANSACTION_READ_UNCOMMITTED: + return "READ UNCOMMITTED"; + case Connection.TRANSACTION_REPEATABLE_READ: + return "REPEATABLE READ"; + default: + return null; + } + } + + @Override + public void setCatalog(String catalog) throws SQLException { + checkClosed(); + // no-op + } + + @Override + public String getCatalog() throws SQLException { + checkClosed(); + return queryExecutor.getDatabase(); + } + + public boolean getHideUnprivilegedObjects() { + return 
hideUnprivilegedObjects; + } + + /** + * Get server version number. + * + * @return server version number + */ + public String getDBVersionNumber() { + return queryExecutor.getServerVersion(); + } + + /** + * Get server major version. + * + * @return server major version + */ + public int getServerMajorVersion() { + try { + StringTokenizer versionTokens = new StringTokenizer(queryExecutor.getServerVersion(), "."); // aaXbb.ccYdd + return integerPart(versionTokens.nextToken()); // return X + } catch (NoSuchElementException e) { + return 0; + } + } + + /** + * Get server minor version. + * + * @return server minor version + */ + public int getServerMinorVersion() { + try { + StringTokenizer versionTokens = new StringTokenizer(queryExecutor.getServerVersion(), "."); // aaXbb.ccYdd + versionTokens.nextToken(); // Skip aaXbb + return integerPart(versionTokens.nextToken()); // return Y + } catch (NoSuchElementException e) { + return 0; + } + } + + @Override + public boolean haveMinimumServerVersion(int ver) { + return queryExecutor.getServerVersionNum() >= ver; + } + + @Override + public boolean haveMinimumServerVersion(Version ver) { + return haveMinimumServerVersion(ver.getVersionNum()); + } + + @Override + public Encoding getEncoding() { + return queryExecutor.getEncoding(); + } + + @Override + public byte[] encodeString(String str) throws SQLException { + try { + return getEncoding().encode(str); + } catch (IOException ioe) { + throw new PSQLException(GT.tr("Unable to translate data into the desired encoding."), + PSQLState.DATA_ERROR, ioe); + } + } + + @Override + public String escapeString(String str) throws SQLException { + return Utils.escapeLiteral(null, str, queryExecutor.getStandardConformingStrings()) + .toString(); + } + + @Override + public boolean getStandardConformingStrings() { + return queryExecutor.getStandardConformingStrings(); + } + + // This is a cache of the DatabaseMetaData instance for this connection + protected DatabaseMetaData metadata; + + 
@Override + public boolean isClosed() throws SQLException { + return queryExecutor.isClosed(); + } + + @Override + public void cancelQuery() throws SQLException { + checkClosed(); + queryExecutor.sendQueryCancel(); + } + + @Override + public PGNotification[] getNotifications() throws SQLException { + return getNotifications(-1); + } + + @Override + public PGNotification[] getNotifications(int timeoutMillis) throws SQLException { + checkClosed(); + getQueryExecutor().processNotifies(timeoutMillis); + // Backwards-compatibility hand-holding. + PGNotification[] notifications = queryExecutor.getNotifications(); + return notifications; + } + + /** + * Handler for transaction queries. + */ + private class TransactionCommandHandler extends ResultHandlerBase { + @Override + public void handleCompletion() throws SQLException { + SQLWarning warning = getWarning(); + if (warning != null) { + PgConnection.this.addWarning(warning); + } + super.handleCompletion(); + } + } + + @Override + public int getPrepareThreshold() { + return prepareThreshold; + } + + @Override + public void setDefaultFetchSize(int fetchSize) throws SQLException { + if (fetchSize < 0) { + throw new PSQLException(GT.tr("Fetch size must be a value greater to or equal to 0."), + PSQLState.INVALID_PARAMETER_VALUE); + } + + this.defaultFetchSize = fetchSize; + LOGGER.log(Level.FINE, " setDefaultFetchSize = {0}", fetchSize); + } + + @Override + public int getDefaultFetchSize() { + return defaultFetchSize; + } + + @Override + public void setPrepareThreshold(int newThreshold) { + this.prepareThreshold = newThreshold; + LOGGER.log(Level.FINE, " setPrepareThreshold = {0}", newThreshold); + } + + public boolean getForceBinary() { + return forcebinary; + } + + public void setForceBinary(boolean newValue) { + this.forcebinary = newValue; + LOGGER.log(Level.FINE, " setForceBinary = {0}", newValue); + } + + public void setTypeMapImpl(Map> map) throws SQLException { + typemap = map; + } + + @Override + public Logger 
getLogger() { + return LOGGER; + } + + public int getProtocolVersion() { + return queryExecutor.getProtocolVersion(); + } + + @Override + public boolean getStringVarcharFlag() { + return bindStringAsVarchar; + } + + private CopyManager copyManager; + + @Override + public CopyManager getCopyAPI() throws SQLException { + checkClosed(); + if (copyManager == null) { + copyManager = new CopyManager(this); + } + return copyManager; + } + + @Override + public boolean binaryTransferSend(int oid) { + return queryExecutor.useBinaryForSend(oid); + } + + @Override + public int getBackendPID() { + return queryExecutor.getBackendPID(); + } + + @Override + public boolean isColumnSanitiserDisabled() { + return this.disableColumnSanitiser; + } + + public void setDisableColumnSanitiser(boolean disableColumnSanitiser) { + this.disableColumnSanitiser = disableColumnSanitiser; + LOGGER.log(Level.FINE, " setDisableColumnSanitiser = {0}", disableColumnSanitiser); + } + + @Override + public PreferQueryMode getPreferQueryMode() { + return queryExecutor.getPreferQueryMode(); + } + + @Override + public AutoSave getAutosave() { + return queryExecutor.getAutoSave(); + } + + @Override + public void setAutosave(AutoSave autoSave) { + queryExecutor.setAutoSave(autoSave); + LOGGER.log(Level.FINE, " setAutosave = {0}", autoSave.value()); + } + + protected void abort() { + queryExecutor.abort(); + } + + private Timer getTimer() { + return finalizeAction.getTimer(); + } + + @Override + public void addTimerTask(TimerTask timerTask, long milliSeconds) { + Timer timer = getTimer(); + timer.schedule(timerTask, milliSeconds); + } + + @Override + public void purgeTimerTasks() { + finalizeAction.purgeTimerTasks(); + } + + @Override + public String escapeIdentifier(String identifier) throws SQLException { + return Utils.escapeIdentifier(null, identifier).toString(); + } + + @Override + public String escapeLiteral(String literal) throws SQLException { + return Utils.escapeLiteral(null, literal, 
queryExecutor.getStandardConformingStrings()) + .toString(); + } + + @Override + public LruCache getFieldMetadataCache() { + return fieldMetadataCache; + } + + @Override + public PGReplicationConnection getReplicationAPI() { + return new PGReplicationConnectionImpl(this); + } + + // Parse a "dirty" integer surrounded by non-numeric characters + private static int integerPart(String dirtyString) { + int start = 0; + + while (start < dirtyString.length() && !Character.isDigit(dirtyString.charAt(start))) { + ++start; + } + + int end = start; + while (end < dirtyString.length() && Character.isDigit(dirtyString.charAt(end))) { + ++end; + } + + if (start == end) { + return 0; + } + + return Integer.parseInt(dirtyString.substring(start, end)); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + checkClosed(); + return new PgStatement(this, resultSetType, resultSetConcurrency, resultSetHoldability); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + checkClosed(); + return new PgPreparedStatement(this, sql, resultSetType, resultSetConcurrency, resultSetHoldability); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + checkClosed(); + return new PgCallableStatement(this, sql, resultSetType, resultSetConcurrency, resultSetHoldability); + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + checkClosed(); + if (metadata == null) { + metadata = new PgDatabaseMetaData(this); + } + return metadata; + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + setTypeMapImpl(map); + LOGGER.log(Level.FINE, " setTypeMap = {0}", map); + } + + protected Array makeArray(int oid, String fieldString) throws SQLException 
{ + return new PgArray(this, oid, fieldString); + } + + protected Blob makeBlob(long oid) throws SQLException { + return new PgBlob(this, oid); + } + + protected Clob makeClob(long oid) throws SQLException { + return new PgClob(this, oid); + } + + protected SQLXML makeSQLXML() throws SQLException { + return new PgSQLXML(this); + } + + @Override + public Clob createClob() throws SQLException { + checkClosed(); + throw Driver.notImplemented(this.getClass(), "createClob()"); + } + + @Override + public Blob createBlob() throws SQLException { + checkClosed(); + throw Driver.notImplemented(this.getClass(), "createBlob()"); + } + + @Override + public NClob createNClob() throws SQLException { + checkClosed(); + throw Driver.notImplemented(this.getClass(), "createNClob()"); + } + + @Override + public SQLXML createSQLXML() throws SQLException { + checkClosed(); + return makeSQLXML(); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + checkClosed(); + throw Driver.notImplemented(this.getClass(), "createStruct(String, Object[])"); + } + + @SuppressWarnings({"rawtypes", "unchecked"}) + @Override + public Array createArrayOf(String typeName, Object elements) throws SQLException { + checkClosed(); + + final TypeInfo typeInfo = getTypeInfo(); + + final int oid = typeInfo.getPGArrayType(typeName); + final char delim = typeInfo.getArrayDelimiter(oid); + + if (oid == Oid.UNSPECIFIED) { + throw new PSQLException(GT.tr("Unable to find server array type for provided name {0}.", typeName), + PSQLState.INVALID_NAME); + } + + if (elements == null) { + return makeArray(oid, null); + } + + final ArrayEncoding.ArrayEncoder arraySupport = ArrayEncoding.getArrayEncoder(elements); + if (arraySupport.supportBinaryRepresentation(oid) && getPreferQueryMode() != PreferQueryMode.SIMPLE) { + return new PgArray(this, oid, arraySupport.toBinaryRepresentation(this, elements, oid)); + } + + final String arrayString = 
arraySupport.toArrayString(delim, elements); + return makeArray(oid, arrayString); + } + + @Override + public Array createArrayOf(String typeName, Object [] elements) + throws SQLException { + return createArrayOf(typeName, (Object) elements); + } + + @Override + public boolean isValid(int timeout) throws SQLException { + if (timeout < 0) { + throw new PSQLException(GT.tr("Invalid timeout ({0}<0).", timeout), + PSQLState.INVALID_PARAMETER_VALUE); + } + if (isClosed()) { + return false; + } + boolean changedNetworkTimeout = false; + try { + int oldNetworkTimeout = getNetworkTimeout(); + int newNetworkTimeout = (int) Math.min(timeout * 1000L, Integer.MAX_VALUE); + try { + // change network timeout only if the new value is less than the current + // (zero means infinite timeout) + if (newNetworkTimeout != 0 && (oldNetworkTimeout == 0 || newNetworkTimeout < oldNetworkTimeout)) { + changedNetworkTimeout = true; + setNetworkTimeout(null, newNetworkTimeout); + } + if (replicationConnection) { + try (Statement statement = createStatement()) { + statement.execute("IDENTIFY_SYSTEM"); + } + } else { + try (Statement checkConnectionQuery = createStatement()) { + ((PgStatement)checkConnectionQuery).execute("", QueryExecutor.QUERY_EXECUTE_AS_SIMPLE); + } + } + return true; + } finally { + if (changedNetworkTimeout) { + setNetworkTimeout(null, oldNetworkTimeout); + } + } + } catch (SQLException e) { + if (PSQLState.IN_FAILED_SQL_TRANSACTION.getState().equals(e.getSQLState())) { + // "current transaction aborted", assume the connection is up and running + return true; + } + LOGGER.log(Level.FINE, GT.tr("Validating connection."), e); + } + return false; + } + + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + try { + checkClosed(); + } catch (final SQLException cause) { + Map failures = new HashMap<>(); + failures.put(name, ClientInfoStatus.REASON_UNKNOWN); + throw new SQLClientInfoException(GT.tr("This connection has been 
closed."), failures, cause); + } + + if (haveMinimumServerVersion(ServerVersion.v9_0) && "ApplicationName".equals(name)) { + if (value == null) { + value = ""; + } + final String oldValue = queryExecutor.getApplicationName(); + if (value.equals(oldValue)) { + return; + } + + try { + StringBuilder sql = new StringBuilder("SET application_name = '"); + Utils.escapeLiteral(sql, value, getStandardConformingStrings()); + sql.append("'"); + execSQLUpdate(sql.toString()); + } catch (SQLException sqle) { + Map failures = new HashMap<>(); + failures.put(name, ClientInfoStatus.REASON_UNKNOWN); + throw new SQLClientInfoException( + GT.tr("Failed to set ClientInfo property: {0}", "ApplicationName"), sqle.getSQLState(), + failures, sqle); + } + if (LOGGER.isLoggable(Level.FINE)) { + LOGGER.log(Level.FINE, " setClientInfo = {0} {1}", new Object[]{name, value}); + } + clientInfo.put(name, value); + return; + } + + addWarning(new SQLWarning(GT.tr("ClientInfo property not supported."), + PSQLState.NOT_IMPLEMENTED.getState())); + } + + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + try { + checkClosed(); + } catch (final SQLException cause) { + Map failures = new HashMap<>(); + for (Map.Entry e : properties.entrySet()) { + failures.put((String) e.getKey(), ClientInfoStatus.REASON_UNKNOWN); + } + throw new SQLClientInfoException(GT.tr("This connection has been closed."), failures, cause); + } + + Map failures = new HashMap<>(); + for (String name : new String[]{"ApplicationName"}) { + try { + setClientInfo(name, properties.getProperty(name, null)); + } catch (SQLClientInfoException e) { + failures.putAll(e.getFailedProperties()); + } + } + + if (!failures.isEmpty()) { + throw new SQLClientInfoException(GT.tr("One or more ClientInfo failed."), + PSQLState.NOT_IMPLEMENTED.getState(), failures); + } + } + + @Override + public String getClientInfo(String name) throws SQLException { + checkClosed(); + clientInfo.put("ApplicationName", 
queryExecutor.getApplicationName()); + return clientInfo.getProperty(name); + } + + @Override + public Properties getClientInfo() throws SQLException { + checkClosed(); + clientInfo.put("ApplicationName", queryExecutor.getApplicationName()); + return clientInfo; + } + + public T createQueryObject(Class ifc) throws SQLException { + checkClosed(); + throw Driver.notImplemented(this.getClass(), "createQueryObject(Class)"); + } + + @Override + public boolean getLogServerErrorDetail() { + return logServerErrorDetail; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + checkClosed(); + return iface.isAssignableFrom(getClass()); + } + + @Override + public T unwrap(Class iface) throws SQLException { + checkClosed(); + if (iface.isAssignableFrom(getClass())) { + return iface.cast(this); + } + throw new SQLException("Cannot unwrap to " + iface.getName()); + } + + @Override + public String getSchema() throws SQLException { + checkClosed(); + try (Statement stmt = createStatement()) { + try (ResultSet rs = stmt.executeQuery("select current_schema()")) { + if (!rs.next()) { + return null; // Is it ever possible? 
+ } + return rs.getString(1); + } + } + } + + @Override + public void setSchema(String schema) throws SQLException { + checkClosed(); + try (Statement stmt = createStatement()) { + if (schema == null) { + stmt.executeUpdate("SET SESSION search_path TO DEFAULT"); + } else { + StringBuilder sb = new StringBuilder(); + sb.append("SET SESSION search_path TO '"); + Utils.escapeLiteral(sb, schema, getStandardConformingStrings()); + sb.append("'"); + stmt.executeUpdate(sb.toString()); + LOGGER.log(Level.FINE, " setSchema = {0}", schema); + } + } + } + + public class AbortCommand implements Runnable { + + public AbortCommand() { + } + + @Override + public void run() { + abort(); + } + } + + @Override + public void abort(Executor executor) throws SQLException { + if (executor == null) { + throw new SQLException("executor is null"); + } + if (isClosed()) { + return; + } + + SQL_PERMISSION_ABORT.checkGuard(this); + + AbortCommand command = new AbortCommand(); + executor.execute(command); + } + + @Override + public void setNetworkTimeout(Executor executor /*not used*/, int milliseconds) + throws SQLException { + checkClosed(); + + if (milliseconds < 0) { + throw new PSQLException(GT.tr("Network timeout must be a value greater than or equal to 0."), + PSQLState.INVALID_PARAMETER_VALUE); + } + + checkPermission(SQL_PERMISSION_NETWORK_TIMEOUT); + + try { + queryExecutor.setNetworkTimeout(milliseconds); + } catch (IOException ioe) { + throw new PSQLException(GT.tr("Unable to set network timeout."), + PSQLState.COMMUNICATION_ERROR, ioe); + } + } + + private void checkPermission(SQLPermission sqlPermissionNetworkTimeout) { + if (SYSTEM_GET_SECURITY_MANAGER != null && SECURITY_MANAGER_CHECK_PERMISSION != null) { + try { + Object securityManager = SYSTEM_GET_SECURITY_MANAGER.invoke(); + if (securityManager != null) { + SECURITY_MANAGER_CHECK_PERMISSION.invoke(securityManager, sqlPermissionNetworkTimeout); + } + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + } + + 
@Override + public int getNetworkTimeout() throws SQLException { + checkClosed(); + + try { + return queryExecutor.getNetworkTimeout(); + } catch (IOException ioe) { + throw new PSQLException(GT.tr("Unable to get network timeout."), + PSQLState.COMMUNICATION_ERROR, ioe); + } + } + + @Override + public void setHoldability(int holdability) throws SQLException { + checkClosed(); + + switch (holdability) { + case ResultSet.CLOSE_CURSORS_AT_COMMIT: + case ResultSet.HOLD_CURSORS_OVER_COMMIT: + rsHoldability = holdability; + break; + default: + throw new PSQLException(GT.tr("Unknown ResultSet holdability setting: {0}.", holdability), + PSQLState.INVALID_PARAMETER_VALUE); + } + LOGGER.log(Level.FINE, " setHoldability = {0}", holdability); + } + + @Override + public int getHoldability() throws SQLException { + checkClosed(); + return rsHoldability; + } + + @Override + public Savepoint setSavepoint() throws SQLException { + checkClosed(); + + String pgName; + if (getAutoCommit()) { + throw new PSQLException(GT.tr("Cannot establish a savepoint in auto-commit mode."), + PSQLState.NO_ACTIVE_SQL_TRANSACTION); + } + + PSQLSavepoint savepoint = new PSQLSavepoint(savepointId++); + pgName = savepoint.getPGName(); + + // Note we can't use execSQLUpdate because we don't want + // to suppress BEGIN. + Statement stmt = createStatement(); + stmt.executeUpdate("SAVEPOINT " + pgName); + stmt.close(); + + return savepoint; + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + checkClosed(); + + if (getAutoCommit()) { + throw new PSQLException(GT.tr("Cannot establish a savepoint in auto-commit mode."), + PSQLState.NO_ACTIVE_SQL_TRANSACTION); + } + + PSQLSavepoint savepoint = new PSQLSavepoint(name); + + // Note we can't use execSQLUpdate because we don't want + // to suppress BEGIN. 
+ Statement stmt = createStatement(); + stmt.executeUpdate("SAVEPOINT " + savepoint.getPGName()); + stmt.close(); + + return savepoint; + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + checkClosed(); + + PSQLSavepoint pgSavepoint = (PSQLSavepoint) savepoint; + execSQLUpdate("ROLLBACK TO SAVEPOINT " + pgSavepoint.getPGName()); + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + checkClosed(); + + PSQLSavepoint pgSavepoint = (PSQLSavepoint) savepoint; + execSQLUpdate("RELEASE SAVEPOINT " + pgSavepoint.getPGName()); + pgSavepoint.invalidate(); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) + throws SQLException { + checkClosed(); + return createStatement(resultSetType, resultSetConcurrency, getHoldability()); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + checkClosed(); + return prepareStatement(sql, resultSetType, resultSetConcurrency, getHoldability()); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + checkClosed(); + return prepareCall(sql, resultSetType, resultSetConcurrency, getHoldability()); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + if (autoGeneratedKeys != Statement.RETURN_GENERATED_KEYS) { + return prepareStatement(sql); + } + + return prepareStatement(sql, (String[]) null); + } + + @Override + public PreparedStatement prepareStatement(String sql, int [] columnIndexes) throws SQLException { + if (columnIndexes != null && columnIndexes.length == 0) { + return prepareStatement(sql); + } + + checkClosed(); + throw new PSQLException(GT.tr("Returning autogenerated keys is not supported."), + PSQLState.NOT_IMPLEMENTED); + } + + @Override + public PreparedStatement 
prepareStatement(String sql, String[] columnNames) throws SQLException { + if (columnNames != null && columnNames.length == 0) { + return prepareStatement(sql); + } + + CachedQuery cachedQuery = borrowReturningQuery(sql, columnNames); + PgPreparedStatement ps = + new PgPreparedStatement(this, cachedQuery, + ResultSet.TYPE_FORWARD_ONLY, + ResultSet.CONCUR_READ_ONLY, + getHoldability()); + Query query = cachedQuery.query; + SqlCommand sqlCommand = query.getSqlCommand(); + if (sqlCommand != null) { + ps.wantsGeneratedKeysAlways = sqlCommand.isReturningKeywordPresent(); + } else { + // If composite query is given, just ignore "generated keys" arguments + } + return ps; + } + + @Override + public final Map getParameterStatuses() { + return queryExecutor.getParameterStatuses(); + } + + @Override + public final String getParameterStatus(String parameterName) { + return queryExecutor.getParameterStatus(parameterName); + } + + @Override + public boolean getAdaptiveFetch() { + return queryExecutor.getAdaptiveFetch(); + } + + @Override + public void setAdaptiveFetch(boolean adaptiveFetch) { + queryExecutor.setAdaptiveFetch(adaptiveFetch); + } + + @Override + public PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException { + PGXmlFactoryFactory xmlFactoryFactory = this.xmlFactoryFactory; + if (xmlFactoryFactory != null) { + return xmlFactoryFactory; + } + if (xmlFactoryFactoryClass == null || "".equals(xmlFactoryFactoryClass)) { + xmlFactoryFactory = DefaultPGXmlFactoryFactory.INSTANCE; + } else if ("LEGACY_INSECURE".equals(xmlFactoryFactoryClass)) { + xmlFactoryFactory = LegacyInsecurePGXmlFactoryFactory.INSTANCE; + } else { + Class clazz; + try { + clazz = Class.forName(xmlFactoryFactoryClass); + } catch (ClassNotFoundException ex) { + throw new PSQLException( + GT.tr("Could not instantiate xmlFactoryFactory: {0}", xmlFactoryFactoryClass), + PSQLState.INVALID_PARAMETER_VALUE, ex); + } + if (!clazz.isAssignableFrom(PGXmlFactoryFactory.class)) { + throw new 
PSQLException( + GT.tr("Connection property xmlFactoryFactory must implement PGXmlFactoryFactory: {0}", xmlFactoryFactoryClass), + PSQLState.INVALID_PARAMETER_VALUE); + } + try { + xmlFactoryFactory = clazz.asSubclass(PGXmlFactoryFactory.class) + .getDeclaredConstructor() + .newInstance(); + } catch (Exception ex) { + throw new PSQLException( + GT.tr("Could not instantiate xmlFactoryFactory: {0}", xmlFactoryFactoryClass), + PSQLState.INVALID_PARAMETER_VALUE, ex); + } + } + this.xmlFactoryFactory = xmlFactoryFactory; + return xmlFactoryFactory; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnectionCleaningAction.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnectionCleaningAction.java new file mode 100644 index 0000000..7ac5e42 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnectionCleaningAction.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2023, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.Driver; +import org.postgresql.util.GT; +import org.postgresql.util.LazyCleaner; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Timer; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * This class segregates the minimal resources required for proper cleanup in case + * the connection has not been closed by the user code. + *

 * <p>For now, it has two actions:</p>
 * <ul>
 *   <li>Print stacktrace when the connection has been created, so users can identify the leak</li>
 *   <li>Release shared timer registration</li>
 * </ul>
+ */ +@SuppressWarnings("try") +class PgConnectionCleaningAction implements LazyCleaner.CleaningAction { + private static final Logger LOGGER = Logger.getLogger(PgConnection.class.getName()); + + private final ResourceLock lock; + + private Throwable openStackTrace; + private final Closeable queryExecutorCloseAction; + + /** + * Timer for scheduling TimerTasks for the connection. + * Only instantiated if a task is actually scheduled. + * Access should be guarded with {@link #lock} + */ + private Timer cancelTimer; + + PgConnectionCleaningAction( + ResourceLock lock, + Throwable openStackTrace, + Closeable queryExecutorCloseAction) { + this.lock = lock; + this.openStackTrace = openStackTrace; + this.queryExecutorCloseAction = queryExecutorCloseAction; + } + + public Timer getTimer() { + try (ResourceLock ignore = lock.obtain()) { + Timer cancelTimer = this.cancelTimer; + if (cancelTimer == null) { + cancelTimer = Driver.getSharedTimer().getTimer(); + this.cancelTimer = cancelTimer; + } + return cancelTimer; + } + } + + public void releaseTimer() { + try (ResourceLock ignore = lock.obtain()) { + if (cancelTimer != null) { + cancelTimer = null; + Driver.getSharedTimer().releaseTimer(); + } + } + } + + public void purgeTimerTasks() { + try (ResourceLock ignore = lock.obtain()) { + Timer timer = cancelTimer; + if (timer != null) { + timer.purge(); + } + } + } + + @Override + public void onClean(boolean leak) throws IOException { + if (leak && openStackTrace != null) { + LOGGER.log(Level.WARNING, GT.tr("Leak detected: Connection.close() was not called"), openStackTrace); + } + openStackTrace = null; + releaseTimer(); + queryExecutorCloseAction.close(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgDatabaseMetaData.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgDatabaseMetaData.java new file mode 100644 index 0000000..1184b79 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgDatabaseMetaData.java @@ -0,0 +1,3344 @@ +/* + * Copyright (c) 
2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.Driver; +import org.postgresql.core.BaseStatement; +import org.postgresql.core.Field; +import org.postgresql.core.Oid; +import org.postgresql.core.ServerVersion; +import org.postgresql.core.Tuple; +import org.postgresql.core.TypeInfo; +import org.postgresql.util.ByteConverter; +import org.postgresql.util.DriverInfo; +import org.postgresql.util.GT; +import org.postgresql.util.JdbcBlackHole; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.math.BigInteger; +import java.sql.Array; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.RowIdLifetime; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.StringTokenizer; + +public class PgDatabaseMetaData implements DatabaseMetaData { + + public PgDatabaseMetaData(PgConnection conn) { + this.connection = conn; + } + + private String keywords; + + protected final PgConnection connection; // The connection association + + private int nameDataLength; // length for name datatype + private int indexMaxKeys; // maximum number of keys in an index. 
+ + protected int getMaxIndexKeys() throws SQLException { + if (indexMaxKeys == 0) { + String sql; + sql = "SELECT setting FROM pg_catalog.pg_settings WHERE name='max_index_keys'"; + + Statement stmt = connection.createStatement(); + ResultSet rs = null; + try { + rs = stmt.executeQuery(sql); + if (!rs.next()) { + stmt.close(); + throw new PSQLException( + GT.tr( + "Unable to determine a value for MaxIndexKeys due to missing system catalog data."), + PSQLState.UNEXPECTED_ERROR); + } + indexMaxKeys = rs.getInt(1); + } finally { + JdbcBlackHole.close(rs); + JdbcBlackHole.close(stmt); + } + } + return indexMaxKeys; + } + + protected int getMaxNameLength() throws SQLException { + if (nameDataLength == 0) { + String sql; + sql = "SELECT t.typlen FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n " + + "WHERE t.typnamespace=n.oid AND t.typname='name' AND n.nspname='pg_catalog'"; + + Statement stmt = connection.createStatement(); + ResultSet rs = null; + try { + rs = stmt.executeQuery(sql); + if (!rs.next()) { + throw new PSQLException(GT.tr("Unable to find name datatype in the system catalogs."), + PSQLState.UNEXPECTED_ERROR); + } + nameDataLength = rs.getInt("typlen"); + } finally { + JdbcBlackHole.close(rs); + JdbcBlackHole.close(stmt); + } + } + return nameDataLength - 1; + } + + @Override + public boolean allProceduresAreCallable() throws SQLException { + return true; // For now... + } + + @Override + public boolean allTablesAreSelectable() throws SQLException { + return true; // For now... 
+ } + + @Override + public String getURL() throws SQLException { + return connection.getURL(); + } + + @Override + public String getUserName() throws SQLException { + return connection.getUserName(); + } + + @Override + public boolean isReadOnly() throws SQLException { + return connection.isReadOnly(); + } + + @Override + public boolean nullsAreSortedHigh() throws SQLException { + return true; + } + + @Override + public boolean nullsAreSortedLow() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtStart() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtEnd() throws SQLException { + return false; + } + + /** + * Retrieves the name of this database product. We hope that it is PostgreSQL, so we return that + * explicitly. + * + * @return "PostgreSQL" + */ + @Override + public String getDatabaseProductName() throws SQLException { + return "PostgreSQL"; + } + + @Override + public String getDatabaseProductVersion() throws SQLException { + return connection.getDBVersionNumber(); + } + + @Override + public String getDriverName() { + return DriverInfo.DRIVER_NAME; + } + + @Override + public String getDriverVersion() { + return DriverInfo.DRIVER_VERSION; + } + + @Override + public int getDriverMajorVersion() { + return DriverInfo.MAJOR_VERSION; + } + + @Override + public int getDriverMinorVersion() { + return DriverInfo.MINOR_VERSION; + } + + /** + * Does the database store tables in a local file? No - it stores them in a file on the server. + * + * @return true if so + * @throws SQLException if a database access error occurs + */ + @Override + public boolean usesLocalFiles() throws SQLException { + return false; + } + + /** + * Does the database use a file for each table? Well, not really, since it doesn't use local files. 
+ * + * @return true if so + * @throws SQLException if a database access error occurs + */ + @Override + public boolean usesLocalFilePerTable() throws SQLException { + return false; + } + + /** + * Does the database treat mixed case unquoted SQL identifiers as case sensitive and as a result + * store them in mixed case? A JDBC-Compliant driver will always return false. + * + * @return true if so + * @throws SQLException if a database access error occurs + */ + @Override + public boolean supportsMixedCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesUpperCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean storesMixedCaseIdentifiers() throws SQLException { + return false; + } + + /** + * Does the database treat mixed case quoted SQL identifiers as case sensitive and as a result + * store them in mixed case? A JDBC compliant driver will always return true. + * + * @return true if so + * @throws SQLException if a database access error occurs + */ + @Override + public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } + + /** + * What is the string used to quote SQL identifiers? This returns a space if identifier quoting + * isn't supported. A JDBC Compliant driver will always use a double quote character. + * + * @return the quoting string + * @throws SQLException if a database access error occurs + */ + @Override + public String getIdentifierQuoteString() throws SQLException { + return "\""; + } + + /** + * {@inheritDoc} + * + *

 * <p>From PostgreSQL 9.0+ return the keywords from pg_catalog.pg_get_keywords()</p>

   *
   * @return a comma separated list of keywords we use
   * @throws SQLException if a database access error occurs
   */
  @Override
  public String getSQLKeywords() throws SQLException {
    connection.checkClosed();
    // The list is computed lazily and cached in the instance field. Read it into a
    // local first so a concurrent caller always observes a fully built value.
    String keywords = this.keywords;
    if (keywords == null) {
      if (connection.haveMinimumServerVersion(ServerVersion.v9_0)) {
        // 9.0+ can ask the server directly via pg_get_keywords().
        // Exclude SQL:2003 keywords (https://github.com/ronsavage/SQL/blob/master/sql-2003-2.bnf)
        // from the returned list, ugly but required by jdbc spec.
        String sql = "select string_agg(word, ',') from pg_catalog.pg_get_keywords() "
            + "where word <> ALL ('{a,abs,absolute,action,ada,add,admin,after,all,allocate,alter,"
            + "always,and,any,are,array,as,asc,asensitive,assertion,assignment,asymmetric,at,atomic,"
            + "attribute,attributes,authorization,avg,before,begin,bernoulli,between,bigint,binary,"
            + "blob,boolean,both,breadth,by,c,call,called,cardinality,cascade,cascaded,case,cast,"
            + "catalog,catalog_name,ceil,ceiling,chain,char,char_length,character,character_length,"
            + "character_set_catalog,character_set_name,character_set_schema,characteristics,"
            + "characters,check,checked,class_origin,clob,close,coalesce,cobol,code_units,collate,"
            + "collation,collation_catalog,collation_name,collation_schema,collect,column,"
            + "column_name,command_function,command_function_code,commit,committed,condition,"
            + "condition_number,connect,connection_name,constraint,constraint_catalog,constraint_name,"
            + "constraint_schema,constraints,constructors,contains,continue,convert,corr,"
            + "corresponding,count,covar_pop,covar_samp,create,cross,cube,cume_dist,current,"
            + "current_collation,current_date,current_default_transform_group,current_path,"
            + "current_role,current_time,current_timestamp,current_transform_group_for_type,current_user,"
            + "cursor,cursor_name,cycle,data,date,datetime_interval_code,datetime_interval_precision,"
            + "day,deallocate,dec,decimal,declare,default,defaults,deferrable,deferred,defined,definer,"
            + "degree,delete,dense_rank,depth,deref,derived,desc,describe,descriptor,deterministic,"
            + "diagnostics,disconnect,dispatch,distinct,domain,double,drop,dynamic,dynamic_function,"
            + "dynamic_function_code,each,element,else,end,end-exec,equals,escape,every,except,"
            + "exception,exclude,excluding,exec,execute,exists,exp,external,extract,false,fetch,filter,"
            + "final,first,float,floor,following,for,foreign,fortran,found,free,from,full,function,"
            + "fusion,g,general,get,global,go,goto,grant,granted,group,grouping,having,hierarchy,hold,"
            + "hour,identity,immediate,implementation,in,including,increment,indicator,initially,"
            + "inner,inout,input,insensitive,insert,instance,instantiable,int,integer,intersect,"
            + "intersection,interval,into,invoker,is,isolation,join,k,key,key_member,key_type,language,"
            + "large,last,lateral,leading,left,length,level,like,ln,local,localtime,localtimestamp,"
            + "locator,lower,m,map,match,matched,max,maxvalue,member,merge,message_length,"
            + "message_octet_length,message_text,method,min,minute,minvalue,mod,modifies,module,month,"
            + "more,multiset,mumps,name,names,national,natural,nchar,nclob,nesting,new,next,no,none,"
            + "normalize,normalized,not,\"null\",nullable,nullif,nulls,number,numeric,object,"
            + "octet_length,octets,of,old,on,only,open,option,options,or,order,ordering,ordinality,"
            + "others,out,outer,output,over,overlaps,overlay,overriding,pad,parameter,parameter_mode,"
            + "parameter_name,parameter_ordinal_position,parameter_specific_catalog,"
            + "parameter_specific_name,parameter_specific_schema,partial,partition,pascal,path,"
            + "percent_rank,percentile_cont,percentile_disc,placing,pli,position,power,preceding,"
            + "precision,prepare,preserve,primary,prior,privileges,procedure,public,range,rank,read,"
            + "reads,real,recursive,ref,references,referencing,regr_avgx,regr_avgy,regr_count,"
            + "regr_intercept,regr_r2,regr_slope,regr_sxx,regr_sxy,regr_syy,relative,release,"
            + "repeatable,restart,result,return,returned_cardinality,returned_length,"
            + "returned_octet_length,returned_sqlstate,returns,revoke,right,role,rollback,rollup,"
            + "routine,routine_catalog,routine_name,routine_schema,row,row_count,row_number,rows,"
            + "savepoint,scale,schema,schema_name,scope_catalog,scope_name,scope_schema,scroll,"
            + "search,second,section,security,select,self,sensitive,sequence,serializable,server_name,"
            + "session,session_user,set,sets,similar,simple,size,smallint,some,source,space,specific,"
            + "specific_name,specifictype,sql,sqlexception,sqlstate,sqlwarning,sqrt,start,state,"
            + "statement,static,stddev_pop,stddev_samp,structure,style,subclass_origin,submultiset,"
            + "substring,sum,symmetric,system,system_user,table,table_name,tablesample,temporary,then,"
            + "ties,time,timestamp,timezone_hour,timezone_minute,to,top_level_count,trailing,"
            + "transaction,transaction_active,transactions_committed,transactions_rolled_back,"
            + "transform,transforms,translate,translation,treat,trigger,trigger_catalog,trigger_name,"
            + "trigger_schema,trim,true,type,uescape,unbounded,uncommitted,under,union,unique,unknown,"
            + "unnamed,unnest,update,upper,usage,user,user_defined_type_catalog,user_defined_type_code,"
            + "user_defined_type_name,user_defined_type_schema,using,value,values,var_pop,var_samp,"
            + "varchar,varying,view,when,whenever,where,width_bucket,window,with,within,without,work,"
            + "write,year,zone}'::text[])";

        Statement stmt = null;
        ResultSet rs = null;
        try {
          stmt = connection.createStatement();
          rs = stmt.executeQuery(sql);
          if (!rs.next()) {
            throw new PSQLException(GT.tr("Unable to find keywords in the system catalogs."),
                PSQLState.UNEXPECTED_ERROR);
          }
          keywords = rs.getString(1);
        } finally {
          // Close via JdbcBlackHole so a failure while closing cannot mask the real error.
          JdbcBlackHole.close(rs);
          JdbcBlackHole.close(stmt);
        }
      } else {
        // Static list from PG8.2 src/backend/parser/keywords.c with SQL:2003 excluded.
        keywords = "abort,access,aggregate,also,analyse,analyze,backward,bit,cache,checkpoint,class,"
            + "cluster,comment,concurrently,connection,conversion,copy,csv,database,delimiter,"
            + "delimiters,disable,do,enable,encoding,encrypted,exclusive,explain,force,forward,freeze,"
            + "greatest,handler,header,if,ilike,immutable,implicit,index,indexes,inherit,inherits,"
            + "instead,isnull,least,limit,listen,load,location,lock,mode,move,nothing,notify,notnull,"
            + "nowait,off,offset,oids,operator,owned,owner,password,prepared,procedural,quote,reassign,"
            + "recheck,reindex,rename,replace,reset,restrict,returning,rule,setof,share,show,stable,"
            + "statistics,stdin,stdout,storage,strict,sysid,tablespace,temp,template,truncate,trusted,"
            + "unencrypted,unlisten,until,vacuum,valid,validator,verbose,volatile";
      }
      this.keywords = keywords;
    }
    return keywords;
  }

  /**
   * Returns the JDBC escaped numeric functions translated by this driver's escape layer.
   */
  @Override
  @SuppressWarnings("deprecation")
  public String getNumericFunctions() throws SQLException {
    return EscapedFunctions.ABS + ',' + EscapedFunctions.ACOS + ',' + EscapedFunctions.ASIN + ','
        + EscapedFunctions.ATAN + ',' + EscapedFunctions.ATAN2 + ',' + EscapedFunctions.CEILING
        + ',' + EscapedFunctions.COS + ',' + EscapedFunctions.COT + ',' + EscapedFunctions.DEGREES
        + ',' + EscapedFunctions.EXP + ',' + EscapedFunctions.FLOOR + ',' + EscapedFunctions.LOG
        + ',' + EscapedFunctions.LOG10 + ',' + EscapedFunctions.MOD + ',' + EscapedFunctions.PI
        + ',' + EscapedFunctions.POWER + ',' + EscapedFunctions.RADIANS + ','
        + EscapedFunctions.ROUND + ',' + EscapedFunctions.SIGN + ',' + EscapedFunctions.SIN + ','
        + EscapedFunctions.SQRT + ',' + EscapedFunctions.TAN + ',' + EscapedFunctions.TRUNCATE;
  }

  /**
   * Returns the JDBC escaped string functions translated by this driver's escape layer.
   */
  @Override
  @SuppressWarnings("deprecation")
  public String getStringFunctions() throws SQLException {
    String funcs = EscapedFunctions.ASCII + ',' + EscapedFunctions.CHAR + ','
        + EscapedFunctions.CONCAT + ',' + EscapedFunctions.LCASE + ',' + EscapedFunctions.LEFT + ','
        + EscapedFunctions.LENGTH + ',' + EscapedFunctions.LTRIM + ',' + EscapedFunctions.REPEAT
        + ',' + EscapedFunctions.RTRIM + ',' + EscapedFunctions.SPACE + ','
        + EscapedFunctions.SUBSTRING + ',' + EscapedFunctions.UCASE;

    // Currently these don't work correctly with parameterized
    // arguments, so leave them out. They reorder the arguments
    // when rewriting the query, but no translation layer is provided,
    // so a setObject(N, obj) will not go to the correct parameter.
    // ','+EscapedFunctions.INSERT+','+EscapedFunctions.LOCATE+
    // ','+EscapedFunctions.RIGHT+

    funcs += ',' + EscapedFunctions.REPLACE;

    return funcs;
  }

  /**
   * Returns the JDBC escaped system functions translated by this driver's escape layer.
   */
  @Override
  @SuppressWarnings("deprecation")
  public String getSystemFunctions() throws SQLException {
    return EscapedFunctions.DATABASE + ',' + EscapedFunctions.IFNULL + ',' + EscapedFunctions.USER;
  }

  /**
   * Returns the JDBC escaped time/date functions translated by this driver's escape layer.
   */
  @Override
  @SuppressWarnings("deprecation")
  public String getTimeDateFunctions() throws SQLException {
    String timeDateFuncs = EscapedFunctions.CURDATE + ',' + EscapedFunctions.CURTIME + ','
        + EscapedFunctions.DAYNAME + ',' + EscapedFunctions.DAYOFMONTH + ','
        + EscapedFunctions.DAYOFWEEK + ',' + EscapedFunctions.DAYOFYEAR + ','
        + EscapedFunctions.HOUR + ',' + EscapedFunctions.MINUTE + ',' + EscapedFunctions.MONTH + ','
        + EscapedFunctions.MONTHNAME + ',' + EscapedFunctions.NOW + ',' + EscapedFunctions.QUARTER
        + ',' + EscapedFunctions.SECOND + ',' + EscapedFunctions.WEEK + ',' + EscapedFunctions.YEAR;

    timeDateFuncs += ',' + EscapedFunctions.TIMESTAMPADD;

    // +','+EscapedFunctions.TIMESTAMPDIFF;

    return timeDateFuncs;
  }

  @Override
  public String getSearchStringEscape() throws SQLException {
    // This method originally returned "\\\\" assuming that it
    // would be fed directly into pg's input parser so it would
    // need two backslashes. This isn't how it's supposed to be
    // used though. If passed as a PreparedStatement parameter
    // or fed to a DatabaseMetaData method then double backslashes
    // are incorrect. If you're feeding something directly into
    // a query you are responsible for correctly escaping it.
    // With 8.2+ this escaping is a little trickier because you
    // must know the setting of standard_conforming_strings, but
    // that's not our problem.

    return "\\";
  }

  /**
   * {@inheritDoc}
   *
   * <p>Postgresql allows any high-bit character to be used in an unquoted identifier, so we can't
   * possibly list them all.</p>
   *
   * <p>From the file src/backend/parser/scan.l, an identifier is ident_start [A-Za-z\200-\377_]
   * ident_cont [A-Za-z\200-\377_0-9\$] identifier {ident_start}{ident_cont}*</p>
   *
   * @return a string containing the extra characters
   * @throws SQLException if a database access error occurs
   */
  @Override
  public String getExtraNameCharacters() throws SQLException {
    return "";
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 6.1+
   */
  @Override
  public boolean supportsAlterTableWithAddColumn() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.3+
   */
  @Override
  public boolean supportsAlterTableWithDropColumn() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsColumnAliasing() throws SQLException {
    return true;
  }

  @Override
  public boolean nullPlusNonNullIsNull() throws SQLException {
    return true;
  }

  // The driver does not implement the JDBC CONVERT escape, so both overloads report false.
  @Override
  public boolean supportsConvert() throws SQLException {
    return false;
  }

  @Override
  public boolean supportsConvert(int fromType, int toType) throws SQLException {
    return false;
  }

  @Override
  public boolean supportsTableCorrelationNames() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsDifferentTableCorrelationNames() throws SQLException {
    return false;
  }

  @Override
  public boolean supportsExpressionsInOrderBy() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 6.4+
   */
  @Override
  public boolean supportsOrderByUnrelated() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsGroupBy() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 6.4+
   */
  @Override
  public boolean supportsGroupByUnrelated() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 6.4+
   */
  @Override
  public boolean supportsGroupByBeyondSelect() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.1+
   */
  @Override
  public boolean supportsLikeEscapeClause() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsMultipleResultSets() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsMultipleTransactions() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsNonNullableColumns() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * <p>This grammar is defined at:
   * <a href="http://www.microsoft.com/msdn/sdk/platforms/doc/odbc/src/intropr.htm">
   * http://www.microsoft.com/msdn/sdk/platforms/doc/odbc/src/intropr.htm</a></p>
   *
   * <p>In Appendix C. From this description, we seem to support the ODBC minimal (Level 0) grammar.</p>
   *
   * @return true
   */
  @Override
  public boolean supportsMinimumSQLGrammar() throws SQLException {
    return true;
  }

  /**
   * Does this driver support the Core ODBC SQL grammar. We need SQL-92 conformance for this.
   *
   * @return false
   * @throws SQLException if a database access error occurs
   */
  @Override
  public boolean supportsCoreSQLGrammar() throws SQLException {
    return false;
  }

  /**
   * Does this driver support the Extended (Level 2) ODBC SQL grammar. We don't conform to the Core
   * (Level 1), so we can't conform to the Extended SQL Grammar.
   *
   * @return false
   * @throws SQLException if a database access error occurs
   */
  @Override
  public boolean supportsExtendedSQLGrammar() throws SQLException {
    return false;
  }

  /**
   * Does this driver support the ANSI-92 entry level SQL grammar? All JDBC Compliant drivers must
   * return true. We currently report false until 'schema' support is added. Then this should be
   * changed to return true, since we will be mostly compliant (probably more compliant than many
   * other databases) And since this is a requirement for all JDBC drivers we need to get to the
   * point where we can return true.
   *
   * @return true if connected to PostgreSQL 7.3+
   * @throws SQLException if a database access error occurs
   */
  @Override
  public boolean supportsANSI92EntryLevelSQL() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return false
   */
  @Override
  public boolean supportsANSI92IntermediateSQL() throws SQLException {
    return false;
  }

  /**
   * {@inheritDoc}
   *
   * @return false
   */
  @Override
  public boolean supportsANSI92FullSQL() throws SQLException {
    return false;
  }

  /**
   * Is the SQL Integrity Enhancement Facility supported?
   * Our best guess is that this means support for constraints.
   *
   * @return true
   *
   * @exception SQLException if a database access error occurs
   */
  @Override
  public boolean supportsIntegrityEnhancementFacility() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.1+
   */
  @Override
  public boolean supportsOuterJoins() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.1+
   */
  @Override
  public boolean supportsFullOuterJoins() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.1+
   */
  @Override
  public boolean supportsLimitedOuterJoins() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * <p>The vendor term for "schema" in PostgreSQL is "schema".</p>
   *
   * @return {@code "schema"}
   */
  @Override
  public String getSchemaTerm() throws SQLException {
    return "schema";
  }

  /**
   * {@inheritDoc}
   *
   * @return {@code "function"}
   */
  @Override
  public String getProcedureTerm() throws SQLException {
    return "function";
  }

  /**
   * {@inheritDoc}
   *
   * @return {@code "database"}
   */
  @Override
  public String getCatalogTerm() throws SQLException {
    return "database";
  }

  @Override
  public boolean isCatalogAtStart() throws SQLException {
    return true;
  }

  @Override
  public String getCatalogSeparator() throws SQLException {
    return ".";
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.3+
   */
  @Override
  public boolean supportsSchemasInDataManipulation() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.3+
   */
  @Override
  public boolean supportsSchemasInProcedureCalls() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.3+
   */
  @Override
  public boolean supportsSchemasInTableDefinitions() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.3+
   */
  @Override
  public boolean supportsSchemasInIndexDefinitions() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.3+
   */
  @Override
  public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException {
    return true;
  }

  // Cross-catalog (cross-database) references are not supported, so all the
  // supportsCatalogsIn* capabilities report false.
  @Override
  public boolean supportsCatalogsInDataManipulation() throws SQLException {
    return false;
  }

  @Override
  public boolean supportsCatalogsInProcedureCalls() throws SQLException {
    return false;
  }

  @Override
  public boolean supportsCatalogsInTableDefinitions() throws SQLException {
    return false;
  }

  @Override
  public boolean supportsCatalogsInIndexDefinitions() throws SQLException {
    return false;
  }

  @Override
  public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException {
    return false;
  }

  /**
   * We support cursors for gets only it seems. I dont see a method to get a positioned delete.
   *
   * @return false
   * @throws SQLException if a database access error occurs
   */
  @Override
  public boolean supportsPositionedDelete() throws SQLException {
    return false; // For now...
  }

  @Override
  public boolean supportsPositionedUpdate() throws SQLException {
    return false; // For now...
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 6.5+
   */
  @Override
  public boolean supportsSelectForUpdate() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsStoredProcedures() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsSubqueriesInComparisons() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsSubqueriesInExists() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsSubqueriesInIns() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsSubqueriesInQuantifieds() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.1+
   */
  @Override
  public boolean supportsCorrelatedSubqueries() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 6.3+
   */
  @Override
  public boolean supportsUnion() throws SQLException {
    return true; // since 6.3
  }

  /**
   * {@inheritDoc}
   *
   * @return true if connected to PostgreSQL 7.1+
   */
  @Override
  public boolean supportsUnionAll() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc} In PostgreSQL, Cursors are only open within transactions.
   */
  @Override
  public boolean supportsOpenCursorsAcrossCommit() throws SQLException {
    return false;
  }

  @Override
  public boolean supportsOpenCursorsAcrossRollback() throws SQLException {
    return false;
  }

  /**
   * {@inheritDoc}
   *
   * <p>Can statements remain open across commits? They may, but this driver cannot guarantee that. In
   * further reflection. we are talking a Statement object here, so the answer is yes, since the
   * Statement is only a vehicle to ExecSQL()</p>
   *
   * @return true
   */
  @Override
  public boolean supportsOpenStatementsAcrossCommit() throws SQLException {
    return true;
  }

  /**
   * {@inheritDoc}
   *
   * <p>Can statements remain open across rollbacks? They may, but this driver cannot guarantee that.
   * In further contemplation, we are talking a Statement object here, so the answer is yes, since
   * the Statement is only a vehicle to ExecSQL() in Connection</p>
   *
   * @return true
   */
  @Override
  public boolean supportsOpenStatementsAcrossRollback() throws SQLException {
    return true;
  }

  @Override
  public int getMaxCharLiteralLength() throws SQLException {
    return 0; // no limit
  }

  @Override
  public int getMaxBinaryLiteralLength() throws SQLException {
    return 0; // no limit
  }

  @Override
  public int getMaxColumnNameLength() throws SQLException {
    return getMaxNameLength();
  }

  @Override
  public int getMaxColumnsInGroupBy() throws SQLException {
    return 0; // no limit
  }

  @Override
  public int getMaxColumnsInIndex() throws SQLException {
    return getMaxIndexKeys();
  }

  @Override
  public int getMaxColumnsInOrderBy() throws SQLException {
    return 0; // no limit
  }

  @Override
  public int getMaxColumnsInSelect() throws SQLException {
    return 0; // no limit
  }

  /**
   * {@inheritDoc} What is the maximum number of columns in a table? From the CREATE TABLE reference
   * page...
   *
   * <p>"The new class is created as a heap with no initial data. A class can have no more than 1600
   * attributes (realistically, this is limited by the fact that tuple sizes must be less than 8192
   * bytes)..."</p>
   *
   * @return the max columns
   * @throws SQLException if a database access error occurs
   */
  @Override
  public int getMaxColumnsInTable() throws SQLException {
    return 1600;
  }

  /**
   * {@inheritDoc} How many active connection can we have at a time to this database? Well, since it
   * depends on postmaster, which just does a listen() followed by an accept() and fork(), its
   * basically very high. Unless the system runs out of processes, it can be 65535 (the number of
   * aux. ports on a TCP/IP system). I will return 8192 since that is what even the largest system
   * can realistically handle,
   *
   * @return the maximum number of connections
   * @throws SQLException if a database access error occurs
   */
  @Override
  public int getMaxConnections() throws SQLException {
    return 8192;
  }

  @Override
  public int getMaxCursorNameLength() throws SQLException {
    return getMaxNameLength();
  }

  @Override
  public int getMaxIndexLength() throws SQLException {
    return 0; // no limit (larger than an int anyway)
  }

  @Override
  public int getMaxSchemaNameLength() throws SQLException {
    return getMaxNameLength();
  }

  @Override
  public int getMaxProcedureNameLength() throws SQLException {
    return getMaxNameLength();
  }

  @Override
  public int getMaxCatalogNameLength() throws SQLException {
    return getMaxNameLength();
  }

  @Override
  public int getMaxRowSize() throws SQLException {
    return 1073741824; // 1 GB
  }

  @Override
  public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
    return false;
  }

  @Override
  public int getMaxStatementLength() throws SQLException {
    return 0; // actually whatever fits in size_t
  }

  @Override
  public int getMaxStatements() throws SQLException {
    return 0;
  }

  @Override
  public int getMaxTableNameLength() throws SQLException {
    return getMaxNameLength();
  }

  @Override
  public int getMaxTablesInSelect() throws SQLException {
    return 0; // no limit
  }

  @Override
public int getMaxUserNameLength() throws SQLException { + return getMaxNameLength(); + } + + @Override + public int getDefaultTransactionIsolation() throws SQLException { + String sql = + "SELECT setting FROM pg_catalog.pg_settings WHERE name='default_transaction_isolation'"; + + try (Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(sql)) { + String level = null; + if (rs.next()) { + level = rs.getString(1); + } + if (level == null) { + throw new PSQLException( + GT.tr( + "Unable to determine a value for DefaultTransactionIsolation due to missing " + + " entry in pg_catalog.pg_settings WHERE name='default_transaction_isolation'."), + PSQLState.UNEXPECTED_ERROR); + } + // PostgreSQL returns the value in lower case, so using "toLowerCase" here would be + // slightly more efficient. + switch (level.toLowerCase(Locale.ROOT)) { + case "read uncommitted": + return Connection.TRANSACTION_READ_UNCOMMITTED; + case "repeatable read": + return Connection.TRANSACTION_REPEATABLE_READ; + case "serializable": + return Connection.TRANSACTION_SERIALIZABLE; + case "read committed": + default: // Best guess. + return Connection.TRANSACTION_READ_COMMITTED; + } + } + } + + @Override + public boolean supportsTransactions() throws SQLException { + return true; + } + + /** + * {@inheritDoc} + *
   * <p>We only support TRANSACTION_SERIALIZABLE and TRANSACTION_READ_COMMITTED before 8.0; from 8.0
   * READ_UNCOMMITTED and REPEATABLE_READ are accepted aliases for READ_COMMITTED.</p>
   */
  @Override
  public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
    switch (level) {
      case Connection.TRANSACTION_READ_UNCOMMITTED:
      case Connection.TRANSACTION_READ_COMMITTED:
      case Connection.TRANSACTION_REPEATABLE_READ:
      case Connection.TRANSACTION_SERIALIZABLE:
        return true;
      default:
        // e.g. Connection.TRANSACTION_NONE and any out-of-range value.
        return false;
    }
  }

  @Override
  public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
    return true;
  }

  @Override
  public boolean supportsDataManipulationTransactionsOnly() throws SQLException {
    return false;
  }

  /**
   *

Does a data definition statement within a transaction force the transaction to commit? It seems + * to mean something like:

+ * + *
+   * CREATE TABLE T (A INT);
+   * INSERT INTO T (A) VALUES (2);
+   * BEGIN;
+   * UPDATE T SET A = A + 1;
+   * CREATE TABLE X (A INT);
+   * SELECT A FROM T INTO X;
+   * COMMIT;
+   * 
+ * + *

Does the CREATE TABLE call cause a commit? The answer is no.

+ * + * @return true if so + * @throws SQLException if a database access error occurs + */ + @Override + public boolean dataDefinitionCausesTransactionCommit() throws SQLException { + return false; + } + + @Override + public boolean dataDefinitionIgnoredInTransactions() throws SQLException { + return false; + } + + /** + * Turn the provided value into a valid string literal for direct inclusion into a query. This + * includes the single quotes needed around it. + * + * @param s input value + * + * @return string literal for direct inclusion into a query + * @throws SQLException if something wrong happens + */ + protected String escapeQuotes(String s) throws SQLException { + StringBuilder sb = new StringBuilder(); + if (!connection.getStandardConformingStrings()) { + sb.append("E"); + } + sb.append("'"); + sb.append(connection.escapeString(s)); + sb.append("'"); + return sb.toString(); + } + + @Override + public ResultSet getProcedures(String catalog, String schemaPattern, + String procedureNamePattern) + throws SQLException { + String sql; + sql = "SELECT NULL AS PROCEDURE_CAT, n.nspname AS PROCEDURE_SCHEM, p.proname AS PROCEDURE_NAME, " + + "NULL, NULL, NULL, d.description AS REMARKS, " + + DatabaseMetaData.procedureReturnsResult + " AS PROCEDURE_TYPE, " + + " p.proname || '_' || p.oid AS SPECIFIC_NAME " + + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_proc p " + + " LEFT JOIN pg_catalog.pg_description d ON (p.oid=d.objoid) " + + " LEFT JOIN pg_catalog.pg_class c ON (d.classoid=c.oid AND c.relname='pg_proc') " + + " LEFT JOIN pg_catalog.pg_namespace pn ON (c.relnamespace=pn.oid AND pn.nspname='pg_catalog') " + + " WHERE p.pronamespace=n.oid "; + + if (connection.haveMinimumServerVersion(ServerVersion.v11)) { + sql += " AND p.prokind='p'"; + } + if (schemaPattern != null && !schemaPattern.isEmpty()) { + sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); + } + if (procedureNamePattern != null && !procedureNamePattern.isEmpty()) { + sql += " AND 
p.proname LIKE " + escapeQuotes(procedureNamePattern); + } + if (connection.getHideUnprivilegedObjects()) { + sql += " AND has_function_privilege(p.oid,'EXECUTE')"; + } + sql += " ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, p.oid::text "; + + return createMetaDataStatement().executeQuery(sql); + } + + @Override + public ResultSet getProcedureColumns(String catalog, String schemaPattern, + String procedureNamePattern, String columnNamePattern) + throws SQLException { + int columns = 20; + + Field[] f = new Field[columns]; + List v = new ArrayList<>(); // The new ResultSet tuple stuff + + f[0] = new Field("PROCEDURE_CAT", Oid.VARCHAR); + f[1] = new Field("PROCEDURE_SCHEM", Oid.VARCHAR); + f[2] = new Field("PROCEDURE_NAME", Oid.VARCHAR); + f[3] = new Field("COLUMN_NAME", Oid.VARCHAR); + f[4] = new Field("COLUMN_TYPE", Oid.INT2); + f[5] = new Field("DATA_TYPE", Oid.INT2); + f[6] = new Field("TYPE_NAME", Oid.VARCHAR); + f[7] = new Field("PRECISION", Oid.INT4); + f[8] = new Field("LENGTH", Oid.INT4); + f[9] = new Field("SCALE", Oid.INT2); + f[10] = new Field("RADIX", Oid.INT2); + f[11] = new Field("NULLABLE", Oid.INT2); + f[12] = new Field("REMARKS", Oid.VARCHAR); + f[13] = new Field("COLUMN_DEF", Oid.VARCHAR); + f[14] = new Field("SQL_DATA_TYPE", Oid.INT4); + f[15] = new Field("SQL_DATETIME_SUB", Oid.INT4); + f[16] = new Field("CHAR_OCTET_LENGTH", Oid.INT4); + f[17] = new Field("ORDINAL_POSITION", Oid.INT4); + f[18] = new Field("IS_NULLABLE", Oid.VARCHAR); + f[19] = new Field("SPECIFIC_NAME", Oid.VARCHAR); + + String sql; + sql = "SELECT n.nspname,p.proname,p.prorettype,p.proargtypes, t.typtype,t.typrelid, " + + " p.proargnames, p.proargmodes, p.proallargtypes, p.oid " + + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n, pg_catalog.pg_type t " + + " WHERE p.pronamespace=n.oid AND p.prorettype=t.oid "; + if (schemaPattern != null && !schemaPattern.isEmpty()) { + sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); + } + if (procedureNamePattern != null && 
!procedureNamePattern.isEmpty()) { + sql += " AND p.proname LIKE " + escapeQuotes(procedureNamePattern); + } + sql += " ORDER BY n.nspname, p.proname, p.oid::text "; + + byte[] isnullableUnknown = new byte[0]; + + Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(sql); + while (rs.next()) { + byte[] schema = rs.getBytes("nspname"); + byte[] procedureName = rs.getBytes("proname"); + byte[] specificName = + connection.encodeString(rs.getString("proname") + "_" + rs.getString("oid")); + int returnType = (int) rs.getLong("prorettype"); + String returnTypeType = rs.getString("typtype"); + int returnTypeRelid = (int) rs.getLong("typrelid"); + + String strArgTypes = rs.getString("proargtypes"); + StringTokenizer st = new StringTokenizer(strArgTypes); + List argTypes = new ArrayList<>(); + while (st.hasMoreTokens()) { + argTypes.add(Long.valueOf(st.nextToken())); + } + + String[] argNames = null; + Array argNamesArray = rs.getArray("proargnames"); + if (argNamesArray != null) { + argNames = (String[]) argNamesArray.getArray(); + } + + String[] argModes = null; + Array argModesArray = rs.getArray("proargmodes"); + if (argModesArray != null) { + argModes = (String[]) argModesArray.getArray(); + } + + int numArgs = argTypes.size(); + + Long[] allArgTypes = null; + Array allArgTypesArray = rs.getArray("proallargtypes"); + if (allArgTypesArray != null) { + allArgTypes = (Long[]) allArgTypesArray.getArray(); + numArgs = allArgTypes.length; + } + + // decide if we are returning a single column result. 
+ if ("b".equals(returnTypeType) || "d".equals(returnTypeType) || "e".equals(returnTypeType) + || ("p".equals(returnTypeType) && argModesArray == null)) { + byte[] [] tuple = new byte[columns][]; + tuple[0] = null; + tuple[1] = schema; + tuple[2] = procedureName; + tuple[3] = connection.encodeString("returnValue"); + tuple[4] = connection + .encodeString(Integer.toString(DatabaseMetaData.procedureColumnReturn)); + tuple[5] = connection + .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(returnType))); + tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(returnType)); + tuple[7] = null; + tuple[8] = null; + tuple[9] = null; + tuple[10] = null; + tuple[11] = connection + .encodeString(Integer.toString(DatabaseMetaData.procedureNullableUnknown)); + tuple[12] = null; + tuple[17] = connection.encodeString(Integer.toString(0)); + tuple[18] = isnullableUnknown; + tuple[19] = specificName; + + v.add(new Tuple(tuple)); + } + + // Add a row for each argument. + for (int i = 0; i < numArgs; i++) { + byte[] [] tuple = new byte[columns][]; + tuple[0] = null; + tuple[1] = schema; + tuple[2] = procedureName; + + if (argNames != null) { + tuple[3] = connection.encodeString(argNames[i]); + } else { + tuple[3] = connection.encodeString("$" + (i + 1)); + } + + int columnMode = DatabaseMetaData.procedureColumnIn; + if (argModes != null && "o".equals(argModes[i])) { + columnMode = DatabaseMetaData.procedureColumnOut; + } else if (argModes != null && "b".equals(argModes[i])) { + columnMode = DatabaseMetaData.procedureColumnInOut; + } else if (argModes != null && "t".equals(argModes[i])) { + columnMode = DatabaseMetaData.procedureColumnReturn; + } + + tuple[4] = connection.encodeString(Integer.toString(columnMode)); + + int argOid; + if (allArgTypes != null) { + argOid = allArgTypes[i].intValue(); + } else { + argOid = argTypes.get(i).intValue(); + } + + tuple[5] = + 
connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(argOid))); + tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(argOid)); + tuple[7] = null; + tuple[8] = null; + tuple[9] = null; + tuple[10] = null; + tuple[11] = + connection.encodeString(Integer.toString(DatabaseMetaData.procedureNullableUnknown)); + tuple[12] = null; + tuple[17] = connection.encodeString(Integer.toString(i + 1)); + tuple[18] = isnullableUnknown; + tuple[19] = specificName; + + v.add(new Tuple(tuple)); + } + + // if we are returning a multi-column result. + if ("c".equals(returnTypeType) || ("p".equals(returnTypeType) && argModesArray != null)) { + String columnsql = "SELECT a.attname,a.atttypid FROM pg_catalog.pg_attribute a " + + " WHERE a.attrelid = " + returnTypeRelid + + " AND NOT a.attisdropped AND a.attnum > 0 ORDER BY a.attnum "; + Statement columnstmt = connection.createStatement(); + ResultSet columnrs = columnstmt.executeQuery(columnsql); + while (columnrs.next()) { + int columnTypeOid = (int) columnrs.getLong("atttypid"); + byte[] [] tuple = new byte[columns][]; + tuple[0] = null; + tuple[1] = schema; + tuple[2] = procedureName; + tuple[3] = columnrs.getBytes("attname"); + tuple[4] = connection + .encodeString(Integer.toString(DatabaseMetaData.procedureColumnResult)); + tuple[5] = connection + .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(columnTypeOid))); + tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(columnTypeOid)); + tuple[7] = null; + tuple[8] = null; + tuple[9] = null; + tuple[10] = null; + tuple[11] = connection + .encodeString(Integer.toString(DatabaseMetaData.procedureNullableUnknown)); + tuple[12] = null; + tuple[17] = connection.encodeString(Integer.toString(0)); + tuple[18] = isnullableUnknown; + tuple[19] = specificName; + + v.add(new Tuple(tuple)); + } + columnrs.close(); + columnstmt.close(); + } + } + rs.close(); + stmt.close(); + + return ((BaseStatement) 
createMetaDataStatement()).createDriverResultSet(f, v); + } + + @Override + public ResultSet getTables(String catalog, String schemaPattern, + String tableNamePattern, String [] types) throws SQLException { + String select; + String orderby; + String useSchemas = "SCHEMAS"; + select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, c.relname AS TABLE_NAME, " + + " CASE n.nspname ~ '^pg_' OR n.nspname = 'information_schema' " + + " WHEN true THEN CASE " + + " WHEN n.nspname = 'pg_catalog' OR n.nspname = 'information_schema' THEN CASE c.relkind " + + " WHEN 'r' THEN 'SYSTEM TABLE' " + + " WHEN 'v' THEN 'SYSTEM VIEW' " + + " WHEN 'i' THEN 'SYSTEM INDEX' " + + " ELSE NULL " + + " END " + + " WHEN n.nspname = 'pg_toast' THEN CASE c.relkind " + + " WHEN 'r' THEN 'SYSTEM TOAST TABLE' " + + " WHEN 'i' THEN 'SYSTEM TOAST INDEX' " + + " ELSE NULL " + + " END " + + " ELSE CASE c.relkind " + + " WHEN 'r' THEN 'TEMPORARY TABLE' " + + " WHEN 'p' THEN 'TEMPORARY TABLE' " + + " WHEN 'i' THEN 'TEMPORARY INDEX' " + + " WHEN 'S' THEN 'TEMPORARY SEQUENCE' " + + " WHEN 'v' THEN 'TEMPORARY VIEW' " + + " ELSE NULL " + + " END " + + " END " + + " WHEN false THEN CASE c.relkind " + + " WHEN 'r' THEN 'TABLE' " + + " WHEN 'p' THEN 'PARTITIONED TABLE' " + + " WHEN 'i' THEN 'INDEX' " + + " WHEN 'P' then 'PARTITIONED INDEX' " + + " WHEN 'S' THEN 'SEQUENCE' " + + " WHEN 'v' THEN 'VIEW' " + + " WHEN 'c' THEN 'TYPE' " + + " WHEN 'f' THEN 'FOREIGN TABLE' " + + " WHEN 'm' THEN 'MATERIALIZED VIEW' " + + " ELSE NULL " + + " END " + + " ELSE NULL " + + " END " + + " AS TABLE_TYPE, d.description AS REMARKS, " + + " '' as TYPE_CAT, '' as TYPE_SCHEM, '' as TYPE_NAME, " + + "'' AS SELF_REFERENCING_COL_NAME, '' AS REF_GENERATION " + + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c " + + " LEFT JOIN pg_catalog.pg_description d ON (c.oid = d.objoid AND d.objsubid = 0 and d.classoid = 'pg_class'::regclass) " + + " WHERE c.relnamespace = n.oid "; + + if (schemaPattern != null && 
!schemaPattern.isEmpty()) { + select += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); + } + if (connection.getHideUnprivilegedObjects()) { + select += " AND has_table_privilege(c.oid, " + + " 'SELECT, INSERT, UPDATE, DELETE, RULE, REFERENCES, TRIGGER')"; + } + orderby = " ORDER BY TABLE_TYPE,TABLE_SCHEM,TABLE_NAME "; + + if (tableNamePattern != null && !tableNamePattern.isEmpty()) { + select += " AND c.relname LIKE " + escapeQuotes(tableNamePattern); + } + if (types != null) { + select += " AND (false "; + StringBuilder orclause = new StringBuilder(); + for (String type : types) { + Map clauses = tableTypeClauses.get(type); + if (clauses != null) { + String clause = clauses.get(useSchemas); + orclause.append(" OR ( ").append(clause).append(" ) "); + } + } + select += orclause.toString() + ") "; + } + String sql = select + orderby; + + return ((PgResultSet) createMetaDataStatement().executeQuery(sql)).upperCaseFieldLabels(); + } + + private static final Map> tableTypeClauses; + + static { + tableTypeClauses = new HashMap<>(); + Map ht = new HashMap<>(); + tableTypeClauses.put("TABLE", ht); + ht.put("SCHEMAS", "c.relkind = 'r' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'"); + ht.put("NOSCHEMAS", "c.relkind = 'r' AND c.relname !~ '^pg_'"); + ht = new HashMap<>(); + tableTypeClauses.put("PARTITIONED TABLE", ht); + ht.put("SCHEMAS", "c.relkind = 'p' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'"); + ht.put("NOSCHEMAS", "c.relkind = 'p' AND c.relname !~ '^pg_'"); + ht = new HashMap<>(); + tableTypeClauses.put("VIEW", ht); + ht.put("SCHEMAS", + "c.relkind = 'v' AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema'"); + ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname !~ '^pg_'"); + ht = new HashMap<>(); + tableTypeClauses.put("INDEX", ht); + ht.put("SCHEMAS", + "c.relkind = 'i' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'"); + ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname !~ '^pg_'"); + ht = 
new HashMap<>(); + tableTypeClauses.put("PARTITIONED INDEX", ht); + ht.put("SCHEMAS", "c.relkind = 'I' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'"); + ht.put("NOSCHEMAS", "c.relkind = 'I' AND c.relname !~ '^pg_'"); + ht = new HashMap<>(); + tableTypeClauses.put("SEQUENCE", ht); + ht.put("SCHEMAS", "c.relkind = 'S'"); + ht.put("NOSCHEMAS", "c.relkind = 'S'"); + ht = new HashMap<>(); + tableTypeClauses.put("TYPE", ht); + ht.put("SCHEMAS", + "c.relkind = 'c' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'"); + ht.put("NOSCHEMAS", "c.relkind = 'c' AND c.relname !~ '^pg_'"); + ht = new HashMap<>(); + tableTypeClauses.put("SYSTEM TABLE", ht); + ht.put("SCHEMAS", + "c.relkind = 'r' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema')"); + ht.put("NOSCHEMAS", + "c.relkind = 'r' AND c.relname ~ '^pg_' AND c.relname !~ '^pg_toast_' AND c.relname !~ '^pg_temp_'"); + ht = new HashMap<>(); + tableTypeClauses.put("SYSTEM TOAST TABLE", ht); + ht.put("SCHEMAS", "c.relkind = 'r' AND n.nspname = 'pg_toast'"); + ht.put("NOSCHEMAS", "c.relkind = 'r' AND c.relname ~ '^pg_toast_'"); + ht = new HashMap<>(); + tableTypeClauses.put("SYSTEM TOAST INDEX", ht); + ht.put("SCHEMAS", "c.relkind = 'i' AND n.nspname = 'pg_toast'"); + ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname ~ '^pg_toast_'"); + ht = new HashMap<>(); + tableTypeClauses.put("SYSTEM VIEW", ht); + ht.put("SCHEMAS", + "c.relkind = 'v' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema') "); + ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname ~ '^pg_'"); + ht = new HashMap<>(); + tableTypeClauses.put("SYSTEM INDEX", ht); + ht.put("SCHEMAS", + "c.relkind = 'i' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema') "); + ht.put("NOSCHEMAS", + "c.relkind = 'v' AND c.relname ~ '^pg_' AND c.relname !~ '^pg_toast_' AND c.relname !~ '^pg_temp_'"); + ht = new HashMap<>(); + tableTypeClauses.put("TEMPORARY TABLE", ht); + ht.put("SCHEMAS", "c.relkind IN ('r','p') 
AND n.nspname ~ '^pg_temp_' "); + ht.put("NOSCHEMAS", "c.relkind IN ('r','p') AND c.relname ~ '^pg_temp_' "); + ht = new HashMap<>(); + tableTypeClauses.put("TEMPORARY INDEX", ht); + ht.put("SCHEMAS", "c.relkind = 'i' AND n.nspname ~ '^pg_temp_' "); + ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname ~ '^pg_temp_' "); + ht = new HashMap<>(); + tableTypeClauses.put("TEMPORARY VIEW", ht); + ht.put("SCHEMAS", "c.relkind = 'v' AND n.nspname ~ '^pg_temp_' "); + ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname ~ '^pg_temp_' "); + ht = new HashMap<>(); + tableTypeClauses.put("TEMPORARY SEQUENCE", ht); + ht.put("SCHEMAS", "c.relkind = 'S' AND n.nspname ~ '^pg_temp_' "); + ht.put("NOSCHEMAS", "c.relkind = 'S' AND c.relname ~ '^pg_temp_' "); + ht = new HashMap<>(); + tableTypeClauses.put("FOREIGN TABLE", ht); + ht.put("SCHEMAS", "c.relkind = 'f'"); + ht.put("NOSCHEMAS", "c.relkind = 'f'"); + ht = new HashMap<>(); + tableTypeClauses.put("MATERIALIZED VIEW", ht); + ht.put("SCHEMAS", "c.relkind = 'm'"); + ht.put("NOSCHEMAS", "c.relkind = 'm'"); + } + + @Override + public ResultSet getSchemas() throws SQLException { + return getSchemas(null, null); + } + + @Override + public ResultSet getSchemas(String catalog, String schemaPattern) + throws SQLException { + String sql; + sql = "SELECT nspname AS TABLE_SCHEM, NULL AS TABLE_CATALOG FROM pg_catalog.pg_namespace " + + " WHERE nspname <> 'pg_toast' AND (nspname !~ '^pg_temp_' " + + " OR nspname = (pg_catalog.current_schemas(true))[1]) AND (nspname !~ '^pg_toast_temp_' " + + " OR nspname = replace((pg_catalog.current_schemas(true))[1], 'pg_temp_', 'pg_toast_temp_')) "; + if (schemaPattern != null && !schemaPattern.isEmpty()) { + sql += " AND nspname LIKE " + escapeQuotes(schemaPattern); + } + if (connection.getHideUnprivilegedObjects()) { + sql += " AND has_schema_privilege(nspname, 'USAGE, CREATE')"; + } + sql += " ORDER BY TABLE_SCHEM"; + + return createMetaDataStatement().executeQuery(sql); + } + + @Override + public ResultSet 
getCatalogs() throws SQLException { + String sql = "SELECT datname AS TABLE_CAT FROM pg_catalog.pg_database" + + " WHERE datallowconn = true" + + " ORDER BY datname"; + return createMetaDataStatement().executeQuery(sql); + } + + @Override + public ResultSet getTableTypes() throws SQLException { + String[] types = tableTypeClauses.keySet().toArray(new String[0]); + Arrays.sort(types); + + Field[] f = new Field[1]; + List v = new ArrayList<>(); + f[0] = new Field("TABLE_TYPE", Oid.VARCHAR); + for (String type : types) { + byte[] [] tuple = new byte[1][]; + tuple[0] = connection.encodeString(type); + v.add(new Tuple(tuple)); + } + + return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); + } + + @Override + public ResultSet getColumns(String catalog, String schemaPattern, + String tableNamePattern, + String columnNamePattern) throws SQLException { + + int numberOfFields = 24; // JDBC4 + List v = new ArrayList<>(); // The new ResultSet tuple stuff + Field[] f = new Field[numberOfFields]; // The field descriptors for the new ResultSet + + f[0] = new Field("TABLE_CAT", Oid.VARCHAR); + f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR); + f[2] = new Field("TABLE_NAME", Oid.VARCHAR); + f[3] = new Field("COLUMN_NAME", Oid.VARCHAR); + f[4] = new Field("DATA_TYPE", Oid.INT2); + f[5] = new Field("TYPE_NAME", Oid.VARCHAR); + f[6] = new Field("COLUMN_SIZE", Oid.INT4); + f[7] = new Field("BUFFER_LENGTH", Oid.VARCHAR); + f[8] = new Field("DECIMAL_DIGITS", Oid.INT4); + f[9] = new Field("NUM_PREC_RADIX", Oid.INT4); + f[10] = new Field("NULLABLE", Oid.INT4); + f[11] = new Field("REMARKS", Oid.VARCHAR); + f[12] = new Field("COLUMN_DEF", Oid.VARCHAR); + f[13] = new Field("SQL_DATA_TYPE", Oid.INT4); + f[14] = new Field("SQL_DATETIME_SUB", Oid.INT4); + f[15] = new Field("CHAR_OCTET_LENGTH", Oid.VARCHAR); + f[16] = new Field("ORDINAL_POSITION", Oid.INT4); + f[17] = new Field("IS_NULLABLE", Oid.VARCHAR); + f[18] = new Field("SCOPE_CATALOG", Oid.VARCHAR); + f[19] = new 
Field("SCOPE_SCHEMA", Oid.VARCHAR); + f[20] = new Field("SCOPE_TABLE", Oid.VARCHAR); + f[21] = new Field("SOURCE_DATA_TYPE", Oid.INT2); + f[22] = new Field("IS_AUTOINCREMENT", Oid.VARCHAR); + f[23] = new Field( "IS_GENERATEDCOLUMN", Oid.VARCHAR); + + String sql; + // a.attnum isn't decremented when preceding columns are dropped, + // so the only way to calculate the correct column number is with + // window functions, new in 8.4. + // + // We want to push as much predicate information below the window + // function as possible (schema/table names), but must leave + // column name outside so we correctly count the other columns. + // + if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) { + sql = "SELECT * FROM ("; + } else { + sql = ""; + } + + sql += "SELECT n.nspname,c.relname,a.attname,a.atttypid,a.attnotnull " + + "OR (t.typtype = 'd' AND t.typnotnull) AS attnotnull,a.atttypmod,a.attlen,t.typtypmod,"; + + if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) { + sql += "row_number() OVER (PARTITION BY a.attrelid ORDER BY a.attnum) AS attnum, "; + } else { + sql += "a.attnum,"; + } + + if (connection.haveMinimumServerVersion(ServerVersion.v10)) { + sql += "nullif(a.attidentity, '') as attidentity,"; + } else { + sql += "null as attidentity,"; + } + + if (connection.haveMinimumServerVersion(ServerVersion.v12)) { + sql += "nullif(a.attgenerated, '') as attgenerated,"; + } else { + sql += "null as attgenerated,"; + } + + sql += "pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS adsrc,dsc.description,t.typbasetype,t.typtype " + + " FROM pg_catalog.pg_namespace n " + + " JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid) " + + " JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid) " + + " JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) " + + " LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) " + + " LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid) " + + " LEFT JOIN 
pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class') " + + " LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog') " + + " WHERE c.relkind in ('r','p','v','f','m') and a.attnum > 0 AND NOT a.attisdropped "; + + if (schemaPattern != null && !schemaPattern.isEmpty()) { + sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); + } + if (tableNamePattern != null && !tableNamePattern.isEmpty()) { + sql += " AND c.relname LIKE " + escapeQuotes(tableNamePattern); + } + if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) { + sql += ") c WHERE true "; + } + if (columnNamePattern != null && !columnNamePattern.isEmpty()) { + sql += " AND attname LIKE " + escapeQuotes(columnNamePattern); + } + sql += " ORDER BY nspname,c.relname,attnum "; + + Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(sql); + while (rs.next()) { + byte[] [] tuple = new byte[numberOfFields][]; + int typeOid = (int) rs.getLong("atttypid"); + int typeMod = rs.getInt("atttypmod"); + + tuple[0] = null; // Catalog name, not supported + tuple[1] = rs.getBytes("nspname"); // Schema + tuple[2] = rs.getBytes("relname"); // Table name + tuple[3] = rs.getBytes("attname"); // Column name + + String typtype = rs.getString("typtype"); + int sqlType; + if ("c".equals(typtype)) { + sqlType = Types.STRUCT; + } else if ("d".equals(typtype)) { + sqlType = Types.DISTINCT; + } else if ("e".equals(typtype)) { + sqlType = Types.VARCHAR; + } else { + sqlType = connection.getTypeInfo().getSQLType(typeOid); + } + + tuple[4] = connection.encodeString(Integer.toString(sqlType)); + String pgType = connection.getTypeInfo().getPGType(typeOid); + tuple[5] = connection.encodeString(pgType); // Type name + tuple[7] = null; // Buffer length + + String defval = rs.getString("adsrc"); + + if (defval != null && defval.contains("nextval(") ) { + if ("int4".equals(pgType)) { + tuple[5] = connection.encodeString("serial"); // Type name == 
serial + } else if ("int8".equals(pgType)) { + tuple[5] = connection.encodeString("bigserial"); // Type name == bigserial + } else if ("int2".equals(pgType) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) { + tuple[5] = connection.encodeString("smallserial"); // Type name == smallserial + } + } + String identity = rs.getString("attidentity"); + + String generated = rs.getString("attgenerated"); + + int baseTypeOid = (int) rs.getLong("typbasetype"); + + int decimalDigits; + int columnSize; + + /* this is really a DOMAIN type not sure where DISTINCT came from */ + if ( sqlType == Types.DISTINCT ) { + /* + From the docs if typtypmod is -1 + */ + int typtypmod = rs.getInt("typtypmod"); + decimalDigits = connection.getTypeInfo().getScale(baseTypeOid, typeMod); + /* + From the postgres docs: + Domains use typtypmod to record the typmod to be applied to their + base type (-1 if base type does not use a typmod). -1 if this type is not a domain. + if it is -1 then get the precision from the basetype. This doesn't help if the basetype is + a domain, but for actual types this will return the correct value. 
+ */ + if ( typtypmod == -1 ) { + columnSize = connection.getTypeInfo().getPrecision(baseTypeOid, typeMod); + } else if (baseTypeOid == Oid.NUMERIC ) { + decimalDigits = connection.getTypeInfo().getScale(baseTypeOid, typtypmod); + columnSize = connection.getTypeInfo().getPrecision(baseTypeOid, typtypmod); + } else { + columnSize = typtypmod; + } + } else { + decimalDigits = connection.getTypeInfo().getScale(typeOid, typeMod); + columnSize = connection.getTypeInfo().getPrecision(typeOid, typeMod); + if ( sqlType != Types.NUMERIC && columnSize == 0 ) { + columnSize = connection.getTypeInfo().getDisplaySize(typeOid, typeMod); + } + } + tuple[6] = connection.encodeString(Integer.toString(columnSize)); + // Give null for an unset scale on Decimal and Numeric columns + if (((sqlType == Types.NUMERIC) || (sqlType == Types.DECIMAL)) && (typeMod == -1)) { + tuple[8] = null; + } else { + tuple[8] = connection.encodeString(Integer.toString(decimalDigits)); + } + + // Everything is base 10 unless we override later. + tuple[9] = connection.encodeString("10"); + + if ("bit".equals(pgType) || "varbit".equals(pgType)) { + tuple[9] = connection.encodeString("2"); + } + + tuple[10] = connection.encodeString(Integer.toString(rs.getBoolean("attnotnull") + ? DatabaseMetaData.columnNoNulls : DatabaseMetaData.columnNullable)); // Nullable + tuple[11] = rs.getBytes("description"); // Description (if any) + tuple[12] = rs.getBytes("adsrc"); // Column default + tuple[13] = null; // sql data type (unused) + tuple[14] = null; // sql datetime sub (unused) + tuple[15] = tuple[6]; // char octet length + tuple[16] = connection.encodeString(String.valueOf(rs.getInt("attnum"))); // ordinal position + // Is nullable + tuple[17] = connection.encodeString(rs.getBoolean("attnotnull") ? "NO" : "YES"); + + tuple[18] = null; // SCOPE_CATLOG + tuple[19] = null; // SCOPE_SCHEMA + tuple[20] = null; // SCOPE_TABLE + tuple[21] = baseTypeOid == 0 // SOURCE_DATA_TYPE + ? 
null + : connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(baseTypeOid))); + + String autoinc = "NO"; + if (defval != null && defval.contains("nextval(") || identity != null) { + autoinc = "YES"; + } + tuple[22] = connection.encodeString(autoinc); // IS_AUTOINCREMENT + + String generatedcolumn = "NO"; + if (generated != null) { + generatedcolumn = "YES"; + } + tuple[23] = connection.encodeString(generatedcolumn); // IS_GENERATEDCOLUMN + + v.add(new Tuple(tuple)); + } + rs.close(); + stmt.close(); + + return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); + } + + @Override + public ResultSet getColumnPrivileges(String catalog, String schema, + String table, String columnNamePattern) throws SQLException { + Field[] f = new Field[8]; + List v = new ArrayList<>(); + + f[0] = new Field("TABLE_CAT", Oid.VARCHAR); + f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR); + f[2] = new Field("TABLE_NAME", Oid.VARCHAR); + f[3] = new Field("COLUMN_NAME", Oid.VARCHAR); + f[4] = new Field("GRANTOR", Oid.VARCHAR); + f[5] = new Field("GRANTEE", Oid.VARCHAR); + f[6] = new Field("PRIVILEGE", Oid.VARCHAR); + f[7] = new Field("IS_GRANTABLE", Oid.VARCHAR); + + String sql; + sql = "SELECT n.nspname,c.relname,r.rolname,c.relacl, " + + (connection.haveMinimumServerVersion(ServerVersion.v8_4) ? 
"a.attacl, " : "") + + " a.attname " + + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c, " + + " pg_catalog.pg_roles r, pg_catalog.pg_attribute a " + + " WHERE c.relnamespace = n.oid " + + " AND c.relowner = r.oid " + + " AND c.oid = a.attrelid " + + " AND c.relkind = 'r' " + + " AND a.attnum > 0 AND NOT a.attisdropped "; + + if (schema != null && !schema.isEmpty()) { + sql += " AND n.nspname = " + escapeQuotes(schema); + } + if (table != null && !table.isEmpty()) { + sql += " AND c.relname = " + escapeQuotes(table); + } + if (columnNamePattern != null && !columnNamePattern.isEmpty()) { + sql += " AND a.attname LIKE " + escapeQuotes(columnNamePattern); + } + sql += " ORDER BY attname "; + + Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(sql); + while (rs.next()) { + byte[] schemaName = rs.getBytes("nspname"); + byte[] tableName = rs.getBytes("relname"); + byte[] column = rs.getBytes("attname"); + String owner = rs.getString("rolname"); + String relAcl = rs.getString("relacl"); + + // For instance: SELECT -> user1 -> list of [grantor, grantable] + Map>> permissions = parseACL(relAcl, owner); + + if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) { + String acl = rs.getString("attacl"); + Map>> relPermissions = parseACL(acl, owner); + permissions.putAll(relPermissions); + } + String[] permNames = permissions.keySet().toArray(new String[0]); + Arrays.sort(permNames); + for (String permName : permNames) { + byte[] privilege = connection.encodeString(permName); + Map> grantees = permissions.get(permName); + for (Map.Entry> userToGrantable : grantees.entrySet()) { + List grantor = userToGrantable.getValue(); + String grantee = userToGrantable.getKey(); + for (String[] grants : grantor) { + String grantable = owner.equals(grantee) ? 
"YES" : grants[1]; + byte[] [] tuple = new byte[8][]; + tuple[0] = null; + tuple[1] = schemaName; + tuple[2] = tableName; + tuple[3] = column; + tuple[4] = connection.encodeString(grants[0]); + tuple[5] = connection.encodeString(grantee); + tuple[6] = privilege; + tuple[7] = connection.encodeString(grantable); + v.add(new Tuple(tuple)); + } + } + } + } + rs.close(); + stmt.close(); + + return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); + } + + @Override + public ResultSet getTablePrivileges(String catalog, String schemaPattern, + String tableNamePattern) throws SQLException { + Field[] f = new Field[7]; + List v = new ArrayList<>(); + + f[0] = new Field("TABLE_CAT", Oid.VARCHAR); + f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR); + f[2] = new Field("TABLE_NAME", Oid.VARCHAR); + f[3] = new Field("GRANTOR", Oid.VARCHAR); + f[4] = new Field("GRANTEE", Oid.VARCHAR); + f[5] = new Field("PRIVILEGE", Oid.VARCHAR); + f[6] = new Field("IS_GRANTABLE", Oid.VARCHAR); + + String sql; + // r = ordinary table, p = partitioned table, v = view, m = materialized view, f = foreign table + sql = "SELECT n.nspname,c.relname,r.rolname,c.relacl " + + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c, pg_catalog.pg_roles r " + + " WHERE c.relnamespace = n.oid " + + " AND c.relowner = r.oid " + + " AND c.relkind IN ('r','p','v','m','f') "; + + if (schemaPattern != null && !schemaPattern.isEmpty()) { + sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); + } + + if (tableNamePattern != null && !tableNamePattern.isEmpty()) { + sql += " AND c.relname LIKE " + escapeQuotes(tableNamePattern); + } + sql += " ORDER BY nspname, relname "; + + Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(sql); + while (rs.next()) { + byte[] schema = rs.getBytes("nspname"); + byte[] table = rs.getBytes("relname"); + String owner = rs.getString("rolname"); + String acl = rs.getString("relacl"); + Map>> permissions = parseACL(acl, owner); + 
String[] permNames = permissions.keySet().toArray(new String[0]); + Arrays.sort(permNames); + for (String permName : permNames) { + byte[] privilege = connection.encodeString(permName); + Map> grantees = permissions.get(permName); + for (Map.Entry> userToGrantable : grantees.entrySet()) { + List grants = userToGrantable.getValue(); + String granteeUser = userToGrantable.getKey(); + for (String[] grantTuple : grants) { + // report the owner as grantor if it's missing + String grantor = grantTuple[0] == null ? owner : grantTuple[0]; + // owner always has grant privileges + String grantable = owner.equals(granteeUser) ? "YES" : grantTuple[1]; + byte[] [] tuple = new byte[7][]; + tuple[0] = null; + tuple[1] = schema; + tuple[2] = table; + tuple[3] = connection.encodeString(grantor); + tuple[4] = connection.encodeString(granteeUser); + tuple[5] = privilege; + tuple[6] = connection.encodeString(grantable); + v.add(new Tuple(tuple)); + } + } + } + } + rs.close(); + stmt.close(); + + return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); + } + + /** + * Parse an String of ACLs into a List of ACLs. + */ + private static List parseACLArray(String aclString) { + List acls = new ArrayList<>(); + if (aclString == null || aclString.isEmpty()) { + return acls; + } + boolean inQuotes = false; + // start at 1 because of leading "{" + int beginIndex = 1; + char prevChar = ' '; + for (int i = beginIndex; i < aclString.length(); i++) { + + char c = aclString.charAt(i); + if (c == '"' && prevChar != '\\') { + inQuotes = !inQuotes; + } else if (c == ',' && !inQuotes) { + acls.add(aclString.substring(beginIndex, i)); + beginIndex = i + 1; + } + prevChar = c; + } + // add last element removing the trailing "}" + acls.add(aclString.substring(beginIndex, aclString.length() - 1)); + + // Strip out enclosing quotes, if any. 
+ for (int i = 0; i < acls.size(); i++) { + String acl = acls.get(i); + if (acl.startsWith("\"") && acl.endsWith("\"")) { + acl = acl.substring(1, acl.length() - 1); + acls.set(i, acl); + } + } + return acls; + } + + /** + * Add the user described by the given acl to the Lists of users with the privileges described by + * the acl. + */ + private static void addACLPrivileges(String acl, + Map>> privileges) { + int equalIndex = acl.lastIndexOf("="); + int slashIndex = acl.lastIndexOf("/"); + if (equalIndex == -1) { + return; + } + + String user = acl.substring(0, equalIndex); + String grantor = null; + if (user.isEmpty()) { + user = "PUBLIC"; + } + String privs; + if (slashIndex != -1) { + privs = acl.substring(equalIndex + 1, slashIndex); + grantor = acl.substring(slashIndex + 1, acl.length()); + } else { + privs = acl.substring(equalIndex + 1, acl.length()); + } + + for (int i = 0; i < privs.length(); i++) { + char c = privs.charAt(i); + if (c != '*') { + String sqlpriv; + String grantable; + if (i < privs.length() - 1 && privs.charAt(i + 1) == '*') { + grantable = "YES"; + } else { + grantable = "NO"; + } + switch (c) { + case 'a': + sqlpriv = "INSERT"; + break; + case 'r': + case 'p': + sqlpriv = "SELECT"; + break; + case 'w': + sqlpriv = "UPDATE"; + break; + case 'd': + sqlpriv = "DELETE"; + break; + case 'D': + sqlpriv = "TRUNCATE"; + break; + case 'R': + sqlpriv = "RULE"; + break; + case 'x': + sqlpriv = "REFERENCES"; + break; + case 't': + sqlpriv = "TRIGGER"; + break; + // the following can't be granted to a table, but + // we'll keep them for completeness. 
+ case 'X': + sqlpriv = "EXECUTE"; + break; + case 'U': + sqlpriv = "USAGE"; + break; + case 'C': + sqlpriv = "CREATE"; + break; + case 'T': + sqlpriv = "CREATE TEMP"; + break; + default: + sqlpriv = "UNKNOWN"; + } + + Map> usersWithPermission = privileges.get(sqlpriv); + if (usersWithPermission == null) { + usersWithPermission = new HashMap<>(); + privileges.put(sqlpriv, usersWithPermission); + } + + List permissionByGrantor = usersWithPermission.get(user); + if (permissionByGrantor == null) { + permissionByGrantor = new ArrayList<>(); + usersWithPermission.put(user, permissionByGrantor); + } + + String[] grant = {grantor, grantable}; + permissionByGrantor.add(grant); + } + } + } + + /** + * Take the a String representing an array of ACLs and return a Map mapping the SQL permission + * name to a List of usernames who have that permission. + * For instance: {@code SELECT -> user1 -> list of [grantor, grantable]} + * + * @param aclArray ACL array + * @param owner owner + * @return a Map mapping the SQL permission name + */ + public Map>> parseACL(String aclArray, + String owner) { + if (aclArray == null) { + // arwdxt -- 8.2 Removed the separate RULE permission + // arwdDxt -- 8.4 Added a separate TRUNCATE permission + String perms = connection.haveMinimumServerVersion(ServerVersion.v8_4) ? 
"arwdDxt" : "arwdxt"; + + aclArray = "{" + owner + "=" + perms + "/" + owner + "}"; + } + + List acls = parseACLArray(aclArray); + Map>> privileges = + new HashMap<>(); + for (String acl : acls) { + addACLPrivileges(acl, privileges); + } + return privileges; + } + + @Override + public ResultSet getBestRowIdentifier( + String catalog, String schema, String table, + int scope, boolean nullable) throws SQLException { + Field[] f = new Field[8]; + List v = new ArrayList<>(); // The new ResultSet tuple stuff + + f[0] = new Field("SCOPE", Oid.INT2); + f[1] = new Field("COLUMN_NAME", Oid.VARCHAR); + f[2] = new Field("DATA_TYPE", Oid.INT2); + f[3] = new Field("TYPE_NAME", Oid.VARCHAR); + f[4] = new Field("COLUMN_SIZE", Oid.INT4); + f[5] = new Field("BUFFER_LENGTH", Oid.INT4); + f[6] = new Field("DECIMAL_DIGITS", Oid.INT2); + f[7] = new Field("PSEUDO_COLUMN", Oid.INT2); + + /* + * At the moment this simply returns a table's primary key, if there is one. I believe other + * unique indexes, ctid, and oid should also be considered. 
-KJ + */ + + String sql; + sql = "SELECT a.attname, a.atttypid, atttypmod " + + "FROM pg_catalog.pg_class ct " + + " JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) " + + " JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) " + + " JOIN (SELECT i.indexrelid, i.indrelid, i.indisprimary, " + + " information_schema._pg_expandarray(i.indkey) AS keys " + + " FROM pg_catalog.pg_index i) i " + + " ON (a.attnum = (i.keys).x AND a.attrelid = i.indrelid) " + + "WHERE true "; + + if (schema != null && !schema.isEmpty()) { + sql += " AND n.nspname = " + escapeQuotes(schema); + } + + sql += " AND ct.relname = " + escapeQuotes(table) + + " AND i.indisprimary " + + " ORDER BY a.attnum "; + + Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(sql); + while (rs.next()) { + byte[] [] tuple = new byte[8][]; + int typeOid = (int) rs.getLong("atttypid"); + int sqlType = connection.getTypeInfo().getSQLType(typeOid); + int typeMod = rs.getInt("atttypmod"); + int decimalDigits = connection.getTypeInfo().getScale(typeOid, typeMod); + int columnSize = connection.getTypeInfo().getPrecision(typeOid, typeMod); + if ( sqlType != Types.NUMERIC && columnSize == 0) { + columnSize = connection.getTypeInfo().getDisplaySize(typeOid, typeMod); + } + tuple[0] = connection.encodeString(Integer.toString(scope)); + tuple[1] = rs.getBytes("attname"); + tuple[2] = + connection.encodeString(Integer.toString(sqlType)); + tuple[3] = connection.encodeString(connection.getTypeInfo().getPGType(typeOid)); + tuple[4] = connection.encodeString(Integer.toString(columnSize)); + tuple[5] = null; // unused + tuple[6] = connection.encodeString(Integer.toString(decimalDigits)); + tuple[7] = + connection.encodeString(Integer.toString(DatabaseMetaData.bestRowNotPseudo)); + v.add(new Tuple(tuple)); + } + rs.close(); + stmt.close(); + + return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); + } + + @Override + public ResultSet getVersionColumns( + String 
catalog, String schema, String table) + throws SQLException { + Field[] f = new Field[8]; + List v = new ArrayList<>(); // The new ResultSet tuple stuff + + f[0] = new Field("SCOPE", Oid.INT2); + f[1] = new Field("COLUMN_NAME", Oid.VARCHAR); + f[2] = new Field("DATA_TYPE", Oid.INT2); + f[3] = new Field("TYPE_NAME", Oid.VARCHAR); + f[4] = new Field("COLUMN_SIZE", Oid.INT4); + f[5] = new Field("BUFFER_LENGTH", Oid.INT4); + f[6] = new Field("DECIMAL_DIGITS", Oid.INT2); + f[7] = new Field("PSEUDO_COLUMN", Oid.INT2); + + byte[] [] tuple = new byte[8][]; + + /* + * Postgresql does not have any column types that are automatically updated like some databases' + * timestamp type. We can't tell what rules or triggers might be doing, so we are left with the + * system columns that change on an update. An update may change all of the following system + * columns: ctid, xmax, xmin, cmax, and cmin. Depending on if we are in a transaction and + * whether we roll it back or not the only guaranteed change is to ctid. -KJ + */ + + tuple[0] = null; + tuple[1] = connection.encodeString("ctid"); + tuple[2] = + connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType("tid"))); + tuple[3] = connection.encodeString("tid"); + tuple[4] = null; + tuple[5] = null; + tuple[6] = null; + tuple[7] = + connection.encodeString(Integer.toString(DatabaseMetaData.versionColumnPseudo)); + v.add(new Tuple(tuple)); + + /* + * Perhaps we should check that the given catalog.schema.table actually exists. 
     -KJ
     */
    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
  }

  @Override
  public ResultSet getPrimaryKeys(String catalog, String schema, String table)
      throws SQLException {
    // Primary key columns come from pg_index rows flagged indisprimary,
    // joined to the owning table (ct), its attributes (a) and the index
    // relation (ci). The catalog argument is ignored; TABLE_CAT is NULL.
    String sql;
    sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "
        + " ct.relname AS TABLE_NAME, a.attname AS COLUMN_NAME, "
        + " (information_schema._pg_expandarray(i.indkey)).n AS KEY_SEQ, ci.relname AS PK_NAME, "
        + " information_schema._pg_expandarray(i.indkey) AS KEYS, a.attnum AS A_ATTNUM "
        + "FROM pg_catalog.pg_class ct "
        + " JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) "
        + " JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
        + " JOIN pg_catalog.pg_index i ON ( a.attrelid = i.indrelid) "
        + " JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) "
        + "WHERE true ";

    if (schema != null && !schema.isEmpty()) {
      sql += " AND n.nspname = " + escapeQuotes(schema);
    }

    if (table != null && !table.isEmpty()) {
      sql += " AND ct.relname = " + escapeQuotes(table);
    }

    sql += " AND i.indisprimary ";
    // Wrap the inner query so that KEY_SEQ (from _pg_expandarray) can be
    // matched against the attribute number of each expanded index column.
    sql = "SELECT "
        + " result.TABLE_CAT, "
        + " result.TABLE_SCHEM, "
        + " result.TABLE_NAME, "
        + " result.COLUMN_NAME, "
        + " result.KEY_SEQ, "
        + " result.PK_NAME "
        + "FROM "
        + " (" + sql + " ) result"
        + " where "
        + " result.A_ATTNUM = (result.KEYS).x ";
    sql += " ORDER BY result.table_name, result.pk_name, result.key_seq";

    return createMetaDataStatement().executeQuery(sql);
  }

  /*
   This is for internal use only to see if a resultset is updateable.
   Unique keys can also be used so we add them to the query.
   */
  protected ResultSet getPrimaryUniqueKeys(String catalog, String schema, String table)
      throws SQLException {
    // Same shape as getPrimaryKeys, but unique indexes are accepted too
    // (a valid, non-partial, non-expression unique index also uniquely
    // identifies a row to update), and IS_NOT_NULL is exposed.
    String sql;
    sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "
        + " ct.relname AS TABLE_NAME, a.attname AS COLUMN_NAME, "
        + " (information_schema._pg_expandarray(i.indkey)).n AS KEY_SEQ, ci.relname AS PK_NAME, "
        + " information_schema._pg_expandarray(i.indkey) AS KEYS, a.attnum AS A_ATTNUM, "
        + " a.attnotnull AS IS_NOT_NULL "
        + "FROM pg_catalog.pg_class ct "
        + " JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) "
        + " JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
        + " JOIN pg_catalog.pg_index i ON ( a.attrelid = i.indrelid) "
        + " JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) "
        // primary as well as unique keys can be used to uniquely identify a row to update
        + "WHERE (i.indisprimary OR ( "
        + " i.indisunique "
        + " AND i.indisvalid "
        // partial indexes are not allowed - indpred will not be null if this is a partial index
        + " AND i.indpred IS NULL "
        // indexes with expressions are not allowed
        + " AND i.indexprs IS NULL "
        + " )) ";

    if (schema != null && !schema.isEmpty()) {
      sql += " AND n.nspname = " + escapeQuotes(schema);
    }

    if (table != null && !table.isEmpty()) {
      sql += " AND ct.relname = " + escapeQuotes(table);
    }

    sql = "SELECT "
        + " result.TABLE_CAT, "
        + " result.TABLE_SCHEM, "
        + " result.TABLE_NAME, "
        + " result.COLUMN_NAME, "
        + " result.KEY_SEQ, "
        + " result.PK_NAME, "
        + " result.IS_NOT_NULL "
        + "FROM "
        + " (" + sql + " ) result"
        + " where "
        + " result.A_ATTNUM = (result.KEYS).x ";
    sql += " ORDER BY result.table_name, result.pk_name, result.key_seq";

    return createMetaDataStatement().executeQuery(sql);
  }

  /**
   * @param primaryCatalog primary catalog
   * @param primarySchema primary schema
   * @param primaryTable if provided will get the keys exported by this table
   * @param foreignCatalog foreign catalog
   * @param
foreignSchema foreign schema + * @param foreignTable if provided will get the keys imported by this table + * @return ResultSet + * @throws SQLException if something wrong happens + */ + protected ResultSet getImportedExportedKeys( + String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) + throws SQLException { + + /* + * The addition of the pg_constraint in 7.3 table should have really helped us out here, but it + * comes up just a bit short. - The conkey, confkey columns aren't really useful without + * contrib/array unless we want to issues separate queries. - Unique indexes that can support + * foreign keys are not necessarily added to pg_constraint. Also multiple unique indexes + * covering the same keys can be created which make it difficult to determine the PK_NAME field. + */ + + String sql = + "SELECT NULL::text AS PKTABLE_CAT, pkn.nspname AS PKTABLE_SCHEM, pkc.relname AS PKTABLE_NAME, pka.attname AS PKCOLUMN_NAME, " + + "NULL::text AS FKTABLE_CAT, fkn.nspname AS FKTABLE_SCHEM, fkc.relname AS FKTABLE_NAME, fka.attname AS FKCOLUMN_NAME, " + + "pos.n AS KEY_SEQ, " + + "CASE con.confupdtype " + + " WHEN 'c' THEN " + DatabaseMetaData.importedKeyCascade + + " WHEN 'n' THEN " + DatabaseMetaData.importedKeySetNull + + " WHEN 'd' THEN " + DatabaseMetaData.importedKeySetDefault + + " WHEN 'r' THEN " + DatabaseMetaData.importedKeyRestrict + + " WHEN 'p' THEN " + DatabaseMetaData.importedKeyRestrict + + " WHEN 'a' THEN " + DatabaseMetaData.importedKeyNoAction + + " ELSE NULL END AS UPDATE_RULE, " + + "CASE con.confdeltype " + + " WHEN 'c' THEN " + DatabaseMetaData.importedKeyCascade + + " WHEN 'n' THEN " + DatabaseMetaData.importedKeySetNull + + " WHEN 'd' THEN " + DatabaseMetaData.importedKeySetDefault + + " WHEN 'r' THEN " + DatabaseMetaData.importedKeyRestrict + + " WHEN 'p' THEN " + DatabaseMetaData.importedKeyRestrict + + " WHEN 'a' THEN " + DatabaseMetaData.importedKeyNoAction + + " 
ELSE NULL END AS DELETE_RULE, " + + "con.conname AS FK_NAME, pkic.relname AS PK_NAME, " + + "CASE " + + " WHEN con.condeferrable AND con.condeferred THEN " + + DatabaseMetaData.importedKeyInitiallyDeferred + + " WHEN con.condeferrable THEN " + DatabaseMetaData.importedKeyInitiallyImmediate + + " ELSE " + DatabaseMetaData.importedKeyNotDeferrable + + " END AS DEFERRABILITY " + + " FROM " + + " pg_catalog.pg_namespace pkn, pg_catalog.pg_class pkc, pg_catalog.pg_attribute pka, " + + " pg_catalog.pg_namespace fkn, pg_catalog.pg_class fkc, pg_catalog.pg_attribute fka, " + + " pg_catalog.pg_constraint con, " + + " pg_catalog.generate_series(1, " + getMaxIndexKeys() + ") pos(n), " + + " pg_catalog.pg_class pkic"; + // Starting in Postgres 9.0, pg_constraint was augmented with the conindid column, which + // contains the oid of the index supporting the constraint. This makes it unnecessary to do a + // further join on pg_depend. + if (!connection.haveMinimumServerVersion(ServerVersion.v9_0)) { + sql += ", pg_catalog.pg_depend dep "; + } + sql += + " WHERE pkn.oid = pkc.relnamespace AND pkc.oid = pka.attrelid AND pka.attnum = con.confkey[pos.n] AND con.confrelid = pkc.oid " + + " AND fkn.oid = fkc.relnamespace AND fkc.oid = fka.attrelid AND fka.attnum = con.conkey[pos.n] AND con.conrelid = fkc.oid " + + " AND con.contype = 'f' "; + /* + In version 11 we added Partitioned indexes indicated by relkind = 'I' + I could have done this using lower(relkind) = 'i' but chose to be explicit + for clarity + */ + + if (!connection.haveMinimumServerVersion(ServerVersion.v11)) { + sql += "AND pkic.relkind = 'i' "; + } else { + sql += "AND (pkic.relkind = 'i' OR pkic.relkind = 'I')"; + } + + if (!connection.haveMinimumServerVersion(ServerVersion.v9_0)) { + sql += " AND con.oid = dep.objid AND pkic.oid = dep.refobjid AND dep.classid = 'pg_constraint'::regclass::oid AND dep.refclassid = 'pg_class'::regclass::oid "; + } else { + sql += " AND pkic.oid = con.conindid "; + } + + if 
(primarySchema != null && !primarySchema.isEmpty()) { + sql += " AND pkn.nspname = " + escapeQuotes(primarySchema); + } + if (foreignSchema != null && !foreignSchema.isEmpty()) { + sql += " AND fkn.nspname = " + escapeQuotes(foreignSchema); + } + if (primaryTable != null && !primaryTable.isEmpty()) { + sql += " AND pkc.relname = " + escapeQuotes(primaryTable); + } + if (foreignTable != null && !foreignTable.isEmpty()) { + sql += " AND fkc.relname = " + escapeQuotes(foreignTable); + } + + if (primaryTable != null) { + sql += " ORDER BY fkn.nspname,fkc.relname,con.conname,pos.n"; + } else { + sql += " ORDER BY pkn.nspname,pkc.relname, con.conname,pos.n"; + } + + return createMetaDataStatement().executeQuery(sql); + } + + @Override + public ResultSet getImportedKeys(String catalog, String schema, String table) + throws SQLException { + return getImportedExportedKeys(null, null, null, catalog, schema, table); + } + + @Override + public ResultSet getExportedKeys(String catalog, String schema, String table) + throws SQLException { + return getImportedExportedKeys(catalog, schema, table, null, null, null); + } + + @Override + public ResultSet getCrossReference( + String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) + throws SQLException { + return getImportedExportedKeys(primaryCatalog, primarySchema, primaryTable, foreignCatalog, + foreignSchema, foreignTable); + } + + @Override + public ResultSet getTypeInfo() throws SQLException { + + Field[] f = new Field[18]; + List v = new ArrayList<>(); // The new ResultSet tuple stuff + + f[0] = new Field("TYPE_NAME", Oid.VARCHAR); + f[1] = new Field("DATA_TYPE", Oid.INT2); + f[2] = new Field("PRECISION", Oid.INT4); + f[3] = new Field("LITERAL_PREFIX", Oid.VARCHAR); + f[4] = new Field("LITERAL_SUFFIX", Oid.VARCHAR); + f[5] = new Field("CREATE_PARAMS", Oid.VARCHAR); + f[6] = new Field("NULLABLE", Oid.INT2); + f[7] = new Field("CASE_SENSITIVE", 
Oid.BOOL); + f[8] = new Field("SEARCHABLE", Oid.INT2); + f[9] = new Field("UNSIGNED_ATTRIBUTE", Oid.BOOL); + f[10] = new Field("FIXED_PREC_SCALE", Oid.BOOL); + f[11] = new Field("AUTO_INCREMENT", Oid.BOOL); + f[12] = new Field("LOCAL_TYPE_NAME", Oid.VARCHAR); + f[13] = new Field("MINIMUM_SCALE", Oid.INT2); + f[14] = new Field("MAXIMUM_SCALE", Oid.INT2); + f[15] = new Field("SQL_DATA_TYPE", Oid.INT4); + f[16] = new Field("SQL_DATETIME_SUB", Oid.INT4); + f[17] = new Field("NUM_PREC_RADIX", Oid.INT4); + + String sql; + sql = "SELECT t.typname,t.oid FROM pg_catalog.pg_type t" + + " JOIN pg_catalog.pg_namespace n ON (t.typnamespace = n.oid) " + + " WHERE n.nspname != 'pg_toast'" + + " AND " + + " (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid))"; + + if (connection.getHideUnprivilegedObjects() && connection.haveMinimumServerVersion(ServerVersion.v9_2)) { + sql += " AND has_type_privilege(t.oid, 'USAGE')"; + } + + Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(sql); + // cache some results, this will keep memory usage down, and speed + // things up a little. 
+ byte[] bZero = connection.encodeString("0"); + byte[] b10 = connection.encodeString("10"); + byte[] bf = connection.encodeString("f"); + byte[] bt = connection.encodeString("t"); + byte[] bliteral = connection.encodeString("'"); + byte[] bNullable = + connection.encodeString(Integer.toString(DatabaseMetaData.typeNullable)); + byte[] bSearchable = + connection.encodeString(Integer.toString(DatabaseMetaData.typeSearchable)); + + TypeInfo ti = connection.getTypeInfo(); + if (ti instanceof TypeInfoCache) { + ((TypeInfoCache) ti).cacheSQLTypes(); + } + + while (rs.next()) { + byte[] [] tuple = new byte[19][]; + String typname = rs.getString(1); + int typeOid = (int) rs.getLong(2); + + tuple[0] = connection.encodeString(typname); + int sqlType = connection.getTypeInfo().getSQLType(typname); + tuple[1] = + connection.encodeString(Integer.toString(sqlType)); + + /* this is just for sorting below, the result set never sees this */ + tuple[18] = BigInteger.valueOf(sqlType).toByteArray(); + + tuple[2] = connection + .encodeString(Integer.toString(connection.getTypeInfo().getMaximumPrecision(typeOid))); + + // Using requiresQuoting(oid) would might trigger select statements that might fail with NPE + // if oid in question is being dropped. + // requiresQuotingSqlType is not bulletproof, however, it solves the most visible NPE. + if (connection.getTypeInfo().requiresQuotingSqlType(sqlType)) { + tuple[3] = bliteral; + tuple[4] = bliteral; + } + + tuple[6] = bNullable; // all types can be null + tuple[7] = connection.getTypeInfo().isCaseSensitive(typeOid) ? bt : bf; + tuple[8] = bSearchable; // any thing can be used in the WHERE clause + tuple[9] = connection.getTypeInfo().isSigned(typeOid) ? bf : bt; + tuple[10] = bf; // false for now - must handle money + tuple[11] = bf; // false - it isn't autoincrement + tuple[13] = bZero; // min scale is zero + // only numeric can supports a scale. + tuple[14] = typeOid == Oid.NUMERIC ? 
connection.encodeString("1000") : bZero; + + // 12 - LOCAL_TYPE_NAME is null + // 15 & 16 are unused so we return null + tuple[17] = b10; // everything is base 10 + v.add(new Tuple(tuple)); + + // add pseudo-type serial, bigserial, smallserial + if ("int4".equals(typname)) { + byte[] [] tuple1 = tuple.clone(); + + tuple1[0] = connection.encodeString("serial"); + tuple1[11] = bt; + v.add(new Tuple(tuple1)); + } else if ("int8".equals(typname)) { + byte[] [] tuple1 = tuple.clone(); + + tuple1[0] = connection.encodeString("bigserial"); + tuple1[11] = bt; + v.add(new Tuple(tuple1)); + } else if ("int2".equals(typname) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) { + byte[] [] tuple1 = tuple.clone(); + + tuple1[0] = connection.encodeString("smallserial"); + tuple1[11] = bt; + v.add(new Tuple(tuple1)); + } + + } + rs.close(); + stmt.close(); + + Collections.sort(v, new Comparator() { + @Override + public int compare(Tuple o1, Tuple o2) { + int i1 = ByteConverter.bytesToInt(o1.get(18)); + int i2 = ByteConverter.bytesToInt(o2.get(18)); + return i1 < i2 ? -1 : (i1 == i2 ? 0 : 1); + } + }); + return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); + } + + @Override + public ResultSet getIndexInfo( + String catalog, String schema, String tableName, + boolean unique, boolean approximate) throws SQLException { + /* + * This is a complicated function because we have three possible situations: <= 7.2 no schemas, + * single column functional index 7.3 schemas, single column functional index >= 7.4 schemas, + * multi-column expressional index >= 8.3 supports ASC/DESC column info >= 9.0 no longer renames + * index columns on a table column rename, so we must look at the table attribute names + * + * with the single column functional index we need an extra join to the table's pg_attribute + * data to get the column the function operates on. 
+ */ + String sql; + if (connection.haveMinimumServerVersion(ServerVersion.v8_3)) { + sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, " + + " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, " + + " NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, " + + " CASE i.indisclustered " + + " WHEN true THEN " + DatabaseMetaData.tableIndexClustered + + " ELSE CASE am.amname " + + " WHEN 'hash' THEN " + DatabaseMetaData.tableIndexHashed + + " ELSE " + DatabaseMetaData.tableIndexOther + + " END " + + " END AS TYPE, " + + " (information_schema._pg_expandarray(i.indkey)).n AS ORDINAL_POSITION, " + + " ci.reltuples AS CARDINALITY, " + + " ci.relpages AS PAGES, " + + " pg_catalog.pg_get_expr(i.indpred, i.indrelid) AS FILTER_CONDITION, " + + " ci.oid AS CI_OID, " + + " i.indoption AS I_INDOPTION, " + + (connection.haveMinimumServerVersion(ServerVersion.v9_6) ? " am.amname AS AM_NAME " : " am.amcanorder AS AM_CANORDER ") + + "FROM pg_catalog.pg_class ct " + + " JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) " + + " JOIN pg_catalog.pg_index i ON (ct.oid = i.indrelid) " + + " JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) " + + " JOIN pg_catalog.pg_am am ON (ci.relam = am.oid) " + + "WHERE true "; + + if (schema != null && !schema.isEmpty()) { + sql += " AND n.nspname = " + escapeQuotes(schema); + } + + sql += " AND ct.relname = " + escapeQuotes(tableName); + + if (unique) { + sql += " AND i.indisunique "; + } + + sql = "SELECT " + + " tmp.TABLE_CAT, " + + " tmp.TABLE_SCHEM, " + + " tmp.TABLE_NAME, " + + " tmp.NON_UNIQUE, " + + " tmp.INDEX_QUALIFIER, " + + " tmp.INDEX_NAME, " + + " tmp.TYPE, " + + " tmp.ORDINAL_POSITION, " + + " trim(both '\"' from pg_catalog.pg_get_indexdef(tmp.CI_OID, tmp.ORDINAL_POSITION, false)) AS COLUMN_NAME, " + + (connection.haveMinimumServerVersion(ServerVersion.v9_6) + ? 
" CASE tmp.AM_NAME " + + " WHEN 'btree' THEN CASE tmp.I_INDOPTION[tmp.ORDINAL_POSITION - 1] & 1::smallint " + + " WHEN 1 THEN 'D' " + + " ELSE 'A' " + + " END " + + " ELSE NULL " + + " END AS ASC_OR_DESC, " + : " CASE tmp.AM_CANORDER " + + " WHEN true THEN CASE tmp.I_INDOPTION[tmp.ORDINAL_POSITION - 1] & 1::smallint " + + " WHEN 1 THEN 'D' " + + " ELSE 'A' " + + " END " + + " ELSE NULL " + + " END AS ASC_OR_DESC, ") + + " tmp.CARDINALITY, " + + " tmp.PAGES, " + + " tmp.FILTER_CONDITION " + + "FROM (" + + sql + + ") AS tmp"; + } else { + String select; + String from; + String where; + + select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "; + from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class ct, pg_catalog.pg_class ci, " + + " pg_catalog.pg_attribute a, pg_catalog.pg_am am "; + where = " AND n.oid = ct.relnamespace "; + from += ", pg_catalog.pg_index i "; + + if (schema != null && !schema.isEmpty()) { + where += " AND n.nspname = " + escapeQuotes(schema); + } + + sql = select + + " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, " + + " CASE i.indisclustered " + + " WHEN true THEN " + DatabaseMetaData.tableIndexClustered + + " ELSE CASE am.amname " + + " WHEN 'hash' THEN " + DatabaseMetaData.tableIndexHashed + + " ELSE " + DatabaseMetaData.tableIndexOther + + " END " + + " END AS TYPE, " + + " a.attnum AS ORDINAL_POSITION, " + + " CASE WHEN i.indexprs IS NULL THEN a.attname " + + " ELSE pg_catalog.pg_get_indexdef(ci.oid,a.attnum,false) END AS COLUMN_NAME, " + + " NULL AS ASC_OR_DESC, " + + " ci.reltuples AS CARDINALITY, " + + " ci.relpages AS PAGES, " + + " pg_catalog.pg_get_expr(i.indpred, i.indrelid) AS FILTER_CONDITION " + + from + + " WHERE ct.oid=i.indrelid AND ci.oid=i.indexrelid AND a.attrelid=ci.oid AND ci.relam=am.oid " + + where; + + sql += " AND ct.relname = " + escapeQuotes(tableName); + + if (unique) { + sql += " AND i.indisunique "; + } + } + + sql += " ORDER BY 
NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION "; + + return ((PgResultSet) createMetaDataStatement().executeQuery(sql)).upperCaseFieldLabels(); + } + + // ** JDBC 2 Extensions ** + + @Override + public boolean supportsResultSetType(int type) throws SQLException { + // The only type we don't support + return type != ResultSet.TYPE_SCROLL_SENSITIVE; + } + + @Override + public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { + // These combinations are not supported! + if (type == ResultSet.TYPE_SCROLL_SENSITIVE) { + return false; + } + + // We do support Updateable ResultSets + if (concurrency == ResultSet.CONCUR_UPDATABLE) { + return true; + } + + // Everything else we do + return true; + } + + /* lots of unsupported stuff... */ + @Override + public boolean ownUpdatesAreVisible(int type) throws SQLException { + return true; + } + + @Override + public boolean ownDeletesAreVisible(int type) throws SQLException { + return true; + } + + @Override + public boolean ownInsertsAreVisible(int type) throws SQLException { + // indicates that + return true; + } + + @Override + public boolean othersUpdatesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersDeletesAreVisible(int i) throws SQLException { + return false; + } + + @Override + public boolean othersInsertsAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean updatesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean deletesAreDetected(int i) throws SQLException { + return false; + } + + @Override + public boolean insertsAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean supportsBatchUpdates() throws SQLException { + return true; + } + + @Override + public ResultSet getUDTs(String catalog, String schemaPattern, + String typeNamePattern, int [] types) throws SQLException { + String sql = "select " + + "null as 
type_cat, n.nspname as type_schem, t.typname as type_name, null as class_name, " + + "CASE WHEN t.typtype='c' then " + Types.STRUCT + " else " + + Types.DISTINCT + + " end as data_type, pg_catalog.obj_description(t.oid, 'pg_type') " + + "as remarks, CASE WHEN t.typtype = 'd' then (select CASE"; + TypeInfo typeInfo = connection.getTypeInfo(); + + StringBuilder sqlwhen = new StringBuilder(); + for (Iterator i = typeInfo.getPGTypeOidsWithSQLTypes(); i.hasNext(); ) { + Integer typOid = i.next(); + // NB: Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers. + // We must therefore map it to a positive long value before writing it into the query, + // or we'll be unable to correctly handle ~ half of the oid space. + long longTypOid = typeInfo.intOidToLong(typOid); + int sqlType = typeInfo.getSQLType(typOid); + + sqlwhen.append(" when base_type.oid = ").append(longTypOid).append(" then ").append(sqlType); + } + sql += sqlwhen.toString(); + + sql += " else " + Types.OTHER + " end from pg_type base_type where base_type.oid=t.typbasetype) " + + "else null end as base_type " + + "from pg_catalog.pg_type t, pg_catalog.pg_namespace n where t.typnamespace = n.oid and n.nspname != 'pg_catalog' and n.nspname != 'pg_toast'"; + + StringBuilder toAdd = new StringBuilder(); + if (types != null) { + toAdd.append(" and (false "); + for (int type : types) { + if (type == Types.STRUCT) { + toAdd.append(" or t.typtype = 'c'"); + } else if (type == Types.DISTINCT) { + toAdd.append(" or t.typtype = 'd'"); + } + } + toAdd.append(" ) "); + } else { + toAdd.append(" and t.typtype IN ('c','d') "); + } + // spec says that if typeNamePattern is a fully qualified name + // then the schema and catalog are ignored + + if (typeNamePattern != null) { + // search for qualifier + int firstQualifier = typeNamePattern.indexOf('.'); + int secondQualifier = typeNamePattern.lastIndexOf('.'); + + if (firstQualifier != -1) { + // if one of them is -1 they both will be + if 
        (firstQualifier != secondQualifier) {
          // we have a catalog.schema.typename, ignore catalog
          schemaPattern = typeNamePattern.substring(firstQualifier + 1, secondQualifier);
        } else {
          // we just have a schema.typename
          schemaPattern = typeNamePattern.substring(0, firstQualifier);
        }
        // strip out just the typeName
        typeNamePattern = typeNamePattern.substring(secondQualifier + 1);
      }
      toAdd.append(" and t.typname like ").append(escapeQuotes(typeNamePattern));
    }

    // schemaPattern may have been modified above
    if (schemaPattern != null) {
      toAdd.append(" and n.nspname like ").append(escapeQuotes(schemaPattern));
    }
    sql += toAdd.toString();

    if (connection.getHideUnprivilegedObjects()
        && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
      sql += " AND has_type_privilege(t.oid, 'USAGE')";
    }

    sql += " order by data_type, type_schem, type_name";
    return createMetaDataStatement().executeQuery(sql);
  }

  @Override
  public Connection getConnection() throws SQLException {
    return connection;
  }

  /** Creates the scroll-insensitive, read-only statement used by all metadata queries. */
  protected Statement createMetaDataStatement() throws SQLException {
    return connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
        ResultSet.CONCUR_READ_ONLY);
  }

  @Override
  public long getMaxLogicalLobSize() throws SQLException {
    // 0 means "unknown / no fixed limit" per the JDBC contract.
    return 0;
  }

  @Override
  public boolean supportsRefCursors() throws SQLException {
    return true;
  }

  @Override
  public RowIdLifetime getRowIdLifetime() throws SQLException {
    throw Driver.notImplemented(this.getClass(), "getRowIdLifetime()");
  }

  @Override
  public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
    return true;
  }

  @Override
  public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
    return false;
  }

  @Override
  public ResultSet getClientInfoProperties() throws SQLException {
    // Driver-generated result set; only ApplicationName is reported (9.0+).
    Field[] f = new Field[4];
    f[0] = new Field("NAME", Oid.VARCHAR);
    f[1] = new Field("MAX_LEN", Oid.INT4);
    f[2] = new
Field("DEFAULT_VALUE", Oid.VARCHAR); + f[3] = new Field("DESCRIPTION", Oid.VARCHAR); + + List v = new ArrayList<>(); + + if (connection.haveMinimumServerVersion(ServerVersion.v9_0)) { + byte[] [] tuple = new byte[4][]; + tuple[0] = connection.encodeString("ApplicationName"); + tuple[1] = connection.encodeString(Integer.toString(getMaxNameLength())); + tuple[2] = connection.encodeString(""); + tuple[3] = connection + .encodeString("The name of the application currently utilizing the connection."); + v.add(new Tuple(tuple)); + } + + return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isAssignableFrom(getClass()); + } + + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isAssignableFrom(getClass())) { + return iface.cast(this); + } + throw new SQLException("Cannot unwrap to " + iface.getName()); + } + + @Override + public ResultSet getFunctions(String catalog, String schemaPattern, + String functionNamePattern) + throws SQLException { + + // The pg_get_function_result only exists 8.4 or later + boolean pgFuncResultExists = connection.haveMinimumServerVersion(ServerVersion.v8_4); + + // Use query that support pg_get_function_result to get function result, else unknown is defaulted + String funcTypeSql = DatabaseMetaData.functionResultUnknown + " "; + if (pgFuncResultExists) { + funcTypeSql = " CASE " + + " WHEN (format_type(p.prorettype, null) = 'unknown') THEN " + DatabaseMetaData.functionResultUnknown + + " WHEN " + + " (substring(pg_get_function_result(p.oid) from 0 for 6) = 'TABLE') OR " + + " (substring(pg_get_function_result(p.oid) from 0 for 6) = 'SETOF') THEN " + DatabaseMetaData.functionReturnsTable + + " ELSE " + DatabaseMetaData.functionNoTable + + " END "; + } + + // Build query and result + String sql; + sql = "SELECT current_database() AS FUNCTION_CAT, n.nspname AS FUNCTION_SCHEM, p.proname AS 
FUNCTION_NAME, " + + " d.description AS REMARKS, " + + funcTypeSql + " AS FUNCTION_TYPE, " + + " p.proname || '_' || p.oid AS SPECIFIC_NAME " + + "FROM pg_catalog.pg_proc p " + + "INNER JOIN pg_catalog.pg_namespace n ON p.pronamespace=n.oid " + + "LEFT JOIN pg_catalog.pg_description d ON p.oid=d.objoid " + + "WHERE true "; + + if (connection.haveMinimumServerVersion(ServerVersion.v11)) { + sql += " AND p.prokind='f'"; + } + /* + if the user provides a schema then search inside the schema for it + */ + if (schemaPattern != null && !schemaPattern.isEmpty()) { + sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); + } + if (functionNamePattern != null && !functionNamePattern.isEmpty()) { + sql += " AND p.proname LIKE " + escapeQuotes(functionNamePattern); + } + if (connection.getHideUnprivilegedObjects()) { + sql += " AND has_function_privilege(p.oid,'EXECUTE')"; + } + sql += " ORDER BY FUNCTION_SCHEM, FUNCTION_NAME, p.oid::text "; + + return createMetaDataStatement().executeQuery(sql); + } + + @Override + public ResultSet getFunctionColumns(String catalog, String schemaPattern, + String functionNamePattern, String columnNamePattern) + throws SQLException { + int columns = 17; + + Field[] f = new Field[columns]; + List v = new ArrayList<>(); + + f[0] = new Field("FUNCTION_CAT", Oid.VARCHAR); + f[1] = new Field("FUNCTION_SCHEM", Oid.VARCHAR); + f[2] = new Field("FUNCTION_NAME", Oid.VARCHAR); + f[3] = new Field("COLUMN_NAME", Oid.VARCHAR); + f[4] = new Field("COLUMN_TYPE", Oid.INT2); + f[5] = new Field("DATA_TYPE", Oid.INT2); + f[6] = new Field("TYPE_NAME", Oid.VARCHAR); + f[7] = new Field("PRECISION", Oid.INT2); + f[8] = new Field("LENGTH", Oid.INT4); + f[9] = new Field("SCALE", Oid.INT2); + f[10] = new Field("RADIX", Oid.INT2); + f[11] = new Field("NULLABLE", Oid.INT2); + f[12] = new Field("REMARKS", Oid.VARCHAR); + f[13] = new Field("CHAR_OCTET_LENGTH", Oid.INT4); + f[14] = new Field("ORDINAL_POSITION", Oid.INT4); + f[15] = new Field("IS_NULLABLE", 
Oid.VARCHAR); + f[16] = new Field("SPECIFIC_NAME", Oid.VARCHAR); + + String sql; + sql = "SELECT n.nspname,p.proname,p.prorettype,p.proargtypes, t.typtype,t.typrelid, " + + " p.proargnames, p.proargmodes, p.proallargtypes, p.oid " + + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n, pg_catalog.pg_type t " + + " WHERE p.pronamespace=n.oid AND p.prorettype=t.oid "; + if (schemaPattern != null && !schemaPattern.isEmpty()) { + sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern); + } + if (functionNamePattern != null && !functionNamePattern.isEmpty()) { + sql += " AND p.proname LIKE " + escapeQuotes(functionNamePattern); + } + sql += " ORDER BY n.nspname, p.proname, p.oid::text "; + + byte[] isnullableUnknown = new byte[0]; + + Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery(sql); + while (rs.next()) { + byte[] schema = rs.getBytes("nspname"); + byte[] functionName = rs.getBytes("proname"); + byte[] specificName = + connection.encodeString(rs.getString("proname") + "_" + rs.getString("oid")); + int returnType = (int) rs.getLong("prorettype"); + String returnTypeType = rs.getString("typtype"); + int returnTypeRelid = (int) rs.getLong("typrelid"); + + String strArgTypes = rs.getString("proargtypes"); + StringTokenizer st = new StringTokenizer(strArgTypes); + List argTypes = new ArrayList<>(); + while (st.hasMoreTokens()) { + argTypes.add(Long.valueOf(st.nextToken())); + } + + String[] argNames = null; + Array argNamesArray = rs.getArray("proargnames"); + if (argNamesArray != null) { + argNames = (String[]) argNamesArray.getArray(); + } + + String[] argModes = null; + Array argModesArray = rs.getArray("proargmodes"); + if (argModesArray != null) { + argModes = (String[]) argModesArray.getArray(); + } + + int numArgs = argTypes.size(); + + Long[] allArgTypes = null; + Array allArgTypesArray = rs.getArray("proallargtypes"); + if (allArgTypesArray != null) { + allArgTypes = (Long[]) allArgTypesArray.getArray(); + numArgs = 
allArgTypes.length; + } + + // decide if we are returning a single column result. + if ("b".equals(returnTypeType) || "d".equals(returnTypeType) || "e".equals(returnTypeType) + || ("p".equals(returnTypeType) && argModesArray == null)) { + byte[] [] tuple = new byte[columns][]; + tuple[0] = null; + tuple[1] = schema; + tuple[2] = functionName; + tuple[3] = connection.encodeString("returnValue"); + tuple[4] = connection + .encodeString(Integer.toString(DatabaseMetaData.functionReturn)); + tuple[5] = connection + .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(returnType))); + tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(returnType)); + tuple[7] = null; + tuple[8] = null; + tuple[9] = null; + tuple[10] = null; + tuple[11] = connection + .encodeString(Integer.toString(DatabaseMetaData.functionNullableUnknown)); + tuple[12] = null; + tuple[14] = connection.encodeString(Integer.toString(0)); + tuple[15] = isnullableUnknown; + tuple[16] = specificName; + + v.add(new Tuple(tuple)); + } + + // Add a row for each argument. 
+ for (int i = 0; i < numArgs; i++) { + byte[] [] tuple = new byte[columns][]; + tuple[0] = null; + tuple[1] = schema; + tuple[2] = functionName; + + if (argNames != null) { + tuple[3] = connection.encodeString(argNames[i]); + } else { + tuple[3] = connection.encodeString("$" + (i + 1)); + } + + int columnMode = DatabaseMetaData.functionColumnIn; + if (argModes != null && argModes[i] != null) { + if ("o".equals(argModes[i])) { + columnMode = DatabaseMetaData.functionColumnOut; + } else if ("b".equals(argModes[i])) { + columnMode = DatabaseMetaData.functionColumnInOut; + } else if ("t".equals(argModes[i])) { + columnMode = DatabaseMetaData.functionReturn; + } + } + + tuple[4] = connection.encodeString(Integer.toString(columnMode)); + + int argOid; + if (allArgTypes != null) { + argOid = allArgTypes[i].intValue(); + } else { + argOid = argTypes.get(i).intValue(); + } + + tuple[5] = + connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(argOid))); + tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(argOid)); + tuple[7] = null; + tuple[8] = null; + tuple[9] = null; + tuple[10] = null; + tuple[11] = + connection.encodeString(Integer.toString(DatabaseMetaData.functionNullableUnknown)); + tuple[12] = null; + tuple[14] = connection.encodeString(Integer.toString(i + 1)); + tuple[15] = isnullableUnknown; + tuple[16] = specificName; + + v.add(new Tuple(tuple)); + } + + // if we are returning a multi-column result. 
+ if ("c".equals(returnTypeType) || ("p".equals(returnTypeType) && argModesArray != null)) { + String columnsql = "SELECT a.attname,a.atttypid FROM pg_catalog.pg_attribute a " + + " WHERE a.attrelid = " + returnTypeRelid + + " AND NOT a.attisdropped AND a.attnum > 0 ORDER BY a.attnum "; + Statement columnstmt = connection.createStatement(); + ResultSet columnrs = columnstmt.executeQuery(columnsql); + while (columnrs.next()) { + int columnTypeOid = (int) columnrs.getLong("atttypid"); + byte[] [] tuple = new byte[columns][]; + tuple[0] = null; + tuple[1] = schema; + tuple[2] = functionName; + tuple[3] = columnrs.getBytes("attname"); + tuple[4] = connection + .encodeString(Integer.toString(DatabaseMetaData.functionColumnResult)); + tuple[5] = connection + .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(columnTypeOid))); + tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(columnTypeOid)); + tuple[7] = null; + tuple[8] = null; + tuple[9] = null; + tuple[10] = null; + tuple[11] = connection + .encodeString(Integer.toString(DatabaseMetaData.functionNullableUnknown)); + tuple[12] = null; + tuple[14] = connection.encodeString(Integer.toString(0)); + tuple[15] = isnullableUnknown; + tuple[16] = specificName; + + v.add(new Tuple(tuple)); + } + columnrs.close(); + columnstmt.close(); + } + } + rs.close(); + stmt.close(); + + return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v); + } + + @Override + public ResultSet getPseudoColumns(String catalog, String schemaPattern, + String tableNamePattern, String columnNamePattern) + throws SQLException { + throw Driver.notImplemented(this.getClass(), + "getPseudoColumns(String, String, String, String)"); + } + + @Override + public boolean generatedKeyAlwaysReturned() throws SQLException { + return true; + } + + @Override + public boolean supportsSavepoints() throws SQLException { + return true; + } + + @Override + public boolean supportsNamedParameters() throws SQLException 
{ + return false; + } + + @Override + public boolean supportsMultipleOpenResults() throws SQLException { + return false; + } + + @Override + public boolean supportsGetGeneratedKeys() throws SQLException { + // We don't support returning generated keys by column index, + // but that should be a rarer case than the ones we do support. + // + return true; + } + + @Override + public ResultSet getSuperTypes(String catalog, String schemaPattern, + String typeNamePattern) + throws SQLException { + throw Driver.notImplemented(this.getClass(), + "getSuperTypes(String,String,String)"); + } + + @Override + public ResultSet getSuperTables(String catalog, String schemaPattern, + String tableNamePattern) + throws SQLException { + throw Driver.notImplemented(this.getClass(), + "getSuperTables(String,String,String,String)"); + } + + @Override + public ResultSet getAttributes(String catalog, String schemaPattern, + String typeNamePattern, String attributeNamePattern) throws SQLException { + throw Driver.notImplemented(this.getClass(), + "getAttributes(String,String,String,String)"); + } + + @Override + public boolean supportsResultSetHoldability(int holdability) throws SQLException { + return true; + } + + @Override + public int getResultSetHoldability() throws SQLException { + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public int getDatabaseMajorVersion() throws SQLException { + return connection.getServerMajorVersion(); + } + + @Override + public int getDatabaseMinorVersion() throws SQLException { + return connection.getServerMinorVersion(); + } + + @Override + public int getJDBCMajorVersion() { + return DriverInfo.JDBC_MAJOR_VERSION; + } + + @Override + public int getJDBCMinorVersion() { + return DriverInfo.JDBC_MINOR_VERSION; + } + + @Override + public int getSQLStateType() throws SQLException { + return sqlStateSQL; + } + + @Override + public boolean locatorsUpdateCopy() throws SQLException { + /* + * Currently LOB's aren't updateable at all, so it doesn't 
matter what we return. We don't throw + * the notImplemented Exception because the 1.5 JDK's CachedRowSet calls this method regardless + * of whether large objects are used. + */ + return true; + } + + @Override + public boolean supportsStatementPooling() throws SQLException { + return false; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgParameterMetaData.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgParameterMetaData.java new file mode 100644 index 0000000..1b15b77 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgParameterMetaData.java @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.BaseConnection; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.ParameterMetaData; +import java.sql.SQLException; + +public class PgParameterMetaData implements ParameterMetaData { + + private final BaseConnection connection; + private final int[] oids; + + public PgParameterMetaData(BaseConnection connection, int[] oids) { + this.connection = connection; + this.oids = oids; + } + + @Override + public String getParameterClassName(int param) throws SQLException { + checkParamIndex(param); + return connection.getTypeInfo().getJavaClass(oids[param - 1]); + } + + @Override + public int getParameterCount() { + return oids.length; + } + + /** + * {@inheritDoc} For now report all parameters as inputs. CallableStatements may have one output, + * but ignore that for now. 
+ */ + @Override + public int getParameterMode(int param) throws SQLException { + checkParamIndex(param); + return ParameterMetaData.parameterModeIn; + } + + @Override + public int getParameterType(int param) throws SQLException { + checkParamIndex(param); + return connection.getTypeInfo().getSQLType(oids[param - 1]); + } + + @Override + public String getParameterTypeName(int param) throws SQLException { + checkParamIndex(param); + return connection.getTypeInfo().getPGType(oids[param - 1]); + } + + // we don't know this + @Override + public int getPrecision(int param) throws SQLException { + checkParamIndex(param); + return 0; + } + + // we don't know this + @Override + public int getScale(int param) throws SQLException { + checkParamIndex(param); + return 0; + } + + // we can't tell anything about nullability + @Override + public int isNullable(int param) throws SQLException { + checkParamIndex(param); + return ParameterMetaData.parameterNullableUnknown; + } + + /** + * {@inheritDoc} PostgreSQL doesn't have unsigned numbers + */ + @Override + public boolean isSigned(int param) throws SQLException { + checkParamIndex(param); + return connection.getTypeInfo().isSigned(oids[param - 1]); + } + + private void checkParamIndex(int param) throws PSQLException { + if (param < 1 || param > oids.length) { + throw new PSQLException( + GT.tr("The parameter index is out of range: {0}, number of parameters: {1}.", + param, oids.length), + PSQLState.INVALID_PARAMETER_VALUE); + } + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isAssignableFrom(getClass()); + } + + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isAssignableFrom(getClass())) { + return iface.cast(this); + } + throw new SQLException("Cannot unwrap to " + iface.getName()); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgPreparedStatement.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgPreparedStatement.java new file mode 
100644 index 0000000..6ed7b44 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgPreparedStatement.java @@ -0,0 +1,1800 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.Driver; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.CachedQuery; +import org.postgresql.core.Oid; +import org.postgresql.core.ParameterList; +import org.postgresql.core.Query; +import org.postgresql.core.QueryExecutor; +import org.postgresql.core.ServerVersion; +import org.postgresql.core.TypeInfo; +import org.postgresql.core.v3.BatchedQuery; +import org.postgresql.largeobject.LargeObject; +import org.postgresql.largeobject.LargeObjectManager; +import org.postgresql.util.ByteConverter; +import org.postgresql.util.ByteStreamWriter; +import org.postgresql.util.GT; +import org.postgresql.util.HStoreConverter; +import org.postgresql.util.PGBinaryObject; +import org.postgresql.util.PGTime; +import org.postgresql.util.PGTimestamp; +import org.postgresql.util.PGobject; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.util.ReaderInputStream; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Reader; +import java.io.UnsupportedEncodingException; +import java.io.Writer; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.math.RoundingMode; +import java.net.URL; +import java.nio.charset.Charset; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import 
java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLType; +import java.sql.SQLXML; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Map; +import java.util.TimeZone; +import java.util.UUID; + +@SuppressWarnings("try") +class PgPreparedStatement extends PgStatement implements PreparedStatement { + + protected final CachedQuery preparedQuery; // Query fragments for prepared statement. + protected final ParameterList preparedParameters; // Parameter values for prepared statement. + + private TimeZone defaultTimeZone; + + PgPreparedStatement(PgConnection connection, String sql, int rsType, int rsConcurrency, + int rsHoldability) throws SQLException { + this(connection, connection.borrowQuery(sql), rsType, rsConcurrency, rsHoldability); + } + + PgPreparedStatement(PgConnection connection, CachedQuery query, int rsType, + int rsConcurrency, int rsHoldability) throws SQLException { + super(connection, rsType, rsConcurrency, rsHoldability); + + this.preparedQuery = query; + this.preparedParameters = this.preparedQuery.query.createParameterList(); + int parameterCount = preparedParameters.getParameterCount(); + int maxSupportedParameters = maximumNumberOfParameters(); + if (parameterCount > maxSupportedParameters) { + throw new PSQLException( + GT.tr("PreparedStatement can have at most {0} parameters. Please consider using arrays, or splitting the query in several ones, or using COPY. 
Given query has {1} parameters", + maxSupportedParameters, + parameterCount), + PSQLState.INVALID_PARAMETER_VALUE); + } + + // TODO: this.wantsGeneratedKeysAlways = true; + + setPoolable(true); // As per JDBC spec: prepared and callable statements are poolable by + } + + final int maximumNumberOfParameters() { + return connection.getPreferQueryMode() == PreferQueryMode.SIMPLE ? Integer.MAX_VALUE : 65535; + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + throw new PSQLException( + GT.tr("Can''t use query methods that take a query string on a PreparedStatement."), + PSQLState.WRONG_OBJECT_TYPE); + } + + /* + * A Prepared SQL query is executed and its ResultSet is returned + * + * @return a ResultSet that contains the data produced by the * query - never null + * + * @exception SQLException if a database access error occurs + */ + @Override + public ResultSet executeQuery() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (!executeWithFlags(0)) { + throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA); + } + + return getSingleResultSet(); + } + } + + @Override + public int executeUpdate(String sql) throws SQLException { + throw new PSQLException( + GT.tr("Can''t use query methods that take a query string on a PreparedStatement."), + PSQLState.WRONG_OBJECT_TYPE); + } + + @Override + public int executeUpdate() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + executeWithFlags(QueryExecutor.QUERY_NO_RESULTS); + checkNoResultUpdate(); + return getUpdateCount(); + } + } + + @Override + public long executeLargeUpdate() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + executeWithFlags(QueryExecutor.QUERY_NO_RESULTS); + checkNoResultUpdate(); + return getLargeUpdateCount(); + } + } + + @Override + public boolean execute(String sql) throws SQLException { + throw new PSQLException( + GT.tr("Can''t use query methods that take a query string 
on a PreparedStatement."), + PSQLState.WRONG_OBJECT_TYPE); + } + + @Override + public boolean execute() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + return executeWithFlags(0); + } + } + + @Override + public boolean executeWithFlags(int flags) throws SQLException { + try { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + + if (connection.getPreferQueryMode() == PreferQueryMode.SIMPLE) { + flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE; + } + + execute(preparedQuery, preparedParameters, flags); + + checkClosed(); + return result != null && result.getResultSet() != null; + } + } finally { + defaultTimeZone = null; + } + } + + @Override + protected boolean isOneShotQuery(CachedQuery cachedQuery) { + if (cachedQuery == null) { + cachedQuery = preparedQuery; + } + return super.isOneShotQuery(cachedQuery); + } + + @Override + public void closeImpl() throws SQLException { + if (preparedQuery != null) { + connection.releaseQuery(preparedQuery); + } + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + checkClosed(); + + if (parameterIndex < 1 || parameterIndex > preparedParameters.getParameterCount()) { + throw new PSQLException( + GT.tr("The column index is out of range: {0}, number of columns: {1}.", + parameterIndex, preparedParameters.getParameterCount()), + PSQLState.INVALID_PARAMETER_VALUE); + } + + int oid; + switch (sqlType) { + case Types.SQLXML: + oid = Oid.XML; + break; + case Types.INTEGER: + oid = Oid.INT4; + break; + case Types.TINYINT: + case Types.SMALLINT: + oid = Oid.INT2; + break; + case Types.BIGINT: + oid = Oid.INT8; + break; + case Types.REAL: + oid = Oid.FLOAT4; + break; + case Types.DOUBLE: + case Types.FLOAT: + oid = Oid.FLOAT8; + break; + case Types.DECIMAL: + case Types.NUMERIC: + oid = Oid.NUMERIC; + break; + case Types.CHAR: + oid = Oid.BPCHAR; + break; + case Types.VARCHAR: + case Types.LONGVARCHAR: + oid = connection.getStringVarcharFlag() ? 
Oid.VARCHAR : Oid.UNSPECIFIED; + break; + case Types.DATE: + oid = Oid.DATE; + break; + case Types.TIME: + case Types.TIME_WITH_TIMEZONE: + case Types.TIMESTAMP_WITH_TIMEZONE: + case Types.TIMESTAMP: + oid = Oid.UNSPECIFIED; + break; + case Types.BOOLEAN: + case Types.BIT: + oid = Oid.BOOL; + break; + case Types.BINARY: + case Types.VARBINARY: + case Types.LONGVARBINARY: + oid = Oid.BYTEA; + break; + case Types.BLOB: + case Types.CLOB: + oid = Oid.OID; + break; + case Types.REF_CURSOR: + oid = Oid.REF_CURSOR; + break; + case Types.ARRAY: + case Types.DISTINCT: + case Types.STRUCT: + case Types.NULL: + case Types.OTHER: + oid = Oid.UNSPECIFIED; + break; + default: + // Bad Types value. + throw new PSQLException(GT.tr("Unknown Types value."), PSQLState.INVALID_PARAMETER_TYPE); + } + preparedParameters.setNull(parameterIndex, oid); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + checkClosed(); + // The key words TRUE and FALSE are the preferred (SQL-compliant) usage. + bindLiteral(parameterIndex, x ? 
"TRUE" : "FALSE", Oid.BOOL); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + setShort(parameterIndex, x); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + checkClosed(); + if (connection.binaryTransferSend(Oid.INT2)) { + byte[] val = new byte[2]; + ByteConverter.int2(val, 0, x); + bindBytes(parameterIndex, val, Oid.INT2); + return; + } + bindLiteral(parameterIndex, Integer.toString(x), Oid.INT2); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + checkClosed(); + if (connection.binaryTransferSend(Oid.INT4)) { + byte[] val = new byte[4]; + ByteConverter.int4(val, 0, x); + bindBytes(parameterIndex, val, Oid.INT4); + return; + } + bindLiteral(parameterIndex, Integer.toString(x), Oid.INT4); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + checkClosed(); + if (connection.binaryTransferSend(Oid.INT8)) { + byte[] val = new byte[8]; + ByteConverter.int8(val, 0, x); + bindBytes(parameterIndex, val, Oid.INT8); + return; + } + bindLiteral(parameterIndex, Long.toString(x), Oid.INT8); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + checkClosed(); + if (connection.binaryTransferSend(Oid.FLOAT4)) { + byte[] val = new byte[4]; + ByteConverter.float4(val, 0, x); + bindBytes(parameterIndex, val, Oid.FLOAT4); + return; + } + bindLiteral(parameterIndex, Float.toString(x), Oid.FLOAT8); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + checkClosed(); + if (connection.binaryTransferSend(Oid.FLOAT8)) { + byte[] val = new byte[8]; + ByteConverter.float8(val, 0, x); + bindBytes(parameterIndex, val, Oid.FLOAT8); + return; + } + bindLiteral(parameterIndex, Double.toString(x), Oid.FLOAT8); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) + throws SQLException { + if (x != null && 
connection.binaryTransferSend(Oid.NUMERIC)) { + final byte[] bytes = ByteConverter.numeric(x); + bindBytes(parameterIndex, bytes, Oid.NUMERIC); + return; + } + setNumber(parameterIndex, x); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + checkClosed(); + setString(parameterIndex, x, getStringType()); + } + + private int getStringType() { + return connection.getStringVarcharFlag() ? Oid.VARCHAR : Oid.UNSPECIFIED; + } + + protected void setString(int parameterIndex, + String x, int oid) throws SQLException { + // if the passed string is null, then set this column to null + checkClosed(); + if (x == null) { + preparedParameters.setNull(parameterIndex, oid); + } else { + bindString(parameterIndex, x, oid); + } + } + + @Override + public void setBytes(int parameterIndex, byte [] x) throws SQLException { + checkClosed(); + + if (null == x) { + setNull(parameterIndex, Types.VARBINARY); + return; + } + + // Version 7.2 supports the bytea datatype for byte arrays + byte[] copy = new byte[x.length]; + System.arraycopy(x, 0, copy, 0, x.length); + preparedParameters.setBytea(parameterIndex, copy, 0, x.length); + } + + private void setByteStreamWriter(int parameterIndex, + ByteStreamWriter x) throws SQLException { + preparedParameters.setBytea(parameterIndex, x); + } + + @Override + public void setDate(int parameterIndex, + Date x) throws SQLException { + setDate(parameterIndex, x, null); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + setTime(parameterIndex, x, null); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + setTimestamp(parameterIndex, x, null); + } + + private void setCharacterStreamPost71(int parameterIndex, + InputStream x, int length, + String encoding) throws SQLException { + + if (x == null) { + setNull(parameterIndex, Types.VARCHAR); + return; + } + if (length < 0) { + throw new PSQLException(GT.tr("Invalid stream 
length {0}.", length), + PSQLState.INVALID_PARAMETER_VALUE); + } + + // Version 7.2 supports AsciiStream for all PG text types (char, varchar, text) + // As the spec/javadoc for this method indicate this is to be used for + // large String values (i.e. LONGVARCHAR) PG doesn't have a separate + // long varchar datatype, but with toast all text datatypes are capable of + // handling very large values. Thus the implementation ends up calling + // setString() since there is no current way to stream the value to the server + try { + InputStreamReader inStream = new InputStreamReader(x, encoding); + char[] chars = new char[length]; + int charsRead = 0; + while (true) { + int n = inStream.read(chars, charsRead, length - charsRead); + if (n == -1) { + break; + } + + charsRead += n; + + if (charsRead == length) { + break; + } + } + + setString(parameterIndex, new String(chars, 0, charsRead), Oid.VARCHAR); + } catch (UnsupportedEncodingException uee) { + throw new PSQLException(GT.tr("The JVM claims not to support the {0} encoding.", encoding), + PSQLState.UNEXPECTED_ERROR, uee); + } catch (IOException ioe) { + throw new PSQLException(GT.tr("Provided InputStream failed."), PSQLState.UNEXPECTED_ERROR, + ioe); + } + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, + int length) throws SQLException { + checkClosed(); + setCharacterStreamPost71(parameterIndex, x, length, "ASCII"); + } + + @Override + @SuppressWarnings("deprecation") + public void setUnicodeStream(int parameterIndex, InputStream x, + int length) throws SQLException { + checkClosed(); + + setCharacterStreamPost71(parameterIndex, x, length, "UTF-8"); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, + int length) throws SQLException { + checkClosed(); + + if (x == null) { + setNull(parameterIndex, Types.VARBINARY); + return; + } + + if (length < 0) { + throw new PSQLException(GT.tr("Invalid stream length {0}.", length), + 
PSQLState.INVALID_PARAMETER_VALUE); + } + + // Version 7.2 supports BinaryStream for the PG bytea type + // As the spec/javadoc for this method indicate this is to be used for + // large binary values (i.e. LONGVARBINARY) PG doesn't have a separate + // long binary datatype, but with toast the bytea datatype is capable of + // handling very large values. + preparedParameters.setBytea(parameterIndex, x, length); + } + + @Override + public void clearParameters() throws SQLException { + preparedParameters.clear(); + } + + // Helper method for setting parameters to PGobject subclasses. + private void setPGobject(int parameterIndex, PGobject x) throws SQLException { + String typename = x.getType(); + int oid = connection.getTypeInfo().getPGType(typename); + if (oid == Oid.UNSPECIFIED) { + throw new PSQLException(GT.tr("Unknown type {0}.", typename), + PSQLState.INVALID_PARAMETER_TYPE); + } + + if ((x instanceof PGBinaryObject) && connection.binaryTransferSend(oid)) { + PGBinaryObject binObj = (PGBinaryObject) x; + int length = binObj.lengthInBytes(); + if (length == 0) { + preparedParameters.setNull(parameterIndex, oid); + return; + } + byte[] data = new byte[length]; + binObj.toBytes(data, 0); + bindBytes(parameterIndex, data, oid); + } else { + setString(parameterIndex, x.getValue(), oid); + } + } + + private void setMap(int parameterIndex, Map x) throws SQLException { + int oid = connection.getTypeInfo().getPGType("hstore"); + if (oid == Oid.UNSPECIFIED) { + throw new PSQLException(GT.tr("No hstore extension installed."), + PSQLState.INVALID_PARAMETER_TYPE); + } + if (connection.binaryTransferSend(oid)) { + byte[] data = HStoreConverter.toBytes(x, connection.getEncoding()); + bindBytes(parameterIndex, data, oid); + } else { + setString(parameterIndex, HStoreConverter.toString(x), oid); + } + } + + private void setNumber(int parameterIndex, Number x) throws SQLException { + checkClosed(); + if (x == null) { + setNull(parameterIndex, Types.DECIMAL); + } else { + 
bindLiteral(parameterIndex, x.toString(), Oid.NUMERIC); + } + } + + @Override + public void setObject(int parameterIndex, Object in, + int targetSqlType, int scale) + throws SQLException { + checkClosed(); + + if (in == null) { + setNull(parameterIndex, targetSqlType); + return; + } + + if (targetSqlType == Types.OTHER && in instanceof UUID + && connection.haveMinimumServerVersion(ServerVersion.v8_3)) { + setUuid(parameterIndex, (UUID) in); + return; + } + + switch (targetSqlType) { + case Types.SQLXML: + if (in instanceof SQLXML) { + setSQLXML(parameterIndex, (SQLXML) in); + } else { + setSQLXML(parameterIndex, new PgSQLXML(connection, in.toString())); + } + break; + case Types.INTEGER: + setInt(parameterIndex, castToInt(in)); + break; + case Types.TINYINT: + case Types.SMALLINT: + setShort(parameterIndex, castToShort(in)); + break; + case Types.BIGINT: + setLong(parameterIndex, castToLong(in)); + break; + case Types.REAL: + setFloat(parameterIndex, castToFloat(in)); + break; + case Types.DOUBLE: + case Types.FLOAT: + setDouble(parameterIndex, castToDouble(in)); + break; + case Types.DECIMAL: + case Types.NUMERIC: + setBigDecimal(parameterIndex, castToBigDecimal(in, scale)); + break; + case Types.CHAR: + setString(parameterIndex, castToString(in), Oid.BPCHAR); + break; + case Types.VARCHAR: + setString(parameterIndex, castToString(in), getStringType()); + break; + case Types.LONGVARCHAR: + if (in instanceof InputStream) { + preparedParameters.setText(parameterIndex, (InputStream) in); + } else { + setString(parameterIndex, castToString(in), getStringType()); + } + break; + case Types.DATE: + if (in instanceof Date) { + setDate(parameterIndex, (Date) in); + } else { + Date tmpd; + if (in instanceof java.util.Date) { + tmpd = new Date(((java.util.Date) in).getTime()); + } else if (in instanceof LocalDate) { + setDate(parameterIndex, (LocalDate) in); + break; + } else { + tmpd = getTimestampUtils().toDate(getDefaultCalendar(), in.toString()); + } + 
setDate(parameterIndex, tmpd); + } + break; + case Types.TIME: + if (in instanceof Time) { + setTime(parameterIndex, (Time) in); + } else { + Time tmpt; + if (in instanceof java.util.Date) { + tmpt = new Time(((java.util.Date) in).getTime()); + } else if (in instanceof LocalTime) { + setTime(parameterIndex, (LocalTime) in); + break; + } else if (in instanceof OffsetTime) { + setTime(parameterIndex, (OffsetTime) in); + break; + } else { + tmpt = getTimestampUtils().toTime(getDefaultCalendar(), in.toString()); + } + setTime(parameterIndex, tmpt); + } + break; + case Types.TIMESTAMP: + if (in instanceof PGTimestamp) { + setObject(parameterIndex, in); + } else if (in instanceof Timestamp) { + setTimestamp(parameterIndex, (Timestamp) in); + } else { + Timestamp tmpts; + if (in instanceof java.util.Date) { + tmpts = new Timestamp(((java.util.Date) in).getTime()); + } else if (in instanceof LocalDateTime) { + setTimestamp(parameterIndex, (LocalDateTime) in); + break; + } else { + tmpts = getTimestampUtils().toTimestamp(getDefaultCalendar(), in.toString()); + } + setTimestamp(parameterIndex, tmpts); + } + break; + case Types.TIMESTAMP_WITH_TIMEZONE: + if (in instanceof OffsetDateTime) { + setTimestamp(parameterIndex, (OffsetDateTime) in); + } else if (in instanceof PGTimestamp) { + setObject(parameterIndex, in); + } else { + throw new PSQLException( + GT.tr("Cannot cast an instance of {0} to type {1}", + in.getClass().getName(), "Types.TIMESTAMP_WITH_TIMEZONE"), + PSQLState.INVALID_PARAMETER_TYPE); + } + break; + case Types.BOOLEAN: + case Types.BIT: + setBoolean(parameterIndex, BooleanTypeUtil.castToBoolean(in)); + break; + case Types.BINARY: + case Types.VARBINARY: + case Types.LONGVARBINARY: + setObject(parameterIndex, in); + break; + case Types.BLOB: + if (in instanceof Blob) { + setBlob(parameterIndex, (Blob) in); + } else if (in instanceof InputStream) { + long oid = createBlob(parameterIndex, (InputStream) in, Long.MAX_VALUE); + setLong(parameterIndex, oid); + } 
else { + throw new PSQLException( + GT.tr("Cannot cast an instance of {0} to type {1}", + in.getClass().getName(), "Types.BLOB"), + PSQLState.INVALID_PARAMETER_TYPE); + } + break; + case Types.CLOB: + if (in instanceof Clob) { + setClob(parameterIndex, (Clob) in); + } else { + throw new PSQLException( + GT.tr("Cannot cast an instance of {0} to type {1}", + in.getClass().getName(), "Types.CLOB"), + PSQLState.INVALID_PARAMETER_TYPE); + } + break; + case Types.ARRAY: + if (in instanceof Array) { + setArray(parameterIndex, (Array) in); + } else { + try { + setObjectArray(parameterIndex, in); + } catch (Exception e) { + throw new PSQLException( + GT.tr("Cannot cast an instance of {0} to type {1}", in.getClass().getName(), "Types.ARRAY"), + PSQLState.INVALID_PARAMETER_TYPE, e); + } + } + break; + case Types.DISTINCT: + bindString(parameterIndex, in.toString(), Oid.UNSPECIFIED); + break; + case Types.OTHER: + if (in instanceof PGobject) { + setPGobject(parameterIndex, (PGobject) in); + } else if (in instanceof Map) { + setMap(parameterIndex, (Map) in); + } else { + bindString(parameterIndex, in.toString(), Oid.UNSPECIFIED); + } + break; + default: + throw new PSQLException(GT.tr("Unsupported Types value: {0}", targetSqlType), + PSQLState.INVALID_PARAMETER_TYPE); + } + } + + private Class getArrayType(Class type) { + Class subType = type.getComponentType(); + while (subType != null) { + type = subType; + subType = type.getComponentType(); + } + return type; + } + + private void setObjectArray(int parameterIndex, A in) throws SQLException { + final ArrayEncoding.ArrayEncoder arraySupport = ArrayEncoding.getArrayEncoder(in); + + final TypeInfo typeInfo = connection.getTypeInfo(); + + int oid = arraySupport.getDefaultArrayTypeOid(); + + if (arraySupport.supportBinaryRepresentation(oid) && connection.getPreferQueryMode() != PreferQueryMode.SIMPLE) { + bindBytes(parameterIndex, arraySupport.toBinaryRepresentation(connection, in, oid), oid); + } else { + if (oid == 
Oid.UNSPECIFIED) { + Class arrayType = getArrayType(in.getClass()); + oid = typeInfo.getJavaArrayType(arrayType.getName()); + if (oid == Oid.UNSPECIFIED) { + throw new SQLFeatureNotSupportedException(); + } + } + final int baseOid = typeInfo.getPGArrayElement(oid); + final String baseType = typeInfo.getPGType(baseOid); + + final Array array = getPGConnection().createArrayOf(baseType, in); + this.setArray(parameterIndex, array); + } + } + + private static String asString(final Clob in) throws SQLException { + return in.getSubString(1, (int) in.length()); + } + + private static int castToInt(final Object in) throws SQLException { + try { + if (in instanceof String) { + return Integer.parseInt((String) in); + } + if (in instanceof Number) { + return ((Number) in).intValue(); + } + if (in instanceof java.util.Date) { + return (int) ((java.util.Date) in).getTime(); + } + if (in instanceof Boolean) { + return (Boolean) in ? 1 : 0; + } + if (in instanceof Clob) { + return Integer.parseInt(asString((Clob) in)); + } + if (in instanceof Character) { + return Integer.parseInt(in.toString()); + } + } catch (final Exception e) { + throw cannotCastException(in.getClass().getName(), "int", e); + } + throw cannotCastException(in.getClass().getName(), "int"); + } + + private static short castToShort(final Object in) throws SQLException { + try { + if (in instanceof String) { + return Short.parseShort((String) in); + } + if (in instanceof Number) { + return ((Number) in).shortValue(); + } + if (in instanceof java.util.Date) { + return (short) ((java.util.Date) in).getTime(); + } + if (in instanceof Boolean) { + return (Boolean) in ? 
(short) 1 : (short) 0;
      }
      if (in instanceof Clob) {
        return Short.parseShort(asString((Clob) in));
      }
      if (in instanceof Character) {
        return Short.parseShort(in.toString());
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "short", e);
    }
    throw cannotCastException(in.getClass().getName(), "short");
  }

  /**
   * Casts an arbitrary parameter value to {@code long}.
   *
   * <p>Supported inputs: numeric strings, {@link Number}, {@link java.util.Date}
   * (epoch millis), {@link Boolean} (1/0), {@link Clob} (parsed as text, read via
   * the sibling {@code asString} helper) and single characters.</p>
   *
   * @param in value to cast, never null
   * @return the long value
   * @throws SQLException if the value cannot be converted
   */
  private static long castToLong(final Object in) throws SQLException {
    try {
      if (in instanceof String) {
        return Long.parseLong((String) in);
      }
      if (in instanceof Number) {
        return ((Number) in).longValue();
      }
      if (in instanceof java.util.Date) {
        return ((java.util.Date) in).getTime();
      }
      if (in instanceof Boolean) {
        return (Boolean) in ? 1L : 0L;
      }
      if (in instanceof Clob) {
        return Long.parseLong(asString((Clob) in));
      }
      if (in instanceof Character) {
        return Long.parseLong(in.toString());
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "long", e);
    }
    throw cannotCastException(in.getClass().getName(), "long");
  }

  /**
   * Casts an arbitrary parameter value to {@code float}; same conversion matrix
   * as {@link #castToLong(Object)}.
   *
   * @param in value to cast, never null
   * @return the float value
   * @throws SQLException if the value cannot be converted
   */
  private static float castToFloat(final Object in) throws SQLException {
    try {
      if (in instanceof String) {
        return Float.parseFloat((String) in);
      }
      if (in instanceof Number) {
        return ((Number) in).floatValue();
      }
      if (in instanceof java.util.Date) {
        // epoch millis as float; large values lose precision (pre-existing behavior)
        return ((java.util.Date) in).getTime();
      }
      if (in instanceof Boolean) {
        return (Boolean) in ? 1f : 0f;
      }
      if (in instanceof Clob) {
        return Float.parseFloat(asString((Clob) in));
      }
      if (in instanceof Character) {
        return Float.parseFloat(in.toString());
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "float", e);
    }
    throw cannotCastException(in.getClass().getName(), "float");
  }

  /**
   * Casts an arbitrary parameter value to {@code double}; same conversion matrix
   * as {@link #castToLong(Object)}.
   *
   * @param in value to cast, never null
   * @return the double value
   * @throws SQLException if the value cannot be converted
   */
  private static double castToDouble(final Object in) throws SQLException {
    try {
      if (in instanceof String) {
        return Double.parseDouble((String) in);
      }
      if (in instanceof Number) {
        return ((Number) in).doubleValue();
      }
      if (in instanceof java.util.Date) {
        return ((java.util.Date) in).getTime();
      }
      if (in instanceof Boolean) {
        return (Boolean) in ? 1d : 0d;
      }
      if (in instanceof Clob) {
        return Double.parseDouble(asString((Clob) in));
      }
      if (in instanceof Character) {
        return Double.parseDouble(in.toString());
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "double", e);
    }
    throw cannotCastException(in.getClass().getName(), "double");
  }

  /**
   * Casts an arbitrary parameter value to {@link BigDecimal}, optionally rescaling.
   *
   * @param in    value to cast, never null
   * @param scale desired scale, or a negative value to leave the scale untouched
   * @return the BigDecimal value, rounded HALF_UP when rescaled
   * @throws SQLException if the value cannot be converted
   */
  private static BigDecimal castToBigDecimal(final Object in, final int scale) throws SQLException {
    try {
      BigDecimal rc = null;
      if (in instanceof String) {
        rc = new BigDecimal((String) in);
      } else if (in instanceof BigDecimal) {
        rc = (BigDecimal) in;
      } else if (in instanceof BigInteger) {
        rc = new BigDecimal((BigInteger) in);
      } else if (in instanceof Long || in instanceof Integer || in instanceof Short
          || in instanceof Byte) {
        // exact integral conversion
        rc = BigDecimal.valueOf(((Number) in).longValue());
      } else if (in instanceof Double || in instanceof Float) {
        // valueOf uses the double's canonical string form, not its binary expansion
        rc = BigDecimal.valueOf(((Number) in).doubleValue());
      } else if (in instanceof java.util.Date) {
        rc = BigDecimal.valueOf(((java.util.Date) in).getTime());
      } else if (in instanceof Boolean) {
        rc = (Boolean) in ? BigDecimal.ONE : BigDecimal.ZERO;
      } else if (in instanceof Clob) {
        rc = new BigDecimal(asString((Clob) in));
      } else if (in instanceof Character) {
        rc = new BigDecimal(new char[]{(Character) in});
      }
      if (rc != null) {
        if (scale >= 0) {
          rc = rc.setScale(scale, RoundingMode.HALF_UP);
        }
        return rc;
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "BigDecimal", e);
    }
    throw cannotCastException(in.getClass().getName(), "BigDecimal");
  }

  /**
   * Casts an arbitrary parameter value to {@link String}. Unlike the numeric
   * casts this never fails on an unknown type: it falls back to {@code toString()}.
   *
   * @param in value to cast, never null
   * @return the string form
   * @throws SQLException if reading a Clob fails
   */
  private static String castToString(final Object in) throws SQLException {
    try {
      if (in instanceof String) {
        return (String) in;
      }
      if (in instanceof Clob) {
        return asString((Clob) in);
      }
      // convert any unknown objects to string.
      return in.toString();
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "String", e);
    }
  }

  private static PSQLException cannotCastException(final String fromType, final String toType) {
    return cannotCastException(fromType, toType, null);
  }

  /**
   * Builds the standard "cannot convert" exception used by all cast helpers above.
   *
   * @param fromType source class name
   * @param toType   target type name
   * @param cause    underlying failure, may be null
   * @return a PSQLException with state INVALID_PARAMETER_TYPE
   */
  private static PSQLException cannotCastException(final String fromType, final String toType,
      final Exception cause) {
    return new PSQLException(
        GT.tr("Cannot convert an instance of {0} to type {1}", fromType, toType),
        PSQLState.INVALID_PARAMETER_TYPE, cause);
  }

  @Override
  public void setObject(int parameterIndex, Object x,
      int targetSqlType) throws SQLException {
    // -1 scale means "do not rescale" in the 4-arg overload
    setObject(parameterIndex, x, targetSqlType, -1);
  }

  /*
   * This stores an Object into a parameter.
   */
  @Override
  public void setObject(int parameterIndex, Object x) throws SQLException {
    checkClosed();
    // Dispatch on the runtime type; each branch delegates to the matching
    // typed setter. Order matters only for readability here - the types are disjoint.
    if (x == null) {
      setNull(parameterIndex, Types.OTHER);
    } else if (x instanceof UUID && connection.haveMinimumServerVersion(ServerVersion.v8_3)) {
      // native uuid type only exists on 8.3+ servers
      setUuid(parameterIndex, (UUID) x);
    } else if (x instanceof SQLXML) {
      setSQLXML(parameterIndex, (SQLXML) x);
    } else if (x instanceof String) {
      setString(parameterIndex, (String) x);
    } else if (x instanceof BigDecimal) {
      setBigDecimal(parameterIndex, (BigDecimal) x);
    } else if (x instanceof Short) {
      setShort(parameterIndex, (Short) x);
    } else if (x instanceof Integer) {
      setInt(parameterIndex, (Integer) x);
    } else if (x instanceof Long) {
      setLong(parameterIndex, (Long) x);
    } else if (x instanceof Float) {
      setFloat(parameterIndex, (Float) x);
    } else if (x instanceof Double) {
      setDouble(parameterIndex, (Double) x);
    } else if (x instanceof byte[]) {
      setBytes(parameterIndex, (byte[]) x);
    } else if (x instanceof ByteStreamWriter) {
      setByteStreamWriter(parameterIndex, (ByteStreamWriter) x);
    } else if (x instanceof Date) {
      setDate(parameterIndex, (Date) x);
    } else if (x instanceof Time) {
      setTime(parameterIndex, (Time) x);
    } else if (x instanceof Timestamp) {
      setTimestamp(parameterIndex, (Timestamp) x);
    } else if (x instanceof Boolean) {
      setBoolean(parameterIndex, (Boolean) x);
    } else if (x instanceof Byte) {
      setByte(parameterIndex, (Byte) x);
    } else if (x instanceof Blob) {
      setBlob(parameterIndex, (Blob) x);
    } else if (x instanceof Clob) {
      setClob(parameterIndex, (Clob) x);
    } else if (x instanceof Array) {
      setArray(parameterIndex, (Array) x);
    } else if (x instanceof PGobject) {
      setPGobject(parameterIndex, (PGobject) x);
    } else if (x instanceof Character) {
      setString(parameterIndex, ((Character) x).toString());
    } else if (x instanceof LocalDate) {
      setDate(parameterIndex, (LocalDate) x);
    } else if (x instanceof LocalTime) {
      setTime(parameterIndex, (LocalTime) x);
    } else if (x instanceof OffsetTime) {
      setTime(parameterIndex, (OffsetTime) x);
    } else if (x instanceof LocalDateTime) {
      setTimestamp(parameterIndex, (LocalDateTime) x);
    } else if (x instanceof OffsetDateTime) {
      setTimestamp(parameterIndex, (OffsetDateTime) x);
    } else if (x instanceof Map) {
      setMap(parameterIndex, (Map) x);
    } else if (x instanceof Number) {
      setNumber(parameterIndex, (Number) x);
    } else if (x.getClass().isArray()) {
      try {
        setObjectArray(parameterIndex, x);
      } catch (Exception e) {
        throw new PSQLException(
            GT.tr("Cannot cast an instance of {0} to type {1}", x.getClass().getName(), "Types.ARRAY"),
            PSQLState.INVALID_PARAMETER_TYPE, e);
      }
    } else {
      // Can't infer a type.
      throw new PSQLException(GT.tr(
          "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.",
          x.getClass().getName()), PSQLState.INVALID_PARAMETER_TYPE);
    }
  }

  /**
   * Returns the SQL statement with the current template values substituted.
   *
   * @return SQL statement with the current template values substituted
   */
  @Override
  public String toString() {
    if (preparedQuery == null) {
      return super.toString();
    }
    return preparedQuery.query.toString(preparedParameters);
  }

  /**
   * Note if s is a String it should be escaped by the caller to avoid SQL injection attacks. It is
   * not done here for efficiency reasons as most calls to this method do not require escaping as
   * the source of the string is known safe (i.e. {@code Integer.toString()})
   *
   * @param paramIndex parameter index
   * @param s value (the value should already be escaped)
   * @param oid type oid
   * @throws SQLException if something goes wrong
   */
  protected void bindLiteral(int paramIndex,
      String s, int oid) throws SQLException {
    preparedParameters.setLiteralParameter(paramIndex, s, oid);
  }

  /**
   * Binds a raw binary-format value for the given parameter.
   *
   * @param paramIndex parameter index
   * @param b binary-encoded value
   * @param oid type oid
   * @throws SQLException if something goes wrong
   */
  protected void bindBytes(int paramIndex,
      byte[] b, int oid) throws SQLException {
    preparedParameters.setBinaryParameter(paramIndex, b, oid);
  }

  /**
   * This version is for values that should turn into strings e.g. setString directly calls
   * bindString with no escaping; the per-protocol ParameterList does escaping as needed.
   *
   * @param paramIndex parameter index
   * @param s value
   * @param oid type oid
   * @throws SQLException if something goes wrong
   */
  private void bindString(int paramIndex, String s, int oid) throws SQLException {
    preparedParameters.setStringParameter(paramIndex, s, oid);
  }

  @Override
  public boolean isUseServerPrepare() {
    // threshold of 0 disables server-side prepare entirely
    return preparedQuery != null && mPrepareThreshold != 0
        && preparedQuery.getExecuteCount() + 1 >= mPrepareThreshold;
  }

  @Override
  public void addBatch(String sql) throws SQLException {
    checkClosed();
    // JDBC forbids string-based batching on a PreparedStatement
    throw new PSQLException(
        GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
        PSQLState.WRONG_OBJECT_TYPE);
  }

  @Override
  public void addBatch() throws SQLException {
    checkClosed();
    // lazily create the batch lists on first use
    ArrayList batchStatements = this.batchStatements;
    if (batchStatements == null) {
      this.batchStatements = batchStatements = new ArrayList<>();
    }
    ArrayList batchParameters = this.batchParameters;
    if (batchParameters == null) {
      this.batchParameters = batchParameters = new ArrayList();
    }
    // we need to create copies of our parameters, otherwise the values can be changed
    batchParameters.add(preparedParameters.copy());
    Query query = preparedQuery.query;
    // For BatchedQuery the single query is stored once; parameters carry the repetition
    if (!(query instanceof BatchedQuery) || batchStatements.isEmpty()) {
      batchStatements.add(query);
    }
  }

  @Override
  public ResultSetMetaData getMetaData() throws SQLException {
    checkClosed();
    ResultSet rs = getResultSet();

    if (rs == null || ((PgResultSet) rs).isResultSetClosed()) {
      // OK, we haven't executed it yet, or it was closed
      // we've got to go to the backend
      // for more info. We send the full query, but just don't
      // execute it.
      int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_DESCRIBE_ONLY
          | QueryExecutor.QUERY_SUPPRESS_BEGIN;
      StatementResultHandler handler = new StatementResultHandler();
      connection.getQueryExecutor().execute(preparedQuery.query, preparedParameters, handler, 0, 0,
          flags);
      ResultWrapper wrapper = handler.getResults();
      if (wrapper != null) {
        rs = wrapper.getResultSet();
      }
    }

    if (rs != null) {
      return rs.getMetaData();
    }

    return null;
  }

  @Override
  public void setArray(int i, Array x) throws SQLException {
    checkClosed();

    if (null == x) {
      setNull(i, Types.ARRAY);
      return;
    }

    // This only works for Array implementations that return a valid array
    // literal from Array.toString(), such as the implementation we return
    // from ResultSet.getArray(). Eventually we need a proper implementation
    // here that works for any Array implementation.
    String typename = x.getBaseTypeName();
    int oid = connection.getTypeInfo().getPGArrayType(typename);
    if (oid == Oid.UNSPECIFIED) {
      throw new PSQLException(GT.tr("Unknown type {0}.", typename),
          PSQLState.INVALID_PARAMETER_TYPE);
    }

    if (x instanceof PgArray) {
      // fast path: our own Array implementation can hand us the binary encoding directly
      PgArray arr = (PgArray) x;
      byte[] bytes = arr.toBytes();
      if (bytes != null) {
        bindBytes(i, bytes, oid);
        return;
      }
    }

    setString(i, x.toString(), oid);
  }

  /**
   * Creates a server-side large object and copies up to {@code length} bytes from
   * the stream into it.
   *
   * @param i parameter index (unused here; kept for signature compatibility)
   * @param inputStream source of the blob data
   * @param length maximum number of bytes to copy
   * @return the oid of the newly created large object
   * @throws SQLException on database or I/O failure
   */
  protected long createBlob(int i, InputStream inputStream,
      long length) throws SQLException {
    LargeObjectManager lom = connection.getLargeObjectAPI();
    long oid = lom.createLO();
    LargeObject lob = lom.open(oid);
    try (OutputStream outputStream = lob.getOutputStream()) {
      // The actual buffer size does not matter much, see benchmarks
      // https://github.com/pgjdbc/pgjdbc/pull/3044#issuecomment-1838057929
      // BlobOutputStream would gradually increase the buffer, so it will level the number of
      // database calls.
      // At the same time, inputStream.read might produce less rows than requested, so we can not
      // use a plain lob.write(buf, 0, numRead) as it might not align with 2K boundaries.
      byte[] buf = new byte[(int) Math.min(length, 8192)];
      int numRead;
      while (length > 0 && (
          numRead = inputStream.read(buf, 0, (int) Math.min(buf.length, length))) >= 0) {
        length -= numRead;
        outputStream.write(buf, 0, numRead);
      }
    } catch (IOException se) {
      throw new PSQLException(GT.tr("Unexpected error writing large object to database."),
          PSQLState.UNEXPECTED_ERROR, se);
    }
    return oid;
  }

  @Override
  public void setBlob(int i, Blob x) throws SQLException {
    checkClosed();

    if (x == null) {
      setNull(i, Types.BLOB);
      return;
    }

    InputStream inStream = x.getBinaryStream();
    try {
      long oid = createBlob(i, inStream, x.length());
      setLong(i, oid);
    } finally {
      try {
        inStream.close();
      } catch (Exception ignored) {
        // best-effort close; the blob data was already copied to the server
      }
    }
  }

  /**
   * Reads at most {@code maxLength} characters from the reader into a String.
   *
   * @param value source reader
   * @param maxLength cap on characters read
   * @return the accumulated string
   * @throws SQLException wrapping any I/O failure of the caller-supplied Reader
   */
  private String readerToString(Reader value, int maxLength) throws SQLException {
    try {
      int bufferSize = Math.min(maxLength, 1024);
      StringBuilder v = new StringBuilder(bufferSize);
      char[] buf = new char[bufferSize];
      int nRead = 0;
      while (nRead > -1 && v.length() < maxLength) {
        nRead = value.read(buf, 0, Math.min(bufferSize, maxLength - v.length()));
        if (nRead > 0) {
          v.append(buf, 0, nRead);
        }
      }
      return v.toString();
    } catch (IOException ioe) {
      throw new PSQLException(GT.tr("Provided Reader failed."), PSQLState.UNEXPECTED_ERROR, ioe);
    }
  }

  @Override
  public void setCharacterStream(int i, Reader x,
      int length) throws SQLException {
    checkClosed();

    if (x == null) {
      setNull(i, Types.VARCHAR);
      return;
    }

    if (length < 0) {
      throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
          PSQLState.INVALID_PARAMETER_VALUE);
    }

    // Version 7.2 supports CharacterStream for the PG text types
    // As the spec/javadoc for this method indicate this is to be used for
    // large text values (i.e. LONGVARCHAR) PG doesn't have a separate
    // long varchar datatype, but with toast all the text datatypes are capable of
    // handling very large values. Thus the implementation ends up calling
    // setString() since there is no current way to stream the value to the server
    setString(i, readerToString(x, length));
  }

  @Override
  public void setClob(int i, Clob x) throws SQLException {
    checkClosed();

    if (x == null) {
      setNull(i, Types.CLOB);
      return;
    }

    Reader inStream = x.getCharacterStream();
    int length = (int) x.length();
    LargeObjectManager lom = connection.getLargeObjectAPI();
    long oid = lom.createLO();
    LargeObject lob = lom.open(oid);
    Charset connectionCharset = Charset.forName(connection.getEncoding().name());
    OutputStream los = lob.getOutputStream();
    Writer lw = new OutputStreamWriter(los, connectionCharset);
    try {
      // could be buffered, but then the OutputStream returned by LargeObject
      // is buffered internally anyhow, so there would be no performance
      // boost gained, if anything it would be worse!
      int c = inStream.read();
      int p = 0;
      while (c > -1 && p < length) {
        lw.write(c);
        c = inStream.read();
        p++;
      }
      lw.close();
    } catch (IOException se) {
      throw new PSQLException(GT.tr("Unexpected error writing large object to database."),
          PSQLState.UNEXPECTED_ERROR, se);
    }
    // lob is closed by the stream so don't call lob.close()
    setLong(i, oid);
  }

  @Override
  public void setNull(int parameterIndex, int t,
      String typeName) throws SQLException {
    if (typeName == null) {
      // no type name: fall back to the plain sql-type variant
      setNull(parameterIndex, t);
      return;
    }

    checkClosed();

    TypeInfo typeInfo = connection.getTypeInfo();
    int oid = typeInfo.getPGType(typeName);

    preparedParameters.setNull(parameterIndex, oid);
  }

  @Override
  public void setRef(int i, Ref x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setRef(int,Ref)");
  }

  @Override
  public void setDate(int i, Date d,
      Calendar cal) throws SQLException {
    checkClosed();

    if (d == null) {
      setNull(i, Types.DATE);
      return;
    }

    if (connection.binaryTransferSend(Oid.DATE)) {
      byte[] val = new byte[4];
      TimeZone tz = cal != null ? cal.getTimeZone() : null;
      getTimestampUtils().toBinDate(tz, val, d);
      preparedParameters.setBinaryParameter(i, val, Oid.DATE);
      return;
    }

    // We must use UNSPECIFIED here, or inserting a Date-with-timezone into a
    // timestamptz field does an unexpected rotation by the server's TimeZone:
    //
    // We want to interpret 2005/01/01 with calendar +0100 as
    // "local midnight in +0100", but if we go via date it interprets it
    // as local midnight in the server's timezone:

    // template1=# select '2005-01-01+0100'::timestamptz;
    // timestamptz
    // ------------------------
    // 2005-01-01 02:00:00+03
    // (1 row)

    // template1=# select '2005-01-01+0100'::date::timestamptz;
    // timestamptz
    // ------------------------
    // 2005-01-01 00:00:00+03
    // (1 row)

    if (cal == null) {
      cal = getDefaultCalendar();
    }
    bindString(i, getTimestampUtils().toString(cal, d), Oid.UNSPECIFIED);
  }

  @Override
  public void setTime(int i, Time t,
      Calendar cal) throws SQLException {
    checkClosed();

    if (t == null) {
      setNull(i, Types.TIME);
      return;
    }

    int oid = Oid.UNSPECIFIED;

    // If a PGTime is used, we can define the OID explicitly.
    if (t instanceof PGTime) {
      PGTime pgTime = (PGTime) t;
      if (pgTime.getCalendar() == null) {
        oid = Oid.TIME;
      } else {
        oid = Oid.TIMETZ;
        cal = pgTime.getCalendar();
      }
    }

    if (cal == null) {
      cal = getDefaultCalendar();
    }
    bindString(i, getTimestampUtils().toString(cal, t), oid);
  }

  @Override
  public void setTimestamp(int i, Timestamp t,
      Calendar cal) throws SQLException {
    checkClosed();

    if (t == null) {
      setNull(i, Types.TIMESTAMP);
      return;
    }

    int oid = Oid.UNSPECIFIED;

    // Use UNSPECIFIED as a compromise to get both TIMESTAMP and TIMESTAMPTZ working.
    // This is because you get this in a +1300 timezone:
    //
    // template1=# select '2005-01-01 15:00:00 +1000'::timestamptz;
    // timestamptz
    // ------------------------
    // 2005-01-01 18:00:00+13
    // (1 row)

    // template1=# select '2005-01-01 15:00:00 +1000'::timestamp;
    // timestamp
    // ---------------------
    // 2005-01-01 15:00:00
    // (1 row)

    // template1=# select '2005-01-01 15:00:00 +1000'::timestamptz::timestamp;
    // timestamp
    // ---------------------
    // 2005-01-01 18:00:00
    // (1 row)

    // So we want to avoid doing a timestamptz -> timestamp conversion, as that
    // will first convert the timestamptz to an equivalent time in the server's
    // timezone (+1300, above), then turn it into a timestamp with the "wrong"
    // time compared to the string we originally provided. But going straight
    // to timestamp is OK as the input parser for timestamp just throws away
    // the timezone part entirely. Since we don't know ahead of time what type
    // we're actually dealing with, UNSPECIFIED seems the lesser evil, even if it
    // does give more scope for type-mismatch errors being silently hidden.

    // If a PGTimestamp is used, we can define the OID explicitly.
    if (t instanceof PGTimestamp) {
      PGTimestamp pgTimestamp = (PGTimestamp) t;
      if (pgTimestamp.getCalendar() == null) {
        oid = Oid.TIMESTAMP;
      } else {
        oid = Oid.TIMESTAMPTZ;
        cal = pgTimestamp.getCalendar();
      }
    }
    if (cal == null) {
      cal = getDefaultCalendar();
    }
    bindString(i, getTimestampUtils().toString(cal, t), oid);
  }

  // java.time setters: each binds the text form with an explicit, matching OID.

  private void setDate(int i, LocalDate localDate) throws SQLException {
    int oid = Oid.DATE;
    bindString(i, getTimestampUtils().toString(localDate), oid);
  }

  private void setTime(int i, LocalTime localTime) throws SQLException {
    int oid = Oid.TIME;
    bindString(i, getTimestampUtils().toString(localTime), oid);
  }

  private void setTime(int i, OffsetTime offsetTime) throws SQLException {
    int oid = Oid.TIMETZ;
    bindString(i, getTimestampUtils().toString(offsetTime), oid);
  }

  private void setTimestamp(int i, LocalDateTime localDateTime)
      throws SQLException {
    int oid = Oid.TIMESTAMP;
    bindString(i, getTimestampUtils().toString(localDateTime), oid);
  }

  private void setTimestamp(int i, OffsetDateTime offsetDateTime)
      throws SQLException {
    int oid = Oid.TIMESTAMPTZ;
    bindString(i, getTimestampUtils().toString(offsetDateTime), oid);
  }

  /**
   * Factory for ParameterMetaData; overridable by subclasses.
   *
   * @param conn connection the metadata belongs to
   * @param oids parameter type oids
   * @return a new PgParameterMetaData
   * @throws SQLException never thrown here, declared for overriders
   */
  public ParameterMetaData createParameterMetaData(BaseConnection conn, int[] oids)
      throws SQLException {
    return new PgParameterMetaData(conn, oids);
  }

  @Override
  public void setObject(int parameterIndex, Object x,
      SQLType targetSqlType,
      int scaleOrLength) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setObject");
  }

  @Override
  public void setObject(int parameterIndex, Object x,
      SQLType targetSqlType)
      throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setObject");
  }

  @Override
  public void setRowId(int parameterIndex, RowId x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setRowId(int, RowId)");
  }

  @Override
  public void setNString(int parameterIndex, String value) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNString(int, String)");
  }

  @Override
  public void setNCharacterStream(int parameterIndex, Reader value, long length)
      throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNCharacterStream(int, Reader, long)");
  }

  @Override
  public void setNCharacterStream(int parameterIndex,
      Reader value) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNCharacterStream(int, Reader)");
  }

  @Override
  public void setCharacterStream(int parameterIndex,
      Reader value, long length)
      throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setCharacterStream(int, Reader, long)");
  }

  @Override
  public void setCharacterStream(int parameterIndex,
      Reader value) throws SQLException {
    if (connection.getPreferQueryMode() == PreferQueryMode.SIMPLE) {
      // simple query mode cannot stream; materialize the whole reader
      String s = value != null ? readerToString(value, Integer.MAX_VALUE) : null;
      setString(parameterIndex, s);
      return;
    }
    InputStream is = value != null ? new ReaderInputStream(value) : null;
    setObject(parameterIndex, is, Types.LONGVARCHAR);
  }

  @Override
  public void setBinaryStream(int parameterIndex, InputStream value, long length)
      throws SQLException {
    if (length > Integer.MAX_VALUE) {
      throw new PSQLException(GT.tr("Object is too large to send over the protocol."),
          PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE);
    }
    if (value == null) {
      preparedParameters.setNull(parameterIndex, Oid.BYTEA);
    } else {
      preparedParameters.setBytea(parameterIndex, value, (int) length);
    }
  }

  @Override
  public void setBinaryStream(int parameterIndex,
      InputStream value) throws SQLException {
    if (value == null) {
      preparedParameters.setNull(parameterIndex, Oid.BYTEA);
    } else {
      preparedParameters.setBytea(parameterIndex, value);
    }
  }

  @Override
  public void setAsciiStream(int parameterIndex,
      InputStream value, long length)
      throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setAsciiStream(int, InputStream, long)");
  }

  @Override
  public void setAsciiStream(int parameterIndex,
      InputStream value) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setAsciiStream(int, InputStream)");
  }

  @Override
  public void setNClob(int parameterIndex,
      NClob value) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNClob(int, NClob)");
  }

  @Override
  public void setClob(int parameterIndex,
      Reader reader, long length) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setClob(int, Reader, long)");
  }

  @Override
  public void setClob(int parameterIndex,
      Reader reader) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setClob(int, Reader)");
  }

  @Override
  public void setBlob(int parameterIndex,
      InputStream inputStream, long length)
      throws SQLException {
    checkClosed();

    if (inputStream == null) {
      setNull(parameterIndex, Types.BLOB);
      return;
    }

    if (length < 0) {
      throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
          PSQLState.INVALID_PARAMETER_VALUE);
    }

    long oid = createBlob(parameterIndex, inputStream, length);
    setLong(parameterIndex, oid);
  }

  @Override
  public void setBlob(int parameterIndex,
      InputStream inputStream) throws SQLException {
    checkClosed();

    if (inputStream == null) {
      setNull(parameterIndex, Types.BLOB);
      return;
    }

    // no declared length: copy until the stream is exhausted
    long oid = createBlob(parameterIndex, inputStream, Long.MAX_VALUE);
    setLong(parameterIndex, oid);
  }

  @Override
  public void setNClob(int parameterIndex,
      Reader reader, long length) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNClob(int, Reader, long)");
  }

  @Override
  public void setNClob(int parameterIndex,
      Reader reader) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNClob(int, Reader)");
  }

  @Override
  public void setSQLXML(int parameterIndex,
      SQLXML xmlObject) throws SQLException {
    checkClosed();
    String stringValue = xmlObject == null ? null : xmlObject.getString();
    if (stringValue == null) {
      setNull(parameterIndex, Types.SQLXML);
    } else {
      setString(parameterIndex, stringValue, Oid.XML);
    }
  }

  /**
   * Binds a UUID, using the 16-byte binary wire format when the connection
   * supports binary transfer for uuid, otherwise the canonical text form.
   */
  private void setUuid(int parameterIndex, UUID uuid) throws SQLException {
    if (connection.binaryTransferSend(Oid.UUID)) {
      byte[] val = new byte[16];
      ByteConverter.int8(val, 0, uuid.getMostSignificantBits());
      ByteConverter.int8(val, 8, uuid.getLeastSignificantBits());
      bindBytes(parameterIndex, val, Oid.UUID);
    } else {
      bindLiteral(parameterIndex, uuid.toString(), Oid.UUID);
    }
  }

  @Override
  public void setURL(int parameterIndex, URL x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setURL(int,URL)");
  }

  @Override
  public int[] executeBatch() throws SQLException {
    try {
      // Note: in batch prepared statements batchStatements == 1, and batchParameters is equal
      // to the number of addBatch calls
      // batchParameters might be empty in case of empty batch
      if (batchParameters != null && batchParameters.size() > 1 && mPrepareThreshold > 0) {
        // Use server-prepared statements when there's more than one statement in a batch
        // Technically speaking, it might cause to create a server-prepared statement
        // just for 2 executions even for prepareThreshold=5. That however should be
        // acceptable since prepareThreshold is a optimization kind of parameter.
        this.preparedQuery.increaseExecuteCount(mPrepareThreshold);
      }
      return super.executeBatch();
    } finally {
      // reset the per-execution cached timezone
      defaultTimeZone = null;
    }
  }

  private Calendar getDefaultCalendar() {
    if (getTimestampUtils().hasFastDefaultTimeZone()) {
      return getTimestampUtils().getSharedCalendar(null);
    }
    Calendar sharedCalendar = getTimestampUtils().getSharedCalendar(defaultTimeZone);
    if (defaultTimeZone == null) {
      // cache the zone so subsequent calls within this execution agree
      defaultTimeZone = sharedCalendar.getTimeZone();
    }
    return sharedCalendar;
  }

  @Override
  public ParameterMetaData getParameterMetaData() throws SQLException {
    // describe-only round trip to let the server resolve parameter types
    int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_DESCRIBE_ONLY
        | QueryExecutor.QUERY_SUPPRESS_BEGIN;
    StatementResultHandler handler = new StatementResultHandler();
    connection.getQueryExecutor().execute(preparedQuery.query, preparedParameters, handler, 0, 0,
        flags);

    int[] oids = preparedParameters.getTypeOIDs();
    return createParameterMetaData(connection, oids);
  }

  @Override
  protected void transformQueriesAndParameters() throws SQLException {
    ArrayList batchParameters = this.batchParameters;
    if (batchParameters == null || batchParameters.size() <= 1
        || !(preparedQuery.query instanceof BatchedQuery)) {
      return;
    }
    BatchedQuery originalQuery = (BatchedQuery) preparedQuery.query;
    // Single query cannot have more than {@link Short#MAX_VALUE} binds, thus
    // the number of multi-values blocks should be capped.
    // Typically, it does not make much sense to batch more than 128 rows: performance
    // does not improve much after updating 128 statements with 1 multi-valued one, thus
    // we cap maximum batch size and split there.
    final int bindCount = originalQuery.getBindCount();
    final int highestBlockCount = 128;
    final int maxValueBlocks = bindCount == 0 ? 1024 /* if no binds, use 1024 rows */
        : Integer.highestOneBit( // deriveForMultiBatch supports powers of two only
            Math.min(Math.max(1, maximumNumberOfParameters() / bindCount), highestBlockCount));
    int unprocessedBatchCount = batchParameters.size();
    final int fullValueBlocksCount = unprocessedBatchCount / maxValueBlocks;
    final int partialValueBlocksCount = Integer.bitCount(unprocessedBatchCount % maxValueBlocks);
    final int count = fullValueBlocksCount + partialValueBlocksCount;
    ArrayList newBatchStatements = new ArrayList<>(count);
    ArrayList newBatchParameters = new ArrayList(count);
    int offset = 0;
    for (int i = 0; i < count; i++) {
      int valueBlock;
      if (unprocessedBatchCount >= maxValueBlocks) {
        valueBlock = maxValueBlocks;
      } else {
        // remainder is decomposed into power-of-two blocks, largest first
        valueBlock = Integer.highestOneBit(unprocessedBatchCount);
      }
      // Find appropriate batch for block count.
      BatchedQuery bq = originalQuery.deriveForMultiBatch(valueBlock);
      ParameterList newPl = bq.createParameterList();
      for (int j = 0; j < valueBlock; j++) {
        ParameterList pl = batchParameters.get(offset++);
        if (pl != null) {
          newPl.appendAll(pl);
        }
      }
      newBatchStatements.add(bq);
      newBatchParameters.add(newPl);
      unprocessedBatchCount -= valueBlock;
    }
    this.batchStatements = newBatchStatements;
    this.batchParameters = newBatchParameters;
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSet.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSet.java new file mode 100644 index 0000000..3eb3796 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSet.java @@ -0,0 +1,4300 @@ +/*
 + * Copyright (c) 2004, PostgreSQL Global Development Group
 + * See the LICENSE file in the project root for more information.
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.Driver; +import org.postgresql.PGRefCursorResultSet; +import org.postgresql.PGResultSetMetaData; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.BaseStatement; +import org.postgresql.core.Encoding; +import org.postgresql.core.Field; +import org.postgresql.core.Oid; +import org.postgresql.core.Provider; +import org.postgresql.core.Query; +import org.postgresql.core.ResultCursor; +import org.postgresql.core.ResultHandlerBase; +import org.postgresql.core.TransactionState; +import org.postgresql.core.Tuple; +import org.postgresql.core.TypeInfo; +import org.postgresql.core.Utils; +import org.postgresql.util.ByteConverter; +import org.postgresql.util.GT; +import org.postgresql.util.HStoreConverter; +import org.postgresql.util.JdbcBlackHole; +import org.postgresql.util.NumberParser; +import org.postgresql.util.PGbytea; +import org.postgresql.util.PGobject; +import org.postgresql.util.PGtokenizer; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.ByteArrayInputStream; +import java.io.CharArrayReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Reader; +import java.io.UnsupportedEncodingException; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.net.URL; +import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLType; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import 
java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.ZoneOffset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Calendar; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.StringTokenizer; +import java.util.TimeZone; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; + +@SuppressWarnings({"try", "deprecation"}) +public class PgResultSet implements ResultSet, PGRefCursorResultSet { + + // needed for updateable result set support + private boolean updateable; + private boolean doingUpdates; + private HashMap updateValues; + private boolean usingOID; // are we using the OID for the primary key? + private List primaryKeys; // list of primary keys + private boolean singleTable; + private String onlyTable = ""; + private String tableName; + private PreparedStatement deleteStatement; + private final int resultsettype; + private final int resultsetconcurrency; + private int fetchdirection = ResultSet.FETCH_UNKNOWN; + private TimeZone defaultTimeZone; + protected final BaseConnection connection; // the connection we belong to + protected final BaseStatement statement; // the statement we belong to + protected final Field[] fields; // Field metadata for this resultset. + protected final Query originalQuery; // Query we originated from + private TimestampUtils timestampUtils; // our own Object because it's not thread safe + + protected final int maxRows; // Maximum rows in this resultset (might be 0). + protected final int maxFieldSize; // Maximum field size in this resultset (might be 0). + + protected List rows; // Current page of results. 
+ protected int currentRow = -1; // Index into 'rows' of our current row (0-based) + protected int rowOffset; // Offset of row 0 in the actual resultset + protected Tuple thisRow; // copy of the current result row + protected SQLWarning warnings; // The warning chain + /** + * True if the last obtained column value was SQL NULL as specified by {@link #wasNull}. The value + * is always updated by the {@link #getRawValue} method. + */ + protected boolean wasNullFlag; + protected boolean onInsertRow; + // are we on the insert row (for JDBC2 updatable resultsets)? + + private Tuple rowBuffer; // updateable rowbuffer + + protected int fetchSize; // Current fetch size (might be 0). + protected int lastUsedFetchSize; // Fetch size used during last fetch + protected boolean adaptiveFetch; + protected ResultCursor cursor; // Cursor for fetching additional data. + + // Speed up findColumn by caching lookups + private Map columnNameIndexMap; + + private ResultSetMetaData rsMetaData; + private final ResourceLock lock = new ResourceLock(); + + protected ResultSetMetaData createMetaData() throws SQLException { + return new PgResultSetMetaData(connection, fields); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + checkClosed(); + if (rsMetaData == null) { + rsMetaData = createMetaData(); + } + return rsMetaData; + } + + PgResultSet(Query originalQuery, BaseStatement statement, + Field[] fields, List tuples, + ResultCursor cursor, int maxRows, int maxFieldSize, int rsType, int rsConcurrency, + int rsHoldability, boolean adaptiveFetch) throws SQLException { + // Fail-fast on invalid null inputs + if (tuples == null) { + throw new NullPointerException("tuples must be non-null"); + } + if (fields == null) { + throw new NullPointerException("fields must be non-null"); + } + + this.originalQuery = originalQuery; + this.connection = (BaseConnection) statement.getConnection(); + this.statement = statement; + this.fields = fields; + this.rows = tuples; + 
this.cursor = cursor; + this.maxRows = maxRows; + this.maxFieldSize = maxFieldSize; + this.resultsettype = rsType; + this.resultsetconcurrency = rsConcurrency; + this.adaptiveFetch = adaptiveFetch; + + // Constructor doesn't have fetch size and can't be sure if fetch size was used so initial value would be the number of rows + this.lastUsedFetchSize = tuples.size(); + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + connection.getLogger().log(Level.FINEST, " getURL columnIndex: {0}", columnIndex); + checkClosed(); + throw Driver.notImplemented(this.getClass(), "getURL(int)"); + } + + @Override + public URL getURL(String columnName) throws SQLException { + return getURL(findColumn(columnName)); + } + + protected Object internalGetObject(int columnIndex, Field field) throws SQLException { + switch (getSQLType(columnIndex)) { + case Types.BOOLEAN: + case Types.BIT: + if (field.getOID() == Oid.BOOL) { + return getBoolean(columnIndex); + } + + if (field.getOID() == Oid.BIT) { + // Let's peek at the data - I tried to use the field.getLength() but it returns 65535 and + // it doesn't reflect the real length of the field, which is odd. + // If we have 1 byte, it's a bit(1) and return a boolean to preserve the backwards + // compatibility. If the value is null, it doesn't really matter + byte[] data = getRawValue(columnIndex); + if (data == null || data.length == 1) { + return getBoolean(columnIndex); + } + } + // Returning null here will lead to another value processing path for the bit field + // which will return a PGobject + return null; + case Types.SQLXML: + return getSQLXML(columnIndex); + case Types.TINYINT: + case Types.SMALLINT: + case Types.INTEGER: + return getInt(columnIndex); + case Types.BIGINT: + return getLong(columnIndex); + case Types.NUMERIC: + case Types.DECIMAL: + return getNumeric(columnIndex, + field.getMod() == -1 ? 
-1 : ((field.getMod() - 4) & 0xffff), true); + case Types.REAL: + return getFloat(columnIndex); + case Types.FLOAT: + case Types.DOUBLE: + return getDouble(columnIndex); + case Types.CHAR: + case Types.VARCHAR: + case Types.LONGVARCHAR: + return getString(columnIndex); + case Types.DATE: + return getDate(columnIndex); + case Types.TIME: + return getTime(columnIndex); + case Types.TIMESTAMP: + return getTimestamp(columnIndex, null); + case Types.BINARY: + case Types.VARBINARY: + case Types.LONGVARBINARY: + return getBytes(columnIndex); + case Types.ARRAY: + return getArray(columnIndex); + case Types.CLOB: + return getClob(columnIndex); + case Types.BLOB: + return getBlob(columnIndex); + + default: + String type = getPGType(columnIndex); + + // if the backend doesn't know the type then coerce to String + if ("unknown".equals(type)) { + return getString(columnIndex); + } + + if ("uuid".equals(type)) { + if (isBinary(columnIndex)) { + return getUUID(thisRow.get(columnIndex - 1)); + } + return getUUID(getString(columnIndex)); + } + + // Specialized support for ref cursors is neater. + if ("refcursor".equals(type)) { + // Fetch all results. + String cursorName = getString(columnIndex); + + StringBuilder sb = new StringBuilder("FETCH ALL IN "); + Utils.escapeIdentifier(sb, cursorName); + + // nb: no BEGIN triggered here. This is fine. If someone + // committed, and the cursor was not holdable (closing the + // cursor), we avoid starting a new xact and promptly causing + // it to fail. If the cursor *was* holdable, we don't want a + // new xact anyway since holdable cursor state isn't affected + // by xact boundaries. If our caller didn't commit at all, or + // autocommit was on, then we wouldn't issue a BEGIN anyway. + // + // We take the scrollability from the statement, but until + // we have updatable cursors it must be readonly. 
+ ResultSet rs = + connection.execSQLQuery(sb.toString(), resultsettype, ResultSet.CONCUR_READ_ONLY); + ((PgResultSet) rs).setRefCursor(cursorName); + // In long-running transactions these backend cursors take up memory space + // we could close in rs.close(), but if the transaction is closed before the result set, + // then + // the cursor no longer exists + ((PgResultSet) rs).closeRefCursor(); + return rs; + } + if ("hstore".equals(type)) { + if (isBinary(columnIndex)) { + return HStoreConverter.fromBytes(thisRow.get(columnIndex - 1), + connection.getEncoding()); + } + return HStoreConverter.fromString(getString(columnIndex)); + } + + // Caller determines what to do (JDBC3 overrides in this case) + return null; + } + } + + private void checkScrollable() throws SQLException { + checkClosed(); + if (resultsettype == ResultSet.TYPE_FORWARD_ONLY) { + throw new PSQLException( + GT.tr("Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."), + PSQLState.INVALID_CURSOR_STATE); + } + } + + @Override + public boolean absolute(int index) throws SQLException { + checkScrollable(); + + // index is 1-based, but internally we use 0-based indices + int internalIndex; + + if (index == 0) { + beforeFirst(); + return false; + } + + final int rowsSize = rows.size(); + + // if index<0, count from the end of the result set, but check + // to be sure that it is not beyond the first index + if (index < 0) { + if (index >= -rowsSize) { + internalIndex = rowsSize + index; + } else { + beforeFirst(); + return false; + } + } else { + // must be the case that index>0, + // find the correct place, assuming that + // the index is not too large + if (index <= rowsSize) { + internalIndex = index - 1; + } else { + afterLast(); + return false; + } + } + + currentRow = internalIndex; + initRowBuffer(); + onInsertRow = false; + + return true; + } + + @Override + public void afterLast() throws SQLException { + checkScrollable(); + + final int rowsSize = rows.size(); + if 
(rowsSize > 0) { + currentRow = rowsSize; + } + + onInsertRow = false; + thisRow = null; + rowBuffer = null; + } + + @Override + public void beforeFirst() throws SQLException { + checkScrollable(); + + if (!rows.isEmpty()) { + currentRow = -1; + } + + onInsertRow = false; + thisRow = null; + rowBuffer = null; + } + + @Override + public boolean first() throws SQLException { + checkScrollable(); + + if (rows.size() <= 0) { + return false; + } + + currentRow = 0; + initRowBuffer(); + onInsertRow = false; + + return true; + } + + @Override + public Array getArray(String colName) throws SQLException { + return getArray(findColumn(colName)); + } + + protected Array makeArray(int oid, byte[] value) throws SQLException { + return new PgArray(connection, oid, value); + } + + protected Array makeArray(int oid, String value) throws SQLException { + return new PgArray(connection, oid, value); + } + + + @Override + public Array getArray(int i) throws SQLException { + byte[] value = getRawValue(i); + if (value == null) { + return null; + } + + int oid = fields[i - 1].getOID(); + if (isBinary(i)) { + return makeArray(oid, value); + } + return makeArray(oid, getFixedString(i)); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + return getBigDecimal(columnIndex, -1); + } + + @Override + public BigDecimal getBigDecimal(String columnName) throws SQLException { + return getBigDecimal(findColumn(columnName)); + } + + @Override + public Blob getBlob(String columnName) throws SQLException { + return getBlob(findColumn(columnName)); + } + + protected Blob makeBlob(long oid) throws SQLException { + return new PgBlob(connection, oid); + } + + @Override + + public Blob getBlob(int i) throws SQLException { + byte[] value = getRawValue(i); + if (value == null) { + return null; + } + + return makeBlob(getLong(i)); + } + + @Override + public Reader getCharacterStream(String columnName) throws SQLException { + return 
getCharacterStream(findColumn(columnName)); + } + + @Override + public Reader getCharacterStream(int i) throws SQLException { + String value = getString(i); + if (value == null) { + return null; + } + + // Version 7.2 supports AsciiStream for all the PG text types + // As the spec/javadoc for this method indicate this is to be used for + // large text values (i.e. LONGVARCHAR) PG doesn't have a separate + // long string datatype, but with toast the text datatype is capable of + // handling very large values. Thus the implementation ends up calling + // getString() since there is no current way to stream the value from the server + return new CharArrayReader(value.toCharArray()); + } + + @Override + public Clob getClob(String columnName) throws SQLException { + return getClob(findColumn(columnName)); + } + + protected Clob makeClob(long oid) throws SQLException { + return new PgClob(connection, oid); + } + + @Override + + public Clob getClob(int i) throws SQLException { + byte[] value = getRawValue(i); + if (value == null) { + return null; + } + + return makeClob(getLong(i)); + } + + @Override + public int getConcurrency() throws SQLException { + checkClosed(); + return resultsetconcurrency; + } + + @Override + public Date getDate( + int i, Calendar cal) throws SQLException { + byte[] value = getRawValue(i); + if (value == null) { + return null; + } + + if (cal == null) { + cal = getDefaultCalendar(); + } + if (isBinary(i)) { + int col = i - 1; + int oid = fields[col].getOID(); + TimeZone tz = cal.getTimeZone(); + if (oid == Oid.DATE) { + return getTimestampUtils().toDateBin(tz, value); + } else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) { + // If backend provides just TIMESTAMP, we use "cal" timezone + // If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value + Timestamp timestamp = getTimestamp(i, cal); + // Here we just truncate date to 00:00 in a given time zone + return getTimestampUtils().convertToDate(timestamp.getTime(), tz); 
+ } else { + throw new PSQLException( + GT.tr("Cannot convert the column of type {0} to requested type {1}.", + Oid.toString(oid), "date"), + PSQLState.DATA_TYPE_MISMATCH); + } + } + + return getTimestampUtils().toDate(cal, getString(i)); + } + + @Override + public Time getTime( + int i, Calendar cal) throws SQLException { + byte[] value = getRawValue(i); + if (value == null) { + return null; + } + + if (cal == null) { + cal = getDefaultCalendar(); + } + if (isBinary(i)) { + int col = i - 1; + int oid = fields[col].getOID(); + TimeZone tz = cal.getTimeZone(); + if (oid == Oid.TIME || oid == Oid.TIMETZ) { + return getTimestampUtils().toTimeBin(tz, value); + } else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) { + // If backend provides just TIMESTAMP, we use "cal" timezone + // If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value + Timestamp timestamp = getTimestamp(i, cal); + if (timestamp == null) { + return null; + } + long timeMillis = timestamp.getTime(); + if (oid == Oid.TIMESTAMPTZ) { + // time zone == UTC since BINARY "timestamp with time zone" is always sent in UTC + // So we truncate days + return new Time(timeMillis % TimeUnit.DAYS.toMillis(1)); + } + // Here we just truncate date part + return getTimestampUtils().convertToTime(timeMillis, tz); + } else { + throw new PSQLException( + GT.tr("Cannot convert the column of type {0} to requested type {1}.", + Oid.toString(oid), "time"), + PSQLState.DATA_TYPE_MISMATCH); + } + } + + String string = getString(i); + return getTimestampUtils().toTime(cal, string); + } + + + @Override + public Timestamp getTimestamp( + int i, Calendar cal) throws SQLException { + + byte[] value = getRawValue(i); + if (value == null) { + return null; + } + + if (cal == null) { + cal = getDefaultCalendar(); + } + int col = i - 1; + int oid = fields[col].getOID(); + + if (isBinary(i)) { + byte [] row = thisRow.get(col); + if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) { + boolean hasTimeZone = oid 
== Oid.TIMESTAMPTZ; + TimeZone tz = cal.getTimeZone(); + return getTimestampUtils().toTimestampBin(tz, row, hasTimeZone); + } else if (oid == Oid.TIME) { + // JDBC spec says getTimestamp of Time and Date must be supported + Timestamp tsWithMicros = getTimestampUtils().toTimestampBin(cal.getTimeZone(), row, false); + // If server sends us a TIME, we ensure java counterpart has date of 1970-01-01 + Timestamp tsUnixEpochDate = new Timestamp(getTime(i, cal).getTime()); + tsUnixEpochDate.setNanos(tsWithMicros.getNanos()); + return tsUnixEpochDate; + } else if (oid == Oid.TIMETZ) { + TimeZone tz = cal.getTimeZone(); + byte[] timeBytesWithoutTimeZone = Arrays.copyOfRange(row, 0, 8); + Timestamp tsWithMicros = getTimestampUtils().toTimestampBin(tz, timeBytesWithoutTimeZone, false); + // If server sends us a TIMETZ, we ensure java counterpart has date of 1970-01-01 + Timestamp tsUnixEpochDate = new Timestamp(getTime(i, cal).getTime()); + tsUnixEpochDate.setNanos(tsWithMicros.getNanos()); + return tsUnixEpochDate; + } else if (oid == Oid.DATE) { + new Timestamp(getDate(i, cal).getTime()); + } else { + throw new PSQLException( + GT.tr("Cannot convert the column of type {0} to requested type {1}.", + Oid.toString(oid), "timestamp"), + PSQLState.DATA_TYPE_MISMATCH); + } + } + + // If this is actually a timestamptz, the server-provided timezone will override + // the one we pass in, which is the desired behaviour. Otherwise, we'll + // interpret the timezone-less value in the provided timezone. 
+ String string = getString(i); + if (oid == Oid.TIME || oid == Oid.TIMETZ) { + // If server sends us a TIME, we ensure java counterpart has date of 1970-01-01 + Timestamp tsWithMicros = getTimestampUtils().toTimestamp(cal, string); + Timestamp tsUnixEpochDate = new Timestamp(getTimestampUtils().toTime(cal, string).getTime()); + tsUnixEpochDate.setNanos(tsWithMicros.getNanos()); + return tsUnixEpochDate; + } + + return getTimestampUtils().toTimestamp(cal, string); + + } + + // TODO: In Java 8 this constant is missing, later versions (at least 11) have LocalDate#EPOCH: + private static final LocalDate LOCAL_DATE_EPOCH = LocalDate.of(1970, 1, 1); + + private OffsetDateTime getOffsetDateTime(int i) throws SQLException { + byte[] value = getRawValue(i); + if (value == null) { + return null; + } + + int col = i - 1; + int oid = fields[col].getOID(); + + // TODO: Disallow getting OffsetDateTime from a non-TZ field + if (isBinary(i)) { + if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) { + return getTimestampUtils().toOffsetDateTimeBin(value); + } else if (oid == Oid.TIMETZ) { + // JDBC spec says timetz must be supported + return getTimestampUtils().toOffsetTimeBin(value).atDate(LOCAL_DATE_EPOCH); + } + } else { + // string + + if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP ) { + + OffsetDateTime offsetDateTime = getTimestampUtils().toOffsetDateTime(getString(i)); + if ( offsetDateTime != OffsetDateTime.MAX && offsetDateTime != OffsetDateTime.MIN ) { + return offsetDateTime.withOffsetSameInstant(ZoneOffset.UTC); + } else { + return offsetDateTime; + } + + } + if ( oid == Oid.TIMETZ ) { + return getTimestampUtils().toOffsetDateTime(getString(i)); + } + } + + throw new PSQLException( + GT.tr("Cannot convert the column of type {0} to requested type {1}.", + Oid.toString(oid), "java.time.OffsetDateTime"), + PSQLState.DATA_TYPE_MISMATCH); + } + + private OffsetTime getOffsetTime(int i) throws SQLException { + byte[] value = getRawValue(i); + if (value == null) { + 
return null;
    }

    int oid = fields[i - 1].getOID();

    // Only TIMETZ can be surfaced as an OffsetTime.
    if (oid != Oid.TIMETZ) {
      throw new PSQLException(
          GT.tr("Cannot convert the column of type {0} to requested type {1}.",
              Oid.toString(oid), "java.time.OffsetTime"),
          PSQLState.DATA_TYPE_MISMATCH);
    }
    return isBinary(i)
        ? getTimestampUtils().toOffsetTimeBin(value)
        : getTimestampUtils().toOffsetTime(getString(i));
  }

  /** Reads a TIMESTAMP column as {@link LocalDateTime}; any other OID is rejected. */
  private LocalDateTime getLocalDateTime(int i) throws SQLException {
    byte[] raw = getRawValue(i);
    if (raw == null) {
      return null;
    }

    int oid = fields[i - 1].getOID();

    if (oid != Oid.TIMESTAMP) {
      throw new PSQLException(
          GT.tr("Cannot convert the column of type {0} to requested type {1}.",
              Oid.toString(oid), "java.time.LocalDateTime"),
          PSQLState.DATA_TYPE_MISMATCH);
    }
    return isBinary(i)
        ? getTimestampUtils().toLocalDateTimeBin(raw)
        : getTimestampUtils().toLocalDateTime(getString(i));
  }

  /**
   * Reads a DATE or TIMESTAMP column as {@link LocalDate}; TIMESTAMP values are
   * truncated to their date part.
   */
  private LocalDate getLocalDate(int i) throws SQLException {
    byte[] raw = getRawValue(i);
    if (raw == null) {
      return null;
    }

    int oid = fields[i - 1].getOID();

    if (isBinary(i)) {
      if (oid == Oid.DATE) {
        return getTimestampUtils().toLocalDateBin(raw);
      }
      if (oid == Oid.TIMESTAMP) {
        return getTimestampUtils().toLocalDateTimeBin(raw).toLocalDate();
      }
    } else if (oid == Oid.DATE || oid == Oid.TIMESTAMP) {
      // Text form: parse as a full LocalDateTime, then drop the time part.
      return getTimestampUtils().toLocalDateTime(getString(i)).toLocalDate();
    }

    throw new PSQLException(
        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
            Oid.toString(oid), "java.time.LocalDate"),
        PSQLState.DATA_TYPE_MISMATCH);
  }

  /** Reads a TIME column as {@link LocalTime}; any other OID is rejected. */
  private LocalTime getLocalTime(int i) throws SQLException {
    byte[] value = getRawValue(i);
    if (value == null) {
      return null;
    }

    int oid = fields[i - 1].getOID();

    if (oid == Oid.TIME) {
      if (isBinary(i)) {
return getTimestampUtils().toLocalTimeBin(value); + } else { + return getTimestampUtils().toLocalTime(getString(i)); + } + } + + throw new PSQLException( + GT.tr("Cannot convert the column of type {0} to requested type {1}.", + Oid.toString(oid), "java.time.LocalTime"), + PSQLState.DATA_TYPE_MISMATCH); + } + + @Override + public Date getDate( + String c, Calendar cal) throws SQLException { + return getDate(findColumn(c), cal); + } + + @Override + public Time getTime( + String c, Calendar cal) throws SQLException { + return getTime(findColumn(c), cal); + } + + @Override + public Timestamp getTimestamp( + String c, Calendar cal) throws SQLException { + return getTimestamp(findColumn(c), cal); + } + + @Override + public int getFetchDirection() throws SQLException { + checkClosed(); + return fetchdirection; + } + + public Object getObjectImpl( + String columnName, Map> map) throws SQLException { + return getObjectImpl(findColumn(columnName), map); + } + + /* + * This checks against map for the type of column i, and if found returns an object based on that + * mapping. The class must implement the SQLData interface. 
+ */ + public Object getObjectImpl( + int i, Map> map) throws SQLException { + checkClosed(); + if (map == null || map.isEmpty()) { + return getObject(i); + } + throw Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)"); + } + + @Override + public Ref getRef(String columnName) throws SQLException { + return getRef(findColumn(columnName)); + } + + @Override + public Ref getRef(int i) throws SQLException { + checkClosed(); + // The backend doesn't yet have SQL3 REF types + throw Driver.notImplemented(this.getClass(), "getRef(int)"); + } + + @Override + public int getRow() throws SQLException { + checkClosed(); + + if (onInsertRow) { + return 0; + } + + final int rowsSize = rows.size(); + + if (currentRow < 0 || currentRow >= rowsSize) { + return 0; + } + + return rowOffset + currentRow + 1; + } + + // This one needs some thought, as not all ResultSets come from a statement + @Override + public Statement getStatement() throws SQLException { + checkClosed(); + return statement; + } + + @Override + public int getType() throws SQLException { + checkClosed(); + return resultsettype; + } + + + @Override + public boolean isAfterLast() throws SQLException { + checkClosed(); + if (onInsertRow) { + return false; + } + + final int rowsSize = rows.size(); + if (rowOffset + rowsSize == 0) { + return false; + } + return currentRow >= rowsSize; + } + + + @Override + public boolean isBeforeFirst() throws SQLException { + checkClosed(); + if (onInsertRow) { + return false; + } + + return (rowOffset + currentRow) < 0 && !rows.isEmpty(); + } + + @Override + public boolean isFirst() throws SQLException { + checkClosed(); + if (onInsertRow) { + return false; + } + + final int rowsSize = rows.size(); + if (rowOffset + rowsSize == 0) { + return false; + } + + return (rowOffset + currentRow) == 0; + } + + @Override + public boolean isLast() throws SQLException { + checkClosed(); + if (onInsertRow) { + return false; + } + + List rows = this.rows; + final int rowsSize = 
rows.size(); + + if (rowsSize == 0) { + return false; // No rows. + } + + if (currentRow != (rowsSize - 1)) { + return false; // Not on the last row of this block. + } + + // We are on the last row of the current block. + + ResultCursor cursor = this.cursor; + if (cursor == null) { + // This is the last block and therefore the last row. + return true; + } + + if (maxRows > 0 && rowOffset + currentRow == maxRows) { + // We are implicitly limited by maxRows. + return true; + } + + // Now the more painful case begins. + // We are on the last row of the current block, but we don't know if the + // current block is the last block; we must try to fetch some more data to + // find out. + + // We do a fetch of the next block, then prepend the current row to that + // block (so currentRow == 0). This works as the current row + // must be the last row of the current block if we got this far. + + rowOffset += rowsSize - 1; // Discarding all but one row. + + // Work out how many rows maxRows will let us fetch. + int fetchRows = fetchSize; + int adaptiveFetchRows = connection.getQueryExecutor() + .getAdaptiveFetchSize(adaptiveFetch, cursor); + + if (adaptiveFetchRows != -1) { + fetchRows = adaptiveFetchRows; + } + + if (maxRows != 0) { + if (fetchRows == 0 || rowOffset + fetchRows > maxRows) { + // Fetch would exceed maxRows, limit it. + fetchRows = maxRows - rowOffset; + } + } + + // Do the actual fetch. + connection.getQueryExecutor() + .fetch(cursor, new CursorResultHandler(), fetchRows, adaptiveFetch); + + // After fetch, update last used fetch size (could be useful during adaptive fetch). + lastUsedFetchSize = fetchRows; + + rows = this.rows; + // Now prepend our one saved row and move to it. + rows.add(0, thisRow); + currentRow = 0; + + // Finally, now we can tell if we're the last row or not. 
+ return rows.size() == 1; + } + + @Override + public boolean last() throws SQLException { + checkScrollable(); + List rows = this.rows; + final int rowsSize = rows.size(); + if (rowsSize <= 0) { + return false; + } + + currentRow = rowsSize - 1; + initRowBuffer(); + onInsertRow = false; + + return true; + } + + @Override + public boolean previous() throws SQLException { + checkScrollable(); + + if (onInsertRow) { + throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."), + PSQLState.INVALID_CURSOR_STATE); + } + + if (currentRow - 1 < 0) { + currentRow = -1; + thisRow = null; + rowBuffer = null; + return false; + } else { + currentRow--; + } + initRowBuffer(); + return true; + } + + @Override + public boolean relative(int rows) throws SQLException { + checkScrollable(); + + if (onInsertRow) { + throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."), + PSQLState.INVALID_CURSOR_STATE); + } + + // have to add 1 since absolute expects a 1-based index + int index = currentRow + 1 + rows; + if (index < 0) { + beforeFirst(); + return false; + } + return absolute(index); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + checkClosed(); + switch (direction) { + case ResultSet.FETCH_FORWARD: + break; + case ResultSet.FETCH_REVERSE: + case ResultSet.FETCH_UNKNOWN: + checkScrollable(); + break; + default: + throw new PSQLException(GT.tr("Invalid fetch direction constant: {0}.", direction), + PSQLState.INVALID_PARAMETER_VALUE); + } + + this.fetchdirection = direction; + } + + @Override + public void cancelRowUpdates() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + if (onInsertRow) { + throw new PSQLException(GT.tr("Cannot call cancelRowUpdates() when on the insert row."), + PSQLState.INVALID_CURSOR_STATE); + } + + if (doingUpdates) { + doingUpdates = false; + + clearRowBuffer(true); + } + } + } + + @Override + public void 
deleteRow() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkUpdateable(); + + if (onInsertRow) { + throw new PSQLException(GT.tr("Cannot call deleteRow() when on the insert row."), + PSQLState.INVALID_CURSOR_STATE); + } + + if (isBeforeFirst()) { + throw new PSQLException( + GT.tr( + "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."), + PSQLState.INVALID_CURSOR_STATE); + } + if (isAfterLast()) { + throw new PSQLException( + GT.tr( + "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."), + PSQLState.INVALID_CURSOR_STATE); + } + List rows = this.rows; + if (rows.isEmpty()) { + throw new PSQLException(GT.tr("There are no rows in this ResultSet."), + PSQLState.INVALID_CURSOR_STATE); + } + + List primaryKeys = this.primaryKeys; + int numKeys = primaryKeys.size(); + PreparedStatement deleteStatement = this.deleteStatement; + if (deleteStatement == null) { + StringBuilder deleteSQL = + new StringBuilder("DELETE FROM ").append(onlyTable).append(tableName).append(" where "); + + for (int i = 0; i < numKeys; i++) { + Utils.escapeIdentifier(deleteSQL, primaryKeys.get(i).name); + deleteSQL.append(" = ?"); + if (i < numKeys - 1) { + deleteSQL.append(" and "); + } + } + + this.deleteStatement = deleteStatement = connection.prepareStatement(deleteSQL.toString()); + } + deleteStatement.clearParameters(); + + for (int i = 0; i < numKeys; i++) { + deleteStatement.setObject(i + 1, primaryKeys.get(i).getValue()); + } + + deleteStatement.executeUpdate(); + + rows.remove(currentRow); + currentRow--; + moveToCurrentRow(); + } + } + + @Override + public void insertRow() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkUpdateable(); + if (!onInsertRow) { + throw new PSQLException(GT.tr("Not on the insert row."), PSQLState.INVALID_CURSOR_STATE); + } + HashMap updateValues = this.updateValues; + if (updateValues == null || updateValues.isEmpty()) { + throw new 
PSQLException(GT.tr("You must specify at least one column value to insert a row."), + PSQLState.INVALID_PARAMETER_VALUE); + } + + // loop through the keys in the insertTable and create the sql statement + // we have to create the sql every time since the user could insert different + // columns each time + + StringBuilder insertSQL = new StringBuilder("INSERT INTO ").append(tableName).append(" ("); + StringBuilder paramSQL = new StringBuilder(") values ("); + + Iterator columnNames = updateValues.keySet().iterator(); + int numColumns = updateValues.size(); + + for (int i = 0; columnNames.hasNext(); i++) { + String columnName = columnNames.next(); + + Utils.escapeIdentifier(insertSQL, columnName); + if (i < numColumns - 1) { + insertSQL.append(", "); + paramSQL.append("?,"); + } else { + paramSQL.append("?)"); + } + + } + + insertSQL.append(paramSQL.toString()); + PreparedStatement insertStatement = null; + + Tuple rowBuffer = this.rowBuffer; + try { + insertStatement = connection.prepareStatement(insertSQL.toString(), Statement.RETURN_GENERATED_KEYS); + + Iterator values = updateValues.values().iterator(); + + for (int i = 1; values.hasNext(); i++) { + insertStatement.setObject(i, values.next()); + } + + insertStatement.executeUpdate(); + + if (usingOID) { + // we have to get the last inserted OID and put it in the resultset + + long insertedOID = ((PgStatement) insertStatement).getLastOID(); + + updateValues.put("oid", insertedOID); + + } + + // update the underlying row to the new inserted data + updateRowBuffer(insertStatement, rowBuffer, updateValues); + } finally { + JdbcBlackHole.close(insertStatement); + } + + rows.add(rowBuffer); + + // we should now reflect the current data in thisRow + // that way getXXX will get the newly inserted data + thisRow = rowBuffer; + + // need to clear this in case of another insert + clearRowBuffer(false); + } + } + + @Override + public void moveToCurrentRow() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { 
+ checkUpdateable(); + + if (currentRow < 0 || currentRow >= rows.size()) { + thisRow = null; + rowBuffer = null; + } else { + initRowBuffer(); + } + + onInsertRow = false; + doingUpdates = false; + } + } + + @Override + public void moveToInsertRow() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkUpdateable(); + + // make sure the underlying data is null + clearRowBuffer(false); + + onInsertRow = true; + doingUpdates = false; + } + } + + // rowBuffer is the temporary storage for the row + private void clearRowBuffer(boolean copyCurrentRow) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + // inserts want an empty array while updates want a copy of the current row + if (copyCurrentRow) { + rowBuffer = thisRow.updateableCopy(); + } else { + rowBuffer = new Tuple(fields.length); + } + + // clear the updateValues hash map for the next set of updates + HashMap updateValues = this.updateValues; + if (updateValues != null) { + updateValues.clear(); + } + } + } + + @Override + public boolean rowDeleted() throws SQLException { + checkClosed(); + return false; + } + + @Override + public boolean rowInserted() throws SQLException { + checkClosed(); + return false; + } + + @Override + public boolean rowUpdated() throws SQLException { + checkClosed(); + return false; + } + + @Override + public void updateAsciiStream(int columnIndex, + InputStream x, int length) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (x == null) { + updateNull(columnIndex); + return; + } + + try { + InputStreamReader reader = new InputStreamReader(x, StandardCharsets.US_ASCII); + char[] data = new char[length]; + int numRead = 0; + while (true) { + int n = reader.read(data, numRead, length - numRead); + if (n == -1) { + break; + } + + numRead += n; + + if (numRead == length) { + break; + } + } + updateString(columnIndex, new String(data, 0, numRead)); + } catch (IOException ie) { + throw new PSQLException(GT.tr("Provided 
InputStream failed."), null, ie); + } + } + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateBinaryStream(int columnIndex, + InputStream x, int length) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (x == null) { + updateNull(columnIndex); + return; + } + + byte[] data = new byte[length]; + int numRead = 0; + try { + while (true) { + int n = x.read(data, numRead, length - numRead); + if (n == -1) { + break; + } + + numRead += n; + + if (numRead == length) { + break; + } + } + } catch (IOException ie) { + throw new PSQLException(GT.tr("Provided InputStream failed."), null, ie); + } + + if (numRead == length) { + updateBytes(columnIndex, data); + } else { + // the stream contained less data than they said + // perhaps this is an error? + byte[] data2 = new byte[numRead]; + System.arraycopy(data, 0, data2, 0, numRead); + updateBytes(columnIndex, data2); + } + } + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, String.valueOf(x)); + } + } + + @Override + public void updateBytes(int columnIndex, byte [] x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateCharacterStream(int columnIndex, + Reader x, int length) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (x == null) { + updateNull(columnIndex); + return; + } + + try { + char[] data = new char[length]; + int numRead = 0; + while (true) { + int n = x.read(data, numRead, length - numRead); + if (n == -1) { + break; + } + + 
numRead += n; + + if (numRead == length) { + break; + } + } + updateString(columnIndex, new String(data, 0, numRead)); + } catch (IOException ie) { + throw new PSQLException(GT.tr("Provided Reader failed."), null, ie); + } + } + } + + @Override + public void updateDate(int columnIndex, + Date x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkColumnIndex(columnIndex); + String columnTypeName = getPGType(columnIndex); + updateValue(columnIndex, new NullObject(columnTypeName)); + } + } + + @Override + public void updateObject( + int columnIndex, Object x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateObject( + int columnIndex, Object x, int scale) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + this.updateObject(columnIndex, x); + } + } + + @Override + public void refreshRow() throws SQLException { + checkUpdateable(); + if (onInsertRow) { + throw new PSQLException(GT.tr("Can''t refresh the insert row."), + PSQLState.INVALID_CURSOR_STATE); + } + + if (isBeforeFirst() || isAfterLast() || 
rows.isEmpty()) { + return; + } + + StringBuilder selectSQL = new StringBuilder("select "); + + ResultSetMetaData rsmd = getMetaData(); + PGResultSetMetaData pgmd = (PGResultSetMetaData) rsmd; + for (int i = 1; i <= rsmd.getColumnCount(); i++) { + if (i > 1) { + selectSQL.append(", "); + } + Utils.escapeIdentifier(selectSQL, pgmd.getBaseColumnName(i)); + } + selectSQL.append(" from ").append(onlyTable).append(tableName).append(" where "); + + List primaryKeys = this.primaryKeys; + int numKeys = primaryKeys.size(); + + for (int i = 0; i < numKeys; i++) { + + PrimaryKey primaryKey = primaryKeys.get(i); + Utils.escapeIdentifier(selectSQL, primaryKey.name); + selectSQL.append(" = ?"); + + if (i < numKeys - 1) { + selectSQL.append(" and "); + } + } + String sqlText = selectSQL.toString(); + if (connection.getLogger().isLoggable(Level.FINE)) { + connection.getLogger().log(Level.FINE, "selecting {0}", sqlText); + } + // because updateable result sets do not yet support binary transfers we must request refresh + // with updateable result set to get field data in correct format + PreparedStatement selectStatement = null; + try { + selectStatement = connection.prepareStatement(sqlText, + ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + + for (int i = 0; i < numKeys; i++) { + selectStatement.setObject(i + 1, primaryKeys.get(i).getValue()); + } + + PgResultSet rs = (PgResultSet) selectStatement.executeQuery(); + + if (rs.next()) { + // we know that the row is updatable as it was tested above. 
+ if ( rs.thisRow == null ) { + rowBuffer = null; + } else { + rowBuffer = rs.thisRow.updateableCopy(); + } + } + + rows.set(currentRow, rowBuffer); + thisRow = rowBuffer; + + connection.getLogger().log(Level.FINE, "done updates"); + + rs.close(); + } finally { + JdbcBlackHole.close(selectStatement); + } + } + + @Override + public void updateRow() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkUpdateable(); + + if (onInsertRow) { + throw new PSQLException(GT.tr("Cannot call updateRow() when on the insert row."), + PSQLState.INVALID_CURSOR_STATE); + } + + List rows = this.rows; + if (isBeforeFirst() || isAfterLast() || rows.isEmpty()) { + throw new PSQLException( + GT.tr( + "Cannot update the ResultSet because it is either before the start or after the end of the results."), + PSQLState.INVALID_CURSOR_STATE); + } + + if (!doingUpdates) { + return; // No work pending. + } + + StringBuilder updateSQL = new StringBuilder("UPDATE " + onlyTable + tableName + " SET "); + + HashMap updateValues = this.updateValues; + int numColumns = updateValues.size(); + Iterator columns = updateValues.keySet().iterator(); + + for (int i = 0; columns.hasNext(); i++) { + String column = columns.next(); + Utils.escapeIdentifier(updateSQL, column); + updateSQL.append(" = ?"); + + if (i < numColumns - 1) { + updateSQL.append(", "); + } + } + + updateSQL.append(" WHERE "); + + List primaryKeys = this.primaryKeys; + int numKeys = primaryKeys.size(); + + for (int i = 0; i < numKeys; i++) { + PrimaryKey primaryKey = primaryKeys.get(i); + Utils.escapeIdentifier(updateSQL, primaryKey.name); + updateSQL.append(" = ?"); + + if (i < numKeys - 1) { + updateSQL.append(" and "); + } + } + + String sqlText = updateSQL.toString(); + if (connection.getLogger().isLoggable(Level.FINE)) { + connection.getLogger().log(Level.FINE, "updating {0}", sqlText); + } + PreparedStatement updateStatement = null; + try { + updateStatement = connection.prepareStatement(sqlText); + + int i = 0; 
+ Iterator iterator = updateValues.values().iterator(); + for (; iterator.hasNext(); i++) { + Object o = iterator.next(); + updateStatement.setObject(i + 1, o); + } + + for (int j = 0; j < numKeys; j++, i++) { + updateStatement.setObject(i + 1, primaryKeys.get(j).getValue()); + } + + updateStatement.executeUpdate(); + } finally { + JdbcBlackHole.close(updateStatement); + } + + Tuple rowBuffer = this.rowBuffer; + updateRowBuffer(null, rowBuffer, updateValues); + + connection.getLogger().log(Level.FINE, "copying data"); + thisRow = rowBuffer.readOnlyCopy(); + rows.set(currentRow, rowBuffer); + + connection.getLogger().log(Level.FINE, "done updates"); + updateValues.clear(); + doingUpdates = false; + } + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @SuppressWarnings("try") + @Override + public void updateString(int columnIndex, String x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateTimestamp( + int columnIndex, Timestamp x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateValue(columnIndex, x); + } + } + + @Override + public void updateNull(String columnName) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateNull(findColumn(columnName)); + } + } + + @Override + public void updateBoolean(String columnName, boolean x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateBoolean(findColumn(columnName), x); + } + } + + @Override + public void updateByte(String columnName, byte x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateByte(findColumn(columnName), x); + } + } + + 
@Override + public void updateShort(String columnName, short x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateShort(findColumn(columnName), x); + } + } + + @Override + public void updateInt(String columnName, int x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateInt(findColumn(columnName), x); + } + } + + @Override + public void updateLong(String columnName, long x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateLong(findColumn(columnName), x); + } + } + + @Override + public void updateFloat(String columnName, float x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateFloat(findColumn(columnName), x); + } + } + + @Override + public void updateDouble(String columnName, double x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateDouble(findColumn(columnName), x); + } + } + + @Override + public void updateBigDecimal( + String columnName, BigDecimal x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateBigDecimal(findColumn(columnName), x); + } + } + + @Override + public void updateString( + String columnName, String x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateString(findColumn(columnName), x); + } + } + + @Override + public void updateBytes( + String columnName, byte [] x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateBytes(findColumn(columnName), x); + } + } + + @Override + public void updateDate( + String columnName, Date x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateDate(findColumn(columnName), x); + } + } + + @Override + public void updateTime( + String columnName, Time x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateTime(findColumn(columnName), x); + } + } + + @Override + public void updateTimestamp( + String columnName, Timestamp x) + throws SQLException { + try 
(ResourceLock ignore = lock.obtain()) { + updateTimestamp(findColumn(columnName), x); + } + } + + @Override + public void updateAsciiStream( + String columnName, InputStream x, int length) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateAsciiStream(findColumn(columnName), x, length); + } + } + + @Override + public void updateBinaryStream( + String columnName, InputStream x, int length) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateBinaryStream(findColumn(columnName), x, length); + } + } + + @Override + public void updateCharacterStream( + String columnName, Reader reader, + int length) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateCharacterStream(findColumn(columnName), reader, length); + } + } + + @Override + public void updateObject( + String columnName, Object x, int scale) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateObject(findColumn(columnName), x); + } + } + + @Override + public void updateObject( + String columnName, Object x) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + updateObject(findColumn(columnName), x); + } + } + + /** + * Is this ResultSet updateable? + */ + + boolean isUpdateable() throws SQLException { + checkClosed(); + + if (resultsetconcurrency == ResultSet.CONCUR_READ_ONLY) { + throw new PSQLException( + GT.tr("ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."), + PSQLState.INVALID_CURSOR_STATE); + } + + if (updateable) { + return true; + } + + connection.getLogger().log(Level.FINE, "checking if rs is updateable"); + + parseQuery(); + + if (tableName == null) { + connection.getLogger().log(Level.FINE, "tableName is not found"); + return false; + } + + if (!singleTable) { + connection.getLogger().log(Level.FINE, "not a single table"); + return false; + } + + usingOID = false; + + connection.getLogger().log(Level.FINE, "getting primary keys"); + + // + // Contains the primary key? 
+ // + + List primaryKeys = new ArrayList<>(); + this.primaryKeys = primaryKeys; + + int i = 0; + int numPKcolumns = 0; + + // otherwise go and get the primary keys and create a list of keys + String[] s = quotelessTableName(tableName); + String quotelessTableName = s[0]; + String quotelessSchemaName = s[1]; + ResultSet rs = ((PgDatabaseMetaData) connection.getMetaData()).getPrimaryUniqueKeys("", + quotelessSchemaName, quotelessTableName); + + String lastConstraintName = null; + + while (rs.next()) { + String constraintName = rs.getString(6); // get the constraintName + if (lastConstraintName == null || !lastConstraintName.equals(constraintName)) { + if (lastConstraintName != null) { + if (i == numPKcolumns && numPKcolumns > 0) { + break; + } + connection.getLogger().log(Level.FINE, "no of keys={0} from constraint {1}", new Object[]{i, lastConstraintName}); + } + i = 0; + numPKcolumns = 0; + + primaryKeys.clear(); + lastConstraintName = constraintName; + } + numPKcolumns++; + + boolean isNotNull = rs.getBoolean("IS_NOT_NULL"); + + /* make sure that only unique keys with all non-null attributes are handled */ + if (isNotNull) { + String columnName = rs.getString(4); // get the columnName + int index = findColumnIndex(columnName); + + /* make sure that the user has included the primary key in the resultset */ + if (index > 0) { + i++; + primaryKeys.add(new PrimaryKey(index, columnName)); // get the primary key information + } + } + } + + rs.close(); + connection.getLogger().log(Level.FINE, "no of keys={0} from constraint {1}", new Object[]{i, lastConstraintName}); + + /* + it is only updatable if the primary keys are available in the resultset + */ + updateable = (i == numPKcolumns) && (numPKcolumns > 0); + + connection.getLogger().log(Level.FINE, "checking primary key {0}", updateable); + + /* + if we haven't found a primary key we can check to see if the query includes the oid + This is now a questionable check as oid's have been deprecated. 
Might still be useful for + catalog tables, but again the query would have to include the oid. + */ + if (!updateable) { + int oidIndex = findColumnIndex("oid"); // 0 if not present + + // oidIndex will be >0 if the oid was in the select list + if (oidIndex > 0) { + primaryKeys.add(new PrimaryKey(oidIndex, "oid")); + usingOID = true; + updateable = true; + } + } + + if (!updateable) { + throw new PSQLException(GT.tr("No eligible primary or unique key found for table {0}.", tableName), + PSQLState.INVALID_CURSOR_STATE); + } + + return updateable; + } + + /** + * Turn on/off adaptive fetch for ResultSet. + * + * @param adaptiveFetch desired state of adaptive fetch. + * @throws SQLException exception returned if ResultSet is closed + */ + public void setAdaptiveFetch(boolean adaptiveFetch) throws SQLException { + checkClosed(); + updateQueryInsideAdaptiveFetchCache(adaptiveFetch); + this.adaptiveFetch = adaptiveFetch; + } + + /** + * Update adaptive fetch cache during changing state of adaptive fetch inside + * ResultSet. Update inside AdaptiveFetchCache is required to collect data about max result + * row length for that query to compute adaptive fetch size. + * + * @param newAdaptiveFetch new state of adaptive fetch + */ + private void updateQueryInsideAdaptiveFetchCache(boolean newAdaptiveFetch) { + if (Objects.nonNull(cursor)) { + ResultCursor resultCursor = cursor; + if (!this.adaptiveFetch && newAdaptiveFetch) { + // If we are here, that means we want to be added to adaptive fetch. + connection.getQueryExecutor().addQueryToAdaptiveFetchCache(true, resultCursor); + } + + if (this.adaptiveFetch && !newAdaptiveFetch && Objects.nonNull(cursor)) { + // If we are here, that means we want to be removed from adaptive fetch. + connection.getQueryExecutor().removeQueryFromAdaptiveFetchCache(true, resultCursor); + } + } + } + + /** + * Get state of adaptive fetch for resultSet. 
+ * + * @return state of adaptive fetch (turned on or off) + * @throws SQLException exception returned if ResultSet is closed + */ + public boolean getAdaptiveFetch() throws SQLException { + checkClosed(); + return adaptiveFetch; + } + + /** + * Cracks out the table name and schema (if it exists) from a fully qualified table name. + * + * @param fullname string that we are trying to crack. Test cases: + * + *
+   *        <pre>
+   *                 Table: table
+   *                                 ()
+   *
+   *                 "Table": Table
+   *                                 ()
+   *
+   *                 Schema.Table: table
+   *                                 (schema)
+   *
+   *                 "Schema"."Table": Table
+   *                                 (Schema)
+   *
+   *                 "Schema"."Dot.Table": Dot.Table
+   *                                 (Schema)
+   *
+   *                 Schema."Dot.Table": Dot.Table
+   *                                 (schema)
+   *
+   *        </pre>
+ * + * @return String array with element zero always being the tablename and element 1 the schema name + * which may be a zero length string. + */ + public static String[] quotelessTableName(String fullname) { + + String[] parts = new String[]{null, ""}; + StringBuilder acc = new StringBuilder(); + boolean betweenQuotes = false; + for (int i = 0; i < fullname.length(); i++) { + char c = fullname.charAt(i); + switch (c) { + case '"': + if ((i < fullname.length() - 1) && (fullname.charAt(i + 1) == '"')) { + // two consecutive quotes - keep one + i++; + acc.append(c); // keep the quote + } else { // Discard it + betweenQuotes = !betweenQuotes; + } + break; + case '.': + if (betweenQuotes) { // Keep it + acc.append(c); + } else { // Have schema name + parts[1] = acc.toString(); + acc = new StringBuilder(); + } + break; + default: + acc.append(betweenQuotes ? c : Character.toLowerCase(c)); + break; + } + } + // Always put table in slot 0 + parts[0] = acc.toString(); + return parts; + } + + private void parseQuery() { + Query originalQuery = this.originalQuery; + if (originalQuery == null) { + return; + } + String sql = originalQuery.toString(null); + StringTokenizer st = new StringTokenizer(sql, " \r\t\n"); + boolean tableFound = false; + boolean tablesChecked = false; + String name = ""; + + singleTable = true; + + while (!tableFound && !tablesChecked && st.hasMoreTokens()) { + name = st.nextToken(); + if ("from".equalsIgnoreCase(name)) { + tableName = st.nextToken(); + if ("only".equalsIgnoreCase(tableName)) { + tableName = st.nextToken(); + onlyTable = "ONLY "; + } + tableFound = true; + } + } + } + + private void setRowBufferColumn(Tuple rowBuffer, + int columnIndex, Object valueObject) throws SQLException { + if (valueObject instanceof PGobject) { + String value = ((PGobject) valueObject).getValue(); + rowBuffer.set(columnIndex, value == null ? 
null : connection.encodeString(value)); + } else { + if (valueObject == null) { + rowBuffer.set(columnIndex, null); + return; + } + switch (getSQLType(columnIndex + 1)) { + + // boolean needs to be formatted as t or f instead of true or false + case Types.BIT: + case Types.BOOLEAN: + rowBuffer.set(columnIndex, connection + .encodeString((Boolean) valueObject ? "t" : "f")); + break; + // + // toString() isn't enough for date and time types; we must format it correctly + // or we won't be able to re-parse it. + // + case Types.DATE: + rowBuffer.set(columnIndex, connection + .encodeString( + getTimestampUtils().toString( + getDefaultCalendar(), (Date) valueObject))); + break; + + case Types.TIME: + rowBuffer.set(columnIndex, connection + .encodeString( + getTimestampUtils().toString( + getDefaultCalendar(), (Time) valueObject))); + break; + + case Types.TIMESTAMP: + rowBuffer.set(columnIndex, connection.encodeString( + getTimestampUtils().toString( + getDefaultCalendar(), (Timestamp) valueObject))); + break; + + case Types.NULL: + // Should never happen? 
+ break; + + case Types.BINARY: + case Types.LONGVARBINARY: + case Types.VARBINARY: + if (isBinary(columnIndex + 1)) { + rowBuffer.set(columnIndex, (byte[]) valueObject); + } else { + try { + rowBuffer.set(columnIndex, + PGbytea.toPGString((byte[]) valueObject).getBytes(connection.getEncoding().name())); + } catch (UnsupportedEncodingException e) { + throw new PSQLException( + GT.tr("The JVM claims not to support the encoding: {0}", connection.getEncoding().name()), + PSQLState.UNEXPECTED_ERROR, e); + } + } + break; + + default: + rowBuffer.set(columnIndex, connection.encodeString(String.valueOf(valueObject))); + break; + } + + } + } + + private void updateRowBuffer(PreparedStatement insertStatement, + Tuple rowBuffer, HashMap updateValues) throws SQLException { + for (Map.Entry entry : updateValues.entrySet()) { + int columnIndex = findColumn(entry.getKey()) - 1; + Object valueObject = entry.getValue(); + setRowBufferColumn(rowBuffer, columnIndex, valueObject); + } + + if (insertStatement == null) { + return; + } + final ResultSet generatedKeys = insertStatement.getGeneratedKeys(); + try { + generatedKeys.next(); + + List primaryKeys = this.primaryKeys; + int numKeys = primaryKeys.size(); + + for (int i = 0; i < numKeys; i++) { + final PrimaryKey key = primaryKeys.get(i); + int columnIndex = key.index - 1; + Object valueObject = generatedKeys.getObject(key.name); + setRowBufferColumn(rowBuffer, columnIndex, valueObject); + } + } finally { + generatedKeys.close(); + } + } + + public class CursorResultHandler extends ResultHandlerBase { + + public CursorResultHandler() { + } + + @Override + public void handleResultRows(Query fromQuery, Field[] fields, List tuples, + ResultCursor cursor) { + PgResultSet.this.rows = tuples; + PgResultSet.this.cursor = cursor; + } + + @Override + public void handleCommandStatus(String status, long updateCount, long insertOID) { + handleError(new PSQLException(GT.tr("Unexpected command status: {0}.", status), + 
PSQLState.PROTOCOL_VIOLATION)); + } + + @Override + public void handleCompletion() throws SQLException { + SQLWarning warning = getWarning(); + if (warning != null) { + PgResultSet.this.addWarning(warning); + } + super.handleCompletion(); + } + } + + public BaseStatement getPGStatement() { + return statement; + } + + // + // Backwards compatibility with PGRefCursorResultSet + // + + private String refCursorName; + + @Override + @SuppressWarnings("deprecation") + public String getRefCursor() { + // Can't check this because the PGRefCursorResultSet + // interface doesn't allow throwing a SQLException + // + // checkClosed(); + return refCursorName; + } + + private void setRefCursor(String refCursorName) { + this.refCursorName = refCursorName; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + checkClosed(); + if (rows < 0) { + throw new PSQLException(GT.tr("Fetch size must be a value greater to or equal to 0."), + PSQLState.INVALID_PARAMETER_VALUE); + } + fetchSize = rows; + } + + @Override + public int getFetchSize() throws SQLException { + checkClosed(); + if (adaptiveFetch) { + return lastUsedFetchSize; + } else { + return fetchSize; + } + } + + /** + * Get fetch size used during last fetch. Returned value can be useful if using adaptive + * fetch. + * + * @return fetch size used during last fetch. 
+ * @throws SQLException exception returned if ResultSet is closed + */ + public int getLastUsedFetchSize() throws SQLException { + checkClosed(); + return lastUsedFetchSize; + } + + @Override + public boolean next() throws SQLException { + checkClosed(); + + if (onInsertRow) { + throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."), + PSQLState.INVALID_CURSOR_STATE); + } + + if (currentRow + 1 >= rows.size()) { + ResultCursor cursor = this.cursor; + if (cursor == null || (maxRows > 0 && rowOffset + rows.size() >= maxRows)) { + currentRow = rows.size(); + thisRow = null; + rowBuffer = null; + return false; // End of the resultset. + } + + // Ask for some more data. + rowOffset += rows.size(); // We are discarding some data. + + int fetchRows = fetchSize; + int adaptiveFetchRows = connection.getQueryExecutor() + .getAdaptiveFetchSize(adaptiveFetch, cursor); + + if (adaptiveFetchRows != -1) { + fetchRows = adaptiveFetchRows; + } + + if (maxRows != 0) { + if (fetchRows == 0 || rowOffset + fetchRows > maxRows) { + // Fetch would exceed maxRows, limit it. + fetchRows = maxRows - rowOffset; + } + } + + // Execute the fetch and update this resultset. + connection.getQueryExecutor() + .fetch(cursor, new CursorResultHandler(), fetchRows, adaptiveFetch); + + // .fetch(...) could update this.cursor, and cursor==null means + // there are no more rows to fetch + closeRefCursor(); + + // After fetch, update last used fetch size (could be useful for adaptive fetch). + lastUsedFetchSize = fetchRows; + + currentRow = 0; + + // Test the new rows array. 
+ if (rows == null || rows.isEmpty()) { + thisRow = null; + rowBuffer = null; + return false; + } + } else { + currentRow++; + } + + initRowBuffer(); + return true; + } + + @Override + public void close() throws SQLException { + try { + closeInternally(); + } finally { + ((PgStatement) statement).checkCompletion(); + } + } + + /* + used by PgStatement.closeForNextExecution to avoid + closing the firstUnclosedResult twice. + checkCompletion above modifies firstUnclosedResult + fixes issue #684 + */ + protected void closeInternally() throws SQLException { + // release resources held (memory for tuples) + rows = null; + JdbcBlackHole.close(deleteStatement); + deleteStatement = null; + if (cursor != null) { + cursor.close(); + cursor = null; + } + closeRefCursor(); + } + + /** + * Closes {@code } if no more fetch calls expected ({@code cursor==null}) + * @throws SQLException if portal close fails + */ + private void closeRefCursor() throws SQLException { + String refCursorName = this.refCursorName; + if (refCursorName == null || cursor != null) { + return; + } + try { + if (connection.getTransactionState() == TransactionState.OPEN) { + StringBuilder sb = new StringBuilder("CLOSE "); + Utils.escapeIdentifier(sb, refCursorName); + connection.execSQLUpdate(sb.toString()); + } + } finally { + this.refCursorName = null; + } + } + + @Override + public boolean wasNull() throws SQLException { + checkClosed(); + return wasNullFlag; + } + + + @Override + public String getString(int columnIndex) throws SQLException { + connection.getLogger().log(Level.FINEST, " getString columnIndex: {0}", columnIndex); + byte[] value = getRawValue(columnIndex); + if (value == null) { + return null; + } + + // varchar in binary is same as text, other binary fields are converted to their text format + if (isBinary(columnIndex) && getSQLType(columnIndex) != Types.VARCHAR) { + Field field = fields[columnIndex - 1]; + TimestampUtils ts = getTimestampUtils(); + // internalGetObject is used in 
getObject(int), so we can't easily alter the returned type + // Currently, internalGetObject delegates to getTime(), getTimestamp(), so it has issues + // with timezone conversions. + // However, as we know the explicit oids, we can do a better job here + switch (field.getOID()) { + case Oid.TIME: + return ts.toString(ts.toLocalTimeBin(value)); + case Oid.TIMETZ: + return ts.toStringOffsetTimeBin(value); + case Oid.DATE: + return ts.toString(ts.toLocalDateBin(value)); + case Oid.TIMESTAMP: + return ts.toString(ts.toLocalDateTimeBin(value)); + case Oid.TIMESTAMPTZ: + return ts.toStringOffsetDateTime(value); + } + Object obj = internalGetObject(columnIndex, field); + if (obj == null) { + // internalGetObject() knows jdbc-types and some extra like hstore. It does not know of + // PGobject based types like geometric types but getObject does + obj = getObject(columnIndex); + if (obj == null) { + return null; + } + return obj.toString(); + } + if ("hstore".equals(getPGType(columnIndex))) { + return HStoreConverter.toString((Map) obj); + } + return trimString(columnIndex, obj.toString()); + } + + Encoding encoding = connection.getEncoding(); + try { + return trimString(columnIndex, encoding.decode(value)); + } catch (IOException ioe) { + throw new PSQLException( + GT.tr( + "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."), + PSQLState.DATA_ERROR, ioe); + } + } + + /** + *

<p>Retrieves the value of the designated column in the current row of this ResultSet
+   * object as a boolean in the Java programming language.</p>

+ * + *

<p>If the designated column has a Character datatype and is one of the following values: "1",
+   * "true", "t", "yes", "y" or "on", a value of <code>true</code> is returned. If the designated
+   * column has a Character datatype and is one of the following values: "0", "false", "f", "no",
+   * "n" or "off", a value of <code>false</code> is returned. Leading or trailing whitespace is
+   * ignored, and case does not matter.</p>

+ * + *

<p>If the designated column has a Numeric datatype and is a 1, a value of <code>true</code> is
+   * returned. If the designated column has a Numeric datatype and is a 0, a value of
+   * <code>false</code> is returned.</p>

   * @param columnIndex the first column is 1, the second is 2, ...
   * @return the column value; if the value is SQL NULL, the value returned is
   *         false
   * @exception SQLException if the columnIndex is not valid; if a database access error occurs; if
   *     this method is called on a closed result set or is an invalid cast to boolean type.
   * @see <a href="https://www.postgresql.org/docs/current/datatype-boolean.html">PostgreSQL
   *     Boolean Type</a>
   */
  @Override
  public boolean getBoolean(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getBoolean columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return false;
    }

    int col = columnIndex - 1;
    if (Oid.BOOL == fields[col].getOID()) {
      final byte[] v = value;
      // A true BOOL is exactly one byte: 't' (116) in text format, 1 in binary format.
      // Everything else (including 'f', 0, or malformed lengths) is false.
      return (1 == v.length) && ((116 == v[0] && !isBinary(columnIndex)) || (1 == v[0] && isBinary(columnIndex))); // 116 = 't'
    }

    if (isBinary(columnIndex)) {
      // Non-BOOL binary column: decode it as a number and apply the numeric->boolean cast rules.
      return BooleanTypeUtil.castToBoolean(readDoubleValue(value, fields[col].getOID(), "boolean"));
    }

    // Text column of some other type: cast its string form ("t", "true", "1", ...).
    String stringValue = getString(columnIndex);
    return BooleanTypeUtil.castToBoolean(stringValue);
  }

  // byte range bounds as BigInteger, used by the slow (decimal-notation) path of getByte.
  private static final BigInteger BYTEMAX = new BigInteger(Byte.toString(Byte.MAX_VALUE));
  private static final BigInteger BYTEMIN = new BigInteger(Byte.toString(Byte.MIN_VALUE));

  /**
   * Retrieves the designated column as a {@code byte}; SQL NULL maps to 0.
   * Accepts decimal notation (e.g. "12.5") by truncating toward zero, range-checked.
   */
  @Override
  public byte getByte(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getByte columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      // there is no Oid for byte so must always do conversion from
      // some other numeric type
      return (byte) readLongValue(value, fields[col].getOID(), Byte.MIN_VALUE,
          Byte.MAX_VALUE, "byte");
    }

    // Fast path: parse ASCII digits directly from the raw bytes, no String allocation.
    Encoding encoding = connection.getEncoding();
    if (encoding.hasAsciiNumbers()) {
      try {
        return (byte) NumberParser.getFastLong(value, Byte.MIN_VALUE, Byte.MAX_VALUE);
      } catch (NumberFormatException ignored) {
        // fall through to the general string-based parse below
      }
    }

    String s = getString(columnIndex);

    if (s != null) {
      s = s.trim();
      if (s.isEmpty()) {
        return 0;
      }
      try {
        // try the optimal parse
        return Byte.parseByte(s);
      } catch (NumberFormatException e) {
        // didn't work, assume the column is not a byte
        try {
          BigDecimal n = new BigDecimal(s);
          BigInteger i = n.toBigInteger();

          int gt = i.compareTo(BYTEMAX);
          int lt = i.compareTo(BYTEMIN);

          if (gt > 0 || lt < 0) {
            throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "byte", s),
                PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
          }
          return i.byteValue();
        } catch (NumberFormatException ex) {
          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "byte", s),
              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
        }
      }
    }
    return 0; // SQL NULL
  }

  /**
   * Retrieves the designated column as a {@code short}; SQL NULL maps to 0.
   * Binary INT2 is decoded directly; other types are range-checked conversions.
   */
  @Override
  public short getShort(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getShort columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      int oid = fields[col].getOID();
      if (oid == Oid.INT2) {
        return ByteConverter.int2(value, 0);
      }
      return (short) readLongValue(value, oid, Short.MIN_VALUE, Short.MAX_VALUE, "short");
    }
    // Fast ASCII-digit path before falling back to string parsing.
    Encoding encoding = connection.getEncoding();
    if (encoding.hasAsciiNumbers()) {
      try {
        return (short) NumberParser.getFastLong(value, Short.MIN_VALUE, Short.MAX_VALUE);
      } catch (NumberFormatException ignored) {
      }
    }
    return toShort(getFixedString(columnIndex));
  }

  /**
   * Retrieves the designated column as an {@code int}; SQL NULL maps to 0.
   * Binary INT4 is decoded directly; other types are range-checked conversions.
   */
  @Override
  public int getInt(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getInt columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      int oid = fields[col].getOID();
      if (oid == Oid.INT4) {
        return ByteConverter.int4(value, 0);
      }
      return (int) readLongValue(value, oid, Integer.MIN_VALUE, Integer.MAX_VALUE, "int");
    }

    Encoding encoding = connection.getEncoding();
    if (encoding.hasAsciiNumbers()) {
      try {
        return (int) NumberParser.getFastLong(value, Integer.MIN_VALUE, Integer.MAX_VALUE);
      } catch (NumberFormatException ignored) {
      }
    }
    return toInt(getFixedString(columnIndex));
  }

  /**
   * Retrieves the designated column as a {@code long}; SQL NULL maps to 0.
   * Binary INT8 is decoded directly; other types are range-checked conversions.
   */
  @Override
  public long getLong(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getLong columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      int oid = fields[col].getOID();
      if (oid == Oid.INT8) {
        return ByteConverter.int8(value, 0);
      }
      return readLongValue(value, oid, Long.MIN_VALUE, Long.MAX_VALUE, "long");
    }

    Encoding encoding = connection.getEncoding();
    if (encoding.hasAsciiNumbers()) {
      try {
        return NumberParser.getFastLong(value, Long.MIN_VALUE, Long.MAX_VALUE);
      } catch (NumberFormatException ignored) {
      }
    }
    return toLong(getFixedString(columnIndex));
  }

  /**
   * A dummy exception thrown when fast byte[] to number parsing fails and no value can be returned.
   * The exact stack trace does not matter because the exception is always caught and is not visible
   * to users.
   */
  private static final NumberFormatException FAST_NUMBER_FAILED = new NumberFormatException() {

    // Override fillInStackTrace to prevent memory leak via Throwable.backtrace hidden field
    // The field is not observable via reflection, however when throwable contains stacktrace, it
    // does
    // hold strong references to user objects (e.g. classes -> classloaders), thus it might lead to
    // OutOfMemory conditions.
    @Override
    public Throwable fillInStackTrace() {
      return this;
    }
  };

  /**
   * Optimised byte[] to number parser.
   * This code does not handle null values, so the caller must do
   * checkResultSet and handle null values prior to calling this function.
   *
   * @param bytes integer represented as a sequence of ASCII bytes
   * @return The parsed number.
   * @throws NumberFormatException If the number is invalid or the out of range for fast parsing.
   *     The value must then be parsed by {@link #toBigDecimal(String, int)}.
   */
  private BigDecimal getFastBigDecimal(byte[] bytes) throws NumberFormatException {
    if (bytes.length == 0) {
      throw FAST_NUMBER_FAILED;
    }

    int scale = 0;
    long val = 0;
    int start;
    boolean neg;
    if (bytes[0] == '-') {
      neg = true;
      start = 1;
      // 19 digits (+ sign) is the most a long can safely accumulate without overflow checks.
      if (bytes.length == 1 || bytes.length > 19) {
        throw FAST_NUMBER_FAILED;
      }
    } else {
      start = 0;
      neg = false;
      if (bytes.length > 18) {
        throw FAST_NUMBER_FAILED;
      }
    }

    int periodsSeen = 0;
    while (start < bytes.length) {
      byte b = bytes[start++];
      if (b < '0' || b > '9') {
        if (b == '.' && periodsSeen == 0) {
          // Remember how many digits follow the decimal point; they become the scale.
          scale = bytes.length - start;
          periodsSeen++;
          continue;
        } else {
          // Any non-digit other than a single '.' (exponent, spaces, sign mid-string) bails out.
          throw FAST_NUMBER_FAILED;
        }
      }
      val *= 10;
      val += b - '0';
    }

    // Reject a bare "." / "-." (a period with no digits at all).
    int numNonSignChars = neg ? bytes.length - 1 : bytes.length;
    if (periodsSeen > 1 || periodsSeen == numNonSignChars) {
      throw FAST_NUMBER_FAILED;
    }

    if (neg) {
      val = -val;
    }

    return BigDecimal.valueOf(val, scale);
  }

  /**
   * Retrieves the designated column as a {@code float}; SQL NULL maps to 0.
   * Binary FLOAT4 is decoded directly; other numeric types go through a double conversion.
   */
  @Override
  public float getFloat(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getFloat columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      int oid = fields[col].getOID();
      if (oid == Oid.FLOAT4) {
        return ByteConverter.float4(value, 0);
      }
      return (float) readDoubleValue(value, oid, "float");
    }

    return toFloat(getFixedString(columnIndex));
  }

  /**
   * Retrieves the designated column as a {@code double}; SQL NULL maps to 0.
   * Binary FLOAT8 is decoded directly; other numeric types are converted.
   */
  @Override
  public double getDouble(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getDouble columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return 0; // SQL NULL
    }

    if (isBinary(columnIndex)) {
      int col = columnIndex - 1;
      int oid = fields[col].getOID();
      if (oid == Oid.FLOAT8) {
        return ByteConverter.float8(value, 0);
      }
      return readDoubleValue(value, oid, "double");
    }

    return toDouble(getFixedString(columnIndex));
  }

  /**
   * Retrieves the designated column as a {@link BigDecimal} with the requested scale
   * (deprecated JDBC 1.0 variant; scale -1 means "keep natural scale").
   */
  @Override
  @SuppressWarnings("deprecation")
  public BigDecimal getBigDecimal(
      int columnIndex, int scale) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getBigDecimal columnIndex: {0}", columnIndex);
    return (BigDecimal) getNumeric(columnIndex, scale, false);
  }

  /**
   * Shared implementation behind getBigDecimal: returns the column as a Number, rescaled to
   * {@code scale} (-1 keeps the natural scale). When {@code allowNaN} is set, a NaN NUMERIC
   * is returned as {@link Double#NaN} instead of failing.
   */
  private Number getNumeric(
      int columnIndex, int scale, boolean allowNaN) throws SQLException {
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return null;
    }

    if (isBinary(columnIndex)) {
      int sqlType = getSQLType(columnIndex);
      if (sqlType != Types.NUMERIC && sqlType != Types.DECIMAL) {
        // Binary but not a NUMERIC column: materialize the object and convert it.
        Object obj = internalGetObject(columnIndex, fields[columnIndex - 1]);
        if (obj == null) {
          return null;
        }
        if (obj instanceof Long || obj instanceof Integer || obj instanceof Byte) {
          BigDecimal res = BigDecimal.valueOf(((Number) obj).longValue());
          res = scaleBigDecimal(res, scale);
          return res;
        }
        // trimMoney strips currency decorations ("$", parentheses) before parsing.
        return toBigDecimal(trimMoney(String.valueOf(obj)), scale);
      } else {
        // NOTE(review): the binary NUMERIC path returns the decoded value without applying
        // scaleBigDecimal — the requested scale is ignored here; verify against upstream intent.
        Number num = ByteConverter.numeric(value);
        if (allowNaN && Double.isNaN(num.doubleValue())) {
          return Double.NaN;
        }

        return num;
      }
    }

    // Text format: try the allocation-free ASCII parser first.
    Encoding encoding = connection.getEncoding();
    if (encoding.hasAsciiNumbers()) {
      try {
        BigDecimal res = getFastBigDecimal(value);
        res = scaleBigDecimal(res, scale);
        return res;
      } catch (NumberFormatException ignore) {
      }
    }

    String stringValue = getFixedString(columnIndex);
    if (allowNaN && "NaN".equalsIgnoreCase(stringValue)) {
      return Double.NaN;
    }
    return toBigDecimal(stringValue, scale);
  }

  /**
   * {@inheritDoc}
   *
   * <p>In normal use, the bytes represent the raw values returned by the backend. However, if the
   * column is an OID, then it is assumed to refer to a Large Object, and that object is returned as
   * a byte array.</p>
   *
   * <p>Be warned If the large object is huge, then you may run out of memory.</p>
   */
  @Override
  public byte [] getBytes(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getBytes columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return null;
    }

    if (isBinary(columnIndex)) {
      // If the data is already binary then just return it
      return value;
    }
    if (fields[columnIndex - 1].getOID() == Oid.BYTEA) {
      // Text-format bytea needs unescaping (hex or legacy escape format).
      return trimBytes(columnIndex, PGbytea.toBytes(value));
    } else {
      return trimBytes(columnIndex, value);
    }
  }

  /** Retrieves the designated column as a {@link Date} in the default calendar. */
  @Override
  public Date getDate(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getDate columnIndex: {0}", columnIndex);
    return getDate(columnIndex, null);
  }

  /** Retrieves the designated column as a {@link Time} in the default calendar. */
  @Override
  public Time getTime(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getTime columnIndex: {0}", columnIndex);
    return getTime(columnIndex, null);
  }

  /** Retrieves the designated column as a {@link Timestamp} in the default calendar. */
  @Override
  public Timestamp getTimestamp(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getTimestamp columnIndex: {0}", columnIndex);
    return getTimestamp(columnIndex, null);
  }

  /** Retrieves the designated column as an ASCII stream; the whole value is materialized first. */
  @Override
  public InputStream getAsciiStream(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getAsciiStream columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return null;
    }

    // Version 7.2 supports AsciiStream for all the PG text types
    // As the spec/javadoc for this method indicate this is to be used for
    // large text values (i.e. LONGVARCHAR) PG doesn't have a separate
    // long string datatype, but with toast the text datatype is capable of
    // handling very large values. Thus the implementation ends up calling
    // getString() since there is no current way to stream the value from the server
    String stringValue = getString(columnIndex);
    return new ByteArrayInputStream(stringValue.getBytes(StandardCharsets.US_ASCII));
  }

  /** Deprecated JDBC 1.0 stream accessor; returns the value as a UTF-8 byte stream. */
  @Override
  @SuppressWarnings("deprecation")
  public InputStream getUnicodeStream(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getUnicodeStream columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return null;
    }

    // Version 7.2 supports AsciiStream for all the PG text types
    // As the spec/javadoc for this method indicate this is to be used for
    // large text values (i.e. LONGVARCHAR) PG doesn't have a separate
    // long string datatype, but with toast the text datatype is capable of
    // handling very large values. Thus the implementation ends up calling
    // getString() since there is no current way to stream the value from the server
    String stringValue = getString(columnIndex);
    return new ByteArrayInputStream(stringValue.getBytes(StandardCharsets.UTF_8));
  }

  /** Retrieves the designated column as a binary stream; the whole value is materialized first. */
  @Override
  public InputStream getBinaryStream(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getBinaryStream columnIndex: {0}", columnIndex);
    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return null;
    }

    // Version 7.2 supports BinaryStream for all PG bytea type
    // As the spec/javadoc for this method indicate this is to be used for
    // large binary values (i.e. LONGVARBINARY) PG doesn't have a separate
    // long binary datatype, but with toast the bytea datatype is capable of
    // handling very large values. Thus the implementation ends up calling
    // getBytes() since there is no current way to stream the value from the server
    byte[] b = getBytes(columnIndex);
    if (b != null) {
      return new ByteArrayInputStream(b);
    }
    return null;
  }

  // ----- column-label overloads: resolve the label then delegate to the index variant -----

  @Override
  public String getString(String columnName) throws SQLException {
    return getString(findColumn(columnName));
  }

  @Override
  public boolean getBoolean(String columnName) throws SQLException {
    return getBoolean(findColumn(columnName));
  }

  @Override
  public byte getByte(String columnName) throws SQLException {
    return getByte(findColumn(columnName));
  }

  @Override
  public short getShort(String columnName) throws SQLException {
    return getShort(findColumn(columnName));
  }

  @Override
  public int getInt(String columnName) throws SQLException {
    return getInt(findColumn(columnName));
  }

  @Override
  public long getLong(String columnName) throws SQLException {
    return getLong(findColumn(columnName));
  }

  @Override
  public float getFloat(String columnName) throws SQLException {
    return getFloat(findColumn(columnName));
  }

  @Override
  public double getDouble(String columnName) throws SQLException {
    return getDouble(findColumn(columnName));
  }

  @Override
  @SuppressWarnings("deprecation")
  public BigDecimal getBigDecimal(String columnName, int scale) throws SQLException {
    return getBigDecimal(findColumn(columnName), scale);
  }

  @Override
  public byte [] getBytes(String columnName) throws SQLException {
    return getBytes(findColumn(columnName));
  }

  @Override
  public Date getDate(String columnName) throws SQLException {
    return getDate(findColumn(columnName), null);
  }

  @Override
  public Time getTime(String columnName) throws SQLException {
    return getTime(findColumn(columnName), null);
  }

  @Override
  public Timestamp getTimestamp(String columnName) throws SQLException {
    return getTimestamp(findColumn(columnName), null);
  }
  @Override
  public InputStream getAsciiStream(String columnName) throws SQLException {
    return getAsciiStream(findColumn(columnName));
  }

  @Override
  @SuppressWarnings("deprecation")
  public InputStream getUnicodeStream(String columnName) throws SQLException {
    return getUnicodeStream(findColumn(columnName));
  }

  @Override
  public InputStream getBinaryStream(String columnName) throws SQLException {
    return getBinaryStream(findColumn(columnName));
  }

  /** Returns the head of the warning chain, or null; fails if the result set is closed. */
  @Override
  public SQLWarning getWarnings() throws SQLException {
    checkClosed();
    return warnings;
  }

  @Override
  public void clearWarnings() throws SQLException {
    checkClosed();
    warnings = null;
  }

  /**
   * Appends a warning to this result set's warning chain (SQLWarning.setNextWarning
   * walks to the end of the chain).
   */
  protected void addWarning(SQLWarning warnings) {
    if (this.warnings != null) {
      this.warnings.setNextWarning(warnings);
    } else {
      this.warnings = warnings;
    }
  }

  /** Positioned updates/deletes are not supported, so there is never a cursor name. */
  @Override
  public String getCursorName() throws SQLException {
    checkClosed();
    return null;
  }

  /**
   * Retrieves the designated column as a Java object, choosing the most appropriate type
   * for the column's PostgreSQL type.
   */
  @Override
  public Object getObject(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, "  getObject columnIndex: {0}", columnIndex);
    Field field;

    byte[] value = getRawValue(columnIndex);
    if (value == null) {
      return null;
    }

    field = fields[columnIndex - 1];

    // some fields can be null, mainly from those returned by MetaData methods
    if (field == null) {
      wasNullFlag = true;
      return null;
    }

    // Driver-internal fast path for well-known types; null means "not handled here".
    Object result = internalGetObject(columnIndex, field);
    if (result != null) {
      return result;
    }

    if (isBinary(columnIndex)) {
      return connection.getObject(getPGType(columnIndex), null, value);
    }
    String stringValue = getString(columnIndex);
    return connection.getObject(getPGType(columnIndex), stringValue, null);
  }

  @Override
  public Object getObject(String columnName) throws SQLException {
    return getObject(findColumn(columnName));
  }

  /**
   * Maps a column label to its 1-based index.
   *
   * @throws SQLException if no column with that label exists in this result set
   */
  @Override
  public int findColumn(String columnName) throws SQLException {
    checkClosed();

    int col = findColumnIndex(columnName);
    if (col == 0) {
      throw new PSQLException(
          GT.tr("The column name {0} was not found in this ResultSet.", columnName),
          PSQLState.UNDEFINED_COLUMN);
    }
    return col;
  }

  /**
   * Builds a label -> 1-based-index map for the given fields. Labels are lower-cased unless
   * the column sanitiser is disabled.
   */
  public static Map<String, Integer> createColumnNameIndexMap(Field[] fields,
      boolean isSanitiserDisabled) {
    Map<String, Integer> columnNameIndexMap = new HashMap<>(fields.length * 2);
    // The JDBC spec says when you have duplicate columns names,
    // the first one should be returned. So load the map in
    // reverse order so the first ones will overwrite later ones.
    for (int i = fields.length - 1; i >= 0; i--) {
      String columnLabel = fields[i].getColumnLabel();
      if (isSanitiserDisabled) {
        columnNameIndexMap.put(columnLabel, i + 1);
      } else {
        columnNameIndexMap.put(columnLabel.toLowerCase(Locale.US), i + 1);
      }
    }
    return columnNameIndexMap;
  }

  /**
   * Resolves a column label to its 1-based index, or 0 if not found. The map is built lazily
   * and successful case-insensitive lookups are cached under the caller's original spelling.
   */
  private int findColumnIndex(String columnName) {
    if (columnNameIndexMap == null) {
      // Prefer a map cached on the originating query, if one exists.
      if (originalQuery != null) {
        columnNameIndexMap = originalQuery.getResultSetColumnNameIndexMap();
      }
      if (columnNameIndexMap == null) {
        columnNameIndexMap = createColumnNameIndexMap(fields, connection.isColumnSanitiserDisabled());
      }
    }

    Integer index = columnNameIndexMap.get(columnName);
    if (index != null) {
      return index;
    }

    index = columnNameIndexMap.get(columnName.toLowerCase(Locale.US));
    if (index != null) {
      columnNameIndexMap.put(columnName, index);
      return index;
    }

    index = columnNameIndexMap.get(columnName.toUpperCase(Locale.US));
    if (index != null) {
      columnNameIndexMap.put(columnName, index);
      return index;
    }

    return 0;
  }

  /**
   * Returns the OID of a field. It is used internally by the driver.
   *
   * @param field field index
   * @return OID of a field
   */
  public int getColumnOID(int field) {
    return fields[field - 1].getOID();
  }

  /**
   * <p>This is used to fix get*() methods on Money fields. It should only be used by those
   * methods!</p>
   *
   * <p>It converts ($##.##) to -##.## and $##.## to ##.##</p>
   *
   * @param col column position (1-based)
   * @return numeric-parsable representation of money string literal
   * @throws SQLException if something wrong happens
   */
  public String getFixedString(int col) throws SQLException {
    String stringValue = getString(col);
    return trimMoney(stringValue);
  }

  /**
   * Strips money-style decorations so the remainder parses as a number:
   * "($12.34)" -> "-12.34", "$12.34" -> "12.34", "-$12.34" -> "-12.34".
   * Non-money strings are returned unchanged.
   */
  private String trimMoney(String s) {
    if (s == null) {
      return null;
    }

    // if we don't have at least 2 characters it can't be money.
    if (s.length() < 2) {
      return s;
    }

    // Handle Money
    char ch = s.charAt(0);

    // optimise for non-money type: return immediately with one check
    // if the first char cannot be '(', '$' or '-'
    if (ch > '-') {
      return s;
    }

    if (ch == '(') {
      // Accounting notation: parentheses mean negative.
      s = "-" + PGtokenizer.removePara(s).substring(1);
    } else if (ch == '$') {
      s = s.substring(1);
    } else if (ch == '-' && s.charAt(1) == '$') {
      s = "-" + s.substring(2);
    }

    return s;
  }

  /** Returns the PostgreSQL type name for a column, resolving it lazily on first use. */
  protected String getPGType(int column) throws SQLException {
    Field field = fields[column - 1];
    initSqlType(field);
    return field.getPGType();
  }

  /** Returns the {@link Types} constant for a column, resolving it lazily on first use. */
  protected int getSQLType(int column) throws SQLException {
    Field field = fields[column - 1];
    initSqlType(field);
    return field.getSQLType();
  }

  /** Resolves and caches the field's PG type name and JDBC SQL type from the TypeInfo cache. */
  private void initSqlType(Field field) throws SQLException {
    if (field.isTypeInitialized()) {
      return;
    }
    TypeInfo typeInfo = connection.getTypeInfo();
    int oid = field.getOID();
    String pgType = typeInfo.getPGType(oid);
    int sqlType = typeInfo.getSQLType(pgType);
    field.setSQLType(sqlType);
    field.setPGType(pgType);
  }

  /**
   * Verifies this result set is open and updateable, and lazily allocates the
   * pending-update value map.
   */
  private void checkUpdateable() throws SQLException {
    checkClosed();

    if (!isUpdateable()) {
      throw new PSQLException(
          GT.tr(
              "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."),
          PSQLState.INVALID_CURSOR_STATE);
    }

    if (updateValues == null) {
      // allow every column to be updated without a rehash.
      updateValues = new HashMap<>((int) (fields.length / 0.75), 0.75f);
    }
  }

  /** Throws if this result set has been closed ({@code rows == null} marks closed). */
  protected void checkClosed() throws SQLException {
    if (rows == null) {
      throw new PSQLException(GT.tr("This ResultSet is closed."), PSQLState.OBJECT_NOT_IN_STATE);
    }
  }

  /*
   * for jdbc3 to call internally
   */
  protected boolean isResultSetClosed() {
    return rows == null;
  }

  /** Validates a 1-based column index against the field count. */
  protected void checkColumnIndex(int column) throws SQLException {
    if (column < 1 || column > fields.length) {
      throw new PSQLException(
          GT.tr("The column index is out of range: {0}, number of columns: {1}.",
              column, fields.length),
          PSQLState.INVALID_PARAMETER_VALUE);
    }
  }

  /**
   * Checks that the result set is not closed, it's positioned on a valid row and that the given
   * column number is valid. Also updates the {@link #wasNullFlag} to correct value.
   *
   * @param column The column number to check. Range starts from 1.
   * @return raw value or null
   * @throws SQLException If state or column is invalid.
   */
  protected byte [] getRawValue(int column) throws SQLException {
    checkClosed();
    if (thisRow == null) {
      throw new PSQLException(
          GT.tr("ResultSet not positioned properly, perhaps you need to call next."),
          PSQLState.INVALID_CURSOR_STATE);
    }
    checkColumnIndex(column);
    byte[] bytes = thisRow.get(column - 1);
    // wasNull() reports on the most recently read column; record it here.
    wasNullFlag = bytes == null;
    return bytes;
  }

  /**
   * Returns true if the value of the given column is in binary format.
   *
   * @param column The column to check. Range starts from 1.
   * @return True if the column is in binary format.
+ */ + + protected boolean isBinary(int column) { + return fields[column - 1].getFormat() == Field.BINARY_FORMAT; + } + + // ----------------- Formatting Methods ------------------- + + private static final BigInteger SHORTMAX = new BigInteger(Short.toString(Short.MAX_VALUE)); + private static final BigInteger SHORTMIN = new BigInteger(Short.toString(Short.MIN_VALUE)); + + public static short toShort(String s) throws SQLException { + if (s != null) { + try { + s = s.trim(); + return Short.parseShort(s); + } catch (NumberFormatException e) { + try { + BigDecimal n = new BigDecimal(s); + BigInteger i = n.toBigInteger(); + int gt = i.compareTo(SHORTMAX); + int lt = i.compareTo(SHORTMIN); + + if (gt > 0 || lt < 0) { + throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "short", s), + PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + return i.shortValue(); + + } catch (NumberFormatException ne) { + throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "short", s), + PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + } + } + return 0; // SQL NULL + } + + private static final BigInteger INTMAX = new BigInteger(Integer.toString(Integer.MAX_VALUE)); + private static final BigInteger INTMIN = new BigInteger(Integer.toString(Integer.MIN_VALUE)); + + public static int toInt(String s) throws SQLException { + if (s != null) { + try { + s = s.trim(); + return Integer.parseInt(s); + } catch (NumberFormatException e) { + try { + BigDecimal n = new BigDecimal(s); + BigInteger i = n.toBigInteger(); + + int gt = i.compareTo(INTMAX); + int lt = i.compareTo(INTMIN); + + if (gt > 0 || lt < 0) { + throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "int", s), + PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + return i.intValue(); + + } catch (NumberFormatException ne) { + throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "int", s), + PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + } + } + return 0; // SQL NULL + } + + private static final BigInteger LONGMAX = new 
BigInteger(Long.toString(Long.MAX_VALUE)); + private static final BigInteger LONGMIN = new BigInteger(Long.toString(Long.MIN_VALUE)); + + public static long toLong(String s) throws SQLException { + if (s != null) { + try { + s = s.trim(); + return Long.parseLong(s); + } catch (NumberFormatException e) { + try { + BigDecimal n = new BigDecimal(s); + BigInteger i = n.toBigInteger(); + int gt = i.compareTo(LONGMAX); + int lt = i.compareTo(LONGMIN); + + if (gt > 0 || lt < 0) { + throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "long", s), + PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + return i.longValue(); + } catch (NumberFormatException ne) { + throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "long", s), + PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + } + } + return 0; // SQL NULL + } + + public static BigDecimal toBigDecimal(String s) throws SQLException { + if (s == null) { + return null; + } + try { + s = s.trim(); + return new BigDecimal(s); + } catch (NumberFormatException e) { + throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "BigDecimal", s), + PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + } + + public BigDecimal toBigDecimal(String s, int scale) throws SQLException { + if (s == null) { + return null; + } + BigDecimal val = toBigDecimal(s); + return scaleBigDecimal(val, scale); + } + + private BigDecimal scaleBigDecimal(BigDecimal val, int scale) throws PSQLException { + if (scale == -1) { + return val; + } + try { + return val.setScale(scale); + } catch (ArithmeticException e) { + throw new PSQLException( + GT.tr("Bad value for type {0} : {1}", "BigDecimal", val), + PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + } + + public static float toFloat(String s) throws SQLException { + if (s != null) { + try { + s = s.trim(); + return Float.parseFloat(s); + } catch (NumberFormatException e) { + throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "float", s), + PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + } + return 0; // 
SQL NULL + } + + public static double toDouble(String s) throws SQLException { + if (s != null) { + try { + s = s.trim(); + return Double.parseDouble(s); + } catch (NumberFormatException e) { + throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "double", s), + PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + } + return 0; // SQL NULL + } + + private void initRowBuffer() { + thisRow = rows.get(currentRow); + // We only need a copy of the current row if we're going to + // modify it via an updatable resultset. + if (resultsetconcurrency == ResultSet.CONCUR_UPDATABLE) { + rowBuffer = thisRow.updateableCopy(); + } else { + rowBuffer = null; + } + } + + private boolean isColumnTrimmable(int columnIndex) throws SQLException { + switch (getSQLType(columnIndex)) { + case Types.CHAR: + case Types.VARCHAR: + case Types.LONGVARCHAR: + case Types.BINARY: + case Types.VARBINARY: + case Types.LONGVARBINARY: + return true; + } + return false; + } + + private byte[] trimBytes(int columnIndex, byte[] bytes) throws SQLException { + // we need to trim if maxsize is set and the length is greater than maxsize and the + // type of this column is a candidate for trimming + if (maxFieldSize > 0 && bytes.length > maxFieldSize && isColumnTrimmable(columnIndex)) { + byte[] newBytes = new byte[maxFieldSize]; + System.arraycopy(bytes, 0, newBytes, 0, maxFieldSize); + return newBytes; + } else { + return bytes; + } + } + + private String trimString(int columnIndex, String string) throws SQLException { + // we need to trim if maxsize is set and the length is greater than maxsize and the + // type of this column is a candidate for trimming + if (maxFieldSize > 0 && string.length() > maxFieldSize && isColumnTrimmable(columnIndex)) { + return string.substring(0, maxFieldSize); + } else { + return string; + } + } + + /** + * Converts any numeric binary field to double value. This method does no overflow checking. + * + * @param bytes The bytes of the numeric field. 
   * @param oid The oid of the field.
   * @param targetType The target type. Used for error reporting.
   * @return The value as double.
   * @throws PSQLException If the field type is not supported numeric type.
   */
  private double readDoubleValue(byte[] bytes, int oid, String targetType) throws PSQLException {
    // currently implemented binary encoded fields
    switch (oid) {
      case Oid.INT2:
        return ByteConverter.int2(bytes, 0);
      case Oid.INT4:
        return ByteConverter.int4(bytes, 0);
      case Oid.INT8:
        // might not fit but there still should be no overflow checking
        return ByteConverter.int8(bytes, 0);
      case Oid.FLOAT4:
        return ByteConverter.float4(bytes, 0);
      case Oid.FLOAT8:
        return ByteConverter.float8(bytes, 0);
      case Oid.NUMERIC:
        return ByteConverter.numeric(bytes).doubleValue();
    }
    throw new PSQLException(GT.tr("Cannot convert the column of type {0} to requested type {1}.",
        Oid.toString(oid), targetType), PSQLState.DATA_TYPE_MISMATCH);
  }

  // Largest/smallest float and double values that are guaranteed to round-trip into a long
  // without overflow; used to guard the float/double -> long casts in readLongValue.
  private static final float LONG_MAX_FLOAT = StrictMath.nextDown((float) Long.MAX_VALUE);
  private static final float LONG_MIN_FLOAT = StrictMath.nextUp((float) Long.MIN_VALUE);
  private static final double LONG_MAX_DOUBLE = StrictMath.nextDown((double) Long.MAX_VALUE);
  private static final double LONG_MIN_DOUBLE = StrictMath.nextUp((double) Long.MIN_VALUE);

  /**
   * <p>Converts any numeric binary field to long value.</p>
   *
   * <p>This method is used by getByte, getShort, getInt and getLong. It must support a subset of
   * the following java types that use Binary encoding. (fields that use text encoding use a
   * different code path).
   *
   * <p>byte, short, int, long, float, double, BigDecimal, boolean, string.</p>
   *
   * @param bytes The bytes of the numeric field.
   * @param oid The oid of the field.
   * @param minVal the minimum value allowed.
   * @param maxVal the maximum value allowed.
   * @param targetType The target type. Used for error reporting.
   * @return The value as long.
   * @throws PSQLException If the field type is not supported numeric type or if the value is out of
   *     range.
   */
  private long readLongValue(byte[] bytes, int oid, long minVal, long maxVal, String targetType)
      throws PSQLException {
    long val;
    // currently implemented binary encoded fields
    switch (oid) {
      case Oid.INT2:
        val = ByteConverter.int2(bytes, 0);
        break;
      case Oid.INT4:
        val = ByteConverter.int4(bytes, 0);
        break;
      case Oid.INT8:
        val = ByteConverter.int8(bytes, 0);
        break;
      case Oid.FLOAT4:
        float f = ByteConverter.float4(bytes, 0);
        // for float values we know to be within values of long, just cast directly to long
        if (f <= LONG_MAX_FLOAT && f >= LONG_MIN_FLOAT) {
          val = (long) f;
        } else {
          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", targetType, f),
              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
        }
        break;
      case Oid.FLOAT8:
        double d = ByteConverter.float8(bytes, 0);
        // for double values within the values of a long, just directly cast to long
        if (d <= LONG_MAX_DOUBLE && d >= LONG_MIN_DOUBLE) {
          val = (long) d;
        } else {
          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", targetType, d),
              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
        }
        break;
      case Oid.NUMERIC:
        // NOTE(review): assumes ByteConverter.numeric returns a BigDecimal here; a NaN NUMERIC
        // (returned as Double) would raise ClassCastException — confirm against ByteConverter.
        Number num = ByteConverter.numeric(bytes);
        BigInteger i = ((BigDecimal) num).toBigInteger();
        int gt = i.compareTo(LONGMAX);
        int lt = i.compareTo(LONGMIN);

        if (gt > 0 || lt < 0) {
          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "long", num),
              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
        } else {
          val = num.longValue();
        }
        break;
      default:
        throw new PSQLException(
            GT.tr("Cannot convert the column of type {0} to requested type {1}.",
                Oid.toString(oid), targetType),
            PSQLState.DATA_TYPE_MISMATCH);
    }
    // Final narrowing check against the caller's target range (byte/short/int).
    if (val < minVal || val > maxVal) {
      throw new PSQLException(GT.tr("Bad value for type {0} : {1}", targetType, val),
          PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
    }
    return val;
  }

  /**
   * Records a pending update for a column of the current (or insert) row after validating
   * cursor position; a null value is routed through updateNull.
   */
  protected void updateValue(int columnIndex, Object value) throws SQLException {
    checkUpdateable();

    if (!onInsertRow && (isBeforeFirst() || isAfterLast() || rows.isEmpty())) {
      throw new PSQLException(
          GT.tr(
              "Cannot update the ResultSet because it is either before the start or after the end of the results."),
          PSQLState.INVALID_CURSOR_STATE);
    }

    checkColumnIndex(columnIndex);

    doingUpdates = !onInsertRow;
    if (value == null) {
      updateNull(columnIndex);
    } else {
      PGResultSetMetaData md = (PGResultSetMetaData) getMetaData();
      updateValues.put(md.getBaseColumnName(columnIndex), value);
    }
  }

  /** Parses a textual UUID value, converting format errors into a driver exception. */
  protected Object getUUID(String data) throws SQLException {
    UUID uuid;
    try {
      uuid = UUID.fromString(data);
    } catch (IllegalArgumentException iae) {
      throw new PSQLException(GT.tr("Invalid UUID data."), PSQLState.INVALID_PARAMETER_VALUE, iae);
    }

    return uuid;
  }

  /** Decodes a binary UUID value: 16 bytes, most-significant 8 first. */
  protected Object getUUID(byte[] data) throws SQLException {
    return new UUID(ByteConverter.int8(data, 0), ByteConverter.int8(data, 8));
  }

  private class PrimaryKey {
    int index; // where in the result set is this primaryKey
    String name; // what is the columnName of this primary Key

    PrimaryKey(int index, String name) {
      this.index = index;
      this.name = name;
    }

    Object getValue() throws SQLException {
      return getObject(index);
    }
  }

  //
  // We need to specify the type of NULL when updating a column to NULL, so
  // NullObject is a simple extension of PGobject that always returns null
  // values but retains column type info.
  //

  @SuppressWarnings("serial")
  static class NullObject extends PGobject {
    NullObject(String type) {
      this.type = type;
    }

    // Always NULL; the inherited "type" field still carries the column's type name.
    @Override
    public String getValue() {
      return null;
    }
  }

  /**
   * Used to add rows to an already existing ResultSet that exactly match the existing rows.
   * Currently only used for assembling generated keys from batch statement execution.
   */
  void addRows(List<Tuple> tuples) {
    rows.addAll(tuples);
  }

  @Override
  public void updateRef(int columnIndex, Ref x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "updateRef(int,Ref)");
  }

  @Override
  public void updateRef(String columnName, Ref x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "updateRef(String,Ref)");
  }

  @Override
  public void updateBlob(int columnIndex, Blob x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "updateBlob(int,Blob)");
  }

  @Override
  public void updateBlob(String columnName, Blob x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "updateBlob(String,Blob)");
  }

  @Override
  public void updateClob(int columnIndex, Clob x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "updateClob(int,Clob)");
  }

  @Override
  public void updateClob(String columnName, Clob x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "updateClob(String,Clob)");
  }

  @Override
  public void updateArray(int columnIndex, Array x) throws SQLException {
    updateObject(columnIndex, x);
  }

  @Override
  public void updateArray(String columnName, Array x) throws SQLException {
    updateArray(findColumn(columnName), x);
  }

  /**
   * Typed getObject per JDBC 4.1: converts the column to the requested class, rejecting
   * SQL-type/Java-type combinations the driver does not support.
   */
  @Override
  public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
    if (type == null) {
      throw new SQLException("type is null");
    }
    int sqlType = getSQLType(columnIndex);
    if (type == BigDecimal.class) {
      if (sqlType == Types.NUMERIC || sqlType == Types.DECIMAL) {
        return type.cast(getBigDecimal(columnIndex));
      } else {
        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
            PSQLState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == String.class) {
      if (sqlType == Types.CHAR || sqlType == Types.VARCHAR) {
        return type.cast(getString(columnIndex));
      } else {
        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
            PSQLState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Boolean.class) {
      if (sqlType == Types.BOOLEAN || sqlType == Types.BIT) {
        boolean booleanValue = getBoolean(columnIndex);
        // Primitive getters cannot signal NULL themselves; consult wasNull() afterwards.
        if (wasNull()) {
          return null;
        }
        return type.cast(booleanValue);
      } else {
        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
            PSQLState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Short.class) {
      if (sqlType == Types.SMALLINT) {
        short shortValue = getShort(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(shortValue);
      } else {
        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
            PSQLState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Integer.class) {
      if (sqlType == Types.INTEGER || sqlType == Types.SMALLINT) {
        int intValue = getInt(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(intValue);
      } else {
        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
            PSQLState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == Long.class) {
      if (sqlType == Types.BIGINT) {
        long longValue = getLong(columnIndex);
        if (wasNull()) {
          return null;
        }
        return type.cast(longValue);
      } else {
        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
            PSQLState.INVALID_PARAMETER_VALUE);
      }
    } else if (type == BigInteger.class) {
      if (sqlType == Types.BIGINT)
{ + long longValue = getLong(columnIndex); + if (wasNull()) { + return null; + } + return type.cast(BigInteger.valueOf(longValue)); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == Float.class) { + if (sqlType == Types.REAL) { + float floatValue = getFloat(columnIndex); + if (wasNull()) { + return null; + } + return type.cast(floatValue); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == Double.class) { + if (sqlType == Types.FLOAT || sqlType == Types.DOUBLE) { + double doubleValue = getDouble(columnIndex); + if (wasNull()) { + return null; + } + return type.cast(doubleValue); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == Date.class) { + if (sqlType == Types.DATE) { + return type.cast(getDate(columnIndex)); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == Time.class) { + if (sqlType == Types.TIME) { + return type.cast(getTime(columnIndex)); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == Timestamp.class) { + if (sqlType == Types.TIMESTAMP + || sqlType == Types.TIMESTAMP_WITH_TIMEZONE + ) { + return type.cast(getTimestamp(columnIndex)); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == Calendar.class) { + if (sqlType == Types.TIMESTAMP + || sqlType == Types.TIMESTAMP_WITH_TIMEZONE + ) { 
+ Timestamp timestampValue = getTimestamp(columnIndex); + if (timestampValue == null) { + return null; + } + Calendar calendar = Calendar.getInstance(getDefaultCalendar().getTimeZone()); + calendar.setTimeInMillis(timestampValue.getTime()); + return type.cast(calendar); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == Blob.class) { + if (sqlType == Types.BLOB || sqlType == Types.BINARY || sqlType == Types.BIGINT) { + return type.cast(getBlob(columnIndex)); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == Clob.class) { + if (sqlType == Types.CLOB || sqlType == Types.BIGINT) { + return type.cast(getClob(columnIndex)); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == java.util.Date.class) { + if (sqlType == Types.TIMESTAMP) { + Timestamp timestamp = getTimestamp(columnIndex); + if (timestamp == null) { + return null; + } + return type.cast(new java.util.Date(timestamp.getTime())); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == Array.class) { + if (sqlType == Types.ARRAY) { + return type.cast(getArray(columnIndex)); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } else if (type == SQLXML.class) { + if (sqlType == Types.SQLXML) { + return type.cast(getSQLXML(columnIndex)); + } else { + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + } 
else if (type == UUID.class) { + return type.cast(getObject(columnIndex)); + } else if (type == InetAddress.class) { + String inetText = getString(columnIndex); + if (inetText == null) { + return null; + } + int slash = inetText.indexOf("/"); + try { + return type.cast(InetAddress.getByName(slash < 0 ? inetText : inetText.substring(0, slash))); + } catch (UnknownHostException ex) { + throw new PSQLException(GT.tr("Invalid Inet data."), PSQLState.INVALID_PARAMETER_VALUE, ex); + } + // JSR-310 support + } else if (type == LocalDate.class) { + return type.cast(getLocalDate(columnIndex)); + } else if (type == LocalTime.class) { + return type.cast(getLocalTime(columnIndex)); + } else if (type == LocalDateTime.class) { + return type.cast(getLocalDateTime(columnIndex)); + } else if (type == OffsetDateTime.class) { + return type.cast(getOffsetDateTime(columnIndex)); + } else if (type == OffsetTime.class) { + return type.cast(getOffsetTime(columnIndex)); + } else if (PGobject.class.isAssignableFrom(type)) { + Object object; + if (isBinary(columnIndex)) { + byte[] byteValue = thisRow.get(columnIndex - 1); + object = connection.getObject(getPGType(columnIndex), null, byteValue); + } else { + object = connection.getObject(getPGType(columnIndex), getString(columnIndex), null); + } + return type.cast(object); + } + throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)), + PSQLState.INVALID_PARAMETER_VALUE); + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + return getObject(findColumn(columnLabel), type); + } + + @Override + public Object getObject(String s, Map> map) throws SQLException { + return getObjectImpl(s, map); + } + + @Override + public Object getObject(int i, Map> map) throws SQLException { + return getObjectImpl(i, map); + } + + @Override + public void updateObject(int columnIndex, Object x, SQLType targetSqlType, + int scaleOrLength) throws SQLException { + throw 
Driver.notImplemented(this.getClass(), "updateObject"); + } + + @Override + public void updateObject(String columnLabel, Object x, SQLType targetSqlType, + int scaleOrLength) throws SQLException { + throw Driver.notImplemented(this.getClass(), "updateObject"); + } + + @Override + public void updateObject(int columnIndex, Object x, SQLType targetSqlType) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "updateObject"); + } + + @Override + public void updateObject(String columnLabel, Object x, SQLType targetSqlType) + throws SQLException { + throw Driver.notImplemented(this.getClass(), "updateObject"); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + connection.getLogger().log(Level.FINEST, " getRowId columnIndex: {0}", columnIndex); + throw Driver.notImplemented(this.getClass(), "getRowId(int)"); + } + + @Override + public RowId getRowId(String columnName) throws SQLException { + return getRowId(findColumn(columnName)); + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + throw Driver.notImplemented(this.getClass(), "updateRowId(int, RowId)"); + } + + @Override + public void updateRowId(String columnName, RowId x) throws SQLException { + updateRowId(findColumn(columnName), x); + } + + @Override + public int getHoldability() throws SQLException { + throw Driver.notImplemented(this.getClass(), "getHoldability()"); + } + + @Override + public boolean isClosed() throws SQLException { + return rows == null; + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + throw Driver.notImplemented(this.getClass(), "updateNString(int, String)"); + } + + @Override + public void updateNString(String columnName, String nString) throws SQLException { + updateNString(findColumn(columnName), nString); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + throw Driver.notImplemented(this.getClass(), 
"updateNClob(int, NClob)"); + } + + @Override + public void updateNClob(String columnName, NClob nClob) throws SQLException { + updateNClob(findColumn(columnName), nClob); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + throw Driver.notImplemented(this.getClass(), "updateNClob(int, Reader)"); + } + + @Override + public void updateNClob(String columnName, Reader reader) throws SQLException { + updateNClob(findColumn(columnName), reader); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + throw Driver.notImplemented(this.getClass(), "updateNClob(int, Reader, long)"); + } + + @Override + public void updateNClob(String columnName, Reader reader, long length) throws SQLException { + updateNClob(findColumn(columnName), reader, length); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + connection.getLogger().log(Level.FINEST, " getNClob columnIndex: {0}", columnIndex); + throw Driver.notImplemented(this.getClass(), "getNClob(int)"); + } + + @Override + public NClob getNClob(String columnName) throws SQLException { + return getNClob(findColumn(columnName)); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) + throws SQLException { + throw Driver.notImplemented(this.getClass(), + "updateBlob(int, InputStream, long)"); + } + + @Override + public void updateBlob(String columnName, InputStream inputStream, long length) + throws SQLException { + updateBlob(findColumn(columnName), inputStream, length); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + throw Driver.notImplemented(this.getClass(), "updateBlob(int, InputStream)"); + } + + @Override + public void updateBlob(String columnName, InputStream inputStream) throws SQLException { + updateBlob(findColumn(columnName), inputStream); + } + + @Override + public void updateClob(int 
columnIndex, Reader reader, long length) throws SQLException { + throw Driver.notImplemented(this.getClass(), "updateClob(int, Reader, long)"); + } + + @Override + public void updateClob(String columnName, Reader reader, long length) throws SQLException { + updateClob(findColumn(columnName), reader, length); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + throw Driver.notImplemented(this.getClass(), "updateClob(int, Reader)"); + } + + @Override + public void updateClob(String columnName, Reader reader) throws SQLException { + updateClob(findColumn(columnName), reader); + } + + @Override + + public SQLXML getSQLXML(int columnIndex) throws SQLException { + connection.getLogger().log(Level.FINEST, " getSQLXML columnIndex: {0}", columnIndex); + String data = getString(columnIndex); + if (data == null) { + return null; + } + + return new PgSQLXML(connection, data); + } + + @Override + public SQLXML getSQLXML(String columnName) throws SQLException { + return getSQLXML(findColumn(columnName)); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + updateValue(columnIndex, xmlObject); + } + + @Override + public void updateSQLXML(String columnName, SQLXML xmlObject) throws SQLException { + updateSQLXML(findColumn(columnName), xmlObject); + } + + @Override + public String getNString(int columnIndex) throws SQLException { + connection.getLogger().log(Level.FINEST, " getNString columnIndex: {0}", columnIndex); + throw Driver.notImplemented(this.getClass(), "getNString(int)"); + } + + @Override + public String getNString(String columnName) throws SQLException { + return getNString(findColumn(columnName)); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + connection.getLogger().log(Level.FINEST, " getNCharacterStream columnIndex: {0}", columnIndex); + throw Driver.notImplemented(this.getClass(), "getNCharacterStream(int)"); + } + + 
@Override + public Reader getNCharacterStream(String columnName) throws SQLException { + return getNCharacterStream(findColumn(columnName)); + } + + public void updateNCharacterStream(int columnIndex, + Reader x, int length) throws SQLException { + throw Driver.notImplemented(this.getClass(), + "updateNCharacterStream(int, Reader, int)"); + } + + public void updateNCharacterStream(String columnName, + Reader x, int length) throws SQLException { + updateNCharacterStream(findColumn(columnName), x, length); + } + + @Override + public void updateNCharacterStream(int columnIndex, + Reader x) throws SQLException { + throw Driver.notImplemented(this.getClass(), + "updateNCharacterStream(int, Reader)"); + } + + @Override + public void updateNCharacterStream(String columnName, + Reader x) throws SQLException { + updateNCharacterStream(findColumn(columnName), x); + } + + @Override + public void updateNCharacterStream(int columnIndex, + Reader x, long length) throws SQLException { + throw Driver.notImplemented(this.getClass(), + "updateNCharacterStream(int, Reader, long)"); + } + + @Override + public void updateNCharacterStream(String columnName, + Reader x, long length) throws SQLException { + updateNCharacterStream(findColumn(columnName), x, length); + } + + @Override + public void updateCharacterStream(int columnIndex, + Reader reader, long length) + throws SQLException { + throw Driver.notImplemented(this.getClass(), + "updateCharacterStream(int, Reader, long)"); + } + + @Override + public void updateCharacterStream(String columnName, + Reader reader, long length) + throws SQLException { + updateCharacterStream(findColumn(columnName), reader, length); + } + + @Override + public void updateCharacterStream(int columnIndex, + Reader reader) throws SQLException { + throw Driver.notImplemented(this.getClass(), + "updateCharacterStream(int, Reader)"); + } + + @Override + public void updateCharacterStream(String columnName, + Reader reader) throws SQLException { + 
updateCharacterStream(findColumn(columnName), reader); + } + + @Override + public void updateBinaryStream(int columnIndex, + InputStream inputStream, long length) + throws SQLException { + throw Driver.notImplemented(this.getClass(), + "updateBinaryStream(int, InputStream, long)"); + } + + @Override + public void updateBinaryStream(String columnName, + InputStream inputStream, long length) + throws SQLException { + updateBinaryStream(findColumn(columnName), inputStream, length); + } + + @Override + public void updateBinaryStream(int columnIndex, + InputStream inputStream) throws SQLException { + throw Driver.notImplemented(this.getClass(), + "updateBinaryStream(int, InputStream)"); + } + + @Override + public void updateBinaryStream(String columnName, + InputStream inputStream) throws SQLException { + updateBinaryStream(findColumn(columnName), inputStream); + } + + @Override + public void updateAsciiStream(int columnIndex, + InputStream inputStream, long length) + throws SQLException { + throw Driver.notImplemented(this.getClass(), + "updateAsciiStream(int, InputStream, long)"); + } + + @Override + public void updateAsciiStream(String columnName, + InputStream inputStream, long length) + throws SQLException { + updateAsciiStream(findColumn(columnName), inputStream, length); + } + + @Override + public void updateAsciiStream(int columnIndex, + InputStream inputStream) throws SQLException { + throw Driver.notImplemented(this.getClass(), + "updateAsciiStream(int, InputStream)"); + } + + @Override + public void updateAsciiStream(String columnName, + InputStream inputStream) throws SQLException { + updateAsciiStream(findColumn(columnName), inputStream); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isAssignableFrom(getClass()); + } + + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isAssignableFrom(getClass())) { + return iface.cast(this); + } + throw new SQLException("Cannot unwrap to " + 
iface.getName()); + } + + private Calendar getDefaultCalendar() { + if (getTimestampUtils().hasFastDefaultTimeZone()) { + return getTimestampUtils().getSharedCalendar(null); + } + Calendar sharedCalendar = getTimestampUtils().getSharedCalendar(defaultTimeZone); + if (defaultTimeZone == null) { + defaultTimeZone = sharedCalendar.getTimeZone(); + } + return sharedCalendar; + } + + private TimestampUtils getTimestampUtils() { + if (timestampUtils == null) { + timestampUtils = new TimestampUtils(!connection.getQueryExecutor().getIntegerDateTimes(), (Provider) new QueryExecutorTimeZoneProvider(connection.getQueryExecutor())); + } + return timestampUtils; + } + + /** + * This is here to be used by metadata functions + * to make all column labels upper case. + * Because postgres folds columns to lower case in queries it will be easier + * to change the fields after the fact rather than try to coerce all the columns + * to upper case in the queries as this would require surrounding all columns with " and + * escaping them making them even harder to read than they are now. + * @return PgResultSet + */ + protected PgResultSet upperCaseFieldLabels() { + for (Field field: fields ) { + field.upperCaseLabel(); + } + return this; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSetMetaData.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSetMetaData.java new file mode 100644 index 0000000..fe5f98e --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSetMetaData.java @@ -0,0 +1,468 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.PGResultSetMetaData; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.Field; +import org.postgresql.core.ServerVersion; +import org.postgresql.util.GT; +import org.postgresql.util.Gettable; +import org.postgresql.util.GettableHashMap; +import org.postgresql.util.JdbcBlackHole; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; + +public class PgResultSetMetaData implements ResultSetMetaData, PGResultSetMetaData { + protected final BaseConnection connection; + protected final Field[] fields; + + private boolean fieldInfoFetched; + + /** + * Initialise for a result with a tuple set and a field descriptor set + * + * @param connection the connection to retrieve metadata + * @param fields the array of field descriptors + */ + public PgResultSetMetaData(BaseConnection connection, Field[] fields) { + this.connection = connection; + this.fields = fields; + this.fieldInfoFetched = false; + } + + @Override + public int getColumnCount() throws SQLException { + return fields.length; + } + + /** + * {@inheritDoc} + * + *

Reports whether the column is auto-incremented, based on column metadata (an identity attribute + * or a {@code nextval(...)} default expression) fetched lazily from the system catalogs. + * + * @param column the first column is 1, the second is 2... + * @return true if so + * @exception SQLException if a database access error occurs + */ + @Override + public boolean isAutoIncrement(int column) throws SQLException { + fetchFieldMetaData(); + Field field = getField(column); + FieldMetadata metadata = field.getMetadata(); + return metadata != null && metadata.autoIncrement; + } + + /** + * {@inheritDoc} + * + *

Does a column's case matter? ASSUMPTION: Any field that is not obviously case insensitive is + * assumed to be case sensitive + * + * @param column the first column is 1, the second is 2... + * @return true if so + * @exception SQLException if a database access error occurs + */ + @Override + public boolean isCaseSensitive(int column) throws SQLException { + Field field = getField(column); + return connection.getTypeInfo().isCaseSensitive(field.getOID()); + } + + /** + * {@inheritDoc} + * + *

Can the column be used in a WHERE clause? Basically for this, I split the functions into two + * types: recognised types (which are always usable), and OTHER types (which may or may not be + * usable). The OTHER types, for now, I will assume they are usable. We should really query the + * catalog to see if they are usable. + * + * @param column the first column is 1, the second is 2... + * @return true if they can be used in a WHERE clause + * @exception SQLException if a database access error occurs + */ + @Override + public boolean isSearchable(int column) throws SQLException { + return true; + } + + /** + * {@inheritDoc} + * + *

Is the column a cash value? 6.1 introduced the cash/money type, which haven't been incorporated + * as of 970414, so I just check the type name for both 'cash' and 'money' + * + * @param column the first column is 1, the second is 2... + * @return true if its a cash column + * @exception SQLException if a database access error occurs + */ + @Override + public boolean isCurrency(int column) throws SQLException { + String typeName = getPGType(column); + + return "cash".equals(typeName) || "money".equals(typeName); + } + + @Override + public int isNullable(int column) throws SQLException { + fetchFieldMetaData(); + Field field = getField(column); + FieldMetadata metadata = field.getMetadata(); + return metadata == null ? ResultSetMetaData.columnNullable : metadata.nullable; + } + + /** + * {@inheritDoc} + * + *

Is the column a signed number? In PostgreSQL, all numbers are signed, so this is trivial. + * However, strings are not signed (duh!) + * + * @param column the first column is 1, the second is 2... + * @return true if so + * @exception SQLException if a database access error occurs + */ + @Override + public boolean isSigned(int column) throws SQLException { + Field field = getField(column); + return connection.getTypeInfo().isSigned(field.getOID()); + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + Field field = getField(column); + return connection.getTypeInfo().getDisplaySize(field.getOID(), field.getMod()); + } + + @Override + public String getColumnLabel(int column) throws SQLException { + Field field = getField(column); + return field.getColumnLabel(); + } + + @Override + public String getColumnName(int column) throws SQLException { + return getColumnLabel(column); + } + + @Override + public String getBaseColumnName(int column) throws SQLException { + Field field = getField(column); + if (field.getTableOid() == 0) { + return ""; + } + fetchFieldMetaData(); + FieldMetadata metadata = field.getMetadata(); + return metadata == null ? 
"" : metadata.columnName; + } + + @Override + public String getSchemaName(int column) throws SQLException { + return ""; + } + + private boolean populateFieldsWithMetadata(Gettable metadata) { + boolean allOk = true; + for (Field field : fields) { + if (field.getMetadata() != null) { + // No need to update metadata + continue; + } + + final FieldMetadata fieldMetadata = + metadata.get(new FieldMetadata.Key(field.getTableOid(), field.getPositionInTable())); + if (fieldMetadata == null) { + allOk = false; + } else { + field.setMetadata(fieldMetadata); + } + } + fieldInfoFetched |= allOk; + return allOk; + } + + private void fetchFieldMetaData() throws SQLException { + if (fieldInfoFetched) { + return; + } + + if (populateFieldsWithMetadata(connection.getFieldMetadataCache())) { + return; + } + + StringBuilder sql = new StringBuilder( + "SELECT c.oid, a.attnum, a.attname, c.relname, n.nspname, " + + "a.attnotnull OR (t.typtype = 'd' AND t.typnotnull), "); + + if ( connection.haveMinimumServerVersion(ServerVersion.v10)) { + sql.append("a.attidentity != '' OR pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%nextval(%' "); + } else { + sql.append("pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%nextval(%' "); + } + sql.append("FROM pg_catalog.pg_class c " + + "JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) " + + "JOIN pg_catalog.pg_attribute a ON (c.oid = a.attrelid) " + + "JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) " + + "LEFT JOIN pg_catalog.pg_attrdef d ON (d.adrelid = a.attrelid AND d.adnum = a.attnum) " + + "JOIN ("); + + // 7.4 servers don't support row IN operations (a,b) IN ((c,d),(e,f)) + // so we've got to fake that with a JOIN here. 
+ // + boolean hasSourceInfo = false; + for (Field field : fields) { + if (field.getMetadata() != null) { + continue; + } + + if (hasSourceInfo) { + sql.append(" UNION ALL "); + } + + sql.append("SELECT "); + sql.append(field.getTableOid()); + if (!hasSourceInfo) { + sql.append(" AS oid "); + } + sql.append(", "); + sql.append(field.getPositionInTable()); + if (!hasSourceInfo) { + sql.append(" AS attnum"); + } + + if (!hasSourceInfo) { + hasSourceInfo = true; + } + } + sql.append(") vals ON (c.oid = vals.oid AND a.attnum = vals.attnum) "); + + if (!hasSourceInfo) { + fieldInfoFetched = true; + return; + } + + Statement stmt = connection.createStatement(); + ResultSet rs = null; + GettableHashMap md = new GettableHashMap<>(); + try { + rs = stmt.executeQuery(sql.toString()); + while (rs.next()) { + int table = (int) rs.getLong(1); + int column = (int) rs.getLong(2); + String columnName = rs.getString(3); + String tableName = rs.getString(4); + String schemaName = rs.getString(5); + int nullable = + rs.getBoolean(6) ? ResultSetMetaData.columnNoNulls : ResultSetMetaData.columnNullable; + boolean autoIncrement = rs.getBoolean(7); + FieldMetadata fieldMetadata = + new FieldMetadata(columnName, tableName, schemaName, nullable, autoIncrement); + FieldMetadata.Key key = new FieldMetadata.Key(table, column); + md.put(key, fieldMetadata); + } + } finally { + JdbcBlackHole.close(rs); + JdbcBlackHole.close(stmt); + } + populateFieldsWithMetadata(md); + connection.getFieldMetadataCache().putAll(md); + } + + @Override + public String getBaseSchemaName(int column) throws SQLException { + fetchFieldMetaData(); + Field field = getField(column); + FieldMetadata metadata = field.getMetadata(); + return metadata == null ? 
"" : metadata.schemaName; + } + + @Override + public int getPrecision(int column) throws SQLException { + Field field = getField(column); + return connection.getTypeInfo().getPrecision(field.getOID(), field.getMod()); + } + + @Override + public int getScale(int column) throws SQLException { + Field field = getField(column); + return connection.getTypeInfo().getScale(field.getOID(), field.getMod()); + } + + @Override + public String getTableName(int column) throws SQLException { + return getBaseTableName(column); + } + + @Override + public String getBaseTableName(int column) throws SQLException { + fetchFieldMetaData(); + Field field = getField(column); + FieldMetadata metadata = field.getMetadata(); + return metadata == null ? "" : metadata.tableName; + } + + /** + * {@inheritDoc} + * + *

As with getSchemaName(), we can say that if + * getTableName() returns n/a, then we can too - otherwise, we need to work on it. + * + * @param column the first column is 1, the second is 2... + * @return catalog name, or "" if not applicable + * @exception SQLException if a database access error occurs + */ + @Override + public String getCatalogName(int column) throws SQLException { + return ""; + } + + @Override + public int getColumnType(int column) throws SQLException { + return getSQLType(column); + } + + @Override + public int getFormat(int column) throws SQLException { + return getField(column).getFormat(); + } + + @Override + public String getColumnTypeName(int column) throws SQLException { + String type = getPGType(column); + if (isAutoIncrement(column)) { + if ("int4".equals(type)) { + return "serial"; + } else if ("int8".equals(type)) { + return "bigserial"; + } else if ("int2".equals(type) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) { + return "smallserial"; + } + } + + return type; + } + + /** + * {@inheritDoc} + * + *

In reality, we would have to check the GRANT/REVOKE + * stuff for this to be effective, and I haven't really looked into that yet, so this will get + * re-visited. + * + * @param column the first column is 1, the second is 2, etc.* + * @return true if so* + * @exception SQLException if a database access error occurs + */ + @Override + public boolean isReadOnly(int column) throws SQLException { + return false; + } + + /** + * {@inheritDoc} + * + *

In reality we would have to check + * the GRANT/REVOKE privileges, which has not been implemented yet. However, if it isn't ReadOnly, then + * it is obviously writable. + * + * @param column the first column is 1, the second is 2, etc. + * @return true if so + * @exception SQLException if a database access error occurs + */ + @Override + public boolean isWritable(int column) throws SQLException { + return !isReadOnly(column); + } + + /** + * {@inheritDoc} + * + *

Since the two preceding accessibility checks (isReadOnly/isWritable) are not + * really implemented, no definite answer can be given; this implementation + * conservatively returns false. + * + * @param column the first column is 1, the second is 2, etc. + * @return true if so + * @exception SQLException if a database access error occurs + */ + @Override + public boolean isDefinitelyWritable(int column) throws SQLException { + return false; + } + + // ******************************************************** + // END OF PUBLIC INTERFACE + // ******************************************************** + + /** + * For several routines in this package, we need to convert a columnIndex into a Field[] + * descriptor. Rather than do the same code several times, here it is. + * + * @param columnIndex the first column is 1, the second is 2... + * @return the Field description + * @exception SQLException if a database access error occurs + */ + protected Field getField(int columnIndex) throws SQLException { + if (columnIndex < 1 || columnIndex > fields.length) { + throw new PSQLException( + GT.tr("The column index is out of range: {0}, number of columns: {1}.", + columnIndex, fields.length), + PSQLState.INVALID_PARAMETER_VALUE); + } + return fields[columnIndex - 1]; + } + + protected String getPGType(int columnIndex) throws SQLException { + return connection.getTypeInfo().getPGType(getField(columnIndex).getOID()); + } + + protected int getSQLType(int columnIndex) throws SQLException { + return connection.getTypeInfo().getSQLType(getField(columnIndex).getOID()); + } + + // ** JDBC 2 Extensions ** + + // This can hook into our PG_Object mechanism + + @Override + public String getColumnClassName(int column) throws SQLException { + Field field = getField(column); + String result = connection.getTypeInfo().getJavaClass(field.getOID()); + + if (result != null) { + return result; + } + + int sqlType = getSQLType(column); + if (sqlType == Types.ARRAY) { + return "java.sql.Array"; + } 
else { + String type = getPGType(column); + if ("unknown".equals(type)) { + return "java.lang.String"; + } + return "java.lang.Object"; + } + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isAssignableFrom(getClass()); + } + + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isAssignableFrom(getClass())) { + return iface.cast(this); + } + throw new SQLException("Cannot unwrap to " + iface.getName()); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgSQLXML.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgSQLXML.java new file mode 100644 index 0000000..a904d76 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgSQLXML.java @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.BaseConnection; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.xml.DefaultPGXmlFactoryFactory; +import org.postgresql.xml.PGXmlFactoryFactory; + +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.Reader; +import java.io.StringReader; +import java.io.StringWriter; +import java.io.Writer; +import java.sql.SQLException; +import java.sql.SQLXML; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.stream.XMLInputFactory; +import javax.xml.stream.XMLOutputFactory; +import javax.xml.stream.XMLStreamException; +import javax.xml.stream.XMLStreamReader; +import javax.xml.stream.XMLStreamWriter; +import javax.xml.transform.Result; +import javax.xml.transform.Source; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerException; 
+import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMResult; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.sax.SAXResult; +import javax.xml.transform.sax.SAXSource; +import javax.xml.transform.sax.SAXTransformerFactory; +import javax.xml.transform.sax.TransformerHandler; +import javax.xml.transform.stax.StAXResult; +import javax.xml.transform.stax.StAXSource; +import javax.xml.transform.stream.StreamResult; +import javax.xml.transform.stream.StreamSource; + +@SuppressWarnings("try") +public class PgSQLXML implements SQLXML { + + private final ResourceLock lock = new ResourceLock(); + private final BaseConnection conn; + private String data; // The actual data contained. + private boolean initialized; // Has someone assigned the data for this object? + private boolean active; // Is anyone in the process of loading data into us? + private boolean freed; + + private ByteArrayOutputStream byteArrayOutputStream; + private StringWriter stringWriter; + private DOMResult domResult; + + public PgSQLXML(BaseConnection conn) { + this(conn, null, false); + } + + public PgSQLXML(BaseConnection conn, String data) { + this(conn, data, true); + } + + private PgSQLXML(BaseConnection conn, String data, boolean initialized) { + this.conn = conn; + this.data = data; + this.initialized = initialized; + this.active = false; + this.freed = false; + } + + private PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException { + if (conn != null) { + return conn.getXmlFactoryFactory(); + } + return DefaultPGXmlFactoryFactory.INSTANCE; + } + + @Override + public void free() { + try (ResourceLock ignore = lock.obtain()) { + freed = true; + data = null; + } + } + + @Override + public InputStream getBinaryStream() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + ensureInitialized(); + + if (data == null) { + return null; + } + + try { + return new ByteArrayInputStream(conn.getEncoding().encode(data)); 
+ } catch (IOException ioe) { + // This should be a can't happen exception. We just + // decoded this data, so it would be surprising that + // we couldn't encode it. + // For this reason don't make it translatable. + throw new PSQLException("Failed to re-encode xml data.", PSQLState.DATA_ERROR, ioe); + } + } + } + + @Override + public Reader getCharacterStream() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + ensureInitialized(); + + if (data == null) { + return null; + } + + return new StringReader(data); + } + } + + // We must implement this unsafely because that's what the + // interface requires. Because it says we're returning T + // which is unknown, none of the return values can satisfy it + // as Java isn't going to understand the if statements that + // ensure they are the same. + // + @SuppressWarnings("unchecked") + @Override + public T getSource(Class sourceClass) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + ensureInitialized(); + + String data = this.data; + if (data == null) { + return null; + } + + try { + if (sourceClass == null || DOMSource.class.equals(sourceClass)) { + DocumentBuilder builder = getXmlFactoryFactory().newDocumentBuilder(); + InputSource input = new InputSource(new StringReader(data)); + DOMSource domSource = new DOMSource(builder.parse(input)); + return (T) domSource; + } else if (SAXSource.class.equals(sourceClass)) { + XMLReader reader = getXmlFactoryFactory().createXMLReader(); + InputSource is = new InputSource(new StringReader(data)); + return sourceClass.cast(new SAXSource(reader, is)); + } else if (StreamSource.class.equals(sourceClass)) { + return sourceClass.cast(new StreamSource(new StringReader(data))); + } else if (StAXSource.class.equals(sourceClass)) { + XMLInputFactory xif = getXmlFactoryFactory().newXMLInputFactory(); + XMLStreamReader xsr = xif.createXMLStreamReader(new StringReader(data)); + return sourceClass.cast(new 
StAXSource(xsr)); + } + } catch (Exception e) { + throw new PSQLException(GT.tr("Unable to decode xml data."), PSQLState.DATA_ERROR, e); + } + + throw new PSQLException(GT.tr("Unknown XML Source class: {0}", sourceClass), + PSQLState.INVALID_PARAMETER_TYPE); + } + } + + @Override + public String getString() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + ensureInitialized(); + return data; + } + } + + @Override + public OutputStream setBinaryStream() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + initialize(); + active = true; + byteArrayOutputStream = new ByteArrayOutputStream(); + return byteArrayOutputStream; + } + } + + @Override + public Writer setCharacterStream() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + initialize(); + active = true; + stringWriter = new StringWriter(); + return stringWriter; + } + } + + @SuppressWarnings("unchecked") + @Override + public T setResult(Class resultClass) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + initialize(); + + if (resultClass == null || DOMResult.class.equals(resultClass)) { + domResult = new DOMResult(); + active = true; + return (T) domResult; + } else if (SAXResult.class.equals(resultClass)) { + try { + SAXTransformerFactory transformerFactory = getXmlFactoryFactory().newSAXTransformerFactory(); + TransformerHandler transformerHandler = transformerFactory.newTransformerHandler(); + stringWriter = new StringWriter(); + transformerHandler.setResult(new StreamResult(stringWriter)); + active = true; + return resultClass.cast(new SAXResult(transformerHandler)); + } catch (TransformerException te) { + throw new PSQLException(GT.tr("Unable to create SAXResult for SQLXML."), + PSQLState.UNEXPECTED_ERROR, te); + } + } else if (StreamResult.class.equals(resultClass)) { + stringWriter = new StringWriter(); + active = true; + return resultClass.cast(new 
StreamResult(stringWriter)); + } else if (StAXResult.class.equals(resultClass)) { + StringWriter stringWriter = new StringWriter(); + this.stringWriter = stringWriter; + try { + XMLOutputFactory xof = getXmlFactoryFactory().newXMLOutputFactory(); + XMLStreamWriter xsw = xof.createXMLStreamWriter(stringWriter); + active = true; + return resultClass.cast(new StAXResult(xsw)); + } catch (XMLStreamException xse) { + throw new PSQLException(GT.tr("Unable to create StAXResult for SQLXML"), + PSQLState.UNEXPECTED_ERROR, xse); + } + } + + throw new PSQLException(GT.tr("Unknown XML Result class: {0}", resultClass), + PSQLState.INVALID_PARAMETER_TYPE); + } + } + + @Override + public void setString(String value) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkFreed(); + initialize(); + data = value; + } + } + + private void checkFreed() throws SQLException { + if (freed) { + throw new PSQLException(GT.tr("This SQLXML object has already been freed."), + PSQLState.OBJECT_NOT_IN_STATE); + } + } + + private void ensureInitialized() throws SQLException { + if (!initialized) { + throw new PSQLException( + GT.tr( + "This SQLXML object has not been initialized, so you cannot retrieve data from it."), + PSQLState.OBJECT_NOT_IN_STATE); + } + + // Is anyone loading data into us at the moment? + if (!active) { + return; + } + + if (byteArrayOutputStream != null) { + try { + data = conn.getEncoding().decode(byteArrayOutputStream.toByteArray()); + } catch (IOException ioe) { + throw new PSQLException(GT.tr("Failed to convert binary xml data to encoding: {0}.", + conn.getEncoding().name()), PSQLState.DATA_ERROR, ioe); + } finally { + byteArrayOutputStream = null; + active = false; + } + } else if (stringWriter != null) { + // This is also handling the work for Stream, SAX, and StAX Results + // as they will use the same underlying stringwriter variable. 
+ // + data = stringWriter.toString(); + stringWriter = null; + active = false; + } else if (domResult != null) { + DOMResult domResult = this.domResult; + // Copy the content from the result to a source + // and use the identify transform to get it into a + // friendlier result format. + try { + TransformerFactory factory = getXmlFactoryFactory().newTransformerFactory(); + Transformer transformer = factory.newTransformer(); + DOMSource domSource = new DOMSource(domResult.getNode()); + StringWriter stringWriter = new StringWriter(); + StreamResult streamResult = new StreamResult(stringWriter); + transformer.transform(domSource, streamResult); + data = stringWriter.toString(); + } catch (TransformerException te) { + throw new PSQLException(GT.tr("Unable to convert DOMResult SQLXML data to a string."), + PSQLState.DATA_ERROR, te); + } finally { + domResult = null; + active = false; + } + } + } + + private void initialize() throws SQLException { + if (initialized) { + throw new PSQLException( + GT.tr( + "This SQLXML object has already been initialized, so you cannot manipulate it further."), + PSQLState.OBJECT_NOT_IN_STATE); + } + initialized = true; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgStatement.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgStatement.java new file mode 100644 index 0000000..e27c6ad --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgStatement.java @@ -0,0 +1,1372 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.Driver; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.BaseStatement; +import org.postgresql.core.CachedQuery; +import org.postgresql.core.Field; +import org.postgresql.core.ParameterList; +import org.postgresql.core.Query; +import org.postgresql.core.QueryExecutor; +import org.postgresql.core.ResultCursor; +import org.postgresql.core.ResultHandlerBase; +import org.postgresql.core.SqlCommand; +import org.postgresql.core.Tuple; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.TimerTask; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; + +@SuppressWarnings("try") +public class PgStatement implements Statement, BaseStatement { + private static final String[] NO_RETURNING_COLUMNS = new String[0]; + + /** + * Default state for use or not binary transfers. Can use only for testing purposes + */ + private static final boolean DEFAULT_FORCE_BINARY_TRANSFERS = + Boolean.getBoolean("org.postgresql.forceBinary"); + // only for testing purposes. even single shot statements will use binary transfers + private boolean forceBinaryTransfers = DEFAULT_FORCE_BINARY_TRANSFERS; + + protected final ResourceLock lock = new ResourceLock(); + protected ArrayList batchStatements; + protected ArrayList batchParameters; + protected final int resultsettype; // the resultset type to return (ResultSet.TYPE_xxx) + protected final int concurrency; // is it updateable or not? 
(ResultSet.CONCUR_xxx) + private final int rsHoldability; + private boolean poolable; + private boolean closeOnCompletion; + protected int fetchdirection = ResultSet.FETCH_FORWARD; + // fetch direction hint (currently ignored) + + /** + * Protects current statement from cancelTask starting, waiting for a bit, and waking up exactly + * on subsequent query execution. The idea is to atomically compare and swap the reference to the + * task, so the task can detect that statement executes different query than the one the + * cancelTask was created. Note: the field must be set/get/compareAndSet via + * {@link #CANCEL_TIMER_UPDATER} as per {@link AtomicReferenceFieldUpdater} javadoc. + */ + private volatile TimerTask cancelTimerTask; + + private static final AtomicReferenceFieldUpdater CANCEL_TIMER_UPDATER = + AtomicReferenceFieldUpdater.newUpdater( + PgStatement.class, TimerTask.class, "cancelTimerTask"); + + /** + * Protects statement from out-of-order cancels. It protects from both + * {@link #setQueryTimeout(int)} and {@link #cancel()} induced ones. + * + * {@link #execute(String)} and friends change the field to + * {@link StatementCancelState#IN_QUERY} during execute. {@link #cancel()} + * ignores cancel request if state is {@link StatementCancelState#IDLE}. + * In case {@link #execute(String)} observes non-{@link StatementCancelState#IDLE} state as it + * completes the query, it waits till {@link StatementCancelState#CANCELLED}. Note: the field must be + * set/get/compareAndSet via {@link #STATE_UPDATER} as per {@link AtomicIntegerFieldUpdater} + * javadoc. + */ + private volatile StatementCancelState statementState = StatementCancelState.IDLE; + + private static final AtomicReferenceFieldUpdater STATE_UPDATER = + AtomicReferenceFieldUpdater.newUpdater(PgStatement.class, StatementCancelState.class, "statementState"); + + /** + * Does the caller of execute/executeUpdate want generated keys for this execution? 
This is set by + * Statement methods that have generated keys arguments and cleared after execution is complete. + */ + protected boolean wantsGeneratedKeysOnce; + + /** + * Was this PreparedStatement created to return generated keys for every execution? This is set at + * creation time and never cleared by execution. + */ + public boolean wantsGeneratedKeysAlways; + + // The connection who created us + protected final PgConnection connection; + + /** + * The warnings chain. + */ + protected volatile PSQLWarningWrapper warnings; + + /** + * Maximum number of rows to return, 0 = unlimited. + */ + protected int maxrows; + + /** + * Number of rows to get in a batch. + */ + protected int fetchSize; + + /** + * Timeout (in milliseconds) for a query. + */ + protected long timeout; + + protected boolean replaceProcessingEnabled = true; + + /** + * The current results. + */ + protected ResultWrapper result; + + /** + * The first unclosed result. + */ + protected ResultWrapper firstUnclosedResult; + + /** + * Results returned by a statement that wants generated keys. 
+ */ + protected ResultWrapper generatedKeys; + + protected int mPrepareThreshold; // Reuse threshold to enable use of PREPARE + + protected int maxFieldSize; + + protected boolean adaptiveFetch; + + private TimestampUtils timestampUtils; // our own Object because it's not thread safe + + PgStatement(PgConnection c, int rsType, int rsConcurrency, int rsHoldability) + throws SQLException { + this.connection = c; + forceBinaryTransfers |= c.getForceBinary(); + // validation check for allowed values of resultset type + if (rsType != ResultSet.TYPE_FORWARD_ONLY && rsType != ResultSet.TYPE_SCROLL_INSENSITIVE && rsType != ResultSet.TYPE_SCROLL_SENSITIVE) { + throw new PSQLException(GT.tr("Unknown value for ResultSet type"), + PSQLState.INVALID_PARAMETER_VALUE); + } + resultsettype = rsType; + // validation check for allowed values of resultset concurrency + if (rsConcurrency != ResultSet.CONCUR_READ_ONLY && rsConcurrency != ResultSet.CONCUR_UPDATABLE) { + throw new PSQLException(GT.tr("Unknown value for ResultSet concurrency"), + PSQLState.INVALID_PARAMETER_VALUE); + } + concurrency = rsConcurrency; + setFetchSize(c.getDefaultFetchSize()); + setPrepareThreshold(c.getPrepareThreshold()); + setAdaptiveFetch(c.getAdaptiveFetch()); + // validation check for allowed values of resultset holdability + if (rsHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT && rsHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) { + throw new PSQLException(GT.tr("Unknown value for ResultSet holdability"), + PSQLState.INVALID_PARAMETER_VALUE); + } + this.rsHoldability = rsHoldability; + } + + @Override + public ResultSet createResultSet(Query originalQuery, Field[] fields, List tuples, + ResultCursor cursor) throws SQLException { + PgResultSet newResult = new PgResultSet(originalQuery, this, fields, tuples, cursor, + getMaxRows(), getMaxFieldSize(), getResultSetType(), getResultSetConcurrency(), + getResultSetHoldability(), getAdaptiveFetch()); + newResult.setFetchSize(getFetchSize()); + 
newResult.setFetchDirection(getFetchDirection()); + return newResult; + } + + public BaseConnection getPGConnection() { + return connection; + } + + public String getFetchingCursorName() { + return null; + } + + @Override + public int getFetchSize() { + return fetchSize; + } + + protected boolean wantsScrollableResultSet() { + return resultsettype != ResultSet.TYPE_FORWARD_ONLY; + } + + protected boolean wantsHoldableResultSet() { + // FIXME: false if not supported + return rsHoldability == ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + /** + * ResultHandler implementations for updates, queries, and either-or. + */ + public class StatementResultHandler extends ResultHandlerBase { + private ResultWrapper results; + private ResultWrapper lastResult; + + public StatementResultHandler() { + } + + ResultWrapper getResults() { + return results; + } + + private void append(ResultWrapper newResult) { + if (results == null) { + lastResult = results = newResult; + } else { + lastResult.append(newResult); + } + } + + @Override + public void handleResultRows(Query fromQuery, Field[] fields, List tuples, + ResultCursor cursor) { + try { + ResultSet rs = PgStatement.this.createResultSet(fromQuery, fields, tuples, cursor); + append(new ResultWrapper(rs)); + } catch (SQLException e) { + handleError(e); + } + } + + @Override + public void handleCommandStatus(String status, long updateCount, long insertOID) { + append(new ResultWrapper(updateCount, insertOID)); + } + + @Override + public void handleWarning(SQLWarning warning) { + PgStatement.this.addWarning(warning); + } + + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (!executeWithFlags(sql, 0)) { + throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA); + } + + return getSingleResultSet(); + } + } + + protected ResultSet getSingleResultSet() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + 
checkClosed(); + ResultWrapper result = this.result; + if (result.getNext() != null) { + throw new PSQLException(GT.tr("Multiple ResultSets were returned by the query."), + PSQLState.TOO_MANY_RESULTS); + } + + return result.getResultSet(); + } + } + + @Override + public int executeUpdate(String sql) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + executeWithFlags(sql, QueryExecutor.QUERY_NO_RESULTS); + checkNoResultUpdate(); + return getUpdateCount(); + } + } + + protected final void checkNoResultUpdate() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + ResultWrapper iter = result; + while (iter != null) { + if (iter.getResultSet() != null) { + throw new PSQLException(GT.tr("A result was returned when none was expected."), + PSQLState.TOO_MANY_RESULTS); + } + iter = iter.getNext(); + } + } + } + + @Override + public boolean execute(String sql) throws SQLException { + return executeWithFlags(sql, 0); + } + + @Override + public boolean executeWithFlags(String sql, int flags) throws SQLException { + return executeCachedSql(sql, flags, NO_RETURNING_COLUMNS); + } + + private boolean executeCachedSql(String sql, int flags, + String [] columnNames) throws SQLException { + PreferQueryMode preferQueryMode = connection.getPreferQueryMode(); + // Simple statements should not replace ?, ? 
with $1, $2 + boolean shouldUseParameterized = false; + QueryExecutor queryExecutor = connection.getQueryExecutor(); + Object key = queryExecutor + .createQueryKey(sql, replaceProcessingEnabled, shouldUseParameterized, columnNames); + CachedQuery cachedQuery; + boolean shouldCache = preferQueryMode == PreferQueryMode.EXTENDED_CACHE_EVERYTHING; + if (shouldCache) { + cachedQuery = queryExecutor.borrowQueryByKey(key); + } else { + cachedQuery = queryExecutor.createQueryByKey(key); + } + if (wantsGeneratedKeysOnce) { + SqlCommand sqlCommand = cachedQuery.query.getSqlCommand(); + wantsGeneratedKeysOnce = sqlCommand != null && sqlCommand.isReturningKeywordPresent(); + } + boolean res; + try { + res = executeWithFlags(cachedQuery, flags); + } finally { + if (shouldCache) { + queryExecutor.releaseQuery(cachedQuery); + } + } + return res; + } + + @Override + public boolean executeWithFlags(CachedQuery simpleQuery, int flags) throws SQLException { + checkClosed(); + if (connection.getPreferQueryMode().compareTo(PreferQueryMode.EXTENDED) < 0) { + flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE; + } + execute(simpleQuery, null, flags); + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + return result != null && result.getResultSet() != null; + } + } + + @Override + public boolean executeWithFlags(int flags) throws SQLException { + checkClosed(); + throw new PSQLException(GT.tr("Can''t use executeWithFlags(int) on a Statement."), + PSQLState.WRONG_OBJECT_TYPE); + } + + /* + If there are multiple result sets we close any that have been processed and left open + by the client. 
+ */ + private void closeUnclosedProcessedResults() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + ResultWrapper resultWrapper = this.firstUnclosedResult; + ResultWrapper currentResult = this.result; + for (; resultWrapper != currentResult && resultWrapper != null; + resultWrapper = resultWrapper.getNext()) { + PgResultSet rs = (PgResultSet) resultWrapper.getResultSet(); + if (rs != null) { + rs.closeInternally(); + } + } + firstUnclosedResult = resultWrapper; + } + } + + protected void closeForNextExecution() throws SQLException { + + // Every statement execution clears any previous warnings. + clearWarnings(); + + // Close any existing resultsets associated with this statement. + try (ResourceLock ignore = lock.obtain()) { + closeUnclosedProcessedResults(); + + if ( this.result != null && this.result.getResultSet() != null ) { + this.result.getResultSet().close(); + } + result = null; + + ResultWrapper generatedKeys = this.generatedKeys; + if (generatedKeys != null) { + ResultSet resultSet = generatedKeys.getResultSet(); + if (resultSet != null) { + resultSet.close(); + } + this.generatedKeys = null; + } + } + } + + /** + * Returns true if query is unlikely to be reused. 
+ * + * @param cachedQuery to check (null if current query) + * @return true if query is unlikely to be reused + */ + protected boolean isOneShotQuery(CachedQuery cachedQuery) { + if (cachedQuery == null) { + return true; + } + cachedQuery.increaseExecuteCount(); + return (mPrepareThreshold == 0 || cachedQuery.getExecuteCount() < mPrepareThreshold) + && !getForceBinaryTransfer(); + } + + protected final void execute(CachedQuery cachedQuery, + ParameterList queryParameters, int flags) + throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + try { + executeInternal(cachedQuery, queryParameters, flags); + } catch (SQLException e) { + // Don't retry composite queries as it might get partially executed + if (cachedQuery.query.getSubqueries() != null + || !connection.getQueryExecutor().willHealOnRetry(e)) { + throw e; + } + cachedQuery.query.close(); + // Execute the query one more time + executeInternal(cachedQuery, queryParameters, flags); + } + } + } + + private void executeInternal(CachedQuery cachedQuery, + ParameterList queryParameters, int flags) + throws SQLException { + closeForNextExecution(); + + // Enable cursor-based resultset if possible. + if (fetchSize > 0 && !wantsScrollableResultSet() && !connection.getAutoCommit() + && !wantsHoldableResultSet()) { + flags |= QueryExecutor.QUERY_FORWARD_CURSOR; + } + + if (wantsGeneratedKeysOnce || wantsGeneratedKeysAlways) { + flags |= QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS; + + // If the no results flag is set (from executeUpdate) + // clear it so we get the generated keys results. + // + if ((flags & QueryExecutor.QUERY_NO_RESULTS) != 0) { + flags &= ~(QueryExecutor.QUERY_NO_RESULTS); + } + } + + // Only use named statements after we hit the threshold. Note that only + // named statements can be transferred in binary format. 
+ // isOneShotQuery will check to see if we have hit the prepareThreshold count + + if (isOneShotQuery(cachedQuery)) { + flags |= QueryExecutor.QUERY_ONESHOT; + } + + if (connection.getAutoCommit()) { + flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN; + } + if (connection.hintReadOnly()) { + flags |= QueryExecutor.QUERY_READ_ONLY_HINT; + } + + // updateable result sets do not yet support binary updates + if (concurrency != ResultSet.CONCUR_READ_ONLY) { + flags |= QueryExecutor.QUERY_NO_BINARY_TRANSFER; + } + + Query queryToExecute = cachedQuery.query; + + if (queryToExecute.isEmpty()) { + flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN; + } + + if (!queryToExecute.isStatementDescribed() && forceBinaryTransfers + && (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) == 0) { + // Simple 'Q' execution does not need to know parameter types + // When binaryTransfer is forced, then we need to know resulting parameter and column types, + // thus sending a describe request. + int flags2 = flags | QueryExecutor.QUERY_DESCRIBE_ONLY; + StatementResultHandler handler2 = new StatementResultHandler(); + connection.getQueryExecutor().execute(queryToExecute, queryParameters, handler2, 0, 0, + flags2); + ResultWrapper result2 = handler2.getResults(); + if (result2 != null) { + result2.getResultSet().close(); + } + } + + StatementResultHandler handler = new StatementResultHandler(); + try (ResourceLock ignore = lock.obtain()) { + result = null; + } + try { + startTimer(); + connection.getQueryExecutor().execute(queryToExecute, queryParameters, handler, maxrows, + fetchSize, flags, adaptiveFetch); + } finally { + killTimerTask(); + } + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + + ResultWrapper currentResult = handler.getResults(); + result = firstUnclosedResult = currentResult; + + if (wantsGeneratedKeysOnce || wantsGeneratedKeysAlways) { + generatedKeys = currentResult; + result = currentResult.getNext(); + + if (wantsGeneratedKeysOnce) { + wantsGeneratedKeysOnce = false; + 
} + } + } + } + + @Override + public void setCursorName(String name) throws SQLException { + checkClosed(); + // No-op. + } + + private volatile int isClosed; + private static final AtomicIntegerFieldUpdater IS_CLOSED_UPDATER = + AtomicIntegerFieldUpdater.newUpdater( + PgStatement.class, "isClosed"); + + @Override + public int getUpdateCount() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + if (result == null || result.getResultSet() != null) { + return -1; + } + + long count = result.getUpdateCount(); + return count > Integer.MAX_VALUE ? Statement.SUCCESS_NO_INFO : (int) count; + } + } + + @Override + public boolean getMoreResults() throws SQLException { + return getMoreResults(CLOSE_ALL_RESULTS); + } + + @Override + public int getMaxRows() throws SQLException { + checkClosed(); + return maxrows; + } + + @Override + public void setMaxRows(int max) throws SQLException { + checkClosed(); + if (max < 0) { + throw new PSQLException( + GT.tr("Maximum number of rows must be a value greater than or equal to 0."), + PSQLState.INVALID_PARAMETER_VALUE); + } + maxrows = max; + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + checkClosed(); + replaceProcessingEnabled = enable; + } + + @Override + public int getQueryTimeout() throws SQLException { + checkClosed(); + long seconds = timeout / 1000; + if (seconds >= Integer.MAX_VALUE) { + return Integer.MAX_VALUE; + } + return (int) seconds; + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + setQueryTimeoutMs(seconds * 1000L); + } + + /** + * The queryTimeout limit is the number of milliseconds the driver will wait for a Statement to + * execute. If the limit is exceeded, a SQLException is thrown. 
+ * + * @return the current query timeout limit in milliseconds; 0 = unlimited + * @throws SQLException if a database access error occurs + */ + public long getQueryTimeoutMs() throws SQLException { + checkClosed(); + return timeout; + } + + /** + * Sets the queryTimeout limit. + * + * @param millis - the new query timeout limit in milliseconds + * @throws SQLException if a database access error occurs + */ + public void setQueryTimeoutMs(long millis) throws SQLException { + checkClosed(); + + if (millis < 0) { + throw new PSQLException(GT.tr("Query timeout must be a value greater than or equals to 0."), + PSQLState.INVALID_PARAMETER_VALUE); + } + timeout = millis; + } + + /** + *

Either initializes new warning wrapper, or adds warning onto the chain.

+ * + *

Although warnings are expected to be added sequentially, the warnings chain may be cleared + * concurrently at any time via {@link #clearWarnings()}, therefore it is possible that a warning + * added via this method is placed onto the end of the previous warning chain

+ * + * @param warn warning to add + */ + public void addWarning(SQLWarning warn) { + //copy reference to avoid NPE from concurrent modification of this.warnings + final PSQLWarningWrapper warnWrap = this.warnings; + if (warnWrap == null) { + this.warnings = new PSQLWarningWrapper(warn); + } else { + warnWrap.addWarning(warn); + } + } + + @Override + public SQLWarning getWarnings() throws SQLException { + checkClosed(); + //copy reference to avoid NPE from concurrent modification of this.warnings + final PSQLWarningWrapper warnWrap = this.warnings; + return warnWrap != null ? warnWrap.getFirstWarning() : null; + } + + @Override + public int getMaxFieldSize() throws SQLException { + return maxFieldSize; + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + checkClosed(); + if (max < 0) { + throw new PSQLException( + GT.tr("The maximum field size must be a value greater than or equal to 0."), + PSQLState.INVALID_PARAMETER_VALUE); + } + maxFieldSize = max; + } + + /** + *

Clears the warning chain.

+ *

Note that while it is safe to clear warnings while the query is executing, warnings that are + * added between calls to {@link #getWarnings()} and {@link #clearWarnings()} may be missed. + * Therefore you should hold a reference to the tail of the previous warning chain + * and verify whether its {@link SQLWarning#getNextWarning()} value holds any new warning.

+ */ + @Override + public void clearWarnings() throws SQLException { + warnings = null; + } + + @Override + public ResultSet getResultSet() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + + if (result == null) { + return null; + } + + return result.getResultSet(); + } + } + + /** + * Note: even though {@code Statement} is automatically closed when it is garbage + * collected, it is better to close it explicitly to lower resource consumption. + * + * {@inheritDoc} + */ + @Override + public final void close() throws SQLException { + // closing an already closed Statement is a no-op. + if (!IS_CLOSED_UPDATER.compareAndSet(this, 0, 1)) { + return; + } + + cancel(); + + closeForNextExecution(); + + closeImpl(); + } + + /** + * This is guaranteed to be called exactly once even in case of concurrent {@link #close()} calls. + * @throws SQLException in case of error + */ + protected void closeImpl() throws SQLException { + } + + /* + * + * The following methods are postgres extensions and are defined in the interface BaseStatement + * + */ + + @Override + public long getLastOID() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + if (result == null) { + return 0; + } + return result.getInsertOID(); + } + } + + @Override + public void setPrepareThreshold(int newThreshold) throws SQLException { + checkClosed(); + + if (newThreshold < 0) { + forceBinaryTransfers = true; + newThreshold = 1; + } + + this.mPrepareThreshold = newThreshold; + } + + @Override + public int getPrepareThreshold() { + return mPrepareThreshold; + } + + @Override + @SuppressWarnings("deprecation") + public void setUseServerPrepare(boolean flag) throws SQLException { + setPrepareThreshold(flag ? 
1 : 0); + } + + @Override + public boolean isUseServerPrepare() { + return false; + } + + protected void checkClosed() throws SQLException { + if (isClosed()) { + throw new PSQLException(GT.tr("This statement has been closed."), + PSQLState.OBJECT_NOT_IN_STATE); + } + } + + // ** JDBC 2 Extensions ** + + @Override + public void addBatch(String sql) throws SQLException { + checkClosed(); + + ArrayList batchStatements = this.batchStatements; + if (batchStatements == null) { + this.batchStatements = batchStatements = new ArrayList<>(); + } + ArrayList batchParameters = this.batchParameters; + if (batchParameters == null) { + this.batchParameters = batchParameters = new ArrayList(); + } + + // Simple statements should not replace ?, ? with $1, $2 + boolean shouldUseParameterized = false; + CachedQuery cachedQuery = connection.createQuery(sql, replaceProcessingEnabled, shouldUseParameterized); + batchStatements.add(cachedQuery.query); + batchParameters.add(null); + } + + @Override + public void clearBatch() throws SQLException { + if (batchStatements != null) { + batchStatements.clear(); + } + if (batchParameters != null) { + batchParameters.clear(); + } + } + + protected BatchResultHandler createBatchHandler(Query[] queries, + ParameterList[] parameterLists) { + return new BatchResultHandler(this, queries, parameterLists, + wantsGeneratedKeysAlways); + } + + private BatchResultHandler internalExecuteBatch() throws SQLException { + // Construct query/parameter arrays. + transformQueriesAndParameters(); + ArrayList batchStatements = this.batchStatements; + ArrayList batchParameters = this.batchParameters; + // Empty arrays should be passed to toArray + // see http://shipilev.net/blog/2016/arrays-wisdom-ancients/ + Query[] queries = batchStatements.toArray(new Query[0]); + ParameterList[] parameterLists = batchParameters.toArray(new ParameterList[0]); + batchStatements.clear(); + batchParameters.clear(); + + int flags; + + // Force a Describe before any execution? 
We need to do this if we're going + // to send anything dependent on the Describe results, e.g. binary parameters. + boolean preDescribe = false; + + if (wantsGeneratedKeysAlways) { + /* + * This batch will return generated keys, tell the executor to expect result rows. We also + * force a Describe later so we know the size of the results to expect. + * + * If the parameter type(s) change between batch entries and the default binary-mode changes + * we might get mixed binary and text in a single result set column, which we cannot handle. + * To prevent this, disable binary transfer mode in batches that return generated keys. See + * GitHub issue #267 + */ + flags = QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS | QueryExecutor.QUERY_NO_BINARY_TRANSFER; + } else { + // If a batch hasn't specified that it wants generated keys, using the appropriate + // Connection.createStatement(...) interfaces, disallow any result set. + flags = QueryExecutor.QUERY_NO_RESULTS; + } + + PreferQueryMode preferQueryMode = connection.getPreferQueryMode(); + if (preferQueryMode == PreferQueryMode.SIMPLE + || (preferQueryMode == PreferQueryMode.EXTENDED_FOR_PREPARED + && parameterLists[0] == null)) { + flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE; + } + + boolean sameQueryAhead = queries.length > 1 && queries[0] == queries[1]; + + if (!sameQueryAhead + // If executing the same query twice in a batch, make sure the statement + // is server-prepared. In other words, "oneshot" only if the query is one in the batch + // or the queries are different + || isOneShotQuery(null)) { + flags |= QueryExecutor.QUERY_ONESHOT; + } else { + // If a batch requests generated keys and isn't already described, + // force a Describe of the query before proceeding. That way we can + // determine the appropriate size of each batch by estimating the + // maximum data returned. Without that, we don't know how many queries + // we'll be able to queue up before we risk a deadlock. 
+ // (see v3.QueryExecutorImpl's MAX_BUFFERED_RECV_BYTES) + + // SameQueryAhead is just a quick way to issue pre-describe for batch execution + // TODO: It should be reworked into "pre-describe if query has unknown parameter + // types and same query is ahead". + preDescribe = (wantsGeneratedKeysAlways || sameQueryAhead) + && !queries[0].isStatementDescribed(); + /* + * It's also necessary to force a Describe on the first execution of the new statement, even + * though we already described it, to work around bug #267. + */ + flags |= QueryExecutor.QUERY_FORCE_DESCRIBE_PORTAL; + } + + if (connection.getAutoCommit()) { + flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN; + } + if (connection.hintReadOnly()) { + flags |= QueryExecutor.QUERY_READ_ONLY_HINT; + } + + BatchResultHandler handler; + handler = createBatchHandler(queries, parameterLists); + + if ((preDescribe || forceBinaryTransfers) + && (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) == 0) { + // Do a client-server round trip, parsing and describing the query so we + // can determine its result types for use in binary parameters, batch sizing, + // etc. 
+ int flags2 = flags | QueryExecutor.QUERY_DESCRIBE_ONLY; + StatementResultHandler handler2 = new StatementResultHandler(); + try { + connection.getQueryExecutor().execute(queries[0], parameterLists[0], handler2, 0, 0, flags2); + } catch (SQLException e) { + // Unable to parse the first statement -> throw BatchUpdateException + handler.handleError(e); + handler.handleCompletion(); + // Will not reach here (see above) + } + ResultWrapper result2 = handler2.getResults(); + if (result2 != null) { + result2.getResultSet().close(); + } + } + + try (ResourceLock ignore = lock.obtain()) { + result = null; + } + + try { + startTimer(); + connection.getQueryExecutor().execute(queries, parameterLists, handler, maxrows, fetchSize, + flags, adaptiveFetch); + } finally { + killTimerTask(); + // There might be some rows generated even in case of failures + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + if (wantsGeneratedKeysAlways) { + generatedKeys = new ResultWrapper(handler.getGeneratedKeys()); + } + } + } + return handler; + } + + @Override + public int[] executeBatch() throws SQLException { + checkClosed(); + closeForNextExecution(); + + if (batchStatements == null || batchStatements.isEmpty() || batchParameters == null) { + return new int[0]; + } + + return internalExecuteBatch().getUpdateCount(); + } + + @Override + public void cancel() throws SQLException { + if (statementState == StatementCancelState.IDLE) { + return; + } + if (!STATE_UPDATER.compareAndSet(this, StatementCancelState.IN_QUERY, + StatementCancelState.CANCELING)) { + // Not in query, there's nothing to cancel + return; + } + // Use connection lock to avoid spinning in killTimerTask + try (ResourceLock connectionLock = connection.obtainLock()) { + try { + connection.cancelQuery(); + } finally { + STATE_UPDATER.set(this, StatementCancelState.CANCELLED); + connection.lockCondition().signalAll(); // wake-up killTimerTask + } + } + } + + @Override + public Connection getConnection() throws 
SQLException { + return connection; + } + + @Override + public int getFetchDirection() { + return fetchdirection; + } + + @Override + public int getResultSetConcurrency() { + return concurrency; + } + + @Override + public int getResultSetType() { + return resultsettype; + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + switch (direction) { + case ResultSet.FETCH_FORWARD: + case ResultSet.FETCH_REVERSE: + case ResultSet.FETCH_UNKNOWN: + fetchdirection = direction; + break; + default: + throw new PSQLException(GT.tr("Invalid fetch direction constant: {0}.", direction), + PSQLState.INVALID_PARAMETER_VALUE); + } + } + + @Override + public void setFetchSize(int rows) throws SQLException { + checkClosed(); + if (rows < 0) { + throw new PSQLException(GT.tr("Fetch size must be a value greater to or equal to 0."), + PSQLState.INVALID_PARAMETER_VALUE); + } + fetchSize = rows; + } + + private void startTimer() { + /* + * there shouldn't be any previous timer active, but better safe than sorry. + */ + cleanupTimer(); + + STATE_UPDATER.set(this, StatementCancelState.IN_QUERY); + + if (timeout == 0) { + return; + } + + TimerTask cancelTask = new StatementCancelTimerTask(this); + + CANCEL_TIMER_UPDATER.set(this, cancelTask); + connection.addTimerTask(cancelTask, timeout); + } + + void cancelIfStillNeeded(TimerTask timerTask) { + try { + if (!CANCEL_TIMER_UPDATER.compareAndSet(this, timerTask, null)) { + // Nothing to do here, statement has already finished and cleared + // cancelTimerTask reference + return; + } + cancel(); + } catch (SQLException e) { + } + } + + /** + * Clears {@link #cancelTimerTask} if any. Returns true if and only if "cancel" timer task would + * never invoke {@link #cancel()}. 
+ */ + private boolean cleanupTimer() { + TimerTask timerTask = CANCEL_TIMER_UPDATER.get(this); + if (timerTask == null) { + // If timeout is zero, then timer task did not exist, so we safely report "all clear" + return timeout == 0; + } + if (!CANCEL_TIMER_UPDATER.compareAndSet(this, timerTask, null)) { + // Failed to update reference -> timer has just fired, so we must wait for the query state to + // become "cancelling". + return false; + } + timerTask.cancel(); + connection.purgeTimerTasks(); + // All clear + return true; + } + + private void killTimerTask() { + boolean timerTaskIsClear = cleanupTimer(); + // The order is important here: in case we need to wait for the cancel task, the state must be + // kept StatementCancelState.IN_QUERY, so cancelTask would be able to cancel the query. + // It is believed that this case is very rare, so "additional cancel and wait below" would not + // harm it. + if (timerTaskIsClear && STATE_UPDATER.compareAndSet(this, StatementCancelState.IN_QUERY, StatementCancelState.IDLE)) { + return; + } + + // Being here means someone managed to call .cancel() and our connection did not receive + // "timeout error" + // We wait till state becomes "cancelled" + boolean interrupted = false; + try (ResourceLock connectionLock = connection.obtainLock()) { + // state check is performed with connection lock so it detects "cancelled" state faster + // In other words, it prevents unnecessary ".wait()" call + while (!STATE_UPDATER.compareAndSet(this, StatementCancelState.CANCELLED, StatementCancelState.IDLE)) { + try { + // Note: wait timeout here is irrelevant since connection.obtainLock() would block until + // .cancel finishes + connection.lockCondition().await(10, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { // NOSONAR + // Either re-interrupt this method or rethrow the "InterruptedException" + interrupted = true; + } + } + } + if (interrupted) { + Thread.currentThread().interrupt(); + } + } + + protected boolean 
getForceBinaryTransfer() { + return forceBinaryTransfers; + } + + @Override + public long getLargeUpdateCount() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + if (result == null || result.getResultSet() != null) { + return -1; + } + + return result.getUpdateCount(); + } + } + + @Override + public void setLargeMaxRows(long max) throws SQLException { + throw Driver.notImplemented(this.getClass(), "setLargeMaxRows"); + } + + @Override + public long getLargeMaxRows() throws SQLException { + throw Driver.notImplemented(this.getClass(), "getLargeMaxRows"); + } + + @Override + public long[] executeLargeBatch() throws SQLException { + checkClosed(); + closeForNextExecution(); + + if (batchStatements == null || batchStatements.isEmpty() || batchParameters == null) { + return new long[0]; + } + + return internalExecuteBatch().getLargeUpdateCount(); + } + + @Override + public long executeLargeUpdate(String sql) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + executeWithFlags(sql, QueryExecutor.QUERY_NO_RESULTS); + checkNoResultUpdate(); + return getLargeUpdateCount(); + } + } + + @Override + public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) { + return executeLargeUpdate(sql); + } + + return executeLargeUpdate(sql, (String[]) null); + } + + @Override + public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException { + if (columnIndexes == null || columnIndexes.length == 0) { + return executeLargeUpdate(sql); + } + + throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."), + PSQLState.NOT_IMPLEMENTED); + } + + @Override + public long executeLargeUpdate(String sql, String [] columnNames) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (columnNames != null && columnNames.length == 0) { + return executeLargeUpdate(sql); + } + + 
wantsGeneratedKeysOnce = true; + if (!executeCachedSql(sql, 0, columnNames)) { + // no resultset returned. What's a pity! + } + return getLargeUpdateCount(); + } + } + + @Override + public boolean isClosed() throws SQLException { + return isClosed == 1; + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + checkClosed(); + this.poolable = poolable; + } + + @Override + public boolean isPoolable() throws SQLException { + checkClosed(); + return poolable; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface.isAssignableFrom(getClass()); + } + + @Override + public T unwrap(Class iface) throws SQLException { + if (iface.isAssignableFrom(getClass())) { + return iface.cast(this); + } + throw new SQLException("Cannot unwrap to " + iface.getName()); + } + + @Override + public void closeOnCompletion() throws SQLException { + closeOnCompletion = true; + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + return closeOnCompletion; + } + + protected void checkCompletion() throws SQLException { + if (!closeOnCompletion) { + return; + } + + try (ResourceLock ignore = lock.obtain()) { + ResultWrapper result = firstUnclosedResult; + while (result != null) { + ResultSet resultSet = result.getResultSet(); + if (resultSet != null && !resultSet.isClosed()) { + return; + } + result = result.getNext(); + } + } + + // prevent all ResultSet.close arising from Statement.close to loop here + closeOnCompletion = false; + try { + close(); + } finally { + // restore the status if one rely on isCloseOnCompletion + closeOnCompletion = true; + } + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + // CLOSE_CURRENT_RESULT + if (current == Statement.CLOSE_CURRENT_RESULT && result != null + && result.getResultSet() != null) { + result.getResultSet().close(); + } + + // Advance resultset. 
+ if (result != null) { + result = result.getNext(); + } + + // CLOSE_ALL_RESULTS + if (current == Statement.CLOSE_ALL_RESULTS) { + // Close preceding resultsets. + closeUnclosedProcessedResults(); + } + + // Done. + return result != null && result.getResultSet() != null; + } + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + checkClosed(); + if (generatedKeys == null || generatedKeys.getResultSet() == null) { + return createDriverResultSet(new Field[0], new ArrayList<>()); + } + + return generatedKeys.getResultSet(); + } + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) { + return executeUpdate(sql); + } + + return executeUpdate(sql, (String[]) null); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + if (columnIndexes == null || columnIndexes.length == 0) { + return executeUpdate(sql); + } + + throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."), + PSQLState.NOT_IMPLEMENTED); + } + + @Override + public int executeUpdate(String sql, String [] columnNames) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (columnNames != null && columnNames.length == 0) { + return executeUpdate(sql); + } + + wantsGeneratedKeysOnce = true; + if (!executeCachedSql(sql, 0, columnNames)) { + // no resultset returned. What's a pity! 
+ } + return getUpdateCount(); + } + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) { + return execute(sql); + } + return execute(sql, (String[]) null); + } + + @Override + public boolean execute(String sql, int [] columnIndexes) throws SQLException { + if (columnIndexes != null && columnIndexes.length == 0) { + return execute(sql); + } + + throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."), + PSQLState.NOT_IMPLEMENTED); + } + + @Override + public boolean execute(String sql, String [] columnNames) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (columnNames != null && columnNames.length == 0) { + return execute(sql); + } + + wantsGeneratedKeysOnce = true; + return executeCachedSql(sql, 0, columnNames); + } + } + + @Override + public int getResultSetHoldability() throws SQLException { + return rsHoldability; + } + + @Override + public ResultSet createDriverResultSet(Field[] fields, List tuples) + throws SQLException { + return createResultSet(null, fields, tuples, null); + } + + protected void transformQueriesAndParameters() throws SQLException { + } + + @Override + public void setAdaptiveFetch(boolean adaptiveFetch) { + this.adaptiveFetch = adaptiveFetch; + } + + @Override + public boolean getAdaptiveFetch() { + return adaptiveFetch; + } + + protected TimestampUtils getTimestampUtils() { + if (timestampUtils == null) { + timestampUtils = new TimestampUtils(!connection.getQueryExecutor().getIntegerDateTimes(), new QueryExecutorTimeZoneProvider(connection.getQueryExecutor())); + } + return timestampUtils; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PreferQueryMode.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PreferQueryMode.java new file mode 100644 index 0000000..6526a1d --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PreferQueryMode.java @@ -0,0 +1,40 @@ +/* 
+ * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +/** + *

Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only), + * extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only.

+ * + *

Note: this is for debugging purposes only.

+ * + * @see org.postgresql.PGProperty#PREFER_QUERY_MODE + */ +public enum PreferQueryMode { + SIMPLE("simple"), + EXTENDED_FOR_PREPARED("extendedForPrepared"), + EXTENDED("extended"), + EXTENDED_CACHE_EVERYTHING("extendedCacheEverything"); + + private final String value; + + PreferQueryMode(String value) { + this.value = value; + } + + public static PreferQueryMode of(String mode) { + for (PreferQueryMode preferQueryMode : values()) { + if (preferQueryMode.value.equals(mode)) { + return preferQueryMode; + } + } + return EXTENDED; + } + + public String value() { + return value; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/QueryExecutorTimeZoneProvider.java b/pgjdbc/src/main/java/org/postgresql/jdbc/QueryExecutorTimeZoneProvider.java new file mode 100644 index 0000000..b2b4ff2 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/QueryExecutorTimeZoneProvider.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Provider; +import org.postgresql.core.QueryExecutor; +import org.postgresql.util.GT; + +import java.util.TimeZone; + +/** + * This class workarounds Exception when + * indexing guava-30.0-jre. + *

It looks like {@code jandex} does not support {@code new Interface<..>} with type annotations. + *

+ */ +class QueryExecutorTimeZoneProvider implements Provider { + private final QueryExecutor queryExecutor; + + QueryExecutorTimeZoneProvider(QueryExecutor queryExecutor) { + this.queryExecutor = queryExecutor; + } + + @Override + public TimeZone get() { + TimeZone timeZone = queryExecutor.getTimeZone(); + if (timeZone == null) { + throw new IllegalStateException( + GT.tr("Backend timezone is not known. Backend should have returned TimeZone when " + + "establishing a connection") + ); + } + return timeZone; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ResourceLock.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ResourceLock.java new file mode 100644 index 0000000..19ec31c --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ResourceLock.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import java.util.concurrent.locks.ReentrantLock; + +/** + * Extends a ReentrantLock for use in try-with-resources block. + * + *

Example use

+ *
{@code
+ *
+ *   try (ResourceLock ignore = lock.obtain()) {
+ *     // do something while holding the resource lock
+ *   }
+ *
+ * }
+ */ +@SuppressWarnings("serial") +public final class ResourceLock extends ReentrantLock implements AutoCloseable { + + public ResourceLock() { + } + + /** + * Obtain a lock and return the ResourceLock for use in try-with-resources block. + */ + public ResourceLock obtain() { + lock(); + return this; + } + + /** + * Unlock on exit of try-with-resources block. + */ + @Override + public void close() { + this.unlock(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ResultWrapper.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ResultWrapper.java new file mode 100644 index 0000000..df79ae7 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ResultWrapper.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. + +package org.postgresql.jdbc; + +import java.sql.ResultSet; + +/** + * Helper class that storing result info. This handles both the ResultSet and no-ResultSet result + * cases with a single interface for inspecting and stepping through them. 
+ * + * @author Oliver Jowett (oliver@opencloud.com) + */ +public class ResultWrapper { + public ResultWrapper(ResultSet rs) { + this.rs = rs; + this.updateCount = -1; + this.insertOID = -1; + } + + public ResultWrapper(long updateCount, long insertOID) { + this.rs = null; + this.updateCount = updateCount; + this.insertOID = insertOID; + } + + public ResultSet getResultSet() { + return rs; + } + + public long getUpdateCount() { + return updateCount; + } + + public long getInsertOID() { + return insertOID; + } + + public ResultWrapper getNext() { + return next; + } + + public void append(ResultWrapper newResult) { + ResultWrapper tail = this; + while (tail.next != null) { + tail = tail.next; + } + + tail.next = newResult; + } + + private final ResultSet rs; + private final long updateCount; + private final long insertOID; + private ResultWrapper next; +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/SslMode.java b/pgjdbc/src/main/java/org/postgresql/jdbc/SslMode.java new file mode 100644 index 0000000..addea00 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/SslMode.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.PGProperty; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.util.Properties; + +public enum SslMode { + /** + * Do not use encrypted connections. + */ + DISABLE("disable"), + /** + * Start with non-encrypted connection, then try encrypted one. + */ + ALLOW("allow"), + /** + * Start with encrypted connection, fallback to non-encrypted (default). + */ + PREFER("prefer"), + /** + * Ensure connection is encrypted. + */ + REQUIRE("require"), + /** + * Ensure connection is encrypted, and client trusts server certificate. 
+ */ + VERIFY_CA("verify-ca"), + /** + * Ensure connection is encrypted, client trusts server certificate, and server hostname matches + * the one listed in the server certificate. + */ + VERIFY_FULL("verify-full"), + ; + + public static final SslMode[] VALUES = values(); + + public final String value; + + SslMode(String value) { + this.value = value; + } + + public boolean requireEncryption() { + return this.compareTo(REQUIRE) >= 0; + } + + public boolean verifyCertificate() { + return this == VERIFY_CA || this == VERIFY_FULL; + } + + public boolean verifyPeerName() { + return this == VERIFY_FULL; + } + + public static SslMode of(Properties info) throws PSQLException { + String sslmode = PGProperty.SSL_MODE.getOrDefault(info); + // If sslmode is not set, fallback to ssl parameter + if (sslmode == null) { + if (PGProperty.SSL.getBoolean(info) || "".equals(PGProperty.SSL.getOrDefault(info))) { + return VERIFY_FULL; + } + return PREFER; + } + + for (SslMode sslMode : VALUES) { + if (sslMode.value.equalsIgnoreCase(sslmode)) { + return sslMode; + } + } + throw new PSQLException(GT.tr("Invalid sslmode value: {0}", sslmode), + PSQLState.CONNECTION_UNABLE_TO_CONNECT); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelState.java b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelState.java new file mode 100644 index 0000000..f149048 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelState.java @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +/** + * Represents {@link PgStatement#cancel()} state. 
+ */ +enum StatementCancelState { + IDLE, + IN_QUERY, + CANCELING, + CANCELLED +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelTimerTask.java b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelTimerTask.java new file mode 100644 index 0000000..5da4624 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelTimerTask.java @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2023, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import java.util.TimerTask; + +/** + * Timer task that sends {@code statement.cancel()} signal to support {@link java.sql.Statement#setQueryTimeout(int)}. + * We explicitly nullify the reference to statement to help GC since {@code java.util.TimerThread} + * might keep reference to the latest executed task in its local variable. + */ +class StatementCancelTimerTask extends TimerTask { + private PgStatement statement; + + StatementCancelTimerTask(PgStatement statement) { + this.statement = statement; + } + + @Override + public boolean cancel() { + boolean result = super.cancel(); + // Help GC to avoid keeping reference via TimerThread -> TimerTask -> statement -> connection + statement = null; + return result; + } + + @Override + public void run() { + PgStatement statement = this.statement; + if (statement != null) { + statement.cancelIfStillNeeded(this); + } + // Help GC to avoid keeping reference via TimerThread -> TimerTask -> statement -> connection + this.statement = null; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/TimestampUtils.java b/pgjdbc/src/main/java/org/postgresql/jdbc/TimestampUtils.java new file mode 100644 index 0000000..57b1597 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/TimestampUtils.java @@ -0,0 +1,1716 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + + +import org.postgresql.PGStatement; +import org.postgresql.core.JavaVersion; +import org.postgresql.core.Oid; +import org.postgresql.core.Provider; +import org.postgresql.util.ByteConverter; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.lang.reflect.Field; +import java.sql.Date; +import java.sql.SQLException; +import java.sql.Time; +import java.sql.Timestamp; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.ZoneOffset; +import java.time.chrono.IsoEra; +import java.time.format.DateTimeParseException; +import java.time.temporal.ChronoField; +import java.util.Calendar; +import java.util.GregorianCalendar; +import java.util.HashMap; +import java.util.Objects; +import java.util.SimpleTimeZone; +import java.util.TimeZone; + +/** + * Misc utils for handling time and date values. + */ +@SuppressWarnings("try") +public class TimestampUtils { + /** + * Number of milliseconds in one day. 
+ */ + private static final int ONEDAY = 24 * 3600 * 1000; + private static final char[] ZEROS = {'0', '0', '0', '0', '0', '0', '0', '0', '0'}; + private static final char[][] NUMBERS; + private static final HashMap GMT_ZONES = new HashMap<>(); + private static final int MAX_NANOS_BEFORE_WRAP_ON_ROUND = 999999500; + private static final Duration ONE_MICROSECOND = Duration.ofNanos(1000); + // LocalTime.MAX is 23:59:59.999_999_999, and it wraps to 24:00:00 when nanos exceed 999_999_499 + // since PostgreSQL has microsecond resolution only + private static final LocalTime MAX_TIME = LocalTime.MAX.minus(Duration.ofNanos(500)); + private static final OffsetDateTime MAX_OFFSET_DATETIME = OffsetDateTime.MAX.minus(Duration.ofMillis(500)); + private static final LocalDateTime MAX_LOCAL_DATETIME = LocalDateTime.MAX.minus(Duration.ofMillis(500)); + // low value for dates is 4713 BC + private static final LocalDate MIN_LOCAL_DATE = LocalDate.of(4713, 1, 1).with(ChronoField.ERA, IsoEra.BCE.getValue()); + private static final LocalDateTime MIN_LOCAL_DATETIME = MIN_LOCAL_DATE.atStartOfDay(); + private static final OffsetDateTime MIN_OFFSET_DATETIME = MIN_LOCAL_DATETIME.atOffset(ZoneOffset.UTC); + private static final Duration PG_EPOCH_DIFF = + Duration.between(Instant.EPOCH, LocalDate.of(2000, 1, 1).atStartOfDay().toInstant(ZoneOffset.UTC)); + + private static final Field DEFAULT_TIME_ZONE_FIELD; + + private static final TimeZone UTC_TIMEZONE = TimeZone.getTimeZone(ZoneOffset.UTC); + + private TimeZone prevDefaultZoneFieldValue; + private TimeZone defaultTimeZoneCache; + + static { + // The expected maximum value is 60 (seconds), so 64 is used "just in case" + NUMBERS = new char[64][]; + for (int i = 0; i < NUMBERS.length; i++) { + NUMBERS[i] = ((i < 10 ? "0" : "") + Integer.toString(i)).toCharArray(); + } + + // Backend's gmt-3 means GMT+03 in Java. 
Here a map is created so gmt-3 can be converted to + // java TimeZone + for (int i = -12; i <= 14; i++) { + TimeZone timeZone; + String pgZoneName; + if (i == 0) { + timeZone = TimeZone.getTimeZone("GMT"); + pgZoneName = "GMT"; + } else { + timeZone = TimeZone.getTimeZone("GMT" + (i <= 0 ? "+" : "-") + Math.abs(i)); + pgZoneName = "GMT" + (i >= 0 ? "+" : "-"); + } + + if (i == 0) { + GMT_ZONES.put(pgZoneName, timeZone); + continue; + } + GMT_ZONES.put(pgZoneName + Math.abs(i), timeZone); + GMT_ZONES.put(pgZoneName + new String(NUMBERS[Math.abs(i)]), timeZone); + } + // Fast path to getting the default timezone. + // Accessing the default timezone over and over creates a clone with regular API. + // Because we don't mutate that object in our use of it, we can access the field directly. + // This saves the creation of a clone everytime, and the memory associated to all these clones. + Field tzField; + try { + tzField = null; + // Avoid reflective access in Java 9+ + if (JavaVersion.getRuntimeVersion().compareTo(JavaVersion.v1_8) <= 0) { + tzField = TimeZone.class.getDeclaredField("defaultTimeZone"); + tzField.setAccessible(true); + TimeZone defaultTz = TimeZone.getDefault(); + Object tzFromField = tzField.get(null); + if (defaultTz == null || !defaultTz.equals(tzFromField)) { + tzField = null; + } + } + } catch (Exception e) { + tzField = null; + } + DEFAULT_TIME_ZONE_FIELD = tzField; + } + + private final StringBuilder sbuf = new StringBuilder(); + + // This calendar is used when user provides calendar in setX(, Calendar) method. + // It ensures calendar is Gregorian. + private final Calendar calendarWithUserTz = new GregorianCalendar(); + + private Calendar calCache; + private ZoneOffset calCacheZone; + + /** + * True if the backend uses doubles for time values. False if long is used. 
+ */ + private final boolean usesDouble; + private final Provider timeZoneProvider; + private final ResourceLock lock = new ResourceLock(); + + public TimestampUtils(boolean usesDouble, Provider timeZoneProvider) { + this.usesDouble = usesDouble; + this.timeZoneProvider = timeZoneProvider; + } + + private Calendar getCalendar(ZoneOffset offset) { + if (calCache != null && Objects.equals(offset, calCacheZone)) { + return calCache; + } + + // normally we would use: + // calCache = new GregorianCalendar(TimeZone.getTimeZone(offset)); + // But this seems to cause issues for some crazy offsets as returned by server for BC dates! + final String tzid = offset.getTotalSeconds() == 0 ? "UTC" : "GMT".concat(offset.getId()); + final TimeZone syntheticTZ = new SimpleTimeZone(offset.getTotalSeconds() * 1000, tzid); + calCache = new GregorianCalendar(syntheticTZ); + calCacheZone = offset; + return calCache; + } + + private static class ParsedTimestamp { + boolean hasDate; + int era = GregorianCalendar.AD; + int year = 1970; + int month = 1; + + boolean hasTime; + int day = 1; + int hour; + int minute; + int second; + int nanos; + + boolean hasOffset; + ZoneOffset offset = ZoneOffset.UTC; + } + + private static class ParsedBinaryTimestamp { + Infinity infinity; + long millis; + int nanos; + } + + enum Infinity { + POSITIVE, + NEGATIVE + } + + /** + * Load date/time information into the provided calendar returning the fractional seconds. + */ + private ParsedTimestamp parseBackendTimestamp(String str) throws SQLException { + char[] s = str.toCharArray(); + int slen = s.length; + + // This is pretty gross.. + ParsedTimestamp result = new ParsedTimestamp(); + + // We try to parse these fields in order; all are optional + // (but some combinations don't make sense, e.g. if you have + // both date and time then they must be whitespace-separated). + // At least one of date and time must be present. 
+ + // leading whitespace + // yyyy-mm-dd + // whitespace + // hh:mm:ss + // whitespace + // timezone in one of the formats: +hh, -hh, +hh:mm, -hh:mm + // whitespace + // if date is present, an era specifier: AD or BC + // trailing whitespace + + try { + int start = skipWhitespace(s, 0); // Skip leading whitespace + int end = firstNonDigit(s, start); + int num; + char sep; + + // Possibly read date. + if (charAt(s, end) == '-') { + // + // Date + // + result.hasDate = true; + + // year + result.year = number(s, start, end); + start = end + 1; // Skip '-' + + // month + end = firstNonDigit(s, start); + result.month = number(s, start, end); + + sep = charAt(s, end); + if (sep != '-') { + throw new NumberFormatException("Expected date to be dash-separated, got '" + sep + "'"); + } + + start = end + 1; // Skip '-' + + // day of month + end = firstNonDigit(s, start); + result.day = number(s, start, end); + + start = skipWhitespace(s, end); // Skip trailing whitespace + } + + // Possibly read time. + if (Character.isDigit(charAt(s, start))) { + // + // Time. + // + + result.hasTime = true; + + // Hours + + end = firstNonDigit(s, start); + result.hour = number(s, start, end); + + sep = charAt(s, end); + if (sep != ':') { + throw new NumberFormatException("Expected time to be colon-separated, got '" + sep + "'"); + } + + start = end + 1; // Skip ':' + + // minutes + + end = firstNonDigit(s, start); + result.minute = number(s, start, end); + + sep = charAt(s, end); + if (sep != ':') { + throw new NumberFormatException("Expected time to be colon-separated, got '" + sep + "'"); + } + + start = end + 1; // Skip ':' + + // seconds + + end = firstNonDigit(s, start); + result.second = number(s, start, end); + start = end; + + // Fractional seconds. + if (charAt(s, start) == '.') { + end = firstNonDigit(s, start + 1); // Skip '.' 
+ num = number(s, start + 1, end); + + for (int numlength = end - (start + 1); numlength < 9; numlength++) { + num *= 10; + } + + result.nanos = num; + start = end; + } + + start = skipWhitespace(s, start); // Skip trailing whitespace + } + + // Possibly read timezone. + sep = charAt(s, start); + if (sep == '-' || sep == '+') { + result.hasOffset = true; + + int tzsign = sep == '-' ? -1 : 1; + int tzhr; + int tzmin; + int tzsec; + + end = firstNonDigit(s, start + 1); // Skip +/- + tzhr = number(s, start + 1, end); + start = end; + + sep = charAt(s, start); + if (sep == ':') { + end = firstNonDigit(s, start + 1); // Skip ':' + tzmin = number(s, start + 1, end); + start = end; + } else { + tzmin = 0; + } + + tzsec = 0; + sep = charAt(s, start); + if (sep == ':') { + end = firstNonDigit(s, start + 1); // Skip ':' + tzsec = number(s, start + 1, end); + start = end; + } + + result.offset = ZoneOffset.ofHoursMinutesSeconds(tzsign * tzhr, tzsign * tzmin, tzsign * tzsec); + + start = skipWhitespace(s, start); // Skip trailing whitespace + } + + if (result.hasDate && start < slen) { + String eraString = new String(s, start, slen - start); + if (eraString.startsWith("AD")) { + result.era = GregorianCalendar.AD; + start += 2; + } else if (eraString.startsWith("BC")) { + result.era = GregorianCalendar.BC; + start += 2; + } + } + + if (start < slen) { + throw new NumberFormatException( + "Trailing junk on timestamp: '" + new String(s, start, slen - start) + "'"); + } + + if (!result.hasTime && !result.hasDate) { + throw new NumberFormatException("Timestamp has neither date nor time"); + } + + } catch (NumberFormatException nfe) { + throw new PSQLException( + GT.tr("Bad value for type timestamp/date/time: {0}", str), + PSQLState.BAD_DATETIME_FORMAT, nfe); + } + + return result; + } + + /** + * Parse a string and return a timestamp representing its value. + * + * @param cal calendar to be used to parse the input string + * @param s The ISO formated date string to parse. 
   * @return null if s is null or a timestamp of the parsed string s.
   * @throws SQLException if there is a problem parsing s.
   */
  public Timestamp toTimestamp(Calendar cal,
      String s) throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      if (s == null) {
        return null;
      }

      int slen = s.length();

      // convert postgres's infinity values to internal infinity magic value
      // (length check first so the equals() comparison is only done for candidates)
      if (slen == 8 && "infinity".equals(s)) {
        return new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
      }

      if (slen == 9 && "-infinity".equals(s)) {
        return new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
      }

      ParsedTimestamp ts = parseBackendTimestamp(s);
      // If the backend string carried an explicit offset, resolve the wall-clock
      // fields in that zone; otherwise fall back to the caller's calendar
      // (or the default time zone when cal is null).
      Calendar useCal = ts.hasOffset ? getCalendar(ts.offset) : setupCalendar(cal);
      useCal.set(Calendar.ERA, ts.era);
      useCal.set(Calendar.YEAR, ts.year);
      // java.util.Calendar months are 0-based, parsed months are 1-based
      useCal.set(Calendar.MONTH, ts.month - 1);
      useCal.set(Calendar.DAY_OF_MONTH, ts.day);
      useCal.set(Calendar.HOUR_OF_DAY, ts.hour);
      useCal.set(Calendar.MINUTE, ts.minute);
      useCal.set(Calendar.SECOND, ts.second);
      useCal.set(Calendar.MILLISECOND, 0);

      // Millisecond part is carried by setNanos below, hence MILLISECOND=0 above.
      Timestamp result = new Timestamp(useCal.getTimeInMillis());
      result.setNanos(ts.nanos);
      return result;
    }
  }

  /**
   * Parse a string and return a LocalTime representing its value.
   *
   * @param s The ISO formatted time string to parse.
   * @return null if s is null or a LocalTime of the parsed string s.
   * @throws SQLException if there is a problem parsing s.
   */
  public LocalTime toLocalTime(String s) throws SQLException {
    if (s == null) {
      return null;
    }

    // PostgreSQL can emit "24:00:00" for end-of-day; java.time cannot represent
    // hour 24, so map it to LocalTime.MAX (23:59:59.999999999).
    if ("24:00:00".equals(s)) {
      return LocalTime.MAX;
    }

    try {
      return LocalTime.parse(s);
    } catch (DateTimeParseException nfe) {
      // Re-wrap so callers see the driver's standard bad-datetime SQLState
      throw new PSQLException(
          GT.tr("Bad value for type timestamp/date/time: {0}", s),
          PSQLState.BAD_DATETIME_FORMAT, nfe);
    }

  }

  /**
   * Returns the offset time object matching the given bytes with Oid#TIMETZ or Oid#TIME.
   *
   * @param bytes The binary encoded TIMETZ/TIME value.
   * @return The parsed offset time object.
   * @throws PSQLException If binary format could not be parsed.
   */
  public OffsetTimeBin toOffsetTimeBin(byte[] bytes) throws PSQLException {
    if (bytes.length != 12) {
      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
          PSQLState.BAD_DATETIME_FORMAT);
    }

    final long micros;

    if (usesDouble) {
      // legacy float8 timestamps: seconds since midnight as a double
      double seconds = ByteConverter.float8(bytes, 0);
      micros = (long) (seconds * 1_000_000d);
    } else {
      // int8 timestamps: microseconds since midnight
      micros = ByteConverter.int8(bytes, 0);
    }

    // postgres offset is negative, so we have to flip sign:
    final ZoneOffset timeOffset = ZoneOffset.ofTotalSeconds(-ByteConverter.int4(bytes, 8));

    return OffsetTime.of(LocalTime.ofNanoOfDay(Math.multiplyExact(micros, 1000L)), timeOffset);
  }

  /**
   * Parse a string and return a OffsetTime representing its value.
   *
   * @param s The ISO formatted time string to parse.
   * @return null if s is null or a OffsetTime of the parsed string s.
   * @throws SQLException if there is a problem parsing s.
   */
  public OffsetTime toOffsetTime(String s) throws SQLException {
    if (s == null) {
      return null;
    }

    // end-of-day sentinel; OffsetTime.MAX carries the matching extreme offset
    if (s.startsWith("24:00:00")) {
      return OffsetTime.MAX;
    }

    final ParsedTimestamp ts = parseBackendTimestamp(s);
    return OffsetTime.of(ts.hour, ts.minute, ts.second, ts.nanos, ts.offset);
  }

  /**
   * Parse a string and return a LocalDateTime representing its value.
   *
   * @param s The ISO formatted date string to parse.
   * @return null if s is null or a LocalDateTime of the parsed string s.
   * @throws SQLException if there is a problem parsing s.
   */
  public LocalDateTime toLocalDateTime(String s) throws SQLException {
    if (s == null) {
      return null;
    }

    int slen = s.length();

    // convert postgres's infinity values to internal infinity magic value
    if (slen == 8 && "infinity".equals(s)) {
      return LocalDateTime.MAX;
    }

    if (slen == 9 && "-infinity".equals(s)) {
      return LocalDateTime.MIN;
    }

    ParsedTimestamp ts = parseBackendTimestamp(s);

    // intentionally ignore time zone
    // 2004-10-19 10:23:54+03:00 is 2004-10-19 10:23:54 locally
    LocalDateTime result = LocalDateTime.of(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.nanos);
    if (ts.era == GregorianCalendar.BC) {
      // parseBackendTimestamp yields a year-of-era; flip to BCE when era says so
      return result.with(ChronoField.ERA, IsoEra.BCE.getValue());
    } else {
      return result;
    }
  }

  /**
   * Returns the offset date time object matching the given bytes with Oid#TIMETZ.
   * Not used internally anymore, function is here to retain compatibility with previous versions
   *
   * @param t the time value
   * @return the matching offset date time
   * @deprecated was used internally, and not used anymore
   */
  @Deprecated
  public OffsetDateTime toOffsetDateTime(Time t) {
    // hardcode utc because the backend does not provide us the timezone
    // hardcode UNIX epoch, JDBC requires OffsetDateTime but doesn't describe what date should be used
    return t.toLocalTime().atDate(LocalDate.of(1970, 1, 1)).atOffset(ZoneOffset.UTC);
  }

  /**
   * Parse a string and return a OffsetDateTime representing its value.
   *
   * @param s The ISO formatted date string to parse.
   * @return null if s is null or a OffsetDateTime of the parsed string s.
   * @throws SQLException if there is a problem parsing s.
+ */ + public OffsetDateTime toOffsetDateTime( + String s) throws SQLException { + if (s == null) { + return null; + } + + int slen = s.length(); + + // convert postgres's infinity values to internal infinity magic value + if (slen == 8 && "infinity".equals(s)) { + return OffsetDateTime.MAX; + } + + if (slen == 9 && "-infinity".equals(s)) { + return OffsetDateTime.MIN; + } + + final ParsedTimestamp ts = parseBackendTimestamp(s); + OffsetDateTime result = + OffsetDateTime.of(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.nanos, ts.offset); + if (ts.era == GregorianCalendar.BC) { + return result.with(ChronoField.ERA, IsoEra.BCE.getValue()); + } else { + return result; + } + } + + /** + * Returns the offset date time object matching the given bytes with Oid#TIMESTAMPTZ. + * + * @param bytes The binary encoded local date time value. + * @return The parsed local date time object. + * @throws PSQLException If binary format could not be parsed. + */ + public OffsetDateTime toOffsetDateTimeBin(byte[] bytes) throws PSQLException { + ParsedBinaryTimestamp parsedTimestamp = this.toProlepticParsedTimestampBin(bytes); + if (parsedTimestamp.infinity == Infinity.POSITIVE) { + return OffsetDateTime.MAX; + } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) { + return OffsetDateTime.MIN; + } + + // hardcode utc because the backend does not provide us the timezone + // Postgres is always UTC + Instant instant = Instant.ofEpochSecond(parsedTimestamp.millis / 1000L, parsedTimestamp.nanos); + return OffsetDateTime.ofInstant(instant, ZoneOffset.UTC); + } + + public Time toTime( + Calendar cal, String s) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + // 1) Parse backend string + if (s == null) { + return null; + } + ParsedTimestamp ts = parseBackendTimestamp(s); + Calendar useCal = ts.hasOffset ? getCalendar(ts.offset) : setupCalendar(cal); + if (!ts.hasOffset) { + // When no time zone provided (e.g. 
time or timestamp) + // We get the year-month-day from the string, then truncate the day to 1970-01-01 + // This is used for timestamp -> time conversion + // Note: this cannot be merged with "else" branch since + // timestamps at which the time flips to/from DST depend on the date + // For instance, 2000-03-26 02:00:00 is invalid timestamp in Europe/Moscow time zone + // and the valid one is 2000-03-26 03:00:00. That is why we parse full timestamp + // then set year to 1970 later + useCal.set(Calendar.ERA, ts.era); + useCal.set(Calendar.YEAR, ts.year); + useCal.set(Calendar.MONTH, ts.month - 1); + useCal.set(Calendar.DAY_OF_MONTH, ts.day); + } else { + // When time zone is given, we just pick the time part and assume date to be 1970-01-01 + // this is used for time, timez, and timestamptz parsing + useCal.set(Calendar.ERA, GregorianCalendar.AD); + useCal.set(Calendar.YEAR, 1970); + useCal.set(Calendar.MONTH, Calendar.JANUARY); + useCal.set(Calendar.DAY_OF_MONTH, 1); + } + useCal.set(Calendar.HOUR_OF_DAY, ts.hour); + useCal.set(Calendar.MINUTE, ts.minute); + useCal.set(Calendar.SECOND, ts.second); + useCal.set(Calendar.MILLISECOND, 0); + + long timeMillis = useCal.getTimeInMillis() + ts.nanos / 1000000; + if (ts.hasOffset || (ts.year == 1970 && ts.era == GregorianCalendar.AD)) { + // time with time zone has proper time zone, so the value can be returned as is + return new Time(timeMillis); + } + + // 2) Truncate date part so in given time zone the date would be formatted as 01/01/1970 + return convertToTime(timeMillis, useCal.getTimeZone()); + } + } + + public Date toDate(Calendar cal, + String s) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + // 1) Parse backend string + Timestamp timestamp = toTimestamp(cal, s); + + if (timestamp == null) { + return null; + } + + // Note: infinite dates are handled in convertToDate + // 2) Truncate date part so in given time zone the date would be formatted as 00:00 + return 
convertToDate(timestamp.getTime(), cal == null ? null : cal.getTimeZone()); + } + } + + private Calendar setupCalendar(Calendar cal) { + TimeZone timeZone = cal == null ? null : cal.getTimeZone(); + return getSharedCalendar(timeZone); + } + + /** + * Get a shared calendar, applying the supplied time zone or the default time zone if null. + * + * @param timeZone time zone to be set for the calendar + * @return The shared calendar. + */ + public Calendar getSharedCalendar(TimeZone timeZone) { + if (timeZone == null) { + timeZone = getDefaultTz(); + } + Calendar tmp = calendarWithUserTz; + tmp.setTimeZone(timeZone); + return tmp; + } + + /** + * Returns true when microsecond part of the time should be increased + * when rounding to microseconds + * @param nanos nanosecond part of the time + * @return true when microsecond part of the time should be increased when rounding to microseconds + */ + private static boolean nanosExceed499(int nanos) { + return nanos % 1000 > 499; + } + + public String toString(Calendar cal, Timestamp x) { + return toString(cal, x, true); + } + + public String toString(Calendar cal, Timestamp x, + boolean withTimeZone) { + try (ResourceLock ignore = lock.obtain()) { + if (x.getTime() == PGStatement.DATE_POSITIVE_INFINITY) { + return "infinity"; + } else if (x.getTime() == PGStatement.DATE_NEGATIVE_INFINITY) { + return "-infinity"; + } + + cal = setupCalendar(cal); + long timeMillis = x.getTime(); + + // Round to microseconds + int nanos = x.getNanos(); + if (nanos >= MAX_NANOS_BEFORE_WRAP_ON_ROUND) { + nanos = 0; + timeMillis++; + } else if (nanosExceed499(nanos)) { + // PostgreSQL does not support nanosecond resolution yet, and appendTime will just ignore + // 0..999 part of the nanoseconds, however we subtract nanos % 1000 to make the value + // a little bit saner for debugging reasons + nanos += 1000 - nanos % 1000; + } + cal.setTimeInMillis(timeMillis); + + sbuf.setLength(0); + + appendDate(sbuf, cal); + sbuf.append(' '); + 
appendTime(sbuf, cal, nanos); + if (withTimeZone) { + appendTimeZone(sbuf, cal); + } + appendEra(sbuf, cal); + + return sbuf.toString(); + } + } + + public String toString(Calendar cal, Date x) { + return toString(cal, x, true); + } + + public String toString(Calendar cal, Date x, + boolean withTimeZone) { + try (ResourceLock ignore = lock.obtain()) { + if (x.getTime() == PGStatement.DATE_POSITIVE_INFINITY) { + return "infinity"; + } else if (x.getTime() == PGStatement.DATE_NEGATIVE_INFINITY) { + return "-infinity"; + } + + cal = setupCalendar(cal); + cal.setTime(x); + + sbuf.setLength(0); + + appendDate(sbuf, cal); + appendEra(sbuf, cal); + if (withTimeZone) { + sbuf.append(' '); + appendTimeZone(sbuf, cal); + } + + return sbuf.toString(); + } + } + + public String toString(Calendar cal, Time x) { + return toString(cal, x, true); + } + + public String toString(Calendar cal, Time x, + boolean withTimeZone) { + try (ResourceLock ignore = lock.obtain()) { + cal = setupCalendar(cal); + cal.setTime(x); + + sbuf.setLength(0); + + appendTime(sbuf, cal, cal.get(Calendar.MILLISECOND) * 1000000); + + // The 'time' parser for <= 7.3 doesn't like timezones. 
+ if (withTimeZone) { + appendTimeZone(sbuf, cal); + } + + return sbuf.toString(); + } + } + + private static void appendDate(StringBuilder sb, Calendar cal) { + int year = cal.get(Calendar.YEAR); + int month = cal.get(Calendar.MONTH) + 1; + int day = cal.get(Calendar.DAY_OF_MONTH); + appendDate(sb, year, month, day); + } + + private static void appendDate(StringBuilder sb, int year, int month, int day) { + // always use at least four digits for the year so very + // early years, like 2, don't get misinterpreted + // + int prevLength = sb.length(); + sb.append(year); + int leadingZerosForYear = 4 - (sb.length() - prevLength); + if (leadingZerosForYear > 0) { + sb.insert(prevLength, ZEROS, 0, leadingZerosForYear); + } + + sb.append('-'); + sb.append(NUMBERS[month]); + sb.append('-'); + sb.append(NUMBERS[day]); + } + + private static void appendTime(StringBuilder sb, Calendar cal, int nanos) { + int hours = cal.get(Calendar.HOUR_OF_DAY); + int minutes = cal.get(Calendar.MINUTE); + int seconds = cal.get(Calendar.SECOND); + appendTime(sb, hours, minutes, seconds, nanos); + } + + /** + * Appends time part to the {@code StringBuilder} in PostgreSQL-compatible format. + * The function truncates {@param nanos} to microseconds. The value is expected to be rounded + * beforehand. + * @param sb destination + * @param hours hours + * @param minutes minutes + * @param seconds seconds + * @param nanos nanoseconds + */ + private static void appendTime(StringBuilder sb, int hours, int minutes, int seconds, int nanos) { + sb.append(NUMBERS[hours]); + + sb.append(':'); + sb.append(NUMBERS[minutes]); + + sb.append(':'); + sb.append(NUMBERS[seconds]); + + // Add nanoseconds. + // This won't work for server versions < 7.2 which only want + // a two digit fractional second, but we don't need to support 7.1 + // anymore and getting the version number here is difficult. 
+ // + if (nanos < 1000) { + return; + } + sb.append('.'); + int len = sb.length(); + sb.append(nanos / 1000); // append microseconds + int needZeros = 6 - (sb.length() - len); + if (needZeros > 0) { + sb.insert(len, ZEROS, 0, needZeros); + } + + int end = sb.length() - 1; + while (sb.charAt(end) == '0') { + sb.deleteCharAt(end); + end--; + } + } + + private void appendTimeZone(StringBuilder sb, Calendar cal) { + int offset = (cal.get(Calendar.ZONE_OFFSET) + cal.get(Calendar.DST_OFFSET)) / 1000; + + appendTimeZone(sb, offset); + } + + private void appendTimeZone(StringBuilder sb, int offset) { + int absoff = Math.abs(offset); + int hours = absoff / 60 / 60; + int mins = (absoff - hours * 60 * 60) / 60; + int secs = absoff - hours * 60 * 60 - mins * 60; + + sb.append(offset >= 0 ? "+" : "-"); + + sb.append(NUMBERS[hours]); + + if (mins == 0 && secs == 0) { + return; + } + sb.append(':'); + + sb.append(NUMBERS[mins]); + + if (secs != 0) { + sb.append(':'); + sb.append(NUMBERS[secs]); + } + } + + private static void appendEra(StringBuilder sb, Calendar cal) { + if (cal.get(Calendar.ERA) == GregorianCalendar.BC) { + sb.append(" BC"); + } + } + + public String toString(LocalDate localDate) { + try (ResourceLock ignore = lock.obtain()) { + if (LocalDate.MAX.equals(localDate)) { + return "infinity"; + } else if (localDate.isBefore(MIN_LOCAL_DATE)) { + return "-infinity"; + } + + sbuf.setLength(0); + + appendDate(sbuf, localDate); + appendEra(sbuf, localDate); + + return sbuf.toString(); + } + } + + public String toString(LocalTime localTime) { + try (ResourceLock ignore = lock.obtain()) { + sbuf.setLength(0); + + if (localTime.isAfter(MAX_TIME)) { + return "24:00:00"; + } + + int nano = localTime.getNano(); + if (nanosExceed499(nano)) { + // Technically speaking this is not a proper rounding, however + // it relies on the fact that appendTime just truncates 000..999 nanosecond part + localTime = localTime.plus(ONE_MICROSECOND); + } + appendTime(sbuf, localTime); + + 
return sbuf.toString(); + } + } + + public String toString(OffsetTime offsetTime) { + try (ResourceLock ignore = lock.obtain()) { + sbuf.setLength(0); + + final LocalTime localTime = offsetTime.toLocalTime(); + if (localTime.isAfter(MAX_TIME)) { + sbuf.append("24:00:00"); + appendTimeZone(sbuf, offsetTime.getOffset()); + return sbuf.toString(); + } + + int nano = offsetTime.getNano(); + if (nanosExceed499(nano)) { + // Technically speaking this is not a proper rounding, however + // it relies on the fact that appendTime just truncates 000..999 nanosecond part + offsetTime = offsetTime.plus(ONE_MICROSECOND); + } + appendTime(sbuf, localTime); + appendTimeZone(sbuf, offsetTime.getOffset()); + + return sbuf.toString(); + } + } + + /** + * Converts {@code timetz} to string taking client time zone ({@link #timeZoneProvider}) + * into account. + * @param value binary representation of {@code timetz} + * @return string representation of {@code timetz} + */ + public String toStringOffsetTimeBin(byte[] value) throws PSQLException { + OffsetTime offsetTimeBin = toOffsetTimeBin(value); + return toString(withClientOffsetSameInstant(offsetTimeBin)); + } + + /** + * PostgreSQL does not store the time zone in the binary representation of timetz. + * However, we want to preserve the output of {@code getString()} in both binary and text formats + * So we try a client time zone when serializing {@link OffsetTime} to string. + * @param input input offset time + * @return adjusted offset time (it represents the same instant as the input one) + */ + public OffsetTime withClientOffsetSameInstant(OffsetTime input) { + if (input == OffsetTime.MAX || input == OffsetTime.MIN) { + return input; + } + TimeZone timeZone = timeZoneProvider.get(); + int offsetMillis = timeZone.getRawOffset(); + return input.withOffsetSameInstant( + offsetMillis == 0 + ? 
ZoneOffset.UTC + : ZoneOffset.ofTotalSeconds(offsetMillis / 1000)); + } + + public String toString(OffsetDateTime offsetDateTime) { + try (ResourceLock ignore = lock.obtain()) { + if (offsetDateTime.isAfter(MAX_OFFSET_DATETIME)) { + return "infinity"; + } else if (offsetDateTime.isBefore(MIN_OFFSET_DATETIME)) { + return "-infinity"; + } + + sbuf.setLength(0); + + int nano = offsetDateTime.getNano(); + if (nanosExceed499(nano)) { + // Technically speaking this is not a proper rounding, however + // it relies on the fact that appendTime just truncates 000..999 nanosecond part + offsetDateTime = offsetDateTime.plus(ONE_MICROSECOND); + } + LocalDateTime localDateTime = offsetDateTime.toLocalDateTime(); + LocalDate localDate = localDateTime.toLocalDate(); + appendDate(sbuf, localDate); + sbuf.append(' '); + appendTime(sbuf, localDateTime.toLocalTime()); + appendTimeZone(sbuf, offsetDateTime.getOffset()); + appendEra(sbuf, localDate); + + return sbuf.toString(); + } + } + + /** + * Converts {@code timestamptz} to string taking client time zone ({@link #timeZoneProvider}) + * into account. + * @param value binary representation of {@code timestamptz} + * @return string representation of {@code timestamptz} + */ + public String toStringOffsetDateTime(byte[] value) throws PSQLException { + OffsetDateTime offsetDateTime = toOffsetDateTimeBin(value); + return toString(withClientOffsetSameInstant(offsetDateTime)); + } + + /** + * PostgreSQL does not store the time zone in the binary representation of timestamptz. + * However, we want to preserve the output of {@code getString()} in both binary and text formats + * So we try a client time zone when serializing {@link OffsetDateTime} to string. 
+ * @param input input offset date time + * @return adjusted offset date time (it represents the same instant as the input one) + */ + public OffsetDateTime withClientOffsetSameInstant(OffsetDateTime input) { + if (input == OffsetDateTime.MAX || input == OffsetDateTime.MIN) { + return input; + } + int offsetMillis; + TimeZone timeZone = timeZoneProvider.get(); + if (isSimpleTimeZone(timeZone.getID())) { + offsetMillis = timeZone.getRawOffset(); + } else { + offsetMillis = timeZone.getOffset(input.toEpochSecond() * 1000L); + } + return input.withOffsetSameInstant( + offsetMillis == 0 + ? ZoneOffset.UTC + : ZoneOffset.ofTotalSeconds(offsetMillis / 1000)); + } + + /** + * Formats {@link LocalDateTime} to be sent to the backend, thus it adds time zone. + * Do not use this method in {@link java.sql.ResultSet#getString(int)} + * @param localDateTime The local date to format as a String + * @return The formatted local date + */ + public String toString(LocalDateTime localDateTime) { + try (ResourceLock ignore = lock.obtain()) { + if (localDateTime.isAfter(MAX_LOCAL_DATETIME)) { + return "infinity"; + } else if (localDateTime.isBefore(MIN_LOCAL_DATETIME)) { + return "-infinity"; + } + + sbuf.setLength(0); + + if (nanosExceed499(localDateTime.getNano())) { + localDateTime = localDateTime.plus(ONE_MICROSECOND); + } + + LocalDate localDate = localDateTime.toLocalDate(); + appendDate(sbuf, localDate); + sbuf.append(' '); + appendTime(sbuf, localDateTime.toLocalTime()); + appendEra(sbuf, localDate); + + return sbuf.toString(); + } + } + + private static void appendDate(StringBuilder sb, LocalDate localDate) { + int year = localDate.get(ChronoField.YEAR_OF_ERA); + int month = localDate.getMonthValue(); + int day = localDate.getDayOfMonth(); + appendDate(sb, year, month, day); + } + + private static void appendTime(StringBuilder sb, LocalTime localTime) { + int hours = localTime.getHour(); + int minutes = localTime.getMinute(); + int seconds = localTime.getSecond(); + int nanos = 
localTime.getNano(); + appendTime(sb, hours, minutes, seconds, nanos); + } + + private void appendTimeZone(StringBuilder sb, ZoneOffset offset) { + int offsetSeconds = offset.getTotalSeconds(); + + appendTimeZone(sb, offsetSeconds); + } + + private static void appendEra(StringBuilder sb, LocalDate localDate) { + if (localDate.get(ChronoField.ERA) == IsoEra.BCE.getValue()) { + sb.append(" BC"); + } + } + + @SuppressWarnings("deprecation") + private static int skipWhitespace(char[] s, int start) { + int slen = s.length; + for (int i = start; i < slen; i++) { + if (!Character.isSpace(s[i])) { + return i; + } + } + return slen; + } + + private static int firstNonDigit(char[] s, int start) { + int slen = s.length; + for (int i = start; i < slen; i++) { + if (!Character.isDigit(s[i])) { + return i; + } + } + return slen; + } + + private static int number(char[] s, int start, int end) { + if (start >= end) { + throw new NumberFormatException(); + } + int n = 0; + for (int i = start; i < end; i++) { + n = 10 * n + (s[i] - '0'); + } + return n; + } + + private static char charAt(char[] s, int pos) { + if (pos >= 0 && pos < s.length) { + return s[pos]; + } + return '\0'; + } + + /** + * Returns the SQL Date object matching the given bytes with {@link Oid#DATE}. + * + * @param tz The timezone used. + * @param bytes The binary encoded date value. + * @return The parsed date object. + * @throws PSQLException If binary format could not be parsed. 
   */
  public Date toDateBin(TimeZone tz, byte[] bytes) throws PSQLException {
    if (bytes.length != 4) {
      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "date"),
          PSQLState.BAD_DATETIME_FORMAT);
    }
    // Wire format: int4 number of days relative to the PostgreSQL epoch (2000-01-01)
    int days = ByteConverter.int4(bytes, 0);
    if (tz == null) {
      tz = getDefaultTz();
    }
    // toJavaSecs shifts from the PostgreSQL epoch to the Unix epoch
    // (defined elsewhere in this class — not visible in this chunk)
    long secs = toJavaSecs(days * 86400L);
    long millis = secs * 1000L;

    // Clamp out-of-range values to the driver's infinity sentinels
    if (millis <= PGStatement.DATE_NEGATIVE_SMALLER_INFINITY) {
      millis = PGStatement.DATE_NEGATIVE_INFINITY;
    } else if (millis >= PGStatement.DATE_POSITIVE_SMALLER_INFINITY) {
      millis = PGStatement.DATE_POSITIVE_INFINITY;
    } else {
      // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
      // time

      millis = guessTimestamp(millis, tz);
    }
    return new Date(millis);
  }

  /**
   * Returns the JVM default time zone, using the cached reflective field when
   * available to avoid the defensive clone TimeZone.getDefault() performs on
   * every call.
   */
  private TimeZone getDefaultTz() {
    // Fast path to getting the default timezone.
    if (DEFAULT_TIME_ZONE_FIELD != null) {
      try {
        TimeZone defaultTimeZone = (TimeZone) DEFAULT_TIME_ZONE_FIELD.get(null);
        // Identity compare: if the field still holds the same instance, the
        // previously cached value is still valid
        if (defaultTimeZone == prevDefaultZoneFieldValue) {
          return defaultTimeZoneCache;
        }
        prevDefaultZoneFieldValue = defaultTimeZone;
      } catch (Exception e) {
        // If this were to fail, fallback on slow method.
      }
    }
    TimeZone tz = TimeZone.getDefault();
    defaultTimeZoneCache = tz;
    return tz;
  }

  /**
   * Reports whether the reflective fast path for the default time zone is usable
   * on this JVM (it is disabled on Java 9+, see the static initializer).
   */
  public boolean hasFastDefaultTimeZone() {
    return DEFAULT_TIME_ZONE_FIELD != null;
  }

  /**
   * Returns the SQL Time object matching the given bytes with {@link Oid#TIME} or
   * {@link Oid#TIMETZ}.
   *
   * @param tz The timezone used when received data is {@link Oid#TIME}, ignored if data already
   *        contains {@link Oid#TIMETZ}.
   * @param bytes The binary encoded time value.
   * @return The parsed time object.
   * @throws PSQLException If binary format could not be parsed.
   */
  public Time toTimeBin(TimeZone tz, byte[] bytes) throws PSQLException {
    // 8 bytes = TIME (microseconds or float8 seconds), 12 bytes = TIMETZ (time + int4 offset).
    if (bytes.length != 8 && bytes.length != 12) {
      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
          PSQLState.BAD_DATETIME_FORMAT);
    }

    long millis;
    int timeOffset;

    if (usesDouble) {
      // Old servers may send time as float8 seconds since midnight.
      double time = ByteConverter.float8(bytes, 0);

      millis = (long) (time * 1000);
    } else {
      // int8 microseconds since midnight.
      long time = ByteConverter.int8(bytes, 0);

      millis = time / 1000;
    }

    if (bytes.length == 12) {
      // TIMETZ carries its own zone offset (seconds); apply it directly and skip guessing.
      // Sign/scale conversion follows the backend's binary timetz encoding.
      timeOffset = ByteConverter.int4(bytes, 8);
      timeOffset *= -1000;
      millis -= timeOffset;
      return new Time(millis);
    }

    if (tz == null) {
      tz = getDefaultTz();
    }

    // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
    // time
    millis = guessTimestamp(millis, tz);

    return convertToTime(millis, tz); // Ensure date part is 1970-01-01
  }

  /**
   * Returns the SQL Time object matching the given bytes with {@link Oid#TIME}.
   *
   * @param bytes The binary encoded time value.
   * @return The parsed time object.
   * @throws PSQLException If binary format could not be parsed.
   */
  public LocalTime toLocalTimeBin(byte[] bytes) throws PSQLException {
    if (bytes.length != 8) {
      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
          PSQLState.BAD_DATETIME_FORMAT);
    }

    long micros;

    if (usesDouble) {
      double seconds = ByteConverter.float8(bytes, 0);

      micros = (long) (seconds * 1000000d);
    } else {
      micros = ByteConverter.int8(bytes, 0);
    }

    // multiplyExact guards against overflow when scaling micros to nanos.
    return LocalTime.ofNanoOfDay(Math.multiplyExact(micros, 1000L));
  }

  /**
   * Returns the SQL Timestamp object matching the given bytes with {@link Oid#TIMESTAMP} or
   * {@link Oid#TIMESTAMPTZ}.
   *
   * @param tz The timezone used when received data is {@link Oid#TIMESTAMP}, ignored if data
   *        already contains {@link Oid#TIMESTAMPTZ}.
   * @param bytes The binary encoded timestamp value.
   * @param timestamptz True if the binary is in GMT.
   * @return The parsed timestamp object.
   * @throws PSQLException If binary format could not be parsed.
   */
  public Timestamp toTimestampBin(TimeZone tz, byte[] bytes, boolean timestamptz)
      throws PSQLException {

    ParsedBinaryTimestamp parsedTimestamp = this.toParsedTimestampBin(tz, bytes, timestamptz);
    // Map the infinity sentinels to the driver's special millisecond values.
    if (parsedTimestamp.infinity == Infinity.POSITIVE) {
      return new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
    } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
      return new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
    }

    Timestamp ts = new Timestamp(parsedTimestamp.millis);
    ts.setNanos(parsedTimestamp.nanos);
    return ts;
  }

  /**
   * Decodes the raw 8-byte binary timestamp into seconds/nanos relative to the PostgreSQL
   * epoch, without any epoch shift or timezone adjustment (callers apply those).
   * Recognizes the backend's +/- infinity encodings for both wire formats.
   *
   * @param bytes 8-byte binary timestamp payload
   * @throws PSQLException if the payload is not exactly 8 bytes
   */
  private ParsedBinaryTimestamp toParsedTimestampBinPlain(byte[] bytes)
      throws PSQLException {

    if (bytes.length != 8) {
      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "timestamp"),
          PSQLState.BAD_DATETIME_FORMAT);
    }

    long secs;
    int nanos;

    if (usesDouble) {
      double time = ByteConverter.float8(bytes, 0);
      if (time == Double.POSITIVE_INFINITY) {
        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
        ts.infinity = Infinity.POSITIVE;
        return ts;
      } else if (time == Double.NEGATIVE_INFINITY) {
        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
        ts.infinity = Infinity.NEGATIVE;
        return ts;
      }

      secs = (long) time;
      nanos = (int) ((time - secs) * 1000000);
    } else {
      long time = ByteConverter.int8(bytes, 0);

      // compatibility with text based receiving, not strictly necessary
      // and can actually be confusing because there are timestamps
      // that are larger than infinite
      if (time == Long.MAX_VALUE) {
        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
        ts.infinity = Infinity.POSITIVE;
        return ts;
      } else if (time == Long.MIN_VALUE) {
        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
        ts.infinity = Infinity.NEGATIVE;
        return ts;
      }

      secs = time / 1000000;
      nanos = (int) (time - secs * 1000000);
    }
    // Borrow one second so the fractional part is always non-negative
    // (integer division truncates toward zero for negative times).
    if (nanos < 0) {
      secs--;
      nanos += 1000000;
    }
    nanos *= 1000;

    long millis = secs * 1000L;

    ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
    ts.millis = millis;
    ts.nanos = nanos;
    return ts;
  }

  /**
   * Decodes a binary timestamp and shifts it to the Java epoch; for non-timestamptz values the
   * point in time is additionally guessed via {@link #guessTimestamp(long, TimeZone)} because
   * the backend sends no zone information.
   */
  private ParsedBinaryTimestamp toParsedTimestampBin(TimeZone tz, byte[] bytes,
      boolean timestamptz)
      throws PSQLException {

    ParsedBinaryTimestamp ts = toParsedTimestampBinPlain(bytes);
    if (ts.infinity != null) {
      return ts;
    }

    long secs = ts.millis / 1000L;

    secs = toJavaSecs(secs);
    long millis = secs * 1000L;
    if (!timestamptz) {
      // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
      // time
      millis = guessTimestamp(millis, tz);
    }

    ts.millis = millis;
    return ts;
  }

  /**
   * Decodes a binary timestamp onto the proleptic Gregorian calendar: only the fixed
   * PostgreSQL-to-Java epoch difference is applied, with no Julian/Gregorian cutoff handling
   * (suitable for java.time values).
   */
  private ParsedBinaryTimestamp toProlepticParsedTimestampBin(byte[] bytes)
      throws PSQLException {

    ParsedBinaryTimestamp ts = toParsedTimestampBinPlain(bytes);
    if (ts.infinity != null) {
      return ts;
    }

    long secs = ts.millis / 1000L;

    // postgres epoc to java epoc
    secs += PG_EPOCH_DIFF.getSeconds();
    long millis = secs * 1000L;

    ts.millis = millis;
    return ts;
  }

  /**
   * Returns the local date time object matching the given bytes with {@link Oid#TIMESTAMP} or
   * {@link Oid#TIMESTAMPTZ}.
   * @param bytes The binary encoded local date time value.
   *
   * @return The parsed local date time object.
   * @throws PSQLException If binary format could not be parsed.
   */
  public LocalDateTime toLocalDateTimeBin(byte[] bytes) throws PSQLException {

    ParsedBinaryTimestamp parsedTimestamp = this.toProlepticParsedTimestampBin(bytes);
    // Infinity sentinels map to LocalDateTime's extreme values.
    if (parsedTimestamp.infinity == Infinity.POSITIVE) {
      return LocalDateTime.MAX;
    } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
      return LocalDateTime.MIN;
    }

    // hardcode utc because the backend does not provide us the timezone
    // Postgres is always UTC
    return LocalDateTime.ofEpochSecond(parsedTimestamp.millis / 1000L, parsedTimestamp.nanos, ZoneOffset.UTC);
  }

  /**
   * Returns the local date time object matching the given bytes with {@link Oid#DATE} or
   * {@link Oid#TIMESTAMP}.
   * @param bytes The binary encoded local date value.
   *
   * @return The parsed local date object.
   * @throws PSQLException If binary format could not be parsed.
   */
  public LocalDate toLocalDateBin(byte[] bytes) throws PSQLException {
    if (bytes.length != 4) {
      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "date"),
          PSQLState.BAD_DATETIME_FORMAT);
    }
    int days = ByteConverter.int4(bytes, 0);
    // Integer.MAX_VALUE/MIN_VALUE are the backend's date infinity sentinels.
    if (days == Integer.MAX_VALUE) {
      return LocalDate.MAX;
    } else if (days == Integer.MIN_VALUE) {
      return LocalDate.MIN;
    }
    // adapt from different Postgres Epoch and convert to LocalDate:
    return LocalDate.ofEpochDay(PG_EPOCH_DIFF.toDays() + days);
  }

  /**

   * Given a UTC timestamp {@code millis} finds another point in time that is rendered in given
   * time zone {@code tz} exactly as "millis in UTC".
   *
   * <p>For instance, given 7 Jan 16:00 UTC and tz=GMT+02:00 it returns 7 Jan 14:00 UTC == 7 Jan
   * 16:00 GMT+02:00. Note that is not trivial for timestamps near DST change. For such cases,
   * we rely on {@link Calendar} to figure out the proper timestamp.
   *
   * @param millis source timestamp
   * @param tz desired time zone
   * @return timestamp that would be rendered in {@code tz} like {@code millis} in UTC
   */
  private long guessTimestamp(long millis, TimeZone tz) {
    if (tz == null) {
      // If client did not provide us with time zone, we use system default time zone
      tz = getDefaultTz();
    }
    // The story here:
    // Backend provided us with something like '2015-10-04 13:40' and it did NOT provide us with a
    // time zone.
    // On top of that, user asked us to treat the timestamp as if it were in GMT+02:00.
    //
    // The code below creates such a timestamp that is rendered as '2015-10-04 13:40 GMT+02:00'
    // In other words, its UTC value should be 11:40 UTC == 13:40 GMT+02:00.
    // It is not sufficient to just subtract offset as you might cross DST change as you subtract.
    //
    // For instance, on 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
    // Suppose we deal with 2000-03-26 02:00:01
    // If you subtract offset from the timestamp, the time will be "a hour behind" since
    // "just a couple of hours ago the OFFSET was different"
    //
    // To make a long story short: we have UTC timestamp that looks like "2000-03-26 02:00:01" when
    // rendered in UTC tz.
    // We want to know another timestamp that will look like "2000-03-26 02:00:01" in Europe/Moscow
    // time zone.

    if (isSimpleTimeZone(tz.getID())) {
      // For well-known non-DST time zones, just subtract offset
      return millis - tz.getRawOffset();
    }
    // For all the other time zones, enjoy debugging Calendar API
    // Here we do a straight-forward implementation that splits original timestamp into pieces and
    // composes it back.
    // Note: cal.setTimeZone alone is not sufficient as it would alter hour (it will try to keep the
    // same time instant value)
    Calendar cal = calendarWithUserTz;
    cal.setTimeZone(UTC_TIMEZONE);
    cal.setTimeInMillis(millis);
    int era = cal.get(Calendar.ERA);
    int year = cal.get(Calendar.YEAR);
    int month = cal.get(Calendar.MONTH);
    int day = cal.get(Calendar.DAY_OF_MONTH);
    int hour = cal.get(Calendar.HOUR_OF_DAY);
    int min = cal.get(Calendar.MINUTE);
    int sec = cal.get(Calendar.SECOND);
    int ms = cal.get(Calendar.MILLISECOND);
    cal.setTimeZone(tz);
    cal.set(Calendar.ERA, era);
    cal.set(Calendar.YEAR, year);
    cal.set(Calendar.MONTH, month);
    cal.set(Calendar.DAY_OF_MONTH, day);
    cal.set(Calendar.HOUR_OF_DAY, hour);
    cal.set(Calendar.MINUTE, min);
    cal.set(Calendar.SECOND, sec);
    cal.set(Calendar.MILLISECOND, ms);
    return cal.getTimeInMillis();
  }

  /**
   * Returns true for fixed-offset zone IDs (GMT*/UTC*) where raw-offset arithmetic is safe
   * because no DST transitions apply.
   */
  private static boolean isSimpleTimeZone(String id) {
    return id.startsWith("GMT") || id.startsWith("UTC");
  }

  /**
   * Extracts the date part from a timestamp.
   *
   * @param millis The timestamp from which to extract the date.
   * @param tz The time zone of the date.
   * @return The extracted date.
   */
  public Date convertToDate(long millis, TimeZone tz) {

    // no adjustments for the infinity hack values
    if (millis <= PGStatement.DATE_NEGATIVE_INFINITY
        || millis >= PGStatement.DATE_POSITIVE_INFINITY) {
      return new Date(millis);
    }
    if (tz == null) {
      tz = getDefaultTz();
    }
    if (isSimpleTimeZone(tz.getID())) {
      // Truncate to 00:00 of the day.
      // Suppose the input date is 7 Jan 15:40 GMT+02:00 (that is 13:40 UTC)
      // We want it to become 7 Jan 00:00 GMT+02:00
      // 1) Make sure millis becomes 15:40 in UTC, so add offset
      int offset = tz.getRawOffset();
      millis += offset;
      // 2) Truncate hours, minutes, etc. Day is always 86400 seconds, no matter what leap seconds
      // are
      millis = floorDiv(millis, ONEDAY) * ONEDAY;
      // 2) Now millis is 7 Jan 00:00 UTC, however we need that in GMT+02:00, so subtract some
      // offset
      millis -= offset;
      // Now we have brand-new 7 Jan 00:00 GMT+02:00
      return new Date(millis);
    }

    // General case: let Calendar truncate the time-of-day fields in the target zone.
    Calendar cal = calendarWithUserTz;
    cal.setTimeZone(tz);
    cal.setTimeInMillis(millis);
    cal.set(Calendar.HOUR_OF_DAY, 0);
    cal.set(Calendar.MINUTE, 0);
    cal.set(Calendar.SECOND, 0);
    cal.set(Calendar.MILLISECOND, 0);

    return new Date(cal.getTimeInMillis());
  }

  /**
   * Extracts the time part from a timestamp. This method ensures the date part of output timestamp
   * looks like 1970-01-01 in given timezone.
   *
   * @param millis The timestamp from which to extract the time.
   * @param tz timezone to use.
   * @return The extracted time.
   */
  public Time convertToTime(long millis, TimeZone tz) {
    if (tz == null) {
      tz = getDefaultTz();
    }
    if (isSimpleTimeZone(tz.getID())) {
      // Leave just time part of the day.
      // Suppose the input date is 2015 7 Jan 15:40 GMT+02:00 (that is 13:40 UTC)
      // We want it to become 1970 1 Jan 15:40 GMT+02:00
      // 1) Make sure millis becomes 15:40 in UTC, so add offset
      int offset = tz.getRawOffset();
      millis += offset;
      // 2) Truncate year, month, day. Day is always 86400 seconds, no matter what leap seconds are
      millis = floorMod(millis, ONEDAY);
      // 2) Now millis is 1970 1 Jan 15:40 UTC, however we need that in GMT+02:00, so subtract some
      // offset
      millis -= offset;
      // Now we have brand-new 1970 1 Jan 15:40 GMT+02:00
      return new Time(millis);
    }
    // General case: pin the date fields to 1970-01-01 AD in the target zone.
    Calendar cal = calendarWithUserTz;
    cal.setTimeZone(tz);
    cal.setTimeInMillis(millis);
    cal.set(Calendar.ERA, GregorianCalendar.AD);
    cal.set(Calendar.YEAR, 1970);
    cal.set(Calendar.MONTH, 0);
    cal.set(Calendar.DAY_OF_MONTH, 1);

    return new Time(cal.getTimeInMillis());
  }

  /**
   * Returns the given time value as String matching what the current postgresql server would send
   * in text mode.
   *
   * @param time time value
   * @param withTimeZone whether timezone should be added
   * @return given time value as String
   */
  public String timeToString(java.util.Date time, boolean withTimeZone) {
    Calendar cal = null;
    if (withTimeZone) {
      cal = calendarWithUserTz;
      cal.setTimeZone(timeZoneProvider.get());
    }
    // Dispatch on the concrete java.sql type; Timestamp must be tested before Date/Time
    // since it is checked first here.
    if (time instanceof Timestamp) {
      return toString(cal, (Timestamp) time, withTimeZone);
    }
    if (time instanceof Time) {
      return toString(cal, (Time) time, withTimeZone);
    }
    return toString(cal, (Date) time, withTimeZone);
  }

  /**
   * Converts the given postgresql seconds to java seconds. Reverse engineered by inserting varying
   * dates to postgresql and tuning the formula until the java dates matched. See {@link #toPgSecs}
   * for the reverse operation.
   *
   * @param secs Postgresql seconds.
   * @return Java seconds.
+ */ + private static long toJavaSecs(long secs) { + // postgres epoc to java epoc + secs += PG_EPOCH_DIFF.getSeconds(); + + // Julian/Gregorian calendar cutoff point + if (secs < -12219292800L) { // October 4, 1582 -> October 15, 1582 + secs += 86400 * 10; + if (secs < -14825808000L) { // 1500-02-28 -> 1500-03-01 + int extraLeaps = (int) ((secs + 14825808000L) / 3155760000L); + extraLeaps--; + extraLeaps -= extraLeaps / 4; + secs += extraLeaps * 86400L; + } + } + return secs; + } + + /** + * Converts the given java seconds to postgresql seconds. See {@link #toJavaSecs} for the reverse + * operation. The conversion is valid for any year 100 BC onwards. + * + * @param secs Postgresql seconds. + * @return Java seconds. + */ + private static long toPgSecs(long secs) { + // java epoc to postgres epoc + secs -= PG_EPOCH_DIFF.getSeconds(); + + // Julian/Gregorian calendar cutoff point + if (secs < -13165977600L) { // October 15, 1582 -> October 4, 1582 + secs -= 86400 * 10; + if (secs < -15773356800L) { // 1500-03-01 -> 1500-02-28 + int years = (int) ((secs + 15773356800L) / -3155823050L); + years++; + years -= years / 4; + secs += years * 86400L; + } + } + + return secs; + } + + /** + * Converts the SQL Date to binary representation for {@link Oid#DATE}. + * + * @param tz The timezone used. + * @param bytes The binary encoded date value. + * @param value value + * @throws PSQLException If binary format could not be parsed. + */ + public void toBinDate(TimeZone tz, byte[] bytes, Date value) throws PSQLException { + long millis = value.getTime(); + + if (tz == null) { + tz = getDefaultTz(); + } + // It "getOffset" is UNTESTED + // See org.postgresql.jdbc.AbstractJdbc2Statement.setDate(int, java.sql.Date, + // java.util.Calendar) + // The problem is we typically do not know for sure what is the exact required date/timestamp + // type + // Thus pgjdbc sticks to text transfer. 
+ millis += tz.getOffset(millis); + + long secs = toPgSecs(millis / 1000); + ByteConverter.int4(bytes, 0, (int) (secs / 86400)); + } + + /** + * Converts backend's TimeZone parameter to java format. + * Notable difference: backend's gmt-3 is GMT+03 in Java. + * + * @param timeZone time zone to use + * @return java TimeZone + */ + public static TimeZone parseBackendTimeZone(String timeZone) { + if (timeZone.startsWith("GMT")) { + TimeZone tz = GMT_ZONES.get(timeZone); + if (tz != null) { + return tz; + } + } + return TimeZone.getTimeZone(timeZone); + } + + private static long floorDiv(long x, long y) { + long r = x / y; + // if the signs are different and modulo not zero, round down + if ((x ^ y) < 0 && (r * y != x)) { + r--; + } + return r; + } + + private static long floorMod(long x, long y) { + return x - floorDiv(x, y) * y; + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/TypeInfoCache.java b/pgjdbc/src/main/java/org/postgresql/jdbc/TypeInfoCache.java new file mode 100644 index 0000000..615b653 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/TypeInfoCache.java @@ -0,0 +1,1095 @@ +/* + * Copyright (c) 2005, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
 */

package org.postgresql.jdbc;

import org.postgresql.core.BaseConnection;
import org.postgresql.core.BaseStatement;
import org.postgresql.core.Oid;
import org.postgresql.core.QueryExecutor;
import org.postgresql.core.ServerVersion;
import org.postgresql.core.TypeInfo;
import org.postgresql.util.GT;
import org.postgresql.util.PGobject;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Per-connection cache of PostgreSQL type metadata: it maps between pg type names, type oids,
 * {@link java.sql.Types} codes, Java class names and array element delimiters, seeding the maps
 * from a built-in core-type table and falling back to pg_catalog queries for anything else.
 */
@SuppressWarnings("try")
public class TypeInfoCache implements TypeInfo {

  private static final Logger LOGGER = Logger.getLogger(TypeInfoCache.class.getName());

  // NOTE(review): the generic type parameters of the Map fields below appear to have been
  // stripped during extraction (e.g. "Map>"); the per-field comments record the intended
  // key/value types — confirm against the upstream file.

  // pgname (String) -> java.sql.Types (Integer)
  private final Map pgNameToSQLType;

  private final Map oidToSQLType;

  // pgname (String) -> java class name (String)
  // ie "text" -> "java.lang.String"
  private final Map pgNameToJavaClass;

  // oid (Integer) -> pgname (String)
  private final Map oidToPgName;
  // pgname (String) -> oid (Integer)
  private final Map pgNameToOid;

  private final Map javaArrayTypeToOid;

  // pgname (String) -> extension pgobject (Class)
  private final Map> pgNameToPgObject;

  // type array oid -> base type's oid
  private final Map pgArrayToPgType;

  // array type oid -> base type array element delimiter
  private final Map arrayOidToDelimiter;

  private final BaseConnection conn;
  private final int unknownLength;
  // Lazily prepared statements for the various pg_catalog lookups; created on first use
  // and reused for the lifetime of the connection.
  private PreparedStatement getOidStatementSimple;
  private PreparedStatement getOidStatementComplexNonArray;
  private PreparedStatement getOidStatementComplexArray;
  private PreparedStatement getNameStatement;
  private PreparedStatement getArrayElementOidStatement;
  private PreparedStatement getArrayDelimiterStatement;
  private PreparedStatement getTypeInfoStatement;
  private PreparedStatement getAllTypeInfoStatement;
  private final ResourceLock lock = new ResourceLock();

  // basic pg types info:
  // 0 - type name
  // 1 - type oid
  // 2 - sql type
  // 3 - java class
  // 4 - array type oid
  private static final Object[][] types = {
      {"int2", Oid.INT2, Types.SMALLINT, "java.lang.Integer", Oid.INT2_ARRAY},
      {"int4", Oid.INT4, Types.INTEGER, "java.lang.Integer", Oid.INT4_ARRAY},
      {"oid", Oid.OID, Types.BIGINT, "java.lang.Long", Oid.OID_ARRAY},
      {"int8", Oid.INT8, Types.BIGINT, "java.lang.Long", Oid.INT8_ARRAY},
      {"money", Oid.MONEY, Types.DOUBLE, "java.lang.Double", Oid.MONEY_ARRAY},
      {"numeric", Oid.NUMERIC, Types.NUMERIC, "java.math.BigDecimal", Oid.NUMERIC_ARRAY},
      {"float4", Oid.FLOAT4, Types.REAL, "java.lang.Float", Oid.FLOAT4_ARRAY},
      {"float8", Oid.FLOAT8, Types.DOUBLE, "java.lang.Double", Oid.FLOAT8_ARRAY},
      {"char", Oid.CHAR, Types.CHAR, "java.lang.String", Oid.CHAR_ARRAY},
      {"bpchar", Oid.BPCHAR, Types.CHAR, "java.lang.String", Oid.BPCHAR_ARRAY},
      {"varchar", Oid.VARCHAR, Types.VARCHAR, "java.lang.String", Oid.VARCHAR_ARRAY},
      {"varbit", Oid.VARBIT, Types.OTHER, "java.lang.String", Oid.VARBIT_ARRAY},
      {"text", Oid.TEXT, Types.VARCHAR, "java.lang.String", Oid.TEXT_ARRAY},
      {"name", Oid.NAME, Types.VARCHAR, "java.lang.String", Oid.NAME_ARRAY},
      {"bytea", Oid.BYTEA, Types.BINARY, "[B", Oid.BYTEA_ARRAY},
      {"bool", Oid.BOOL, Types.BIT, "java.lang.Boolean", Oid.BOOL_ARRAY},
      {"bit", Oid.BIT, Types.BIT, "java.lang.Boolean", Oid.BIT_ARRAY},
      {"date", Oid.DATE, Types.DATE, "java.sql.Date", Oid.DATE_ARRAY},
      {"time", Oid.TIME, Types.TIME, "java.sql.Time", Oid.TIME_ARRAY},
      {"timetz", Oid.TIMETZ, Types.TIME, "java.sql.Time", Oid.TIMETZ_ARRAY},
      {"timestamp", Oid.TIMESTAMP, Types.TIMESTAMP, "java.sql.Timestamp", Oid.TIMESTAMP_ARRAY},
      {"timestamptz", Oid.TIMESTAMPTZ, Types.TIMESTAMP, "java.sql.Timestamp",
          Oid.TIMESTAMPTZ_ARRAY},
      {"refcursor", Oid.REF_CURSOR, Types.REF_CURSOR, "java.sql.ResultSet", Oid.REF_CURSOR_ARRAY},
      {"json", Oid.JSON, Types.OTHER, "org.postgresql.util.PGobject", Oid.JSON_ARRAY},
      {"point", Oid.POINT, Types.OTHER, "org.postgresql.geometric.PGpoint", Oid.POINT_ARRAY},
      {"box", Oid.BOX, Types.OTHER, "org.postgresql.geometric.PGBox", Oid.BOX_ARRAY}
  };

  /**
   * PG maps several alias to real type names. When we do queries against pg_catalog, we must use
   * the real type, not an alias, so use this mapping.
   *

   * Additional values used at runtime (including case variants) will be added to the map.
   */
  private static final ConcurrentMap TYPE_ALIASES = new ConcurrentHashMap<>(30);

  static {
    TYPE_ALIASES.put("bool", "bool");
    TYPE_ALIASES.put("boolean", "bool");
    TYPE_ALIASES.put("smallint", "int2");
    TYPE_ALIASES.put("int2", "int2");
    TYPE_ALIASES.put("int", "int4");
    TYPE_ALIASES.put("integer", "int4");
    TYPE_ALIASES.put("int4", "int4");
    TYPE_ALIASES.put("long", "int8");
    TYPE_ALIASES.put("int8", "int8");
    TYPE_ALIASES.put("bigint", "int8");
    TYPE_ALIASES.put("float", "float8");
    TYPE_ALIASES.put("real", "float4");
    TYPE_ALIASES.put("float4", "float4");
    TYPE_ALIASES.put("double", "float8");
    TYPE_ALIASES.put("double precision", "float8");
    TYPE_ALIASES.put("float8", "float8");
    TYPE_ALIASES.put("decimal", "numeric");
    TYPE_ALIASES.put("numeric", "numeric");
    TYPE_ALIASES.put("character varying", "varchar");
    TYPE_ALIASES.put("varchar", "varchar");
    TYPE_ALIASES.put("time without time zone", "time");
    TYPE_ALIASES.put("time", "time");
    TYPE_ALIASES.put("time with time zone", "timetz");
    TYPE_ALIASES.put("timetz", "timetz");
    TYPE_ALIASES.put("timestamp without time zone", "timestamp");
    TYPE_ALIASES.put("timestamp", "timestamp");
    TYPE_ALIASES.put("timestamp with time zone", "timestamptz");
    TYPE_ALIASES.put("timestamptz", "timestamptz");
  }

  /**
   * Creates a cache for the given connection, presized for the core type table and seeded
   * via {@link #addCoreType} for every entry of {@code types}.
   *
   * @param conn connection the pg_catalog fallback queries run against
   * @param unknownLength reported length for types of unknown size
   */
  @SuppressWarnings("this-escape")
  public TypeInfoCache(BaseConnection conn, int unknownLength) {
    this.conn = conn;
    this.unknownLength = unknownLength;
    oidToPgName = new HashMap<>((int) Math.round(types.length * 1.5));
    pgNameToOid = new HashMap<>((int) Math.round(types.length * 1.5));
    javaArrayTypeToOid = new HashMap<>((int) Math.round(types.length * 1.5));
    pgNameToJavaClass = new HashMap<>((int) Math.round(types.length * 1.5));
    pgNameToPgObject = new HashMap<>((int) Math.round(types.length * 1.5));
    pgArrayToPgType = new HashMap<>((int) Math.round(types.length * 1.5));
    arrayOidToDelimiter = new HashMap<>((int) Math.round(types.length * 2.5));

    // needs to be synchronized because the iterator is returned
    // from getPGTypeNamesWithSQLTypes()
    pgNameToSQLType = Collections.synchronizedMap(new HashMap((int) Math.round(types.length * 1.5)));
    oidToSQLType = Collections.synchronizedMap(new HashMap((int) Math.round(types.length * 1.5)));

    for (Object[] type : types) {
      String pgTypeName = (String) type[0];
      Integer oid = (Integer) type[1];
      Integer sqlType = (Integer) type[2];
      String javaClass = (String) type[3];
      Integer arrayOid = (Integer) type[4];

      addCoreType(pgTypeName, oid, sqlType, javaClass, arrayOid);
    }

    // hstore has no fixed oid; only the Java class mapping is registered here.
    pgNameToJavaClass.put("hstore", Map.class.getName());
  }

  @Override
  public void addCoreType(String pgTypeName, Integer oid, Integer sqlType,
      String javaClass, Integer arrayOid) {
    try (ResourceLock ignore = lock.obtain()) {
      pgNameToJavaClass.put(pgTypeName, javaClass);
      pgNameToOid.put(pgTypeName, oid);
      oidToPgName.put(oid, pgTypeName);
      javaArrayTypeToOid.put(javaClass, arrayOid);
      pgArrayToPgType.put(arrayOid, oid);
      pgNameToSQLType.put(pgTypeName, sqlType);
      oidToSQLType.put(oid, sqlType);

      // Currently we hardcode all core types array delimiter
      // to a comma. In a stock install the only exception is
      // the box datatype and it's not a JDBC core type.
      //
      Character delim = ',';
      if ("box".equals(pgTypeName)) {
        delim = ';';
      }
      arrayOidToDelimiter.put(oid, delim);
      arrayOidToDelimiter.put(arrayOid, delim);

      // Register both array spellings: "type[]" always, and "_type" only if not taken,
      // so the canonical reverse mapping (oidToPgName) prefers the "_type" form.
      String pgArrayTypeName = pgTypeName + "[]";
      pgNameToJavaClass.put(pgArrayTypeName, "java.sql.Array");
      pgNameToSQLType.put(pgArrayTypeName, Types.ARRAY);
      oidToSQLType.put(arrayOid, Types.ARRAY);
      pgNameToOid.put(pgArrayTypeName, arrayOid);
      pgArrayTypeName = "_" + pgTypeName;
      if (!pgNameToJavaClass.containsKey(pgArrayTypeName)) {
        pgNameToJavaClass.put(pgArrayTypeName, "java.sql.Array");
        pgNameToSQLType.put(pgArrayTypeName, Types.ARRAY);
        pgNameToOid.put(pgArrayTypeName, arrayOid);
        oidToPgName.put(arrayOid, pgArrayTypeName);
      }
    }
  }

  @Override
  public void addDataType(String type, Class klass)
      throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      pgNameToPgObject.put(type, klass);
      pgNameToJavaClass.put(type, klass.getName());
    }
  }

  @Override
  public Iterator getPGTypeNamesWithSQLTypes() {
    return pgNameToSQLType.keySet().iterator();
  }

  @Override
  public Iterator getPGTypeOidsWithSQLTypes() {
    return oidToSQLType.keySet().iterator();
  }

  /**
   * Builds the pg_catalog query that classifies types (array / composite / domain / enum),
   * resolving schema precedence via current_schemas(false).
   *
   * @param typoidParam when true, adds a {@code pg_type.oid = ?} filter for a single-type lookup
   */
  private String getSQLTypeQuery(boolean typoidParam) {
    // There's no great way of telling what's an array type.
    // People can name their own types starting with _.
    // Other types use typelem that aren't actually arrays, like box.
    //
    // in case of multiple records (in different schemas) choose the one from the current
    // schema,
    // otherwise take the last version of a type that is at least more deterministic then before
    // (keeping old behaviour of finding types, that should not be found without correct search
    // path)
    StringBuilder sql = new StringBuilder();
    sql.append("SELECT typinput='pg_catalog.array_in'::regproc as is_array, typtype, typname, pg_type.oid ");
    sql.append(" FROM pg_catalog.pg_type ");
    sql.append(" LEFT JOIN (select ns.oid as nspoid, ns.nspname, r.r ");
    sql.append(" from pg_namespace as ns ");
    // -- go with older way of unnesting array to be compatible with 8.0
    sql.append(" join ( select s.r, (current_schemas(false))[s.r] as nspname ");
    sql.append(" from generate_series(1, array_upper(current_schemas(false), 1)) as s(r) ) as r ");
    sql.append(" using ( nspname ) ");
    sql.append(" ) as sp ");
    sql.append(" ON sp.nspoid = typnamespace ");
    if (typoidParam) {
      sql.append(" WHERE pg_type.oid = ? ");
    }
    sql.append(" ORDER BY sp.r, pg_type.oid DESC;");
    return sql.toString();
  }

  /**
   * Maps one row of the {@link #getSQLTypeQuery} result to a {@link Types} code:
   * array -> ARRAY, composite -> STRUCT, domain -> DISTINCT, enum -> VARCHAR, else OTHER.
   */
  private int getSQLTypeFromQueryResult(ResultSet rs) throws SQLException {
    Integer type = null;
    boolean isArray = rs.getBoolean("is_array");
    String typtype = rs.getString("typtype");
    if (isArray) {
      type = Types.ARRAY;
    } else if ("c".equals(typtype)) {
      type = Types.STRUCT;
    } else if ("d".equals(typtype)) {
      type = Types.DISTINCT;
    } else if ("e".equals(typtype)) {
      type = Types.VARCHAR;
    }
    if (type == null) {
      type = Types.OTHER;
    }
    return type;
  }

  /** Lazily prepares (and caches) the statement that loads type info for ALL types. */
  private PreparedStatement prepareGetAllTypeInfoStatement() throws SQLException {
    PreparedStatement getAllTypeInfoStatement = this.getAllTypeInfoStatement;
    if (getAllTypeInfoStatement == null) {
      getAllTypeInfoStatement = conn.prepareStatement(getSQLTypeQuery(false));
      this.getAllTypeInfoStatement = getAllTypeInfoStatement;
    }
    return getAllTypeInfoStatement;
  }

  /**
   * Bulk-loads SQL typecodes for every type in pg_catalog into the name and oid caches,
   * without overwriting entries that are already present.
   *
   * @throws SQLException if the catalog query fails or returns no results
   */
  public void cacheSQLTypes() throws SQLException {
    LOGGER.log(Level.FINEST, "caching all SQL typecodes");
    PreparedStatement getAllTypeInfoStatement = prepareGetAllTypeInfoStatement();
    // Go through BaseStatement to avoid transaction start.
    if (!((BaseStatement) getAllTypeInfoStatement)
        .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
      throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
    }
    ResultSet rs = getAllTypeInfoStatement.getResultSet();
    while (rs.next()) {
      String typeName = rs.getString("typname");
      Integer type = getSQLTypeFromQueryResult(rs);
      if (!pgNameToSQLType.containsKey(typeName)) {
        pgNameToSQLType.put(typeName, type);
      }

      Integer typeOid = longOidToInt(rs.getLong("oid"));
      if (!oidToSQLType.containsKey(typeOid)) {
        oidToSQLType.put(typeOid, type);
      }
    }
    rs.close();
  }

  /** Lazily prepares (and caches) the statement that loads type info for a single oid. */
  private PreparedStatement prepareGetTypeInfoStatement() throws SQLException {
    PreparedStatement getTypeInfoStatement = this.getTypeInfoStatement;
    if (getTypeInfoStatement == null) {
      getTypeInfoStatement = conn.prepareStatement(getSQLTypeQuery(true));
      this.getTypeInfoStatement = getTypeInfoStatement;
    }
    return getTypeInfoStatement;
  }

  @Override
  public int getSQLType(String pgTypeName) throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      /*
      Get a few things out of the way such as arrays and known types
      */
      if (pgTypeName.endsWith("[]")) {
        return Types.ARRAY;
      }
      Integer i = this.pgNameToSQLType.get(pgTypeName);
      if (i != null) {
        return i;
      }

      /*
      All else fails then we will query the database.
      save for future calls
      */
      i = getSQLType(getPGType(pgTypeName));

      pgNameToSQLType.put(pgTypeName, i);
      return i;
    }
  }

  @Override
  public int getJavaArrayType(String className) throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      Integer oid = javaArrayTypeToOid.get(className);
      if (oid == null) {
        return Oid.UNSPECIFIED;
      }
      return oid;
    }
  }

  @Override
  public int getSQLType(int typeOid) throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      if (typeOid == Oid.UNSPECIFIED) {
        return Types.OTHER;
      }

      Integer i = oidToSQLType.get(typeOid);
      if (i != null) {
        return i;
      }

      LOGGER.log(Level.FINEST, "querying SQL typecode for pg type oid ''{0}''", intOidToLong(typeOid));

      PreparedStatement getTypeInfoStatement = prepareGetTypeInfoStatement();

      getTypeInfoStatement.setLong(1, intOidToLong(typeOid));

      // Go through BaseStatement to avoid transaction start.
      if (!((BaseStatement) getTypeInfoStatement)
          .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
        throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
      }

      ResultSet rs = getTypeInfoStatement.getResultSet();

      int sqlType = Types.OTHER;
      if (rs.next()) {
        sqlType = getSQLTypeFromQueryResult(rs);
      }
      rs.close();

      oidToSQLType.put(typeOid, sqlType);
      return sqlType;
    }
  }

  /**
   * Selects and parameterizes the proper oid-lookup statement for the given type name,
   * handling three shapes: plain unqualified names, array names ("type[]"), and
   * schema-qualified and/or quoted names.
   */
  private PreparedStatement getOidStatement(String pgTypeName) throws SQLException {
    boolean isArray = pgTypeName.endsWith("[]");
    boolean hasQuote = pgTypeName.contains("\"");
    int dotIndex = pgTypeName.indexOf('.');

    if (dotIndex == -1 && !hasQuote && !isArray) {
      PreparedStatement getOidStatementSimple = this.getOidStatementSimple;
      if (getOidStatementSimple == null) {
        String sql;
        // see comments in @getSQLType()
        // -- go with older way of unnesting array to be compatible with 8.0
        sql = "SELECT pg_type.oid, typname "
            + " FROM pg_catalog.pg_type "
            + " LEFT "
            + " JOIN (select ns.oid as nspoid, ns.nspname, r.r "
            + " from pg_namespace as ns "
            + " join ( select s.r, (current_schemas(false))[s.r] as nspname "
            + " from generate_series(1, array_upper(current_schemas(false), 1)) as s(r) ) as r "
            + " using ( nspname ) "
            + " ) as sp "
            + " ON sp.nspoid = typnamespace "
            + " WHERE typname = ? "
            + " ORDER BY sp.r, pg_type.oid DESC LIMIT 1;";
        this.getOidStatementSimple = getOidStatementSimple = conn.prepareStatement(sql);
      }
      // coerce to lower case to handle upper case type names
      String lcName = pgTypeName.toLowerCase(Locale.ROOT);
      // default arrays are represented with _ as prefix ... this dont even work for public schema
      // fully
      getOidStatementSimple.setString(1, lcName);
      return getOidStatementSimple;
    }
    PreparedStatement oidStatementComplex;
    if (isArray) {
      PreparedStatement getOidStatementComplexArray = this.getOidStatementComplexArray;
      if (getOidStatementComplexArray == null) {
        String sql;
        // 8.3+ exposes pg_type.typarray directly; older servers need the typelem join.
        if (conn.haveMinimumServerVersion(ServerVersion.v8_3)) {
          sql = "SELECT t.typarray, arr.typname "
              + " FROM pg_catalog.pg_type t"
              + " JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
              + " JOIN pg_catalog.pg_type arr ON arr.oid = t.typarray"
              + " WHERE t.typname = ? AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
              + " ORDER BY t.oid DESC LIMIT 1";
        } else {
          sql = "SELECT t.oid, t.typname "
              + " FROM pg_catalog.pg_type t"
              + " JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
              + " WHERE t.typelem = (SELECT oid FROM pg_catalog.pg_type WHERE typname = ?)"
              + " AND substring(t.typname, 1, 1) = '_' AND t.typlen = -1"
              + " AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
              + " ORDER BY t.typelem DESC LIMIT 1";
        }
        this.getOidStatementComplexArray = getOidStatementComplexArray = conn.prepareStatement(sql);
      }
      oidStatementComplex = getOidStatementComplexArray;
    } else {
      PreparedStatement getOidStatementComplexNonArray = this.getOidStatementComplexNonArray;
      if (getOidStatementComplexNonArray == null) {
        String sql = "SELECT t.oid, t.typname "
            + " FROM pg_catalog.pg_type t"
            + " JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
            + " WHERE t.typname = ? AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
            + " ORDER BY t.oid DESC LIMIT 1";
        this.getOidStatementComplexNonArray = getOidStatementComplexNonArray = conn.prepareStatement(sql);
      }
      oidStatementComplex = getOidStatementComplexNonArray;
    }
    //type name requested may be schema specific, of the form "{schema}"."typeName",
    //or may check across all schemas where a schema is not specified.
    String fullName = isArray ? pgTypeName.substring(0, pgTypeName.length() - 2) : pgTypeName;
    String schema;
    String name;
    // simple use case
    if (dotIndex == -1) {
      schema = null;
      name = fullName;
    } else {
      if (fullName.startsWith("\"")) {
        if (fullName.endsWith("\"")) {
          // Both sides quoted: split on the quoted dot; the quote characters consumed by the
          // split are restored on each part so the unquoting below works uniformly.
          String[] parts = fullName.split("\"\\.\"");
          schema = parts.length == 2 ? parts[0] + "\"" : null;
          name = parts.length == 2 ? "\"" + parts[1] : parts[0];
        } else {
          int lastDotIndex = fullName.lastIndexOf('.');
          name = fullName.substring(lastDotIndex + 1);
          schema = fullName.substring(0, lastDotIndex);
        }
      } else {
        schema = fullName.substring(0, dotIndex);
        name = fullName.substring(dotIndex + 1);
      }
    }
    // Quoted identifiers keep their case (quotes stripped); unquoted ones are folded to
    // lower case, mirroring the backend's identifier folding.
    if (schema != null && schema.startsWith("\"") && schema.endsWith("\"")) {
      schema = schema.substring(1, schema.length() - 1);
    } else if (schema != null) {
      schema = schema.toLowerCase(Locale.ROOT);
    }
    if (name.startsWith("\"") && name.endsWith("\"")) {
      name = name.substring(1, name.length() - 1);
    } else {
      name = name.toLowerCase(Locale.ROOT);
    }
    oidStatementComplex.setString(1, name);
    oidStatementComplex.setString(2, schema);
    // Third parameter enables the "search all schemas on the path" branch when no schema given.
    oidStatementComplex.setBoolean(3, schema == null);
    return oidStatementComplex;
  }

  @Override
  public int getPGType(String pgTypeName) throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
      // there really isn't anything else to return other than UNSPECIFIED here.
      if (pgTypeName == null) {
        return Oid.UNSPECIFIED;
      }

      Integer oid = pgNameToOid.get(pgTypeName);
      if (oid != null) {
        return oid;
      }

      PreparedStatement oidStatement = getOidStatement(pgTypeName);

      // Go through BaseStatement to avoid transaction start.
+ if (!((BaseStatement) oidStatement).executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) { + throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA); + } + + oid = Oid.UNSPECIFIED; + ResultSet rs = oidStatement.getResultSet(); + if (rs.next()) { + oid = (int) rs.getLong(1); + String internalName = rs.getString(2); + oidToPgName.put(oid, internalName); + pgNameToOid.put(internalName, oid); + } + pgNameToOid.put(pgTypeName, oid); + rs.close(); + + return oid; + } + } + + @Override + public String getPGType(int oid) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (oid == Oid.UNSPECIFIED) { + // TODO: it would be great to forbid UNSPECIFIED argument, and make the return type non-nullable + return null; + } + + String pgTypeName = oidToPgName.get(oid); + if (pgTypeName != null) { + return pgTypeName; + } + + PreparedStatement getNameStatement = prepareGetNameStatement(); + + getNameStatement.setInt(1, oid); + + // Go through BaseStatement to avoid transaction start. + if (!((BaseStatement) getNameStatement).executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) { + throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA); + } + + ResultSet rs = getNameStatement.getResultSet(); + if (rs.next()) { + boolean onPath = rs.getBoolean(1); + String schema = rs.getString(2); + String name = rs.getString(3); + if (onPath) { + pgTypeName = name; + pgNameToOid.put(schema + "." + name, oid); + } else { + // TODO: escaping !? + pgTypeName = "\"" + schema + "\".\"" + name + "\""; + // if all is lowercase add special type info + // TODO: should probably check for all special chars + if (schema.equals(schema.toLowerCase(Locale.ROOT)) && schema.indexOf('.') == -1 + && name.equals(name.toLowerCase(Locale.ROOT)) && name.indexOf('.') == -1) { + pgNameToOid.put(schema + "." 
+ name, oid); + } + } + pgNameToOid.put(pgTypeName, oid); + oidToPgName.put(oid, pgTypeName); + } + rs.close(); + + return pgTypeName; + } + } + + private PreparedStatement prepareGetNameStatement() throws SQLException { + PreparedStatement getNameStatement = this.getNameStatement; + if (getNameStatement == null) { + String sql; + sql = "SELECT n.nspname = ANY(current_schemas(true)), n.nspname, t.typname " + + "FROM pg_catalog.pg_type t " + + "JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid WHERE t.oid = ?"; + + this.getNameStatement = getNameStatement = conn.prepareStatement(sql); + } + return getNameStatement; + } + + @Override + public int getPGArrayType(String elementTypeName) throws SQLException { + elementTypeName = getTypeForAlias(elementTypeName); + return getPGType(elementTypeName + "[]"); + } + + /** + * Return the oid of the array's base element if it's an array, if not return the provided oid. + * This doesn't do any database lookups, so it's only useful for the originally provided type + * mappings. This is fine for it's intended uses where we only have intimate knowledge of types + * that are already known to the driver. + * + * @param oid input oid + * @return oid of the array's base element or the provided oid (if not array) + */ + protected int convertArrayToBaseOid(int oid) { + try (ResourceLock ignore = lock.obtain()) { + Integer i = pgArrayToPgType.get(oid); + if (i == null) { + return oid; + } + return i; + } + } + + @Override + public char getArrayDelimiter(int oid) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (oid == Oid.UNSPECIFIED) { + return ','; + } + + Character delim = arrayOidToDelimiter.get(oid); + if (delim != null) { + return delim; + } + + PreparedStatement getArrayDelimiterStatement = prepareGetArrayDelimiterStatement(); + + getArrayDelimiterStatement.setInt(1, oid); + + // Go through BaseStatement to avoid transaction start. 
+ if (!((BaseStatement) getArrayDelimiterStatement) + .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) { + throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA); + } + + ResultSet rs = getArrayDelimiterStatement.getResultSet(); + if (!rs.next()) { + throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA); + } + + String s = rs.getString(1); + delim = s.charAt(0); + + arrayOidToDelimiter.put(oid, delim); + + rs.close(); + + return delim; + } + } + + private PreparedStatement prepareGetArrayDelimiterStatement() throws SQLException { + PreparedStatement getArrayDelimiterStatement = this.getArrayDelimiterStatement; + if (getArrayDelimiterStatement == null) { + String sql; + sql = "SELECT e.typdelim FROM pg_catalog.pg_type t, pg_catalog.pg_type e " + + "WHERE t.oid = ? and t.typelem = e.oid"; + this.getArrayDelimiterStatement = getArrayDelimiterStatement = conn.prepareStatement(sql); + } + return getArrayDelimiterStatement; + } + + @Override + public int getPGArrayElement(int oid) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + if (oid == Oid.UNSPECIFIED) { + return Oid.UNSPECIFIED; + } + + Integer pgType = pgArrayToPgType.get(oid); + + if (pgType != null) { + return pgType; + } + + PreparedStatement getArrayElementOidStatement = prepareGetArrayElementOidStatement(); + + getArrayElementOidStatement.setInt(1, oid); + + // Go through BaseStatement to avoid transaction start. 
+ if (!((BaseStatement) getArrayElementOidStatement) + .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) { + throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA); + } + + ResultSet rs = getArrayElementOidStatement.getResultSet(); + if (!rs.next()) { + throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA); + } + + pgType = (int) rs.getLong(1); + boolean onPath = rs.getBoolean(2); + String schema = rs.getString(3); + String name = rs.getString(4); + pgArrayToPgType.put(oid, pgType); + pgNameToOid.put(schema + "." + name, pgType); + String fullName = "\"" + schema + "\".\"" + name + "\""; + pgNameToOid.put(fullName, pgType); + if (onPath && name.equals(name.toLowerCase(Locale.ROOT))) { + oidToPgName.put(pgType, name); + pgNameToOid.put(name, pgType); + } else { + oidToPgName.put(pgType, fullName); + } + + rs.close(); + + return pgType; + } + } + + private PreparedStatement prepareGetArrayElementOidStatement() throws SQLException { + PreparedStatement getArrayElementOidStatement = this.getArrayElementOidStatement; + if (getArrayElementOidStatement == null) { + String sql; + sql = "SELECT e.oid, n.nspname = ANY(current_schemas(true)), n.nspname, e.typname " + + "FROM pg_catalog.pg_type t JOIN pg_catalog.pg_type e ON t.typelem = e.oid " + + "JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid WHERE t.oid = ?"; + this.getArrayElementOidStatement = getArrayElementOidStatement = conn.prepareStatement(sql); + } + return getArrayElementOidStatement; + } + + @Override + public Class getPGobject(String type) { + try (ResourceLock ignore = lock.obtain()) { + return pgNameToPgObject.get(type); + } + } + + @Override + public String getJavaClass(int oid) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + String pgTypeName = getPGType(oid); + if (pgTypeName == null) { + // Technically speaking, we should not be here + // null result probably means oid == UNSPECIFIED which has no 
clear way + // to map to Java + return "java.lang.String"; + } + + String result = pgNameToJavaClass.get(pgTypeName); + if (result != null) { + return result; + } + + if (getSQLType(pgTypeName) == Types.ARRAY) { + result = "java.sql.Array"; + pgNameToJavaClass.put(pgTypeName, result); + } + + return result == null ? "java.lang.String" : result; + } + } + + @Override + public String getTypeForAlias(String alias) { + if ( alias == null ) { + return null; + } + String type = TYPE_ALIASES.get(alias); + if (type != null) { + return type; + } + type = TYPE_ALIASES.get(alias.toLowerCase(Locale.ROOT)); + if (type == null) { + type = alias; + } + //populate for future use + TYPE_ALIASES.put(alias, type); + return type; + } + + @Override + public int getPrecision(int oid, int typmod) { + oid = convertArrayToBaseOid(oid); + switch (oid) { + case Oid.INT2: + return 5; + + case Oid.OID: + case Oid.INT4: + return 10; + + case Oid.INT8: + return 19; + + case Oid.FLOAT4: + // For float4 and float8, we can normally only get 6 and 15 + // significant digits out, but extra_float_digits may raise + // that number by up to two digits. 
+ return 8; + + case Oid.FLOAT8: + return 17; + + case Oid.NUMERIC: + if (typmod == -1) { + return 0; + } + return ((typmod - 4) & 0xFFFF0000) >> 16; + + case Oid.CHAR: + case Oid.BOOL: + return 1; + + case Oid.BPCHAR: + case Oid.VARCHAR: + if (typmod == -1) { + return unknownLength; + } + return typmod - 4; + + // datetime types get the + // "length in characters of the String representation" + case Oid.DATE: + case Oid.TIME: + case Oid.TIMETZ: + case Oid.INTERVAL: + case Oid.TIMESTAMP: + case Oid.TIMESTAMPTZ: + return getDisplaySize(oid, typmod); + + case Oid.BIT: + return typmod; + + case Oid.VARBIT: + if (typmod == -1) { + return unknownLength; + } + return typmod; + + case Oid.TEXT: + case Oid.BYTEA: + default: + return unknownLength; + } + } + + @Override + public int getScale(int oid, int typmod) { + oid = convertArrayToBaseOid(oid); + switch (oid) { + case Oid.FLOAT4: + return 8; + case Oid.FLOAT8: + return 17; + case Oid.NUMERIC: + if (typmod == -1) { + return 0; + } + return (typmod - 4) & 0xFFFF; + case Oid.TIME: + case Oid.TIMETZ: + case Oid.TIMESTAMP: + case Oid.TIMESTAMPTZ: + if (typmod == -1) { + return 6; + } + return typmod; + case Oid.INTERVAL: + if (typmod == -1) { + return 6; + } + return typmod & 0xFFFF; + default: + return 0; + } + } + + @Override + public boolean isCaseSensitive(int oid) { + oid = convertArrayToBaseOid(oid); + switch (oid) { + case Oid.OID: + case Oid.INT2: + case Oid.INT4: + case Oid.INT8: + case Oid.FLOAT4: + case Oid.FLOAT8: + case Oid.NUMERIC: + case Oid.BOOL: + case Oid.BIT: + case Oid.VARBIT: + case Oid.DATE: + case Oid.TIME: + case Oid.TIMETZ: + case Oid.TIMESTAMP: + case Oid.TIMESTAMPTZ: + case Oid.INTERVAL: + return false; + default: + return true; + } + } + + @Override + public boolean isSigned(int oid) { + oid = convertArrayToBaseOid(oid); + switch (oid) { + case Oid.INT2: + case Oid.INT4: + case Oid.INT8: + case Oid.FLOAT4: + case Oid.FLOAT8: + case Oid.NUMERIC: + return true; + default: + return false; + } + } + 
+ @SuppressWarnings("fallthrough") + @Override + public int getDisplaySize(int oid, int typmod) { + oid = convertArrayToBaseOid(oid); + switch (oid) { + case Oid.INT2: + return 6; // -32768 to +32767 + case Oid.INT4: + return 11; // -2147483648 to +2147483647 + case Oid.OID: + return 10; // 0 to 4294967295 + case Oid.INT8: + return 20; // -9223372036854775808 to +9223372036854775807 + case Oid.FLOAT4: + // varies based upon the extra_float_digits GUC. + // These values are for the longest possible length. + return 15; // sign + 9 digits + decimal point + e + sign + 2 digits + case Oid.FLOAT8: + return 25; // sign + 18 digits + decimal point + e + sign + 3 digits + case Oid.CHAR: + return 1; + case Oid.BOOL: + return 1; + case Oid.DATE: + return 13; // "4713-01-01 BC" to "01/01/4713 BC" - "31/12/32767" + case Oid.TIME: + case Oid.TIMETZ: + case Oid.TIMESTAMP: + case Oid.TIMESTAMPTZ: + // Calculate the number of decimal digits + the decimal point. + int secondSize; + switch (typmod) { + case -1: + secondSize = 6 + 1; + break; + case 0: + secondSize = 0; + break; + case 1: + // Bizarrely SELECT '0:0:0.1'::time(1); returns 2 digits. + secondSize = 2 + 1; + break; + default: + secondSize = typmod + 1; + break; + } + + // We assume the worst case scenario for all of these. 
+ // time = '00:00:00' = 8 + // date = '5874897-12-31' = 13 (although at large values second precision is lost) + // date = '294276-11-20' = 12 --enable-integer-datetimes + // zone = '+11:30' = 6; + + switch (oid) { + case Oid.TIME: + return 8 + secondSize; + case Oid.TIMETZ: + return 8 + secondSize + 6; + case Oid.TIMESTAMP: + return 13 + 1 + 8 + secondSize; + case Oid.TIMESTAMPTZ: + return 13 + 1 + 8 + secondSize + 6; + } + case Oid.INTERVAL: + // SELECT LENGTH('-123456789 years 11 months 33 days 23 hours 10.123456 seconds'::interval); + return 49; + case Oid.VARCHAR: + case Oid.BPCHAR: + if (typmod == -1) { + return unknownLength; + } + return typmod - 4; + case Oid.NUMERIC: + if (typmod == -1) { + return 131089; // SELECT LENGTH(pow(10::numeric,131071)); 131071 = 2^17-1 + } + int precision = (typmod - 4 >> 16) & 0xffff; + int scale = (typmod - 4) & 0xffff; + // sign + digits + decimal point (only if we have nonzero scale) + return 1 + precision + (scale != 0 ? 1 : 0); + case Oid.BIT: + return typmod; + case Oid.VARBIT: + if (typmod == -1) { + return unknownLength; + } + return typmod; + case Oid.TEXT: + case Oid.BYTEA: + return unknownLength; + default: + return unknownLength; + } + } + + @Override + public int getMaximumPrecision(int oid) { + oid = convertArrayToBaseOid(oid); + switch (oid) { + case Oid.NUMERIC: + return 1000; + case Oid.TIME: + case Oid.TIMETZ: + // Technically this depends on the --enable-integer-datetimes + // configure setting. It is 6 with integer and 10 with float. + return 6; + case Oid.TIMESTAMP: + case Oid.TIMESTAMPTZ: + case Oid.INTERVAL: + return 6; + case Oid.BPCHAR: + case Oid.VARCHAR: + return 10485760; + case Oid.BIT: + case Oid.VARBIT: + return 83886080; + default: + return 0; + } + } + + @Override + public boolean requiresQuoting(int oid) throws SQLException { + int sqlType = getSQLType(oid); + return requiresQuotingSqlType(sqlType); + } + + /** + * Returns true if particular sqlType requires quoting. 
+ * This method is used internally by the driver, so it might disappear without notice. + * + * @param sqlType sql type as in java.sql.Types + * @return true if the type requires quoting + * @throws SQLException if something goes wrong + */ + @Override + public boolean requiresQuotingSqlType(int sqlType) throws SQLException { + switch (sqlType) { + case Types.BIGINT: + case Types.DOUBLE: + case Types.FLOAT: + case Types.INTEGER: + case Types.REAL: + case Types.SMALLINT: + case Types.TINYINT: + case Types.NUMERIC: + case Types.DECIMAL: + return false; + } + return true; + } + + @Override + public int longOidToInt(long oid) throws SQLException { + if ((oid & 0xFFFF_FFFF_0000_0000L) != 0) { + throw new PSQLException(GT.tr("Value is not an OID: {0}", oid), PSQLState.NUMERIC_VALUE_OUT_OF_RANGE); + } + + return (int) oid; + } + + @Override + public long intOidToLong(int oid) { + return ((long) oid) & 0xFFFFFFFFL; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/UUIDArrayAssistant.java b/pgjdbc/src/main/java/org/postgresql/jdbc/UUIDArrayAssistant.java new file mode 100644 index 0000000..c6376c9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc/UUIDArrayAssistant.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.jdbc2.ArrayAssistant; +import org.postgresql.util.ByteConverter; + +import java.util.UUID; + +public class UUIDArrayAssistant implements ArrayAssistant { + + public UUIDArrayAssistant() { + } + + @Override + public Class baseType() { + return UUID.class; + } + + @Override + public Object buildElement(byte[] bytes, int pos, int len) { + return new UUID(ByteConverter.int8(bytes, pos + 0), ByteConverter.int8(bytes, pos + 8)); + } + + @Override + public Object buildElement(String literal) { + return UUID.fromString(literal); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistant.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistant.java new file mode 100644 index 0000000..cc57ac8 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistant.java @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc2; + +/** + * Implement this interface and register its instance with ArrayAssistantRegistry, to let the Postgres + * driver support more array types. + * + * @author Minglei Tu + */ +public interface ArrayAssistant { + /** + * get array base type. + * + * @return array base type + */ + Class baseType(); + + /** + * build an array element from its binary bytes. + * + * @param bytes input bytes + * @param pos position in input array + * @param len length of the element + * @return array element from its binary bytes + */ + Object buildElement(byte[] bytes, int pos, int len); + + /** + * build an array element from its literal string.
+ * + * @param literal string representation of array element + * @return array element + */ + Object buildElement(String literal); +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistantRegistry.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistantRegistry.java new file mode 100644 index 0000000..59a97f9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistantRegistry.java @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc2; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Array assistants register here. + * + * @author Minglei Tu + */ +public class ArrayAssistantRegistry { + private static final ConcurrentMap ARRAY_ASSISTANT_MAP = + new ConcurrentHashMap<>(); + + public ArrayAssistantRegistry() { + } + + public static ArrayAssistant getAssistant(int oid) { + return ARRAY_ASSISTANT_MAP.get(oid); + } + + public static void register(int oid, ArrayAssistant assistant) { + ARRAY_ASSISTANT_MAP.put(oid, assistant); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/ConnectionPool.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/ConnectionPool.java new file mode 100644 index 0000000..f1afca2 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/ConnectionPool.java @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc2.optional; + +import org.postgresql.ds.PGConnectionPoolDataSource; + +/** + * @deprecated Please use {@link PGConnectionPoolDataSource} + */ +@SuppressWarnings("serial") +@Deprecated +public class ConnectionPool extends PGConnectionPoolDataSource { +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/PoolingDataSource.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/PoolingDataSource.java new file mode 100644 index 0000000..f47d373 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/PoolingDataSource.java @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc2.optional; + +import org.postgresql.ds.PGPoolingDataSource; + +/** + * @deprecated Since 42.0.0, see {@link PGPoolingDataSource} + */ +@Deprecated +public class PoolingDataSource extends PGPoolingDataSource { +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/SimpleDataSource.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/SimpleDataSource.java new file mode 100644 index 0000000..e907c07 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/optional/SimpleDataSource.java @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc2.optional; + +import org.postgresql.ds.PGSimpleDataSource; + +/** + * @deprecated Please use {@link PGSimpleDataSource} + */ +@SuppressWarnings("serial") +@Deprecated +public class SimpleDataSource extends PGSimpleDataSource { +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3ConnectionPool.java b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3ConnectionPool.java new file mode 100644 index 0000000..0a56657 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3ConnectionPool.java @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc3; + +import org.postgresql.ds.PGConnectionPoolDataSource; + +/** + * @deprecated Please use {@link PGConnectionPoolDataSource} + */ +@SuppressWarnings("serial") +@Deprecated +public class Jdbc3ConnectionPool extends PGConnectionPoolDataSource { +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3PoolingDataSource.java b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3PoolingDataSource.java new file mode 100644 index 0000000..10b1920 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3PoolingDataSource.java @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc3; + +import org.postgresql.ds.PGPoolingDataSource; + +/** + * @deprecated Since 42.0.0, see {@link PGPoolingDataSource} + */ +@Deprecated +public class Jdbc3PoolingDataSource extends PGPoolingDataSource { +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3SimpleDataSource.java b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3SimpleDataSource.java new file mode 100644 index 0000000..b61c8b0 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbc3/Jdbc3SimpleDataSource.java @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc3; + +import org.postgresql.ds.PGSimpleDataSource; + +/** + * @deprecated Please use {@link PGSimpleDataSource} + */ +@SuppressWarnings("serial") +@Deprecated +public class Jdbc3SimpleDataSource extends PGSimpleDataSource { +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgPassParser.java b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgPassParser.java new file mode 100644 index 0000000..0ca6f05 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgPassParser.java @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbcurlresolver; + +import java.net.URI; +import org.postgresql.PGEnvironment; +import org.postgresql.util.OSUtil; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Reader; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * helps to read Password File. 
+ * https://www.postgresql.org/docs/current/libpq-pgpass.html + */ +public class PgPassParser { + + private static final Logger LOGGER = Logger.getLogger(PgPassParser.class.getName()); + private static final char SEPARATOR = ':'; + // + private final String hostname; + private final String port; + private final String database; + private final String user; + + // + private PgPassParser(String hostname, String port, String database, String user) { + this.hostname = hostname; + this.port = port; + this.database = database; + this.user = user; + } + + /** + * Read .pgpass resource + * + * @param hostname hostname or * + * @param port port or * + * @param database database or * + * @param user username or * + * @return password or null + */ + public static String getPassword(String hostname, String port, String database, String user) { + if (hostname == null || hostname.isEmpty()) { + return null; + } + if (port == null || port.isEmpty()) { + return null; + } + if (database == null || database.isEmpty()) { + return null; + } + if (user == null || user.isEmpty()) { + return null; + } + PgPassParser pgPassParser = new PgPassParser(hostname, port, database, user); + return pgPassParser.findPassword(); + } + + private String findPassword() { + String resourceName = findPgPasswordResourceName(); + if (resourceName == null) { + return null; + } + // + String result = null; + try (InputStream inputStream = openInputStream(resourceName)) { + result = parseInputStream(inputStream); + } catch (IOException e) { + LOGGER.log(Level.FINE, "Failed to handle resource [{0}] with error [{1}]", new Object[]{resourceName, e.getMessage()}); + } + // + return result; + } + + // open URL or File + private InputStream openInputStream(String resourceName) throws IOException { + + try { + URL url = URI.create(resourceName).toURL(); + return url.openStream(); + } catch ( MalformedURLException ex ) { + // try file + File file = new File(resourceName); + return new FileInputStream(file); + } + } + 
+ // choose resource where to search for service description + private String findPgPasswordResourceName() { + // default file name + String pgPassFileDefaultName = PGEnvironment.PGPASSFILE.getDefaultValue(); + + // if there is value, use it - 1st priority + { + String propertyName = PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(); + String resourceName = System.getProperty(propertyName); + if (resourceName != null && !resourceName.trim().isEmpty()) { + LOGGER.log(Level.FINE, "Value [{0}] selected from property [{1}]", new Object[]{resourceName, propertyName}); + return resourceName; + } + } + + // if there is value, use it - 2nd priority + { + String envVariableName = PGEnvironment.PGPASSFILE.getName(); + String resourceName = System.getenv().get(envVariableName); + if (resourceName != null && !resourceName.trim().isEmpty()) { + LOGGER.log(Level.FINE, "Value [{0}] selected from environment variable [{1}]", new Object[]{resourceName, envVariableName}); + return resourceName; + } + } + + // if file in user home is readable, use it, otherwise continue - 3rd priority + { + String resourceName = ""; + if ( !OSUtil.isWindows() ) { + resourceName += "."; + } + resourceName += pgPassFileDefaultName; + if (OSUtil.isWindows()) { + resourceName += ".conf"; + } + File resourceFile = new File(OSUtil.getUserConfigRootDirectory(), resourceName); + if (resourceFile.canRead()) { + LOGGER.log(Level.FINE, "Value [{0}] selected because file exist in user home directory", new Object[]{resourceFile.getAbsolutePath()}); + return resourceFile.getAbsolutePath(); + } + } + + // otherwise null + LOGGER.log(Level.FINE, "Value for resource [{0}] not found", pgPassFileDefaultName); + return null; + } + + // + private String parseInputStream(InputStream inputStream) throws IOException { + // + String result = null; + try ( + Reader reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8); + BufferedReader br = new BufferedReader(reader)) { + // + String line; + int currentLine = 0; 
+ while ((line = br.readLine()) != null) { + currentLine++; + if (line.trim().isEmpty()) { + // skip empty lines + continue; + } else if (line.startsWith("#")) { + // skip lines with comments + continue; + } + // analyze line, accept first matching line + result = evaluateLine(line, currentLine); + if (result != null) { + break; + } + } + } + // + return result; + } + + // + private String evaluateLine(String fullLine, int currentLine) { + String line = fullLine; + String result = null; + // check match + if ((line = checkForPattern(line, hostname)) != null + && (line = checkForPattern(line, port)) != null + && (line = checkForPattern(line, database)) != null + && (line = checkForPattern(line, user)) != null) { + // use remaining line to get password + result = extractPassword(line); + String lineWithoutPassword = fullLine.substring(0, fullLine.length() - line.length()); + LOGGER.log(Level.FINE, "Matching line number [{0}] with value prefix [{1}] found for input [{2}:{3}:{4}:{5}]", + new Object[]{currentLine, lineWithoutPassword, hostname, port, database, user}); + } + // + return result; + } + + // + private String extractPassword(String line) { + StringBuilder sb = new StringBuilder(); + // take all characters up to separator (which is colon) + // remove escaping colon and backslash ("\\ -> \" ; "\: -> :") + // single backslash is not considered as error ("\a -> \a") + for (int i = 0; i < line.length(); i++) { + char chr = line.charAt(i); + if (chr == '\\' && (i + 1) < line.length()) { + char nextChr = line.charAt(i + 1); + if (nextChr == '\\' || nextChr == SEPARATOR) { + chr = nextChr; + i++; + } + } else if (chr == SEPARATOR) { + break; + } + sb.append(chr); + } + return sb.toString(); + } + + // + private String checkForPattern(String line, String value) { + String result = null; + if (line.startsWith("*:")) { + // any value match + result = line.substring(2); + } else { + int lPos = 0; + // Why not to split by separator (:) and compare by elements? 
+ // Ipv6 makes in tricky. ipv6 may contain different number of colons. Also, to maintain compatibility with libpq. + // Compare beginning of line and value char by char. + // line may have escaped values, value does not have escaping + // line escaping is not mandatory. These are considered equal: "ab\cd:ef" == "ab\\cd\:ef" == "ab\cd\:ef" == "ab\\cd:ef" + for (int vPos = 0; vPos < value.length(); vPos++) { + if (lPos >= line.length()) { + return null; + } + char l = line.charAt(lPos); + if (l == '\\') { + if ((lPos + 1) >= line.length()) { + return null; + } + char next = line.charAt(lPos + 1); + if (next == '\\' || next == SEPARATOR) { + l = next; + lPos++; + } + } + lPos++; + char v = value.charAt(vPos); + if (l != v) { + return null; + } + } + if (line.charAt(lPos) == SEPARATOR) { + result = line.substring(lPos + 1); + } + } + return result; + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgServiceConfParser.java b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgServiceConfParser.java new file mode 100644 index 0000000..7bca1b1 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgServiceConfParser.java @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbcurlresolver; + +import java.net.URI; +import org.postgresql.PGEnvironment; +import org.postgresql.PGProperty; +import org.postgresql.util.OSUtil; +import org.postgresql.util.PGPropertyUtil; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Reader; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Properties; +import java.util.Set; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; + +/** + * helps to read Connection Service File. + * https://www.postgresql.org/docs/current/libpq-pgservice.html + */ +public class PgServiceConfParser { + + private static final Logger LOGGER = Logger.getLogger(PgServiceConfParser.class.getName()); + private final String serviceName; + private boolean ignoreIfOpenFails = true; + + private PgServiceConfParser(String serviceName) { + this.serviceName = serviceName; + } + + /** + * Read pg_service.conf resource + * + * @param serviceName service name to search for + * @return key value pairs + */ + public static Properties getServiceProperties(String serviceName) { + PgServiceConfParser pgServiceConfParser = new PgServiceConfParser(serviceName); + return pgServiceConfParser.findServiceDescription(); + } + + private Properties findServiceDescription() { + String resourceName = findPgServiceConfResourceName(); + if (resourceName == null) { + return null; + } + // + Properties result = null; + try (InputStream inputStream = openInputStream(resourceName)) { + result = parseInputStream(inputStream); + } catch (IOException e) { + Level level = ignoreIfOpenFails ? 
Level.FINE : Level.WARNING; + LOGGER.log(level, "Failed to handle resource [{0}] with error [{1}]", new Object[]{resourceName, e.getMessage()}); + } + // + return result; + } + + // open URL or File + private InputStream openInputStream(String resourceName) throws IOException { + + try { + URL url = URI.create(resourceName).toURL(); + return url.openStream(); + } catch ( MalformedURLException ex ) { + // try file + File file = new File(resourceName); + return new FileInputStream(file); + } + } + + // choose resource where to search for service description + private String findPgServiceConfResourceName() { + // default file name + String pgServiceConfFileDefaultName = PGEnvironment.PGSERVICEFILE.getDefaultValue(); + + // if there is value, use it - 1st priority + { + String propertyName = PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(); + String resourceName = System.getProperty(propertyName); + if (resourceName != null && !resourceName.trim().isEmpty()) { + this.ignoreIfOpenFails = false; + LOGGER.log(Level.FINE, "Value [{0}] selected from property [{1}]", + new Object[]{resourceName, propertyName}); + return resourceName; + } + } + + // if there is value, use it - 2nd priority + { + String envVariableName = PGEnvironment.PGSERVICEFILE.getName(); + String resourceName = System.getenv().get(envVariableName); + if (resourceName != null && !resourceName.trim().isEmpty()) { + this.ignoreIfOpenFails = false; + LOGGER.log(Level.FINE, "Value [{0}] selected from environment variable [{1}]", + new Object[]{resourceName, envVariableName}); + return resourceName; + } + } + + /* + if file in user home is readable, use it, otherwise continue - 3rd priority + in the case that the file is in the user home directory it is prepended with '.' + */ + { + String resourceName = "." 
+ pgServiceConfFileDefaultName; + File resourceFile = new File(OSUtil.getUserConfigRootDirectory(), resourceName); + if (resourceFile.canRead()) { + LOGGER.log(Level.FINE, "Value [{0}] selected because file exist in user home directory", new Object[]{resourceFile.getAbsolutePath()}); + return resourceFile.getAbsolutePath(); + } + } + + // if there is value, use it - 4th priority + { + String envVariableName = PGEnvironment.PGSYSCONFDIR.getName(); + String pgSysconfDir = System.getenv().get(envVariableName); + if (pgSysconfDir != null && !pgSysconfDir.trim().isEmpty()) { + String resourceName = pgSysconfDir + File.separator + pgServiceConfFileDefaultName; + LOGGER.log(Level.FINE, "Value [{0}] selected using environment variable [{1}]", new Object[]{resourceName, envVariableName}); + return resourceName; + } + } + // otherwise null + LOGGER.log(Level.FINE, "Value for resource [{0}] not found", pgServiceConfFileDefaultName); + return null; + } + + /* + # Requirements for stream handling (have to match with libpq behaviour) + # + # space around line is removed + # Line: " host=my-host " + # equal to : "host=my-host" + # keys are case sensitive + # Line: "host=my-host" + # not equal to : "HOST=my-host" + # keys are limited with values described in enum PGEnvironment field name + # key is invalid: "my-host=my-host" + # unexpected keys produce error + # Example: "my-host=my-host" + # Example: "HOST=my-host" + # space before equal sign becomes part of key + # Line: "host =my-host" + # key equals: "host " + # space after equal sign becomes part of value + # Line: "host= my-host" + # key equals: " my-host" + # in case of duplicate section - first entry counts + # Line: "[service-one]" + # Line: "host=host-one" + # Line: "[service-two]" + # Line: "host=host-two" + # --> section-one is selected + # in case of duplicate key - first entry counts + # Line: "[service-one]" + # Line: "host=host-one" + # Line: "host=host-two" + # --> host-one is selected + # service name is case 
sensitive + # Line: "[service-one]" + # Line: "[service-ONE]" + # --> these are unique service names + # whatever is between brackets is considered as service name (including space) + # Line: "[ service-ONE]" + # Line: "[service-ONE ]" + # Line: "[service ONE]" + # --> these are unique service names + */ + private Properties parseInputStream(InputStream inputStream) throws IOException { + // build set of allowed keys + Set allowedServiceKeys = Arrays.stream(PGProperty.values()) + .map(PGProperty::getName) + .map(PGPropertyUtil::translatePGPropertyToPGService) + .collect(Collectors.toSet()); + + // + Properties result = new Properties(); + boolean isFound = false; + try ( + Reader reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8); + BufferedReader br = new BufferedReader(reader)) { + // + String originalLine; + String line; + int lineNumber = 0; + while ((originalLine = br.readLine()) != null) { + lineNumber++; + // remove spaces around it + line = originalLine.trim(); + // skip if empty line or starts with comment sign + if (line.isEmpty() || line.startsWith("#")) { + continue; + } + // find first equal sign + int indexOfEqualSign = line.indexOf("="); + // is it section start? 
+ if (line.startsWith("[") && line.endsWith("]")) { + // stop processing if section with correct name was found already + if (isFound) { + break; + } + // get name of section + String sectionName = line.substring(1, line.length() - 1); + // if match then mark it as section is found + if (serviceName.equals(sectionName)) { + isFound = true; + } + } else if (!isFound) { + // skip further processing until section is found + continue; + } else if (indexOfEqualSign > 1) { + // get key and value + String key = line.substring(0, indexOfEqualSign); + String value = line.substring(indexOfEqualSign + 1); + // check key against set of allowed keys + if (!allowedServiceKeys.contains(key)) { + // log list of allowed keys + String allowedValuesCommaSeparated = + allowedServiceKeys.stream().sorted().collect(Collectors.joining(",")); + LOGGER.log(Level.SEVERE, "Got invalid key: line number [{0}], value [{1}], allowed " + + "values [{2}]", + new Object[]{lineNumber, originalLine, allowedValuesCommaSeparated}); + // stop processing because of invalid key + return null; + } + // ignore line if value is missing + if (!value.isEmpty()) { + // ignore line having duplicate key, otherwise store key-value pair + result.putIfAbsent(PGPropertyUtil.translatePGServiceToPGProperty(key), value); + } + } else { + // if not equal sign then stop processing because of invalid syntax + LOGGER.log(Level.WARNING, "Not valid line: line number [{0}], value [{1}]", + new Object[]{lineNumber, originalLine}); + return null; + } + } + } + // null means failure - service is not found + return isFound ? 
result : null; + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/BlobInputStream.java b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobInputStream.java new file mode 100644 index 0000000..15beab6 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobInputStream.java @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.largeobject; + +import org.postgresql.jdbc.ResourceLock; +import org.postgresql.util.GT; + +import java.io.IOException; +import java.io.InputStream; +import java.sql.SQLException; + +/** + * This is an implementation of an InputStream from a large object. + */ +@SuppressWarnings("try") +public class BlobInputStream extends InputStream { + static final int DEFAULT_MAX_BUFFER_SIZE = 512 * 1024; + static final int INITIAL_BUFFER_SIZE = 64 * 1024; + + /** + * The parent LargeObject. + */ + private LargeObject lo; + private final ResourceLock lock = new ResourceLock(); + + /** + * The absolute position. + */ + private long absolutePosition; + + /** + * Buffer used to improve performance. + */ + private byte [] buffer; + + /** + * Position within buffer. + */ + private int bufferPosition; + + /** + * The amount of bytes to read on the next read. + * Currently, we nullify {@link #buffer}, so we can't use {@code buffer.length}. + */ + private int lastBufferSize; + + /** + * The buffer size. + */ + private final int maxBufferSize; + + /** + * The mark position. + */ + private long markPosition; + + /** + * The limit. 
+ */ + private final long limit; + + /** + * @param lo LargeObject to read from + */ + public BlobInputStream(LargeObject lo) { + this(lo, DEFAULT_MAX_BUFFER_SIZE); + } + + /** + * @param lo LargeObject to read from + * @param bsize buffer size + */ + + public BlobInputStream(LargeObject lo, int bsize) { + this(lo, bsize, Long.MAX_VALUE); + } + + /** + * @param lo LargeObject to read from + * @param bsize buffer size + * @param limit max number of bytes to read + */ + public BlobInputStream(LargeObject lo, int bsize, long limit) { + this.lo = lo; + this.maxBufferSize = bsize; + // The very first read multiplies the last buffer size by two, so we divide by two to get + // the first read to be exactly the initial buffer size + this.lastBufferSize = INITIAL_BUFFER_SIZE / 2; + // Treat -1 as no limit for backward compatibility + this.limit = limit == -1 ? Long.MAX_VALUE : limit; + } + + /** + * The minimum required to implement input stream. + */ + @Override + public int read() throws IOException { + try (ResourceLock ignore = lock.obtain()) { + LargeObject lo = getLo(); + if (absolutePosition >= limit) { + buffer = null; + bufferPosition = 0; + return -1; + } + // read more in if necessary + if (buffer == null || bufferPosition >= buffer.length) { + // Don't hold the buffer while waiting for DB to respond + // Note: lo.read(...) does not support "fetching the response into the user-provided buffer" + // See https://github.com/pgjdbc/pgjdbc/issues/3043 + int nextBufferSize = getNextBufferSize(1); + buffer = lo.read(nextBufferSize); + bufferPosition = 0; + + if (buffer.length == 0) { + // The lob does not produce any more data, so we are at the end of the stream + return -1; + } + } + + int ret = buffer[bufferPosition] & 0xFF; + + bufferPosition++; + absolutePosition++; + if (bufferPosition >= buffer.length) { + // TODO: support buffer reuse in mark/reset + buffer = null; + bufferPosition = 0; + } + + return ret; + } catch (SQLException e) { + long loId = lo == null ? 
-1 : lo.getLongOID(); + throw new IOException( + GT.tr("Can not read data from large object {0}, position: {1}, buffer size: {2}", + loId, absolutePosition, lastBufferSize), + e); + } + } + + /** + * Computes the next buffer size to use for reading data from the large object. + * The idea is to avoid allocating too much memory, especially if the user will use just a few + * bytes of the data. + * @param len estimated read request + * @return next buffer size or {@link #maxBufferSize} if the buffer should not be increased + */ + private int getNextBufferSize(int len) { + int nextBufferSize = Math.min(maxBufferSize, this.lastBufferSize * 2); + if (len > nextBufferSize) { + nextBufferSize = Math.min(maxBufferSize, Integer.highestOneBit(len * 2)); + } + this.lastBufferSize = nextBufferSize; + return nextBufferSize; + } + + @Override + public int read(byte[] dest, int off, int len) throws IOException { + if (len == 0) { + return 0; + } + try (ResourceLock ignore = lock.obtain()) { + int bytesCopied = 0; + LargeObject lo = getLo(); + + // Check to make sure we aren't at the limit. 
+ if (absolutePosition >= limit) { + return -1; + } + + // Check to make sure we are not going to read past the limit + len = Math.min(len, (int) Math.min(limit - absolutePosition, Integer.MAX_VALUE)); + + // have we read anything into the buffer + if (buffer != null) { + // now figure out how much data is in the buffer + int bytesInBuffer = buffer.length - bufferPosition; + // figure out how many bytes the user wants + int bytesToCopy = Math.min(len, bytesInBuffer); + // copy them in + System.arraycopy(buffer, bufferPosition, dest, off, bytesToCopy); + // move the buffer position + bufferPosition += bytesToCopy; + if (bufferPosition >= buffer.length) { + // TODO: support buffer reuse in mark/reset + buffer = null; + bufferPosition = 0; + } + // position in the blob + absolutePosition += bytesToCopy; + // increment offset + off += bytesToCopy; + // decrement the length + len -= bytesToCopy; + bytesCopied = bytesToCopy; + } + + if (len > 0) { + int nextBufferSize = getNextBufferSize(len); + // We are going to read data past the existing buffer, so we release the memory + // before making a DB call + buffer = null; + bufferPosition = 0; + int bytesRead; + try { + if (len >= nextBufferSize) { + // Read directly into the user's buffer + bytesRead = lo.read(dest, off, len); + } else { + // Refill the buffer and copy from it + buffer = lo.read(nextBufferSize); + // Note that actual number of bytes read may be less than requested + bytesRead = Math.min(len, buffer.length); + System.arraycopy(buffer, 0, dest, off, bytesRead); + // If we at the end of the stream, and we just copied the last bytes, + // we can release the buffer + if (bytesRead == buffer.length) { + // TODO: if we want to reuse the buffer in mark/reset we should not release the + // buffer here + buffer = null; + bufferPosition = 0; + } else { + bufferPosition = bytesRead; + } + } + } catch (SQLException ex) { + throw new IOException( + GT.tr("Can not read data from large object {0}, position: {1}, buffer 
size: {2}", + lo.getLongOID(), absolutePosition, len), + ex); + } + bytesCopied += bytesRead; + absolutePosition += bytesRead; + } + return bytesCopied == 0 ? -1 : bytesCopied; + } + } + + /** + *

Closes this input stream and releases any system resources associated with the stream.

+ * + *

The close method of InputStream does nothing.

+ * + * @throws IOException if an I/O error occurs. + */ + @Override + public void close() throws IOException { + long loId = 0; + try (ResourceLock ignore = lock.obtain()) { + LargeObject lo = this.lo; + if (lo != null) { + loId = lo.getLongOID(); + lo.close(); + } + this.lo = null; + } catch (SQLException e) { + throw new IOException( + GT.tr("Can not close large object {0}", + loId), + e); + } + } + + /** + *

Marks the current position in this input stream. A subsequent call to the reset + * method repositions this stream at the last marked position so that subsequent reads re-read the + * same bytes.

+ * + *

The readlimit argument tells this input stream to allow that many bytes to be + * read before the mark position gets invalidated.

+ * + *

The general contract of mark is that, if the method markSupported + * returns true, the stream somehow remembers all the bytes read after the call to + * mark and stands ready to supply those same bytes again if and whenever the method + * reset is called. However, the stream is not required to remember any data at all + * if more than readlimit bytes are read from the stream before reset is + * called.

+ * + *

Marking a closed stream should not have any effect on the stream.

+ * + * @param readlimit the maximum limit of bytes that can be read before the mark position becomes + * invalid. + * @see java.io.InputStream#reset() + */ + @Override + public void mark(int readlimit) { + try (ResourceLock ignore = lock.obtain()) { + markPosition = absolutePosition; + } + } + + /** + * Repositions this stream to the position at the time the mark method was last + * called on this input stream. NB: If mark is not called we move to the beginning. + * + * @see java.io.InputStream#mark(int) + * @see java.io.IOException + */ + @Override + public void reset() throws IOException { + try (ResourceLock ignore = lock.obtain()) { + LargeObject lo = getLo(); + long loId = lo.getLongOID(); + try { + if (markPosition <= Integer.MAX_VALUE) { + lo.seek((int) markPosition); + } else { + lo.seek64(markPosition, LargeObject.SEEK_SET); + } + buffer = null; + absolutePosition = markPosition; + } catch (SQLException e) { + throw new IOException( + GT.tr("Can not reset stream for large object {0} to position {1}", + loId, markPosition), + e); + } + } + } + + /** + * Tests if this input stream supports the mark and reset methods. The + * markSupported method of InputStream returns false. + * + * @return true if this stream type supports the mark and reset methods; + * false otherwise.
+ * @see java.io.InputStream#mark(int) + * @see java.io.InputStream#reset() + */ + @Override + public boolean markSupported() { + return true; + } + + private LargeObject getLo() throws IOException { + if (lo == null) { + throw new IOException("BlobOutputStream is closed"); + } + return lo; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/BlobOutputStream.java b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobOutputStream.java new file mode 100644 index 0000000..2636ee4 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobOutputStream.java @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.largeobject; + +import org.postgresql.jdbc.ResourceLock; +import org.postgresql.util.ByteStreamWriter; +import org.postgresql.util.GT; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.sql.SQLException; + +/** + * This implements a basic output stream that writes to a LargeObject. + */ +@SuppressWarnings("try") +public class BlobOutputStream extends OutputStream { + static final int DEFAULT_MAX_BUFFER_SIZE = 512 * 1024; + + /** + * The parent LargeObject. + */ + private LargeObject lo; + private final ResourceLock lock = new ResourceLock(); + + /** + * Buffer. + */ + private byte [] buf; + + /** + * Size of the buffer (default 1K). + */ + private final int maxBufferSize; + + /** + * Position within the buffer. + */ + private int bufferPosition; + + /** + * Create an OutputStream to a large object. + * + * @param lo LargeObject + */ + public BlobOutputStream(LargeObject lo) { + this(lo, DEFAULT_MAX_BUFFER_SIZE); + } + + /** + * Create an OutputStream to a large object. 
+ * + * @param lo LargeObject + * @param bufferSize The size of the buffer for single-byte writes + */ + public BlobOutputStream(LargeObject lo, int bufferSize) { + this.lo = lo; + // Avoid "0" buffer size, and ensure the bufferSize will always be a power of two + this.maxBufferSize = Integer.highestOneBit(Math.max(bufferSize, 1)); + } + + /** + * Grows an internal buffer to ensure the extra bytes fit in the buffer. + * @param extraBytes the number of extra bytes that should fit in the buffer + * @return new buffer + */ + private byte[] growBuffer(int extraBytes) { + byte[] buf = this.buf; + if (buf != null && (buf.length == maxBufferSize || buf.length - bufferPosition >= extraBytes)) { + // Buffer is already large enough + return buf; + } + // We use power-of-two buffers, so they align nicely with PostgreSQL's LargeObject slicing + // By default PostgreSQL slices the data in 2KiB chunks + int newSize = Math.min(maxBufferSize, Integer.highestOneBit(bufferPosition + extraBytes) * 2); + byte[] newBuffer = new byte[newSize]; + if (buf != null && bufferPosition != 0) { + // There was some data in the old buffer, copy it over + System.arraycopy(buf, 0, newBuffer, 0, bufferPosition); + } + this.buf = newBuffer; + return newBuffer; + } + + @Override + public void write(int b) throws IOException { + long loId = 0; + try (ResourceLock ignore = lock.obtain()) { + LargeObject lo = checkClosed(); + loId = lo.getLongOID(); + byte[] buf = growBuffer(16); + if (bufferPosition >= buf.length) { + lo.write(buf); + bufferPosition = 0; + } + buf[bufferPosition++] = (byte) b; + } catch (SQLException e) { + throw new IOException( + GT.tr("Can not write data to large object {0}, requested write length: {1}", + loId, 1), + e); + } + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + long loId = 0; + try (ResourceLock ignore = lock.obtain()) { + LargeObject lo = checkClosed(); + loId = lo.getLongOID(); + byte[] buf = this.buf; + int totalData = 
bufferPosition + len; + // We have two parts of the data (it goes sequentially): + // 1) Data in buf at positions [0, bufferPosition) + // 2) Data in b at positions [off, off + len) + // If the new data fits into the buffer, we just copy it there. + // Otherwise, it might sound nice idea to just write them to the database, unfortunately, + // it is not optimal, as PostgreSQL chunks LargeObjects into 2KiB rows. + // That is why we would like to avoid writing a part of 2KiB chunk, and then issue overwrite + // causing DB to load and update the row. + // + // In fact, LOBLKSIZE is BLCKSZ/4, so users might have different values, so we use + // 8KiB write alignment for larger buffer sizes just in case. + // + // | buf[0] ... buf[bufferPosition] | b[off] ... b[off + len] | + // |<----------------- totalData ---------------------------->| + // If the total data does not align with 2048, we might have some remainder that we will + // copy to the beginning of the buffer and write later. + // The remainder can fall into either b (e.g. if the requested len is big enough): + // + // | buf[0] ... buf[bufferPosition] | b[off] ........ b[off + len] | + // |<----------------- totalData --------------------------------->| + // |<-------writeFromBuf----------->|<-writeFromB->|<--tailLength->| + // + // or + // buf (e.g. if the requested write len is small yet it does not fit into the max buffer size): + // | buf[0] .................... buf[bufferPosition] | b[off] .. b[off + len] | + // |<----------------- totalData -------------------------------------------->| + // |<-------writeFromBuf---------------->|<--------tailLength---------------->| + // "writeFromB" will be zero in that case + + // We want aligned writes, so the write requests chunk nicely into large object rows + int tailLength = + maxBufferSize >= 8192 ? totalData % 8192 : ( + maxBufferSize >= 2048 ? 
totalData % 2048 : 0 + ); + + if (totalData >= maxBufferSize) { + // The resulting data won't fit into the buffer, so we flush the data to the database + int writeFromBuffer = Math.min(bufferPosition, totalData - tailLength); + int writeFromB = Math.max(0, totalData - writeFromBuffer - tailLength); + if (buf == null || bufferPosition <= 0) { + // The buffer is empty, so we can write the data directly + lo.write(b, off, writeFromB); + } else { + if (writeFromB == 0) { + lo.write(buf, 0, writeFromBuffer); + } else { + lo.write( + ByteStreamWriter.of( + ByteBuffer.wrap(buf, 0, writeFromBuffer), + ByteBuffer.wrap(b, off, writeFromB))); + } + // There might be some data left in the buffer since we keep the tail + if (writeFromBuffer >= bufferPosition) { + // The buffer was fully written to the database + bufferPosition = 0; + } else { + // Copy the rest to the beginning + System.arraycopy(buf, writeFromBuffer, buf, 0, bufferPosition - writeFromBuffer); + bufferPosition -= writeFromBuffer; + } + } + len -= writeFromB; + off += writeFromB; + } + if (len > 0) { + buf = growBuffer(len); + System.arraycopy(b, off, buf, bufferPosition, len); + bufferPosition += len; + } + } catch (SQLException e) { + throw new IOException( + GT.tr("Can not write data to large object {0}, requested write length: {1}", + loId, len), + e); + } + } + + /** + * Flushes this output stream and forces any buffered output bytes to be written out. The general + * contract of flush is that calling it is an indication that, if any bytes + * previously written have been buffered by the implementation of the output stream, such bytes + * should immediately be written to their intended destination. + * + * @throws IOException if an I/O error occurs. 
+ */ + @Override + public void flush() throws IOException { + long loId = 0; + try (ResourceLock ignore = lock.obtain()) { + LargeObject lo = checkClosed(); + loId = lo.getLongOID(); + byte[] buf = this.buf; + if (buf != null && bufferPosition > 0) { + lo.write(buf, 0, bufferPosition); + } + bufferPosition = 0; + } catch (SQLException e) { + throw new IOException( + GT.tr("Can not flush large object {0}", + loId), + e); + } + } + + @Override + public void close() throws IOException { + long loId = 0; + try (ResourceLock ignore = lock.obtain()) { + LargeObject lo = this.lo; + if (lo != null) { + loId = lo.getLongOID(); + flush(); + lo.close(); + this.lo = null; + } + } catch (SQLException e) { + throw new IOException( + GT.tr("Can not close large object {0}", + loId), + e); + } + } + + private LargeObject checkClosed() throws IOException { + if (lo == null) { + throw new IOException("BlobOutputStream is closed"); + } + return lo; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObject.java b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObject.java new file mode 100644 index 0000000..f56812e --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObject.java @@ -0,0 +1,445 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.largeobject; + +import org.postgresql.core.BaseConnection; +import org.postgresql.fastpath.Fastpath; +import org.postgresql.fastpath.FastpathArg; +import org.postgresql.util.ByteStreamWriter; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.sql.SQLException; + +/** + *

This class provides the basic methods required to run the interface, plus a pair of methods that + * provide InputStream and OutputStream classes for this object.

+ * + *

Normally, client code would use the getAsciiStream, getBinaryStream, or getUnicodeStream methods + * in ResultSet, or setAsciiStream, setBinaryStream, or setUnicodeStream methods in + * PreparedStatement to access Large Objects.

+ * + *

However, sometimes lower level access to Large Objects are required, that are not supported by + * the JDBC specification.

+ * + *

Refer to org.postgresql.largeobject.LargeObjectManager on how to gain access to a Large Object, + * or how to create one.

+ * + * @see org.postgresql.largeobject.LargeObjectManager + * @see java.sql.ResultSet#getAsciiStream + * @see java.sql.ResultSet#getBinaryStream + * @see java.sql.ResultSet#getUnicodeStream + * @see java.sql.PreparedStatement#setAsciiStream + * @see java.sql.PreparedStatement#setBinaryStream + * @see java.sql.PreparedStatement#setUnicodeStream + */ +@SuppressWarnings("deprecation") // support for deprecated Fastpath API +public class LargeObject + implements AutoCloseable { + + /** + * Indicates a seek from the beginning of a file. + */ + public static final int SEEK_SET = 0; + + /** + * Indicates a seek from the current position. + */ + public static final int SEEK_CUR = 1; + + /** + * Indicates a seek from the end of a file. + */ + public static final int SEEK_END = 2; + + private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; + + private final Fastpath fp; // Fastpath API to use + private final long oid; // OID of this object + private final int mode; // read/write mode of this object + private final int fd; // the descriptor of the open large object + + private BlobOutputStream os; // The current output stream + + private boolean closed; // true when we are closed + + private BaseConnection conn; // Only initialized when open a LOB with CommitOnClose + private final boolean commitOnClose; // Only initialized when open a LOB with CommitOnClose + + /** + *

This opens a large object.

+ * + *

If the object does not exist, then an SQLException is thrown.

+ * + * @param fp FastPath API for the connection to use + * @param oid of the Large Object to open + * @param mode Mode of opening the large object + * @param conn the connection to the database used to access this LOB + * @param commitOnClose commit the transaction when this LOB will be closed (defined in + * LargeObjectManager) + * @throws SQLException if a database-access error occurs. + * @see org.postgresql.largeobject.LargeObjectManager + */ + protected LargeObject(Fastpath fp, long oid, int mode, + BaseConnection conn, boolean commitOnClose) + throws SQLException { + this.fp = fp; + this.oid = oid; + this.mode = mode; + if (commitOnClose) { + this.commitOnClose = true; + this.conn = conn; + } else { + this.commitOnClose = false; + } + + FastpathArg[] args = new FastpathArg[2]; + args[0] = Fastpath.createOIDArg(oid); + args[1] = new FastpathArg(mode); + this.fd = fp.getInteger("lo_open", args); + } + + /** + *

This opens a large object.

+ * + *

If the object does not exist, then an SQLException is thrown.

+ * + * @param fp FastPath API for the connection to use + * @param oid of the Large Object to open + * @param mode Mode of opening the large object (defined in LargeObjectManager) + * @throws SQLException if a database-access error occurs. + * @see org.postgresql.largeobject.LargeObjectManager + */ + protected LargeObject(Fastpath fp, long oid, int mode) throws SQLException { + this(fp, oid, mode, null, false); + } + + public LargeObject copy() throws SQLException { + return new LargeObject(fp, oid, mode); + } + + /* + * Release large object resources during garbage cleanup. + * + * This code used to call close() however that was problematic because the scope of the fd is a + * transaction, thus if commit or rollback was called before garbage collection ran then the call + * to close would error out with an invalid large object handle. So this method now does nothing + * and lets the server handle cleanup when it ends the transaction. + * + * protected void finalize() throws SQLException { } + */ + + /** + * @return the OID of this LargeObject + * @deprecated As of 8.3, replaced by {@link #getLongOID()} + */ + @Deprecated + public int getOID() { + return (int) oid; + } + + /** + * @return the OID of this LargeObject + */ + public long getLongOID() { + return oid; + } + + /** + * This method closes the object. You must not call methods in this object after this is called. + * + * @throws SQLException if a database-access error occurs. + */ + @Override + public void close() throws SQLException { + if (!closed) { + // flush any open output streams + if (os != null) { + try { + // we can't call os.close() otherwise we go into an infinite loop! + os.flush(); + } catch (IOException ioe) { + throw new PSQLException("Exception flushing output stream", PSQLState.DATA_ERROR, ioe); + } finally { + os = null; + } + } + + // finally close + FastpathArg[] args = new FastpathArg[1]; + args[0] = new FastpathArg(fd); + fp.fastpath("lo_close", args); // true here as we dont care!! 
+ closed = true; + BaseConnection conn = this.conn; + if (this.commitOnClose && conn != null) { + conn.commit(); + } + } + } + + /** + * Reads some data from the object, and return as a byte[] array. + * + * @param len number of bytes to read + * @return byte[] array containing data read + * @throws SQLException if a database-access error occurs. + */ + public byte[] read(int len) throws SQLException { + // This is the original method, where the entire block (len bytes) + // is retrieved in one go. + FastpathArg[] args = new FastpathArg[2]; + args[0] = new FastpathArg(fd); + args[1] = new FastpathArg(len); + byte[] bytes = fp.getData("loread", args); + if (bytes == null) { + return EMPTY_BYTE_ARRAY; + } + return bytes; + } + + /** + * Reads some data from the object into an existing array. + * + * @param buf destination array + * @param off offset within array + * @param len number of bytes to read + * @return the number of bytes actually read + * @throws SQLException if a database-access error occurs. + */ + public int read(byte[] buf, int off, int len) throws SQLException { + byte[] b = read(len); + if (b.length == 0) { + return 0; + } + len = Math.min(len, b.length); + System.arraycopy(b, 0, buf, off, len); + return len; + } + + /** + * Writes an array to the object. + * + * @param buf array to write + * @throws SQLException if a database-access error occurs. + */ + public void write(byte[] buf) throws SQLException { + FastpathArg[] args = new FastpathArg[2]; + args[0] = new FastpathArg(fd); + args[1] = new FastpathArg(buf); + fp.fastpath("lowrite", args); + } + + /** + * Writes some data from an array to the object. + * + * @param buf destination array + * @param off offset within array + * @param len number of bytes to write + * @throws SQLException if a database-access error occurs. 
+ */ + public void write(byte[] buf, int off, int len) throws SQLException { + FastpathArg[] args = new FastpathArg[2]; + args[0] = new FastpathArg(fd); + args[1] = new FastpathArg(buf, off, len); + fp.fastpath("lowrite", args); + } + + /** + * Writes some data from a given writer to the object. + * + * @param writer the source of the data to write + * @throws SQLException if a database-access error occurs. + */ + public void write(ByteStreamWriter writer) throws SQLException { + FastpathArg[] args = new FastpathArg[2]; + args[0] = new FastpathArg(fd); + args[1] = FastpathArg.of(writer); + fp.fastpath("lowrite", args); + } + + /** + *

Sets the current position within the object.

+ * + *

This is similar to the fseek() call in the standard C library. It allows you to have random + * access to the large object.

+ * + * @param pos position within object + * @param ref Either SEEK_SET, SEEK_CUR or SEEK_END + * @throws SQLException if a database-access error occurs. + */ + public void seek(int pos, int ref) throws SQLException { + FastpathArg[] args = new FastpathArg[3]; + args[0] = new FastpathArg(fd); + args[1] = new FastpathArg(pos); + args[2] = new FastpathArg(ref); + fp.fastpath("lo_lseek", args); + } + + /** + * Sets the current position within the object using 64-bit value (9.3+). + * + * @param pos position within object + * @param ref Either SEEK_SET, SEEK_CUR or SEEK_END + * @throws SQLException if a database-access error occurs. + */ + public void seek64(long pos, int ref) throws SQLException { + FastpathArg[] args = new FastpathArg[3]; + args[0] = new FastpathArg(fd); + args[1] = new FastpathArg(pos); + args[2] = new FastpathArg(ref); + fp.fastpath("lo_lseek64", args); + } + + /** + *

Sets the current position within the object.

+ * + *

This is similar to the fseek() call in the standard C library. It allows you to have random + * access to the large object.

+ * + * @param pos position within object from beginning + * @throws SQLException if a database-access error occurs. + */ + public void seek(int pos) throws SQLException { + seek(pos, SEEK_SET); + } + + /** + * @return the current position within the object + * @throws SQLException if a database-access error occurs. + */ + public int tell() throws SQLException { + FastpathArg[] args = new FastpathArg[1]; + args[0] = new FastpathArg(fd); + return fp.getInteger("lo_tell", args); + } + + /** + * @return the current position within the object + * @throws SQLException if a database-access error occurs. + */ + public long tell64() throws SQLException { + FastpathArg[] args = new FastpathArg[1]; + args[0] = new FastpathArg(fd); + return fp.getLong("lo_tell64", args); + } + + /** + *

This method is inefficient, as the only way to find out the size of the object is to seek to + * the end, record the current position, then return to the original position.

+ * + *

A better method will be found in the future.

+ * + * @return the size of the large object + * @throws SQLException if a database-access error occurs. + */ + public int size() throws SQLException { + int cp = tell(); + seek(0, SEEK_END); + int sz = tell(); + seek(cp, SEEK_SET); + return sz; + } + + /** + * See #size() for information about efficiency. + * + * @return the size of the large object + * @throws SQLException if a database-access error occurs. + */ + public long size64() throws SQLException { + long cp = tell64(); + seek64(0, SEEK_END); + long sz = tell64(); + seek64(cp, SEEK_SET); + return sz; + } + + /** + * Truncates the large object to the given length in bytes. If the number of bytes is larger than + * the current large object length, the large object will be filled with zero bytes. This method + * does not modify the current file offset. + * + * @param len given length in bytes + * @throws SQLException if something goes wrong + */ + public void truncate(int len) throws SQLException { + FastpathArg[] args = new FastpathArg[2]; + args[0] = new FastpathArg(fd); + args[1] = new FastpathArg(len); + fp.getInteger("lo_truncate", args); + } + + /** + * Truncates the large object to the given length in bytes. If the number of bytes is larger than + * the current large object length, the large object will be filled with zero bytes. This method + * does not modify the current file offset. + * + * @param len given length in bytes + * @throws SQLException if something goes wrong + */ + public void truncate64(long len) throws SQLException { + FastpathArg[] args = new FastpathArg[2]; + args[0] = new FastpathArg(fd); + args[1] = new FastpathArg(len); + fp.getInteger("lo_truncate64", args); + } + + /** + *

   * Returns an {@link InputStream} from this object.
   *
   * <p>This {@link InputStream} can then be used in any method that requires an InputStream.
   *
   * @return {@link InputStream} from this object
   * @throws SQLException if a database-access error occurs.
   */
  public InputStream getInputStream() throws SQLException {
    return new BlobInputStream(this);
  }

  /**
   * Returns an {@link InputStream} from this object, that will limit the amount of data that is
   * visible.
   *
   * @param limit maximum number of bytes the resulting stream will serve
   * @return {@link InputStream} from this object
   * @throws SQLException if a database-access error occurs.
   */
  public InputStream getInputStream(long limit) throws SQLException {
    return new BlobInputStream(this, BlobInputStream.DEFAULT_MAX_BUFFER_SIZE, limit);
  }

  /**
   * Returns an {@link InputStream} from this object, that will limit the amount of data that is
   * visible. Added mostly for testing.
   *
   * @param bufferSize buffer size for the stream
   * @param limit maximum number of bytes the resulting stream will serve
   * @return {@link InputStream} from this object
   * @throws SQLException if a database-access error occurs.
   */
  public InputStream getInputStream(int bufferSize, long limit) throws SQLException {
    return new BlobInputStream(this, bufferSize, limit);
  }

  /**

   * Returns an {@link OutputStream} to this object.
   *
   * <p>This OutputStream can then be used in any method that requires an OutputStream.
   *
   * @return {@link OutputStream} from this object; the same instance is returned on every call
   * @throws SQLException if a database-access error occurs.
   */
  public OutputStream getOutputStream() throws SQLException {
    // Lazily create a single shared stream; close() flushes it before releasing the descriptor.
    if (os == null) {
      os = new BlobOutputStream(this);
    }
    return os;
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObjectManager.java b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObjectManager.java
new file mode 100644
index 0000000..12efe14
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObjectManager.java
@@ -0,0 +1,356 @@
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.largeobject;

import org.postgresql.core.BaseConnection;
import org.postgresql.fastpath.Fastpath;
import org.postgresql.fastpath.FastpathArg;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.logging.Level;

/**
 * This class implements the large object interface to org.postgresql.
 *

It provides methods that allow client code to create, open and delete large objects from the + * database. When opening an object, an instance of org.postgresql.largeobject.LargeObject is + * returned, and its methods then allow access to the object.

+ * + *

This class can only be created by {@link BaseConnection}

+ * + *

To get access to this class, use the following segment of code:

+ * + *
+ * import org.postgresql.largeobject.*;
+ *
+ * Connection  conn;
+ * LargeObjectManager lobj;
+ *
+ * ... code that opens a connection ...
+ *
+ * lobj = ((org.postgresql.PGConnection)myconn).getLargeObjectAPI();
+ * 
+ * + *

Normally, client code would use the getAsciiStream, getBinaryStream, or getUnicodeStream methods + * in ResultSet, or setAsciiStream, setBinaryStream, or setUnicodeStream methods in + * PreparedStatement to access Large Objects.

+ * + *

However, sometimes lower level access to Large Objects are required, that are not supported by + * the JDBC specification.

+ * + *

Refer to org.postgresql.largeobject.LargeObject on how to manipulate the contents of a Large + * Object.

+ * + * @see java.sql.ResultSet#getAsciiStream + * @see java.sql.ResultSet#getBinaryStream + * @see java.sql.ResultSet#getUnicodeStream + * @see java.sql.PreparedStatement#setAsciiStream + * @see java.sql.PreparedStatement#setBinaryStream + * @see java.sql.PreparedStatement#setUnicodeStream + */ +@SuppressWarnings("deprecation") // support for deprecated Fastpath API +public class LargeObjectManager { + // the fastpath api for this connection + private Fastpath fp; + private BaseConnection conn; + + /** + * This mode indicates we want to write to an object. + */ + public static final int WRITE = 0x00020000; + + /** + * This mode indicates we want to read an object. + */ + public static final int READ = 0x00040000; + + /** + * This mode is the default. It indicates we want read and write access to a large object. + */ + public static final int READWRITE = READ | WRITE; + + /** + *

Constructs the LargeObject API.

+ * + *

Important Notice
+ * This method should only be called by {@link BaseConnection}

+ * + *

There should only be one LargeObjectManager per Connection. The {@link BaseConnection} class + * keeps track of the various extension API's and it's advised you use those to gain access, and + * not going direct.

+ * + * @param conn connection + * @throws SQLException if something wrong happens + */ + public LargeObjectManager(BaseConnection conn) throws SQLException { + this.conn = conn; + // We need Fastpath to do anything + this.fp = conn.getFastpathAPI(); + + // Now get the function oid's for the api + // + // This is an example of Fastpath.addFunctions(); + // + String sql; + if (conn.getMetaData().supportsSchemasInTableDefinitions()) { + sql = "SELECT p.proname,p.oid " + + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n " + + " WHERE p.pronamespace=n.oid AND n.nspname='pg_catalog' AND ("; + } else { + sql = "SELECT proname,oid FROM pg_proc WHERE "; + } + sql += " proname = 'lo_open'" + + " or proname = 'lo_close'" + + " or proname = 'lo_creat'" + + " or proname = 'lo_unlink'" + + " or proname = 'lo_lseek'" + + " or proname = 'lo_lseek64'" + + " or proname = 'lo_tell'" + + " or proname = 'lo_tell64'" + + " or proname = 'loread'" + + " or proname = 'lowrite'" + + " or proname = 'lo_truncate'" + + " or proname = 'lo_truncate64'"; + + if (conn.getMetaData().supportsSchemasInTableDefinitions()) { + sql += ")"; + } + + Statement stmt = conn.createStatement(); + ResultSet res = stmt.executeQuery(sql); + + fp.addFunctions(res); + res.close(); + stmt.close(); + + conn.getLogger().log(Level.FINE, "Large Object initialised"); + } + + /** + * This opens an existing large object, based on its OID. This method assumes that READ and WRITE + * access is required (the default). + * + * @param oid of large object + * @return LargeObject instance providing access to the object + * @throws SQLException on error + * @deprecated As of 8.3, replaced by {@link #open(long)} + */ + @Deprecated + public LargeObject open(int oid) throws SQLException { + return open((long) oid, false); + } + + /** + * This opens an existing large object, same as previous method, but commits the transaction on + * close if asked. 
This is useful when the LOB is returned to a caller which won't take care of + * transactions by itself. + * + * @param oid of large object + * @param commitOnClose commit the transaction when this LOB will be closed + * @return LargeObject instance providing access to the object + * @throws SQLException on error + */ + + public LargeObject open(int oid, boolean commitOnClose) throws SQLException { + return open((long) oid, commitOnClose); + } + + /** + * This opens an existing large object, based on its OID. This method assumes that READ and WRITE + * access is required (the default). + * + * @param oid of large object + * @return LargeObject instance providing access to the object + * @throws SQLException on error + */ + public LargeObject open(long oid) throws SQLException { + return open(oid, READWRITE, false); + } + + /** + * This opens an existing large object, same as previous method, but commits the transaction on + * close if asked. + * + * @param oid of large object + * @param commitOnClose commit the transaction when this LOB will be closed + * @return LargeObject instance providing access to the object + * @throws SQLException on error + */ + + public LargeObject open(long oid, boolean commitOnClose) throws SQLException { + return open(oid, READWRITE, commitOnClose); + } + + /** + * This opens an existing large object, based on its OID. + * + * @param oid of large object + * @param mode mode of open + * @return LargeObject instance providing access to the object + * @throws SQLException on error + * @deprecated As of 8.3, replaced by {@link #open(long, int)} + */ + @Deprecated + public LargeObject open(int oid, int mode) throws SQLException { + return open((long) oid, mode, false); + } + + /** + * This opens an existing large object, same as previous method, but commits the transaction on + * close if asked. 
+ * + * @param oid of large object + * @param mode mode of open + * @param commitOnClose commit the transaction when this LOB will be closed + * @return LargeObject instance providing access to the object + * @throws SQLException on error + */ + + public LargeObject open(int oid, int mode, boolean commitOnClose) throws SQLException { + return open((long) oid, mode, commitOnClose); + } + + /** + * This opens an existing large object, based on its OID. + * + * @param oid of large object + * @param mode mode of open + * @return LargeObject instance providing access to the object + * @throws SQLException on error + */ + public LargeObject open(long oid, int mode) throws SQLException { + return open(oid, mode, false); + } + + /** + * This opens an existing large object, based on its OID. + * + * @param oid of large object + * @param mode mode of open + * @param commitOnClose commit the transaction when this LOB will be closed + * @return LargeObject instance providing access to the object + * @throws SQLException on error + */ + public LargeObject open(long oid, int mode, boolean commitOnClose) throws SQLException { + if (conn.getAutoCommit()) { + throw new PSQLException(GT.tr("Large Objects may not be used in auto-commit mode."), + PSQLState.NO_ACTIVE_SQL_TRANSACTION); + } + return new LargeObject(fp, oid, mode, conn, commitOnClose); + } + + /** + *

This creates a large object, returning its OID.

+ * + *

It defaults to READWRITE for the new object's attributes.

+ * + * @return oid of new object + * @throws SQLException on error + * @deprecated As of 8.3, replaced by {@link #createLO()} + */ + @Deprecated + public int create() throws SQLException { + return create(READWRITE); + } + + /** + *

This creates a large object, returning its OID.

+ * + *

It defaults to READWRITE for the new object's attributes.

+ * + * @return oid of new object + * @throws SQLException if something wrong happens + */ + public long createLO() throws SQLException { + return createLO(READWRITE); + } + + /** + * This creates a large object, returning its OID. + * + * @param mode a bitmask describing different attributes of the new object + * @return oid of new object + * @throws SQLException on error + */ + public long createLO(int mode) throws SQLException { + if (conn.getAutoCommit()) { + throw new PSQLException(GT.tr("Large Objects may not be used in auto-commit mode."), + PSQLState.NO_ACTIVE_SQL_TRANSACTION); + } + FastpathArg[] args = new FastpathArg[1]; + args[0] = new FastpathArg(mode); + return fp.getOID("lo_creat", args); + } + + /** + * This creates a large object, returning its OID. + * + * @param mode a bitmask describing different attributes of the new object + * @return oid of new object + * @throws SQLException on error + * @deprecated As of 8.3, replaced by {@link #createLO(int)} + */ + @Deprecated + public int create(int mode) throws SQLException { + long oid = createLO(mode); + return (int) oid; + } + + /** + * This deletes a large object. + * + * @param oid describing object to delete + * @throws SQLException on error + */ + public void delete(long oid) throws SQLException { + FastpathArg[] args = new FastpathArg[1]; + args[0] = Fastpath.createOIDArg(oid); + fp.fastpath("lo_unlink", args); + } + + /** + *

This deletes a large object.

+ * + *

It is identical to the delete method, and is supplied as the C API uses unlink.

+ * + * @param oid describing object to delete + * @throws SQLException on error + * @deprecated As of 8.3, replaced by {@link #unlink(long)} + */ + @Deprecated + public void unlink(int oid) throws SQLException { + delete((long) oid); + } + + /** + *

This deletes a large object.

+ * + *

It is identical to the delete method, and is supplied as the C API uses unlink.

+ * + * @param oid describing object to delete + * @throws SQLException on error + */ + public void unlink(long oid) throws SQLException { + delete(oid); + } + + /** + * This deletes a large object. + * + * @param oid describing object to delete + * @throws SQLException on error + * @deprecated As of 8.3, replaced by {@link #delete(long)} + */ + @Deprecated + public void delete(int oid) throws SQLException { + delete((long) oid); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationPlugin.java b/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationPlugin.java new file mode 100644 index 0000000..3734cbe --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationPlugin.java @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.plugin; + +import org.postgresql.util.PSQLException; + +public interface AuthenticationPlugin { + + /** + * Callback method to provide the password to use for authentication. + * + *

Implementers can also check the authentication type to ensure that the + * authentication handshake is using a specific authentication method (e.g. SASL) + * or avoiding a specific one (e.g. cleartext).

+ * + *

For security reasons, the driver will wipe the contents of the array returned + * by this method after it has been used for authentication.

+ * + *

Implementers must provide a new array each time this method is invoked as + * the previous contents will have been wiped.

+ * + * @param type The authentication method that the server is requesting + * @return The password to use or null if no password is available + * @throws PSQLException if something goes wrong supplying the password + */ + char [] getPassword(AuthenticationRequestType type) throws PSQLException; + +} diff --git a/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationRequestType.java b/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationRequestType.java new file mode 100644 index 0000000..f62bb11 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationRequestType.java @@ -0,0 +1,13 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.plugin; + +public enum AuthenticationRequestType { + CLEARTEXT_PASSWORD, + GSS, + MD5_PASSWORD, + SASL, +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/LogSequenceNumber.java b/pgjdbc/src/main/java/org/postgresql/replication/LogSequenceNumber.java new file mode 100644 index 0000000..1886a7d --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/LogSequenceNumber.java @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication; + +import java.nio.ByteBuffer; + +/** + * LSN (Log Sequence Number) data which is a pointer to a location in the XLOG. + */ +public final class LogSequenceNumber implements Comparable { + /** + * Zero is used indicate an invalid pointer. Bootstrap skips the first possible WAL segment, + * initializing the first WAL page at XLOG_SEG_SIZE, so no XLOG record can begin at zero. 
+ */ + public static final LogSequenceNumber INVALID_LSN = LogSequenceNumber.valueOf(0); + + private final long value; + + private LogSequenceNumber(long value) { + this.value = value; + } + + /** + * @param value numeric represent position in the write-ahead log stream + * @return not null LSN instance + */ + public static LogSequenceNumber valueOf(long value) { + return new LogSequenceNumber(value); + } + + /** + * Create LSN instance by string represent LSN. + * + * @param strValue not null string as two hexadecimal numbers of up to 8 digits each, separated by + * a slash. For example {@code 16/3002D50}, {@code 0/15D68C50} + * @return not null LSN instance where if specified string represent have not valid form {@link + * LogSequenceNumber#INVALID_LSN} + */ + public static LogSequenceNumber valueOf(String strValue) { + int slashIndex = strValue.lastIndexOf('/'); + + if (slashIndex <= 0) { + return INVALID_LSN; + } + + String logicalXLogStr = strValue.substring(0, slashIndex); + int logicalXlog = (int) Long.parseLong(logicalXLogStr, 16); + String segmentStr = strValue.substring(slashIndex + 1, strValue.length()); + int segment = (int) Long.parseLong(segmentStr, 16); + + ByteBuffer buf = ByteBuffer.allocate(8); + buf.putInt(logicalXlog); + buf.putInt(segment); + buf.position(0); + long value = buf.getLong(); + + return LogSequenceNumber.valueOf(value); + } + + /** + * @return Long represent position in the write-ahead log stream + */ + public long asLong() { + return value; + } + + /** + * @return String represent position in the write-ahead log stream as two hexadecimal numbers of + * up to 8 digits each, separated by a slash. 
For example {@code 16/3002D50}, {@code 0/15D68C50} + */ + public String asString() { + ByteBuffer buf = ByteBuffer.allocate(8); + buf.putLong(value); + buf.position(0); + + int logicalXlog = buf.getInt(); + int segment = buf.getInt(); + return String.format("%X/%X", logicalXlog, segment); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + LogSequenceNumber that = (LogSequenceNumber) o; + + return value == that.value; + + } + + @Override + public int hashCode() { + return (int) (value ^ (value >>> 32)); + } + + @Override + public String toString() { + return "LSN{" + asString() + '}'; + } + + @Override + public int compareTo(LogSequenceNumber o) { + if (value == o.value) { + return 0; + } + //Unsigned comparison + return value + Long.MIN_VALUE < o.value + Long.MIN_VALUE ? -1 : 1; + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnection.java b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnection.java new file mode 100644 index 0000000..6148f49 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnection.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication; + +import org.postgresql.PGProperty; +import org.postgresql.replication.fluent.ChainedCreateReplicationSlotBuilder; +import org.postgresql.replication.fluent.ChainedStreamBuilder; + +import java.sql.SQLException; + +/** + * Api available only if connection was create with required for replication properties: {@link + * PGProperty#REPLICATION} and {@link PGProperty#ASSUME_MIN_SERVER_VERSION}. Without it property + * building replication stream fail with exception. 
+ */ +public interface PGReplicationConnection { + + /** + * After start replication stream this connection not available to use for another queries until + * replication stream will not close. + * + * @return not null fluent api for build replication stream + */ + ChainedStreamBuilder replicationStream(); + + /** + *

Create replication slot, that can be next use in {@link PGReplicationConnection#replicationStream()}

+ * + *

Replication slots provide an automated way to ensure that the master does not remove WAL + * segments until they have been received by all standbys, and that the master does not remove + * rows which could cause a recovery conflict even when the standby is disconnected.

+ * + * @return not null fluent api for build create replication slot + */ + ChainedCreateReplicationSlotBuilder createReplicationSlot(); + + /** + * @param slotName not null replication slot name exists in database that should be drop + * @throws SQLException if the replication slot cannot be dropped. + */ + void dropReplicationSlot(String slotName) throws SQLException; +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnectionImpl.java b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnectionImpl.java new file mode 100644 index 0000000..350526e --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnectionImpl.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication; + +import org.postgresql.core.BaseConnection; +import org.postgresql.replication.fluent.ChainedCreateReplicationSlotBuilder; +import org.postgresql.replication.fluent.ChainedStreamBuilder; +import org.postgresql.replication.fluent.ReplicationCreateSlotBuilder; +import org.postgresql.replication.fluent.ReplicationStreamBuilder; + +import java.sql.SQLException; +import java.sql.Statement; + +public class PGReplicationConnectionImpl implements PGReplicationConnection { + private final BaseConnection connection; + + public PGReplicationConnectionImpl(BaseConnection connection) { + this.connection = connection; + } + + @Override + public ChainedStreamBuilder replicationStream() { + return new ReplicationStreamBuilder(connection); + } + + @Override + public ChainedCreateReplicationSlotBuilder createReplicationSlot() { + return new ReplicationCreateSlotBuilder(connection); + } + + @Override + public void dropReplicationSlot(String slotName) throws SQLException { + if (slotName == null || slotName.isEmpty()) { + throw new IllegalArgumentException("Replication slot name can't be null or empty"); + } 
+ + Statement statement = connection.createStatement(); + try { + statement.execute("DROP_REPLICATION_SLOT " + slotName); + } finally { + statement.close(); + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationStream.java b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationStream.java new file mode 100644 index 0000000..cbd06f2 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationStream.java @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication; + +import org.postgresql.replication.fluent.CommonOptions; +import org.postgresql.replication.fluent.logical.LogicalReplicationOptions; + +import java.nio.ByteBuffer; +import java.sql.SQLException; + +/** + * Not tread safe replication stream (though certain methods can be safely called by different + * threads). After complete streaming should be close, for free resource on backend. Periodical + * status update work only when use {@link PGReplicationStream#read()} method. It means that + * process wal record should be fast as possible, because during process wal record lead to + * disconnect by timeout from server. + */ +public interface PGReplicationStream + extends AutoCloseable { + + /** + *

Read next wal record from backend. It method can be block until new message will not get + * from server.

+ * + *

A single WAL record is never split across two XLogData messages. When a WAL record crosses a + * WAL page boundary, and is therefore already split using continuation records, it can be split + * at the page boundary. In other words, the first main WAL record and its continuation records + * can be sent in different XLogData messages.

+ * + * @return not null byte array received by replication protocol, return ByteBuffer wrap around + * received byte array with use offset, so, use {@link ByteBuffer#array()} carefully + * @throws SQLException when some internal exception occurs during read from stream + */ + ByteBuffer read() throws SQLException; + + /** + *

Read next WAL record from backend. This method does not block and in contrast to {@link + * PGReplicationStream#read()}. If message from backend absent return null. It allow periodically + * check message in stream and if they absent sleep some time, but it time should be less than + * {@link CommonOptions#getStatusInterval()} to avoid disconnect from the server.

+ * + *

A single WAL record is never split across two XLogData messages. When a WAL record crosses a + * WAL page boundary, and is therefore already split using continuation records, it can be split + * at the page boundary. In other words, the first main WAL record and its continuation records + * can be sent in different XLogData messages.

+ * + * @return byte array received by replication protocol or NULL if pending message from server + * absent. Returns ByteBuffer wrap around received byte array with use offset, so, use {@link + * ByteBuffer#array()} carefully. + * @throws SQLException when some internal exception occurs during read from stream + */ + ByteBuffer readPending() throws SQLException; + + /** + *

Parameter updates by execute {@link PGReplicationStream#read()} method.

+ * + *

It is safe to call this method in a thread different than the main thread. However, usually this + * method is called in the main thread after a successful {@link PGReplicationStream#read()} or + * {@link PGReplicationStream#readPending()}, to get the LSN corresponding to the received record.

+ * + * @return NOT NULL LSN position that was receive last time via {@link PGReplicationStream#read()} + * method + */ + LogSequenceNumber getLastReceiveLSN(); + + /** + *

Last flushed LSN sent in an update message to the backend. This parameter is updated only via {@link + * PGReplicationStream#setFlushedLSN(LogSequenceNumber)}

+ * + *

It is safe to call this method in a thread different than the main thread.

+ * + * @return NOT NULL location of the last WAL flushed to disk in the standby. + */ + LogSequenceNumber getLastFlushedLSN(); + + /** + *

Last applied LSN sent in an update message to the backend. This parameter is updated only via {@link + * PGReplicationStream#setAppliedLSN(LogSequenceNumber)}

+ * + *

It is safe to call this method in a thread different than the main thread.

+ * + * @return not null location of the last WAL applied in the standby. + */ + LogSequenceNumber getLastAppliedLSN(); + + /** + *

Set the flushed LSN. This parameter will be sent to the backend on the next status update iteration. The flushed + * LSN position helps the backend determine which WAL segments can be recycled.

+ * + *

It is safe to call this method in a thread different than the main thread. The updated value + * will be sent to the backend in the next status update run.

+ * + * @param flushed NOT NULL location of the last WAL flushed to disk in the standby. + * @see PGReplicationStream#forceUpdateStatus() + */ + void setFlushedLSN(LogSequenceNumber flushed); + + /** + *

Inform the backend which LSN has been applied on the standby. + * This feedback will be sent to the backend on the next status update iteration.

+ * + *

It is safe to call this method in a thread different than the main thread. The updated value + * will be sent to the backend in the next status update run.

+ * + * @param applied NOT NULL location of the last WAL applied in the standby. + * @see PGReplicationStream#forceUpdateStatus() + */ + void setAppliedLSN(LogSequenceNumber applied); + + /** + * Force send last received, flushed and applied LSN status to backend. You cannot send LSN status + * explicitly because {@link PGReplicationStream} sends the status to backend periodically by + * configured interval via {@link LogicalReplicationOptions#getStatusInterval} + * + * @throws SQLException when some internal exception occurs during read from stream + * @see LogicalReplicationOptions#getStatusInterval() + */ + void forceUpdateStatus() throws SQLException; + + /** + * @return {@code true} if replication stream was already close, otherwise return {@code false} + */ + boolean isClosed(); + + /** + *

Stop streaming replication changes from the server and free resources. After that, the connection can be + * reused for other queries. A stream that has been closed cannot be used anymore.

+ * + *

Note: This method can spend much time for logical replication stream on postgresql + * version 9.6 and lower, because postgresql have bug - during decode big transaction to logical + * form and during wait new changes postgresql ignore messages from client. As workaround you can + * close replication connection instead of close replication stream. For more information about it + * problem see mailing list thread + * Stopping logical replication protocol

+ * + * @throws SQLException when some internal exception occurs during end streaming + */ + @Override + void close() throws SQLException; +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/ReplicationSlotInfo.java b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationSlotInfo.java new file mode 100644 index 0000000..8c904b3 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationSlotInfo.java @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication; + +/** + * Information returned on replication slot creation. + * + *

Returned keys of CREATE_REPLICATION_SLOT: + *

    + *
  1. slot_name String {@code =>} the slot name + *
  2. consistent_point String {@code =>} LSN at which we became consistent + *
  3. snapshot_name String {@code =>} exported snapshot's name (may be null) + *
  4. output_plugin String {@code =>} output plugin (may be null) + *
+ * + * @see CREATE_REPLICATION_SLOT documentation + */ +public final class ReplicationSlotInfo { + + private final String slotName; + private final ReplicationType replicationType; + private final LogSequenceNumber consistentPoint; + private final String snapshotName; + private final String outputPlugin; + + public ReplicationSlotInfo(String slotName, ReplicationType replicationType, + LogSequenceNumber consistentPoint, String snapshotName, + String outputPlugin) { + this.slotName = slotName; + this.replicationType = replicationType; + this.consistentPoint = consistentPoint; + this.snapshotName = snapshotName; + this.outputPlugin = outputPlugin; + } + + /** + * Replication slot name. + * + * @return the slot name + */ + public String getSlotName() { + return slotName; + } + + /** + * Replication type of the slot created, might be PHYSICAL or LOGICAL. + * + * @return ReplicationType, PHYSICAL or LOGICAL + */ + public ReplicationType getReplicationType() { + return replicationType; + } + + /** + * LSN at which we became consistent. + * + * @return LogSequenceNumber with the consistent_point + */ + public LogSequenceNumber getConsistentPoint() { + return consistentPoint; + } + + /** + * Exported snapshot name at the point of replication slot creation. + * + *

As long as the exporting transaction remains open, other transactions can import its snapshot, + * and thereby be guaranteed that they see exactly the same view of the database that the first + * transaction sees. + * + * @return exported snapshot_name (may be null) + */ + public String getSnapshotName() { + return snapshotName; + } + + /** + * Output Plugin used on slot creation. + * + * @return output_plugin (may be null) + */ + public String getOutputPlugin() { + return outputPlugin; + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/ReplicationType.java b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationType.java new file mode 100644 index 0000000..ab93bfd --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationType.java @@ -0,0 +1,11 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication; + +public enum ReplicationType { + LOGICAL, + PHYSICAL +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractCreateSlotBuilder.java new file mode 100644 index 0000000..807400f --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractCreateSlotBuilder.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication.fluent; + +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.util.GT; + +import java.sql.SQLFeatureNotSupportedException; + +public abstract class AbstractCreateSlotBuilder> + implements ChainedCommonCreateSlotBuilder { + + protected String slotName; + protected boolean temporaryOption; + protected BaseConnection connection; + + protected AbstractCreateSlotBuilder(BaseConnection connection) { + this.connection = connection; + } + + protected abstract T self(); + + @Override + public T withSlotName(String slotName) { + this.slotName = slotName; + return self(); + } + + @Override + public T withTemporaryOption() throws SQLFeatureNotSupportedException { + + if (!connection.haveMinimumServerVersion(ServerVersion.v10)) { + throw new SQLFeatureNotSupportedException( + GT.tr("Server does not support temporary replication slots") + ); + } + + this.temporaryOption = true; + return self(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractStreamBuilder.java new file mode 100644 index 0000000..8f08bba --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractStreamBuilder.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication.fluent; + +import org.postgresql.replication.LogSequenceNumber; + +import java.util.concurrent.TimeUnit; + +public abstract class AbstractStreamBuilder> + implements ChainedCommonStreamBuilder { + private static final int DEFAULT_STATUS_INTERVAL = (int) TimeUnit.SECONDS.toMillis(10L); + protected int statusIntervalMs = DEFAULT_STATUS_INTERVAL; + protected LogSequenceNumber startPosition = LogSequenceNumber.INVALID_LSN; + protected String slotName; + + public AbstractStreamBuilder() { + } + + protected abstract T self(); + + @Override + public T withStatusInterval(int time, TimeUnit format) { + statusIntervalMs = (int) TimeUnit.MILLISECONDS.convert(time, format); + return self(); + } + + @Override + public T withStartPosition(LogSequenceNumber lsn) { + this.startPosition = lsn; + return self(); + } + + @Override + public T withSlotName(String slotName) { + this.slotName = slotName; + return self(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonCreateSlotBuilder.java new file mode 100644 index 0000000..4114bef --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonCreateSlotBuilder.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent; + +import org.postgresql.replication.ReplicationSlotInfo; + +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; + +/** + * Fluent interface for specify common parameters for create Logical and Physical replication slot. 
+ */ +public interface ChainedCommonCreateSlotBuilder> { + + /** + * Replication slots provide an automated way to ensure that the master does not remove WAL + * segments until they have been received by all standbys, and that the master does not remove + * rows which could cause a recovery conflict even when the standby is disconnected. + * + * @param slotName not null unique replication slot name for create. + * @return T a slot builder + */ + T withSlotName(String slotName); + + /** + *

Temporary slots are not saved to disk and are automatically dropped on error or when + * the session has finished.

+ * + *

This feature is only supported by PostgreSQL versions >= 10.

+ * + * @return T a slot builder + * @throws SQLFeatureNotSupportedException thrown if PostgreSQL version is less than 10. + */ + T withTemporaryOption() throws SQLFeatureNotSupportedException; + + /** + * Create slot with specified parameters in database. + * + * @return ReplicationSlotInfo with the information of the created slot. + * @throws SQLException on error + */ + ReplicationSlotInfo make() throws SQLException; +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonStreamBuilder.java new file mode 100644 index 0000000..2a41246 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonStreamBuilder.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent; + +import org.postgresql.replication.LogSequenceNumber; + +import java.util.concurrent.TimeUnit; + +/** + * Fluent interface for specify common parameters for Logical and Physical replication. + */ +public interface ChainedCommonStreamBuilder> { + + /** + * Replication slots provide an automated way to ensure that the master does not remove WAL + * segments until they have been received by all standbys, and that the master does not remove + * rows which could cause a recovery conflict even when the standby is disconnected. + * + * @param slotName not null replication slot already exists on server. + * @return this instance as a fluent interface + */ + T withSlotName(String slotName); + + /** + * Specifies the number of time between status packets sent back to the server. This allows for + * easier monitoring of the progress from server. A value of zero disables the periodic status + * updates completely, although an update will still be sent when requested by the server, to + * avoid timeout disconnect. 
The default value is 10 seconds. + * + * @param time positive time + * @param format format for specified time + * @return not null fluent + */ + T withStatusInterval(int time, TimeUnit format); + + /** + * Specify start position from which backend will start stream changes. If parameter will not + * specify, streaming starts from restart_lsn. For more details see pg_replication_slots + * description. + * + * @param lsn not null position from which need start replicate changes + * @return not null fluent + */ + T withStartPosition(LogSequenceNumber lsn); +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCreateReplicationSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCreateReplicationSlotBuilder.java new file mode 100644 index 0000000..36e2f0b --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCreateReplicationSlotBuilder.java @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent; + +import org.postgresql.replication.fluent.logical.ChainedLogicalCreateSlotBuilder; +import org.postgresql.replication.fluent.physical.ChainedPhysicalCreateSlotBuilder; + +/** + * Fluent interface for specify common parameters for Logical and Physical replication. + */ +public interface ChainedCreateReplicationSlotBuilder { + /** + * Get the logical slot builder. + * Example usage: + *
+   *   {@code
+   *
+   *    pgConnection
+   *        .getReplicationAPI()
+   *        .createReplicationSlot()
+   *        .logical()
+   *        .withSlotName("mySlot")
+   *        .withOutputPlugin("test_decoding")
+   *        .make();
+   *
+   *    PGReplicationStream stream =
+   *        pgConnection
+   *            .getReplicationAPI()
+   *            .replicationStream()
+   *            .logical()
+   *            .withSlotName("mySlot")
+   *            .withSlotOption("include-xids", false)
+   *            .withSlotOption("skip-empty-xacts", true)
+   *            .start();
+   *
+   *    while (true) {
+   *      ByteBuffer buffer = stream.read();
+   *      //process logical changes
+   *    }
+   *
+   *   }
+   * 
+ * @return not null fluent api + */ + ChainedLogicalCreateSlotBuilder logical(); + + /** + *

Create a physical replication stream for processing WAL logs in binary form.

+ * + *

Example usage:

+ *
+   *   {@code
+   *
+   *    pgConnection
+   *        .getReplicationAPI()
+   *        .createReplicationSlot()
+   *        .physical()
+   *        .withSlotName("mySlot")
+   *        .make();
+   *
+   *    PGReplicationStream stream =
+   *        pgConnection
+   *            .getReplicationAPI()
+   *            .replicationStream()
+   *            .physical()
+   *            .withSlotName("mySlot")
+   *            .start();
+   *
+   *    while (true) {
+   *      ByteBuffer buffer = stream.read();
+   *      //process binary WAL logs
+   *    }
+   *
+   *   }
+   * 
+ * + * @return not null fluent api + */ + ChainedPhysicalCreateSlotBuilder physical(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedStreamBuilder.java new file mode 100644 index 0000000..58cbd2e --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedStreamBuilder.java @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent; + +import org.postgresql.replication.fluent.logical.ChainedLogicalStreamBuilder; +import org.postgresql.replication.fluent.physical.ChainedPhysicalStreamBuilder; + +/** + * Start point for fluent API that build replication stream(logical or physical). + * Api not thread safe, and can be use only for crate single stream. + */ +public interface ChainedStreamBuilder { + /** + *

Create logical replication stream that decode raw wal logs by output plugin to logical form. + * Default about logical decoding you can see by following link + * + * Logical Decoding Concepts + * . + *

+ * + *

Example usage:

+ *
+   *   {@code
+   *
+   *    PGReplicationStream stream =
+   *        pgConnection
+   *            .getReplicationAPI()
+   *            .replicationStream()
+   *            .logical()
+   *            .withSlotName("test_decoding")
+   *            .withSlotOption("include-xids", false)
+   *            .withSlotOption("skip-empty-xacts", true)
+   *            .start();
+   *
+   *    while (true) {
+   *      ByteBuffer buffer = stream.read();
+   *      //process logical changes
+   *    }
+   *
+   *   }
+   * 
+ * + * @return not null fluent api + */ + ChainedLogicalStreamBuilder logical(); + + /** + *

Create a physical replication stream for processing WAL logs in binary form.

+ * + *

Example usage:

+ *
+   *   {@code
+   *
+   *    LogSequenceNumber lsn = getCurrentLSN();
+   *
+   *    PGReplicationStream stream =
+   *        pgConnection
+   *            .getReplicationAPI()
+   *            .replicationStream()
+   *            .physical()
+   *            .withStartPosition(lsn)
+   *            .start();
+   *
+   *    while (true) {
+   *      ByteBuffer buffer = stream.read();
+   *      //process binary WAL logs
+   *    }
+   *
+   *   }
+   * 
+ * + * @return not null fluent api + */ + ChainedPhysicalStreamBuilder physical(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/CommonOptions.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/CommonOptions.java new file mode 100644 index 0000000..6eacbee --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/CommonOptions.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent; + +import org.postgresql.replication.LogSequenceNumber; + +/** + * Common parameters for logical and physical replication. + */ +public interface CommonOptions { + /** + * Replication slots provide an automated way to ensure that the master does not remove WAL + * segments until they have been received by all standbys, and that the master does not remove + * rows which could cause a recovery conflict even when the standby is disconnected. + * + * @return nullable replication slot name that already exists on server and free. + */ + String getSlotName(); + + /** + * @return the position to start replication. This cannot be null. + */ + LogSequenceNumber getStartLSNPosition(); + + /** + * Specifies the number of millisecond between status packets sent back to the server. This allows + * for easier monitoring of the progress from server. A value of zero disables the periodic status + * updates completely, although an update will still be sent when requested by the server, to + * avoid timeout disconnect. The default value is 10 seconds. 
+ * + * @return the current status interval + */ + int getStatusInterval(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationCreateSlotBuilder.java new file mode 100644 index 0000000..e0067a3 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationCreateSlotBuilder.java @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent; + +import org.postgresql.core.BaseConnection; +import org.postgresql.replication.fluent.logical.ChainedLogicalCreateSlotBuilder; +import org.postgresql.replication.fluent.logical.LogicalCreateSlotBuilder; +import org.postgresql.replication.fluent.physical.ChainedPhysicalCreateSlotBuilder; +import org.postgresql.replication.fluent.physical.PhysicalCreateSlotBuilder; + +public class ReplicationCreateSlotBuilder implements ChainedCreateReplicationSlotBuilder { + private final BaseConnection baseConnection; + + public ReplicationCreateSlotBuilder(BaseConnection baseConnection) { + this.baseConnection = baseConnection; + } + + @Override + public ChainedLogicalCreateSlotBuilder logical() { + return new LogicalCreateSlotBuilder(baseConnection); + } + + @Override + public ChainedPhysicalCreateSlotBuilder physical() { + return new PhysicalCreateSlotBuilder(baseConnection); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationStreamBuilder.java new file mode 100644 index 0000000..4d8443a --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationStreamBuilder.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication.fluent; + +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ReplicationProtocol; +import org.postgresql.replication.PGReplicationStream; +import org.postgresql.replication.fluent.logical.ChainedLogicalStreamBuilder; +import org.postgresql.replication.fluent.logical.LogicalReplicationOptions; +import org.postgresql.replication.fluent.logical.LogicalStreamBuilder; +import org.postgresql.replication.fluent.logical.StartLogicalReplicationCallback; +import org.postgresql.replication.fluent.physical.ChainedPhysicalStreamBuilder; +import org.postgresql.replication.fluent.physical.PhysicalReplicationOptions; +import org.postgresql.replication.fluent.physical.PhysicalStreamBuilder; +import org.postgresql.replication.fluent.physical.StartPhysicalReplicationCallback; + +import java.sql.SQLException; + +public class ReplicationStreamBuilder implements ChainedStreamBuilder { + private final BaseConnection baseConnection; + + /** + * @param connection not null connection with that will be associate replication + */ + public ReplicationStreamBuilder(final BaseConnection connection) { + this.baseConnection = connection; + } + + @Override + public ChainedLogicalStreamBuilder logical() { + return new LogicalStreamBuilder(new StartLogicalReplicationCallback() { + @Override + public PGReplicationStream start(LogicalReplicationOptions options) throws SQLException { + ReplicationProtocol protocol = baseConnection.getReplicationProtocol(); + return protocol.startLogical(options); + } + }); + } + + @Override + public ChainedPhysicalStreamBuilder physical() { + return new PhysicalStreamBuilder(new StartPhysicalReplicationCallback() { + @Override + public PGReplicationStream start(PhysicalReplicationOptions options) throws SQLException { + ReplicationProtocol protocol = baseConnection.getReplicationProtocol(); + return protocol.startPhysical(options); + } + }); + } +} diff --git 
a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java new file mode 100644 index 0000000..cae77bb --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent.logical; + +import org.postgresql.replication.fluent.ChainedCommonCreateSlotBuilder; + +/** + * Logical replication slot specific parameters. + */ +public interface ChainedLogicalCreateSlotBuilder + extends ChainedCommonCreateSlotBuilder { + + /** + *

Output plugin that should be used to decode the physical WAL representation into some logical form. + * The output plugin must be installed on the server (i.e. present in shared_preload_libraries).

+ * + *

Package postgresql-contrib provides sample output plugin test_decoding that can be + * use for test logical replication api

+ * + * @param outputPlugin not null name of the output plugin used for logical decoding + * @return the logical slot builder + */ + ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin); +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalStreamBuilder.java new file mode 100644 index 0000000..0dc60b9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalStreamBuilder.java @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent.logical; + +import org.postgresql.replication.PGReplicationStream; +import org.postgresql.replication.fluent.ChainedCommonStreamBuilder; + +import java.sql.SQLException; +import java.util.Properties; + +public interface ChainedLogicalStreamBuilder + extends ChainedCommonStreamBuilder { + /** + * Open logical replication stream. 
+ * + * @return not null PGReplicationStream available for fetch data in logical form + * @throws SQLException if there are errors + */ + PGReplicationStream start() throws SQLException; + + /** + * + * @param optionName name of option + * @param optionValue boolean value + * @return ChainedLogicalStreamBuilder + */ + + ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue); + + /** + * + * @param optionName name of option + * @param optionValue integer value + * @return ChainedLogicalStreamBuilder + */ + ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue); + + /** + * + * @param optionName name of option + * @param optionValue String value + * @return ChainedLogicalStreamBuilder + */ + ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue); + + /** + * + * @param options properties + * @return ChainedLogicalStreamBuilder + */ + ChainedLogicalStreamBuilder withSlotOptions(Properties options); + +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalCreateSlotBuilder.java new file mode 100644 index 0000000..0688822 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalCreateSlotBuilder.java @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication.fluent.logical; + +import org.postgresql.core.BaseConnection; +import org.postgresql.replication.LogSequenceNumber; +import org.postgresql.replication.ReplicationSlotInfo; +import org.postgresql.replication.ReplicationType; +import org.postgresql.replication.fluent.AbstractCreateSlotBuilder; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +public class LogicalCreateSlotBuilder + extends AbstractCreateSlotBuilder + implements ChainedLogicalCreateSlotBuilder { + + private String outputPlugin; + + public LogicalCreateSlotBuilder(BaseConnection connection) { + super(connection); + } + + @Override + protected ChainedLogicalCreateSlotBuilder self() { + return this; + } + + @Override + public ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin) { + this.outputPlugin = outputPlugin; + return self(); + } + + @Override + public ReplicationSlotInfo make() throws SQLException { + String outputPlugin = this.outputPlugin; + if (outputPlugin == null || outputPlugin.isEmpty()) { + throw new IllegalArgumentException( + "OutputPlugin required parameter for logical replication slot"); + } + + if (slotName == null || slotName.isEmpty()) { + throw new IllegalArgumentException("Replication slotName can't be null"); + } + + Statement statement = connection.createStatement(); + ResultSet result = null; + ReplicationSlotInfo slotInfo = null; + try { + String sql = String.format( + "CREATE_REPLICATION_SLOT %s %s LOGICAL %s", + slotName, + temporaryOption ? 
"TEMPORARY" : "", + outputPlugin + ); + statement.execute(sql); + result = statement.getResultSet(); + if (result != null && result.next()) { + slotInfo = new ReplicationSlotInfo( + result.getString("slot_name"), + ReplicationType.LOGICAL, + LogSequenceNumber.valueOf(result.getString("consistent_point")), + result.getString("snapshot_name"), + result.getString("output_plugin")); + } else { + throw new PSQLException( + GT.tr("{0} returned no results"), + PSQLState.OBJECT_NOT_IN_STATE); + } + } finally { + if (result != null) { + result.close(); + } + statement.close(); + } + return slotInfo; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalReplicationOptions.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalReplicationOptions.java new file mode 100644 index 0000000..8f1ef01 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalReplicationOptions.java @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent.logical; + +import org.postgresql.replication.fluent.CommonOptions; + +import java.util.Properties; + +public interface LogicalReplicationOptions extends CommonOptions { + /** + * Required parameter for logical replication. + * + * @return not null logical replication slot name that already exists on server and free. + */ + @Override + String getSlotName(); + + /** + * Parameters for output plugin. Parameters will be set to output plugin that register for + * specified replication slot name. 
+ * + * @return list options that will be pass to output_plugin for that was create replication slot + */ + Properties getSlotOptions(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalStreamBuilder.java new file mode 100644 index 0000000..f8a1bcf --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalStreamBuilder.java @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent.logical; + +import org.postgresql.replication.LogSequenceNumber; +import org.postgresql.replication.PGReplicationStream; +import org.postgresql.replication.fluent.AbstractStreamBuilder; + +import java.sql.SQLException; +import java.util.Properties; + +public class LogicalStreamBuilder extends AbstractStreamBuilder + implements ChainedLogicalStreamBuilder, LogicalReplicationOptions { + private final Properties slotOptions; + + private final StartLogicalReplicationCallback startCallback; + + /** + * @param startCallback not null callback that should be execute after build parameters for start + * replication + */ + public LogicalStreamBuilder(StartLogicalReplicationCallback startCallback) { + this.startCallback = startCallback; + this.slotOptions = new Properties(); + } + + @Override + protected ChainedLogicalStreamBuilder self() { + return this; + } + + @Override + public PGReplicationStream start() throws SQLException { + return startCallback.start(this); + } + + @Override + public String getSlotName() { + return slotName; + } + + @Override + public ChainedLogicalStreamBuilder withStartPosition(LogSequenceNumber lsn) { + startPosition = lsn; + return this; + } + + @Override + public ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue) { + 
slotOptions.setProperty(optionName, String.valueOf(optionValue)); + return this; + } + + @Override + public ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue) { + slotOptions.setProperty(optionName, String.valueOf(optionValue)); + return this; + } + + @Override + public ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue) { + slotOptions.setProperty(optionName, optionValue); + return this; + } + + @Override + public ChainedLogicalStreamBuilder withSlotOptions(Properties options) { + for (String propertyName : options.stringPropertyNames()) { + slotOptions.setProperty(propertyName, options.getProperty(propertyName)); + } + return this; + } + + @Override + public LogSequenceNumber getStartLSNPosition() { + return startPosition; + } + + @Override + public Properties getSlotOptions() { + return slotOptions; + } + + @Override + public int getStatusInterval() { + return statusIntervalMs; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/StartLogicalReplicationCallback.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/StartLogicalReplicationCallback.java new file mode 100644 index 0000000..8612eca --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/StartLogicalReplicationCallback.java @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication.fluent.logical; + +import org.postgresql.replication.PGReplicationStream; + +import java.sql.SQLException; + +public interface StartLogicalReplicationCallback { + PGReplicationStream start(LogicalReplicationOptions options) throws SQLException; +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java new file mode 100644 index 0000000..8fdc810 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent.physical; + +import org.postgresql.replication.fluent.ChainedCommonCreateSlotBuilder; + +/** + * Physical replication slot specific parameters. + */ +public interface ChainedPhysicalCreateSlotBuilder extends + ChainedCommonCreateSlotBuilder { +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalStreamBuilder.java new file mode 100644 index 0000000..f458c88 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalStreamBuilder.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent.physical; + +import org.postgresql.replication.PGReplicationStream; +import org.postgresql.replication.fluent.ChainedCommonStreamBuilder; + +import java.sql.SQLException; + +public interface ChainedPhysicalStreamBuilder extends + ChainedCommonStreamBuilder { + + /** + * Open physical replication stream. 
+ * + * @return not null PGReplicationStream available for fetch wal logs in binary form + * @throws SQLException on error + */ + PGReplicationStream start() throws SQLException; +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalCreateSlotBuilder.java new file mode 100644 index 0000000..4c2597b --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalCreateSlotBuilder.java @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent.physical; + +import org.postgresql.core.BaseConnection; +import org.postgresql.replication.LogSequenceNumber; +import org.postgresql.replication.ReplicationSlotInfo; +import org.postgresql.replication.ReplicationType; +import org.postgresql.replication.fluent.AbstractCreateSlotBuilder; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +public class PhysicalCreateSlotBuilder + extends AbstractCreateSlotBuilder + implements ChainedPhysicalCreateSlotBuilder { + + public PhysicalCreateSlotBuilder(BaseConnection connection) { + super(connection); + } + + @Override + protected ChainedPhysicalCreateSlotBuilder self() { + return this; + } + + @Override + public ReplicationSlotInfo make() throws SQLException { + if (slotName == null || slotName.isEmpty()) { + throw new IllegalArgumentException("Replication slotName can't be null"); + } + + Statement statement = connection.createStatement(); + ResultSet result = null; + ReplicationSlotInfo slotInfo = null; + try { + String sql = String.format( + "CREATE_REPLICATION_SLOT %s %s PHYSICAL", + slotName, + temporaryOption ? 
"TEMPORARY" : "" + ); + statement.execute(sql); + result = statement.getResultSet(); + if (result != null && result.next()) { + slotInfo = new ReplicationSlotInfo( + result.getString("slot_name"), + ReplicationType.PHYSICAL, + LogSequenceNumber.valueOf(result.getString("consistent_point")), + result.getString("snapshot_name"), + result.getString("output_plugin")); + } else { + throw new PSQLException( + GT.tr("{0} returned no results"), + PSQLState.OBJECT_NOT_IN_STATE); + } + } finally { + if (result != null) { + result.close(); + } + statement.close(); + } + return slotInfo; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalReplicationOptions.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalReplicationOptions.java new file mode 100644 index 0000000..58326d9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalReplicationOptions.java @@ -0,0 +1,11 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication.fluent.physical; + +import org.postgresql.replication.fluent.CommonOptions; + +public interface PhysicalReplicationOptions extends CommonOptions { +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalStreamBuilder.java new file mode 100644 index 0000000..eb177d0 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalStreamBuilder.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication.fluent.physical; + +import org.postgresql.replication.LogSequenceNumber; +import org.postgresql.replication.PGReplicationStream; +import org.postgresql.replication.fluent.AbstractStreamBuilder; + +import java.sql.SQLException; + +public class PhysicalStreamBuilder extends AbstractStreamBuilder + implements ChainedPhysicalStreamBuilder, PhysicalReplicationOptions { + + private final StartPhysicalReplicationCallback startCallback; + + /** + * @param startCallback not null callback that should be execute after build parameters for start + * replication + */ + public PhysicalStreamBuilder(StartPhysicalReplicationCallback startCallback) { + this.startCallback = startCallback; + } + + @Override + protected ChainedPhysicalStreamBuilder self() { + return this; + } + + @Override + public PGReplicationStream start() throws SQLException { + return this.startCallback.start(this); + } + + @Override + public String getSlotName() { + return slotName; + } + + @Override + public LogSequenceNumber getStartLSNPosition() { + return startPosition; + } + + @Override + public int getStatusInterval() { + return statusIntervalMs; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/StartPhysicalReplicationCallback.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/StartPhysicalReplicationCallback.java new file mode 100644 index 0000000..543edcb --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/StartPhysicalReplicationCallback.java @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication.fluent.physical; + +import org.postgresql.replication.PGReplicationStream; + +import java.sql.SQLException; + +public interface StartPhysicalReplicationCallback { + PGReplicationStream start(PhysicalReplicationOptions options) throws SQLException; +} diff --git a/pgjdbc/src/main/java/org/postgresql/scram/ScramAuthenticator.java b/pgjdbc/src/main/java/org/postgresql/scram/ScramAuthenticator.java new file mode 100644 index 0000000..f673e34 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/scram/ScramAuthenticator.java @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.scram; + +import org.postgresql.core.PGStream; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import com.ongres.scram.client.ScramClient; +import com.ongres.scram.client.ScramSession; +import com.ongres.scram.common.exception.ScramException; +import com.ongres.scram.common.exception.ScramInvalidServerSignatureException; +import com.ongres.scram.common.exception.ScramParseException; +import com.ongres.scram.common.exception.ScramServerErrorException; +import com.ongres.scram.common.stringprep.StringPreparations; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; + +public class ScramAuthenticator { + private static final Logger LOGGER = Logger.getLogger(ScramAuthenticator.class.getName()); + + private final String user; + private final String password; + private final PGStream pgStream; + private ScramClient scramClient; + private ScramSession scramSession; + private ScramSession.ClientFinalProcessor clientFinalProcessor; + + private interface BodySender { + void sendBody(PGStream pgStream) throws IOException; + } + + 
private void sendAuthenticationMessage(int bodyLength, BodySender bodySender) + throws IOException { + pgStream.sendChar('p'); + pgStream.sendInteger4(Integer.SIZE / Byte.SIZE + bodyLength); + bodySender.sendBody(pgStream); + pgStream.flush(); + } + + public ScramAuthenticator(String user, String password, PGStream pgStream) { + this.user = user; + this.password = password; + this.pgStream = pgStream; + } + + public void processServerMechanismsAndInit() throws IOException, PSQLException { + List mechanisms = new ArrayList<>(); + do { + mechanisms.add(pgStream.receiveString()); + } while (pgStream.peekChar() != 0); + int c = pgStream.receiveChar(); + assert c == 0; + if (mechanisms.isEmpty()) { + throw new PSQLException( + GT.tr("No SCRAM mechanism(s) advertised by the server"), + PSQLState.CONNECTION_REJECTED + ); + } + + ScramClient scramClient; + try { + scramClient = ScramClient + .channelBinding(ScramClient.ChannelBinding.NO) + .stringPreparation(StringPreparations.SASL_PREPARATION) + .selectMechanismBasedOnServerAdvertised(mechanisms.toArray(new String[]{})) + .setup(); + } catch (IllegalArgumentException e) { + throw new PSQLException( + GT.tr("Invalid or unsupported by client SCRAM mechanisms", e), + PSQLState.CONNECTION_REJECTED + ); + } + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, " Using SCRAM mechanism {0}", scramClient.getScramMechanism().getName()); + } + + this.scramClient = scramClient; + scramSession = + scramClient.scramSession("*"); // Real username is ignored by server, uses startup one + } + + public void sendScramClientFirstMessage() throws IOException { + ScramSession scramSession = this.scramSession; + String clientFirstMessage = scramSession.clientFirstMessage(); + LOGGER.log(Level.FINEST, " FE=> SASLInitialResponse( {0} )", clientFirstMessage); + + ScramClient scramClient = this.scramClient; + String scramMechanismName = scramClient.getScramMechanism().getName(); + final byte[] scramMechanismNameBytes = 
scramMechanismName.getBytes(StandardCharsets.UTF_8); + final byte[] clientFirstMessageBytes = clientFirstMessage.getBytes(StandardCharsets.UTF_8); + sendAuthenticationMessage( + (scramMechanismNameBytes.length + 1) + 4 + clientFirstMessageBytes.length, + new BodySender() { + @Override + public void sendBody(PGStream pgStream) throws IOException { + pgStream.send(scramMechanismNameBytes); + pgStream.sendChar(0); // List terminated in '\0' + pgStream.sendInteger4(clientFirstMessageBytes.length); + pgStream.send(clientFirstMessageBytes); + } + } + ); + } + + public void processServerFirstMessage(int length) throws IOException, PSQLException { + String serverFirstMessage = pgStream.receiveString(length); + LOGGER.log(Level.FINEST, " <=BE AuthenticationSASLContinue( {0} )", serverFirstMessage); + + ScramSession scramSession = this.scramSession; + if (scramSession == null) { + throw new PSQLException( + GT.tr("SCRAM session does not exist"), + PSQLState.UNKNOWN_STATE + ); + } + + ScramSession.ServerFirstProcessor serverFirstProcessor; + try { + serverFirstProcessor = scramSession.receiveServerFirstMessage(serverFirstMessage); + } catch (ScramException e) { + throw new PSQLException( + GT.tr("Invalid server-first-message: {0}", serverFirstMessage), + PSQLState.CONNECTION_REJECTED, + e + ); + } + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, + " <=BE AuthenticationSASLContinue(salt={0}, iterations={1})", + new Object[]{serverFirstProcessor.getSalt(), serverFirstProcessor.getIteration()} + ); + } + + clientFinalProcessor = serverFirstProcessor.clientFinalProcessor(password); + + String clientFinalMessage = clientFinalProcessor.clientFinalMessage(); + LOGGER.log(Level.FINEST, " FE=> SASLResponse( {0} )", clientFinalMessage); + + final byte[] clientFinalMessageBytes = clientFinalMessage.getBytes(StandardCharsets.UTF_8); + sendAuthenticationMessage( + clientFinalMessageBytes.length, + new BodySender() { + @Override + public void sendBody(PGStream pgStream) 
throws IOException { + pgStream.send(clientFinalMessageBytes); + } + } + ); + } + + public void verifyServerSignature(int length) throws IOException, PSQLException { + String serverFinalMessage = pgStream.receiveString(length); + LOGGER.log(Level.FINEST, " <=BE AuthenticationSASLFinal( {0} )", serverFinalMessage); + + ScramSession.ClientFinalProcessor clientFinalProcessor = this.clientFinalProcessor; + if (clientFinalProcessor == null) { + throw new PSQLException( + GT.tr("SCRAM client final processor does not exist"), + PSQLState.UNKNOWN_STATE + ); + } + try { + clientFinalProcessor.receiveServerFinalMessage(serverFinalMessage); + } catch (ScramParseException e) { + throw new PSQLException( + GT.tr("Invalid server-final-message: {0}", serverFinalMessage), + PSQLState.CONNECTION_REJECTED, + e + ); + } catch (ScramServerErrorException e) { + throw new PSQLException( + GT.tr("SCRAM authentication failed, server returned error: {0}", + e.getError().getErrorMessage()), + PSQLState.CONNECTION_REJECTED, + e + ); + } catch (ScramInvalidServerSignatureException e) { + throw new PSQLException( + GT.tr("Invalid server SCRAM signature"), + PSQLState.CONNECTION_REJECTED, + e + ); + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/DbKeyStoreSocketFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/DbKeyStoreSocketFactory.java new file mode 100644 index 0000000..d7e375f --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ssl/DbKeyStoreSocketFactory.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.ssl; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.security.GeneralSecurityException; +import java.security.KeyStore; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; + +public abstract class DbKeyStoreSocketFactory extends WrappedFactory { + /* + * Populate the WrappedFactory member factory with an SSL Socket Factory that uses the JKS + * keystore provided by getKeyStorePassword() and getKeyStoreStream(). A subclass only needs to + * implement these two methods. The key store will be used both for selecting a private key + * certificate to send to the server, as well as checking the server's certificate against a set + * of trusted CAs. + */ + public DbKeyStoreSocketFactory() throws DbKeyStoreSocketException { + KeyStore keys; + char[] password; + try { + keys = KeyStore.getInstance("JKS"); + // Call of the sub-class method during object initialization is generally a bad idea + password = getKeyStorePassword(); + keys.load(getKeyStoreStream(), password); + } catch (GeneralSecurityException gse) { + throw new DbKeyStoreSocketException("Failed to load keystore: " + gse.getMessage()); + } catch (FileNotFoundException fnfe) { + throw new DbKeyStoreSocketException("Failed to find keystore file." 
+ fnfe.getMessage()); + } catch (IOException ioe) { + throw new DbKeyStoreSocketException("Failed to read keystore file: " + ioe.getMessage()); + } + try { + KeyManagerFactory keyfact = + KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + keyfact.init(keys, password); + + TrustManagerFactory trustfact = + TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + trustfact.init(keys); + + SSLContext ctx = SSLContext.getInstance("SSL"); + ctx.init(keyfact.getKeyManagers(), trustfact.getTrustManagers(), null); + factory = ctx.getSocketFactory(); + } catch (GeneralSecurityException gse) { + throw new DbKeyStoreSocketException( + "Failed to set up database socket factory: " + gse.getMessage()); + } + } + + public abstract char[] getKeyStorePassword(); + + public abstract InputStream getKeyStoreStream(); + + @SuppressWarnings("serial") + public static class DbKeyStoreSocketException extends Exception { + public DbKeyStoreSocketException(String message) { + super(message); + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/DefaultJavaSSLFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/DefaultJavaSSLFactory.java new file mode 100644 index 0000000..a772e18 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ssl/DefaultJavaSSLFactory.java @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.ssl; + +import java.util.Properties; + +import javax.net.ssl.SSLSocketFactory; + +/** + * Socket factory that uses Java's default truststore to validate server certificate. + * Note: it always validates server certificate, so it might result to downgrade to non-encrypted + * connection when default truststore lacks certificates to validate server. 
+ */ +public class DefaultJavaSSLFactory extends WrappedFactory { + public DefaultJavaSSLFactory(Properties info) { + factory = (SSLSocketFactory) SSLSocketFactory.getDefault(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/LazyKeyManager.java b/pgjdbc/src/main/java/org/postgresql/ssl/LazyKeyManager.java new file mode 100644 index 0000000..e3c46b5 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ssl/LazyKeyManager.java @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.ssl; + +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.net.Socket; +import java.security.AlgorithmParameters; +import java.security.GeneralSecurityException; +import java.security.Key; +import java.security.KeyFactory; +import java.security.NoSuchAlgorithmException; +import java.security.Principal; +import java.security.PrivateKey; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.KeySpec; +import java.security.spec.PKCS8EncodedKeySpec; +import java.util.Collection; + +import javax.crypto.Cipher; +import javax.crypto.EncryptedPrivateKeyInfo; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.PBEKeySpec; +import javax.net.ssl.X509KeyManager; +import javax.security.auth.callback.Callback; +import javax.security.auth.callback.CallbackHandler; +import javax.security.auth.callback.PasswordCallback; +import javax.security.auth.callback.UnsupportedCallbackException; +import 
javax.security.auth.x500.X500Principal; + +/** + * A Key manager that only loads the keys, if necessary. + */ +public class LazyKeyManager implements X509KeyManager { + private X509Certificate [] cert; + private PrivateKey key; + private final String certfile; + private final String keyfile; + private final CallbackHandler cbh; + private final boolean defaultfile; + private PSQLException error; + + /** + * Constructor. certfile and keyfile can be null, in that case no certificate is presented to the + * server. + * + * @param certfile certfile + * @param keyfile key file + * @param cbh callback handler + * @param defaultfile default file + */ + public LazyKeyManager(String certfile, String keyfile, CallbackHandler cbh, boolean defaultfile) { + this.certfile = certfile; + this.keyfile = keyfile; + this.cbh = cbh; + this.defaultfile = defaultfile; + } + + /** + * getCertificateChain and getPrivateKey cannot throw exceptions, therefore any exception is stored + * in {@link #error} and can be raised by this method. + * + * @throws PSQLException if any exception is stored in {@link #error} and can be raised + */ + public void throwKeyManagerException() throws PSQLException { + if (error != null) { + throw error; + } + } + + @Override + public String chooseClientAlias(String[] keyType, + Principal [] issuers, Socket socket) { + if (certfile == null) { + return null; + } else { + if (issuers == null || issuers.length == 0) { + // Postgres 8.4 and earlier do not send the list of accepted certificate authorities + // to the client. See BUG #5468. We only hope, that our certificate will be accepted. + return "user"; + } else { + // Sending a wrong certificate makes the connection rejected, even, if clientcert=0 in + // pg_hba.conf. 
+ // therefore we only send our certificate, if the issuer is listed in issuers + X509Certificate[] certchain = getCertificateChain("user"); + if (certchain == null) { + return null; + } else { + X509Certificate cert = certchain[certchain.length - 1]; + X500Principal ourissuer = cert.getIssuerX500Principal(); + String certKeyType = cert.getPublicKey().getAlgorithm(); + boolean keyTypeFound = false; + boolean found = false; + if (keyType != null && keyType.length > 0) { + for (String kt : keyType) { + if (kt.equalsIgnoreCase(certKeyType)) { + keyTypeFound = true; + } + } + } else { + // If no key types were passed in, assume we don't care + // about checking that the cert uses a particular key type. + keyTypeFound = true; + } + if (keyTypeFound) { + for (Principal issuer : issuers) { + if (ourissuer.equals(issuer)) { + found = keyTypeFound; + } + } + } + return found ? "user" : null; + } + } + } + } + + @Override + public String chooseServerAlias(String keyType, + Principal [] issuers, Socket socket) { + return null; // We are not a server + } + + @Override + public X509Certificate [] getCertificateChain(String alias) { + if (cert == null && certfile != null) { + // If certfile is null, we do not load the certificate + // The certificate must be loaded + CertificateFactory cf; + try { + cf = CertificateFactory.getInstance("X.509"); + } catch (CertificateException ex) { + // For some strange reason it throws CertificateException instead of + // NoSuchAlgorithmException... 
+ error = new PSQLException(GT.tr( + "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."), + PSQLState.CONNECTION_FAILURE, ex); + return null; + } + Collection certs; + FileInputStream certfileStream = null; + try { + certfileStream = new FileInputStream(certfile); + certs = cf.generateCertificates(certfileStream); + } catch (FileNotFoundException ioex) { + if (!defaultfile) { // It is not an error if there is no file at the default location + error = new PSQLException( + GT.tr("Could not open SSL certificate file {0}.", certfile), + PSQLState.CONNECTION_FAILURE, ioex); + } + return null; + } catch (CertificateException gsex) { + error = new PSQLException(GT.tr("Loading the SSL certificate {0} into a KeyManager failed.", + certfile), PSQLState.CONNECTION_FAILURE, gsex); + return null; + } finally { + if (certfileStream != null) { + try { + certfileStream.close(); + } catch (IOException ioex) { + if (!defaultfile) { // It is not an error if there is no file at the default location + error = new PSQLException( + GT.tr("Could not close SSL certificate file {0}.", certfile), + PSQLState.CONNECTION_FAILURE, ioex); + } + } + } + } + cert = certs.toArray(new X509Certificate[0]); + } + return cert; + } + + @Override + public String [] getClientAliases(String keyType, + Principal [] issuers) { + String alias = chooseClientAlias(new String[]{keyType}, issuers, (Socket) null); + return alias == null ? 
new String[]{} : new String[]{alias}; + } + + private static byte[] readFileFully(String path) throws IOException { + RandomAccessFile raf = new RandomAccessFile(path, "r"); + try { + byte[] ret = new byte[(int) raf.length()]; + raf.readFully(ret); + return ret; + } finally { + raf.close(); + } + } + + @Override + public PrivateKey getPrivateKey(String alias) { + try { + if (key == null && keyfile != null) { + // If keyfile is null, we do not load the key + // The private key must be loaded + X509Certificate[] cert = getCertificateChain("user"); + if (cert == null || cert.length == 0) { // We need the certificate for the algorithm + return null; + } + + byte[] keydata; + try { + keydata = readFileFully(keyfile); + } catch (FileNotFoundException ex) { + if (!defaultfile) { + // It is not an error if there is no file at the default location + throw ex; + } + return null; + } + + KeyFactory kf = KeyFactory.getInstance(cert[0].getPublicKey().getAlgorithm()); + try { + KeySpec pkcs8KeySpec = new PKCS8EncodedKeySpec(keydata); + key = kf.generatePrivate(pkcs8KeySpec); + } catch (InvalidKeySpecException ex) { + // The key might be password protected + EncryptedPrivateKeyInfo ePKInfo = new EncryptedPrivateKeyInfo(keydata); + Cipher cipher; + try { + cipher = Cipher.getInstance(ePKInfo.getAlgName()); + } catch (NoSuchPaddingException npex) { + // Why is it not a subclass of NoSuchAlgorithmException? 
+ throw new NoSuchAlgorithmException(npex.getMessage(), npex); + } + // We call back for the password + PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false); + try { + cbh.handle(new Callback[]{pwdcb}); + } catch (UnsupportedCallbackException ucex) { + if ((cbh instanceof LibPQFactory.ConsoleCallbackHandler) + && ("Console is not available".equals(ucex.getMessage()))) { + error = new PSQLException(GT + .tr("Could not read password for SSL key file, console is not available."), + PSQLState.CONNECTION_FAILURE, ucex); + } else { + error = + new PSQLException( + GT.tr("Could not read password for SSL key file by callbackhandler {0}.", + cbh.getClass().getName()), + PSQLState.CONNECTION_FAILURE, ucex); + } + return null; + } + try { + PBEKeySpec pbeKeySpec = new PBEKeySpec(pwdcb.getPassword()); + pwdcb.clearPassword(); + // Now create the Key from the PBEKeySpec + SecretKeyFactory skFac = SecretKeyFactory.getInstance(ePKInfo.getAlgName()); + Key pbeKey = skFac.generateSecret(pbeKeySpec); + // Extract the iteration count and the salt + AlgorithmParameters algParams = ePKInfo.getAlgParameters(); + cipher.init(Cipher.DECRYPT_MODE, pbeKey, algParams); + // Decrypt the encrypted private key into a PKCS8EncodedKeySpec + KeySpec pkcs8KeySpec = ePKInfo.getKeySpec(cipher); + key = kf.generatePrivate(pkcs8KeySpec); + } catch (GeneralSecurityException ikex) { + error = new PSQLException( + GT.tr("Could not decrypt SSL key file {0}.", keyfile), + PSQLState.CONNECTION_FAILURE, ikex); + return null; + } + } + } + } catch (IOException ioex) { + error = new PSQLException(GT.tr("Could not read SSL key file {0}.", keyfile), + PSQLState.CONNECTION_FAILURE, ioex); + } catch (NoSuchAlgorithmException ex) { + error = new PSQLException(GT.tr("Could not find a java cryptographic algorithm: {0}.", + ex.getMessage()), PSQLState.CONNECTION_FAILURE, ex); + return null; + } + + return key; + } + + @Override + public String [] getServerAliases(String keyType, Principal 
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.ssl;

import org.postgresql.PGProperty;
import org.postgresql.jdbc.SslMode;
import org.postgresql.ssl.NonValidatingFactory.NonValidatingTM;
import org.postgresql.util.GT;
import org.postgresql.util.ObjectFactory;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;

import java.io.Console;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.KeyManagementException;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.Certificate;
import java.security.cert.CertificateFactory;
import java.util.Locale;
import java.util.Properties;

import javax.net.ssl.KeyManager;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;

/**
 * Provide an SSLSocketFactory that is compatible with the libpq behaviour.
 *
 * <p>Key and certificate locations default to the same files libpq uses
 * ({@code ~/.postgresql/postgresql.pk8}, {@code postgresql.crt}, {@code root.crt};
 * {@code %APPDATA%\postgresql\} on Windows), and may be overridden through the
 * {@code sslkey}, {@code sslcert} and {@code sslrootcert} connection properties.</p>
 */
public class LibPQFactory extends WrappedFactory {

  // Key manager supplying the client certificate/key; null when no client key is configured.
  KeyManager km;
  // True when the key/cert paths were not configured explicitly and the libpq defaults are used.
  boolean defaultfile;

  /**
   * Resolves the password callback handler: either the user-supplied
   * {@code sslpasswordcallback} class, or a console/fixed-password fallback
   * seeded with the {@code sslpassword} property.
   *
   * @param info connection properties
   * @return the callback handler to use for key decryption prompts
   * @throws PSQLException if the configured callback class cannot be instantiated
   */
  private CallbackHandler getCallbackHandler(LibPQFactory this, Properties info) throws PSQLException {
    // Determine the callback handler
    CallbackHandler cbh;
    String sslpasswordcallback = PGProperty.SSL_PASSWORD_CALLBACK.getOrDefault(info);
    if (sslpasswordcallback != null) {
      try {
        cbh = ObjectFactory.instantiate(CallbackHandler.class, sslpasswordcallback, info, false, null);
      } catch (Exception e) {
        throw new PSQLException(
            GT.tr("The password callback class provided {0} could not be instantiated.",
                sslpasswordcallback),
            PSQLState.CONNECTION_FAILURE, e);
      }
    } else {
      cbh = new ConsoleCallbackHandler(PGProperty.SSL_PASSWORD.getOrDefault(info));
    }
    return cbh;
  }

  /**
   * Sets up {@link #km} for a PKCS#8 (.pk8) client key plus a PEM certificate file.
   *
   * @param sslkeyfile path to the client key (already defaulted by the caller)
   * @param defaultdir directory used for the default certificate location
   * @param info connection properties
   * @throws PSQLException if the password callback cannot be created
   */
  private void initPk8(LibPQFactory this,
      String sslkeyfile, String defaultdir, Properties info) throws PSQLException {

    // Load the client's certificate and key
    String sslcertfile = PGProperty.SSL_CERT.getOrDefault(info);
    if (sslcertfile == null) { // Fall back to default
      defaultfile = true;
      sslcertfile = defaultdir + "postgresql.crt";
    }

    // If the properties are empty, give null to prevent client key selection
    km = new LazyKeyManager(("".equals(sslcertfile) ? null : sslcertfile),
        ("".equals(sslkeyfile) ? null : sslkeyfile), getCallbackHandler(info), defaultfile);
  }

  /**
   * Sets up {@link #km} for a PKCS#12 (.p12/.pfx) keystore that holds both key and certificate.
   *
   * @param sslkeyfile path to the PKCS#12 file
   * @param info connection properties
   * @throws PSQLException if the password callback cannot be created
   */
  private void initP12(LibPQFactory this,
      String sslkeyfile, Properties info) throws PSQLException {
    km = new PKCS12KeyManager(sslkeyfile, getCallbackHandler(info));
  }

  /**
   * @param info the connection parameters The following parameters are used:
   *        sslmode,sslcert,sslkey,sslrootcert,sslhostnameverifier,sslpasswordcallback,sslpassword
   * @throws PSQLException if security error appears when initializing factory
   */
  public LibPQFactory(Properties info) throws PSQLException {
    try {
      SSLContext ctx = SSLContext.getInstance("TLS"); // or "SSL" ?

      // Determining the default file location
      String pathsep = System.getProperty("file.separator");
      String defaultdir;

      if (System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("windows")) { // It is Windows
        defaultdir = System.getenv("APPDATA") + pathsep + "postgresql" + pathsep;
      } else {
        defaultdir = System.getProperty("user.home") + pathsep + ".postgresql" + pathsep;
      }

      String sslkeyfile = PGProperty.SSL_KEY.getOrDefault(info);
      if (sslkeyfile == null) { // Fall back to default
        defaultfile = true;
        sslkeyfile = defaultdir + "postgresql.pk8";
      }

      // Dispatch on the key file extension: PKCS#12 bundles vs separate PKCS#8 key + cert.
      if (sslkeyfile.endsWith(".p12") || sslkeyfile.endsWith(".pfx")) {
        initP12(sslkeyfile, info);
      } else {
        initPk8(sslkeyfile, defaultdir, info);
      }

      TrustManager[] tm;
      SslMode sslMode = SslMode.of(info);
      if (!sslMode.verifyCertificate()) {
        // server validation is not required
        tm = new TrustManager[]{new NonValidatingTM()};
      } else {
        // Load the server certificate

        TrustManagerFactory tmf = TrustManagerFactory.getInstance("PKIX");
        KeyStore ks;
        try {
          ks = KeyStore.getInstance("jks");
        } catch (KeyStoreException e) {
          // this should never happen
          throw new NoSuchAlgorithmException("jks KeyStore not available");
        }
        String sslrootcertfile = PGProperty.SSL_ROOT_CERT.getOrDefault(info);
        if (sslrootcertfile == null) { // Fall back to default
          sslrootcertfile = defaultdir + "root.crt";
        }
        FileInputStream fis;
        try {
          fis = new FileInputStream(sslrootcertfile); // NOSONAR
        } catch (FileNotFoundException ex) {
          throw new PSQLException(
              GT.tr("Could not open SSL root certificate file {0}.", sslrootcertfile),
              PSQLState.CONNECTION_FAILURE, ex);
        }
        try {
          CertificateFactory cf = CertificateFactory.getInstance("X.509");
          // Certificate[] certs = cf.generateCertificates(fis).toArray(new Certificate[]{}); //Does
          // not work in java 1.4
          Object[] certs = cf.generateCertificates(fis).toArray(new Certificate[]{});
          // An in-memory keystore is populated with every cert from root.crt so the PKIX
          // TrustManagerFactory can build chains against all of them.
          ks.load(null, null);
          for (int i = 0; i < certs.length; i++) {
            ks.setCertificateEntry("cert" + i, (Certificate) certs[i]);
          }
          tmf.init(ks);
        } catch (IOException ioex) {
          throw new PSQLException(
              GT.tr("Could not read SSL root certificate file {0}.", sslrootcertfile),
              PSQLState.CONNECTION_FAILURE, ioex);
        } catch (GeneralSecurityException gsex) {
          throw new PSQLException(
              GT.tr("Loading the SSL root certificate {0} into a TrustManager failed.",
                  sslrootcertfile),
              PSQLState.CONNECTION_FAILURE, gsex);
        } finally {
          try {
            fis.close();
          } catch (IOException e) {
            /* ignore */
          }
        }
        tm = tmf.getTrustManagers();
      }

      // finally we can initialize the context
      try {
        KeyManager km = this.km;
        ctx.init(km == null ? null : new KeyManager[]{km}, tm, null);
      } catch (KeyManagementException ex) {
        throw new PSQLException(GT.tr("Could not initialize SSL context."),
            PSQLState.CONNECTION_FAILURE, ex);
      }

      factory = ctx.getSocketFactory();
    } catch (NoSuchAlgorithmException ex) {
      throw new PSQLException(GT.tr("Could not find a java cryptographic algorithm: {0}.",
          ex.getMessage()), PSQLState.CONNECTION_FAILURE, ex);
    }
  }

  /**
   * Propagates any exception from {@link LazyKeyManager}.
   *
   * @throws PSQLException if there is an exception to propagate
   */
  public void throwKeyManagerException() throws PSQLException {
    if (km != null) {
      if (km instanceof LazyKeyManager) {
        ((LazyKeyManager) km).throwKeyManagerException();
      }
      if (km instanceof PKCS12KeyManager) {
        ((PKCS12KeyManager) km).throwKeyManagerException();
      }
    }
  }

  /**
   * A CallbackHandler that reads the password from the console or returns the password given to its
   * constructor.
   */
  public static class ConsoleCallbackHandler implements CallbackHandler {

    // Fixed password supplied via the sslpassword property; null means prompt on the console.
    private char [] password;

    ConsoleCallbackHandler(String password) {
      if (password != null) {
        this.password = password.toCharArray();
      }
    }

    /**
     * Handles the callbacks.
     *
     * @param callbacks The callbacks to handle
     * @throws UnsupportedCallbackException If the console is not available or other than
     *         PasswordCallback is supplied
     */
    @Override
    public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
      Console cons = System.console();
      char[] password = this.password;
      if (cons == null && password == null) {
        throw new UnsupportedCallbackException(callbacks[0], "Console is not available");
      }
      for (Callback callback : callbacks) {
        if (!(callback instanceof PasswordCallback)) {
          throw new UnsupportedCallbackException(callback);
        }
        PasswordCallback pwdCallback = (PasswordCallback) callback;
        if (password != null) {
          pwdCallback.setPassword(password);
          continue;
        }
        // It is used instead of cons.readPassword(prompt), because the prompt may contain '%'
        // characters
        pwdCallback.setPassword(cons.readPassword("%s", pwdCallback.getPrompt()));
      }
    }
  }
}
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.ssl;

import org.postgresql.PGProperty;
import org.postgresql.core.PGStream;
import org.postgresql.core.SocketFactoryFactory;
import org.postgresql.jdbc.SslMode;
import org.postgresql.util.GT;
import org.postgresql.util.ObjectFactory;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;

import java.io.IOException;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;

/**
 * Upgrades an already-established plain socket connection to SSL.
 */
public class MakeSSL extends ObjectFactory {

  private static final Logger LOGGER = Logger.getLogger(MakeSSL.class.getName());

  /**
   * Wraps the socket inside {@code stream} with an SSL socket, performs the handshake,
   * optionally verifies the peer name, and swaps the new socket into the stream.
   *
   * @param stream the stream whose socket is upgraded in place
   * @param info connection properties (sslfactory, sslmode, timeouts, ...)
   * @throws PSQLException on any SSL failure, factory problem, or hostname mismatch
   * @throws IOException if changing the underlying socket fails
   */
  public static void convert(PGStream stream, Properties info)
      throws PSQLException, IOException {
    LOGGER.log(Level.FINE, "converting regular socket connection to ssl");

    SSLSocketFactory sslFactory = SocketFactoryFactory.getSslSocketFactory(info);
    SSLSocket sslSocket;
    try {
      sslSocket = (SSLSocket) sslFactory.createSocket(stream.getSocket(),
          stream.getHostSpec().getHost(), stream.getHostSpec().getPort(), true);
      // While the handshake runs, use the connect timeout rather than the read timeout.
      int connectTimeoutSeconds = PGProperty.CONNECT_TIMEOUT.getInt(info);
      sslSocket.setSoTimeout(connectTimeoutSeconds * 1000);
      // We must invoke manually, otherwise the exceptions are hidden
      sslSocket.setUseClientMode(true);
      sslSocket.startHandshake();
    } catch (IOException ex) {
      throw new PSQLException(GT.tr("SSL error: {0}", ex.getMessage()),
          PSQLState.CONNECTION_FAILURE, ex);
    }
    if (sslFactory instanceof LibPQFactory) { // throw any KeyManager exception
      ((LibPQFactory) sslFactory).throwKeyManagerException();
    }

    if (SslMode.of(info).verifyPeerName()) {
      verifyPeerName(stream, info, sslSocket);
    }
    // Zero timeout (default) means infinite
    int socketTimeout = PGProperty.SOCKET_TIMEOUT.getInt(info);
    sslSocket.setSoTimeout(socketTimeout * 1000);

    stream.changeSocket(sslSocket);
  }

  /**
   * Checks the server's certificate against the expected host name, using either the
   * default {@link PGjdbcHostnameVerifier} or the class named by {@code sslhostnameverifier}.
   *
   * @throws PSQLException if the verifier cannot be created or the host name does not match
   */
  private static void verifyPeerName(PGStream stream, Properties info, SSLSocket sslSocket)
      throws PSQLException {
    String verifierName = PGProperty.SSL_HOSTNAME_VERIFIER.getOrDefault(info);
    HostnameVerifier verifier;
    if (verifierName == null) {
      verifier = PGjdbcHostnameVerifier.INSTANCE;
      verifierName = "PgjdbcHostnameVerifier";
    } else {
      try {
        verifier = instantiate(HostnameVerifier.class, verifierName, info, false, null);
      } catch (Exception e) {
        throw new PSQLException(
            GT.tr("The HostnameVerifier class provided {0} could not be instantiated.",
                verifierName),
            PSQLState.CONNECTION_FAILURE, e);
      }
    }

    if (!verifier.verify(stream.getHostSpec().getHost(), sslSocket.getSession())) {
      throw new PSQLException(
          GT.tr("The hostname {0} could not be verified by hostnameverifier {1}.",
              stream.getHostSpec().getHost(), verifierName),
          PSQLState.CONNECTION_FAILURE);
    }
  }

}
+ */ +public class NonValidatingFactory extends WrappedFactory { + + /** + * We provide a constructor that takes an unused argument solely because the ssl calling code will + * look for this constructor first and then fall back to the no argument constructor, so we avoid + * an exception and additional reflection lookups. + * + * @param arg input argument + * @throws GeneralSecurityException if something goes wrong + */ + public NonValidatingFactory(String arg) throws GeneralSecurityException { + SSLContext ctx = SSLContext.getInstance("TLS"); // or "SSL" ? + + ctx.init(null, new TrustManager[]{new NonValidatingTM()}, null); + + factory = ctx.getSocketFactory(); + } + + public static class NonValidatingTM implements X509TrustManager { + + @Override + public X509Certificate[] getAcceptedIssuers() { + return new X509Certificate[0]; + } + + @Override + public void checkClientTrusted(X509Certificate[] certs, String authType) { + } + + @Override + public void checkServerTrusted(X509Certificate[] certs, String authType) { + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/PGjdbcHostnameVerifier.java b/pgjdbc/src/main/java/org/postgresql/ssl/PGjdbcHostnameVerifier.java new file mode 100644 index 0000000..dbd432c --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ssl/PGjdbcHostnameVerifier.java @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
/*
 * Copyright (c) 2018, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.ssl;

import org.postgresql.util.GT;

import java.net.IDN;
import java.security.cert.CertificateParsingException;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.naming.InvalidNameException;
import javax.naming.ldap.LdapName;
import javax.naming.ldap.Rdn;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLPeerUnverifiedException;
import javax.net.ssl.SSLSession;
import javax.security.auth.x500.X500Principal;

/**
 * Hostname verifier following the libpq/RFC 2818 rules: subjectAltName dNSName entries take
 * precedence; the certificate's Common Name is consulted only when no dNSName entry exists.
 */
public class PGjdbcHostnameVerifier implements HostnameVerifier {
  private static final Logger LOGGER = Logger.getLogger(PGjdbcHostnameVerifier.class.getName());

  public static final PGjdbcHostnameVerifier INSTANCE = new PGjdbcHostnameVerifier();

  // GeneralName tags from RFC 5280: dNSName == 2, iPAddress == 7 (as surfaced by
  // X509Certificate#getSubjectAlternativeNames).
  private static final int TYPE_DNS_NAME = 2;
  private static final int TYPE_IP_ADDRESS = 7;

  /**
   * Orders hostname patterns from least specific to most specific: more dots win, fewer
   * wildcard stars win, and longer strings win as the final tie-breaker.
   */
  public static final Comparator<String> HOSTNAME_PATTERN_COMPARATOR = new Comparator<String>() {
    // Counts occurrences of ch in value.
    private int countChars(String value, char ch) {
      int count = 0;
      int pos = -1;
      while (true) {
        pos = value.indexOf(ch, pos + 1);
        if (pos == -1) {
          break;
        }
        count++;
      }
      return count;
    }

    @Override
    public int compare(String o1, String o2) {
      // The more the dots the better: a.b.c.postgresql.org is more specific than postgresql.org
      int d1 = countChars(o1, '.');
      int d2 = countChars(o2, '.');
      if (d1 != d2) {
        return d1 > d2 ? 1 : -1;
      }

      // The less the stars the better: postgresql.org is more specific than *.*.postgresql.org
      int s1 = countChars(o1, '*');
      int s2 = countChars(o2, '*');
      if (s1 != s2) {
        return s1 < s2 ? 1 : -1;
      }

      // The longer the better: postgresql.org is more specific than sql.org
      int l1 = o1.length();
      int l2 = o2.length();
      if (l1 != l2) {
        return l1 > l2 ? 1 : -1;
      }

      return 0;
    }
  };

  /**
   * Verifies the peer certificate of {@code session} against {@code hostname}.
   * Subject Alternative Names are checked first; the Common Name is used only
   * when the certificate carries no dNSName SAN entries (RFC 2818, section 3.1).
   *
   * @param hostname host name the connection was made to (may be a bracketed IPv6 literal)
   * @param session SSL session holding the peer certificates
   * @return true when the certificate matches the host name
   */
  @Override
  public boolean verify(String hostname, SSLSession session) {
    X509Certificate[] peerCerts;
    try {
      peerCerts = (X509Certificate[]) session.getPeerCertificates();
    } catch (SSLPeerUnverifiedException e) {
      LOGGER.log(Level.SEVERE,
          GT.tr("Unable to parse X509Certificate for hostname {0}", hostname), e);
      return false;
    }
    if (peerCerts == null || peerCerts.length == 0) {
      LOGGER.log(Level.SEVERE,
          GT.tr("No certificates found for hostname {0}", hostname));
      return false;
    }

    String canonicalHostname;
    if (hostname.startsWith("[") && hostname.endsWith("]")) {
      // IPv6 address like [2001:db8:0:1:1:1:1:1]
      canonicalHostname = hostname.substring(1, hostname.length() - 1);
    } else {
      // This converts unicode domain name to ASCII
      try {
        canonicalHostname = IDN.toASCII(hostname);
        if (LOGGER.isLoggable(Level.FINEST)) {
          LOGGER.log(Level.FINEST, "Canonical host name for {0} is {1}",
              new Object[]{hostname, canonicalHostname});
        }
      } catch (IllegalArgumentException e) {
        // e.g. hostname is invalid
        LOGGER.log(Level.SEVERE,
            GT.tr("Hostname {0} is invalid", hostname), e);
        return false;
      }
    }

    // The first certificate in the chain is the server's own certificate.
    X509Certificate serverCert = peerCerts[0];

    // Check for Subject Alternative Names (see RFC 6125)

    Collection<List<?>> subjectAltNames;
    try {
      subjectAltNames = serverCert.getSubjectAlternativeNames();
      if (subjectAltNames == null) {
        subjectAltNames = Collections.emptyList();
      }
    } catch (CertificateParsingException e) {
      LOGGER.log(Level.SEVERE,
          GT.tr("Unable to parse certificates for hostname {0}", hostname), e);
      return false;
    }

    boolean anyDnsSan = false;
    /*
     * Each item in the SAN collection is a 2-element list.
     * See {@link X509Certificate#getSubjectAlternativeNames}
     * The first element in each list is a number indicating the type of entry.
     */
    for (List<?> sanItem : subjectAltNames) {
      if (sanItem.size() != 2) {
        continue;
      }
      Integer sanType = (Integer) sanItem.get(0);
      if (sanType == null) {
        // just in case
        continue;
      }
      if (sanType != TYPE_IP_ADDRESS && sanType != TYPE_DNS_NAME) {
        continue;
      }
      String san = (String) sanItem.get(1);
      if (sanType == TYPE_IP_ADDRESS && san != null && san.startsWith("*")) {
        // Wildcards should not be present in the IP Address field
        continue;
      }
      anyDnsSan |= sanType == TYPE_DNS_NAME;
      if (verifyHostName(canonicalHostname, san)) {
        if (LOGGER.isLoggable(Level.FINEST)) {
          LOGGER.log(Level.FINEST,
              GT.tr("Server name validation pass for {0}, subjectAltName {1}", hostname, san));
        }
        return true;
      }
    }

    if (anyDnsSan) {
      /*
       * RFC2818, section 3.1 (I bet you won't recheck :)
       * If a subjectAltName extension of type dNSName is present, that MUST
       * be used as the identity. Otherwise, the (most specific) Common Name
       * field in the Subject field of the certificate MUST be used. Although
       * the use of the Common Name is existing practice, it is deprecated and
       * Certification Authorities are encouraged to use the dNSName instead.
       */
      LOGGER.log(Level.SEVERE,
          GT.tr("Server name validation failed: certificate for host {0} dNSName entries subjectAltName,"
              + " but none of them match. Assuming server name validation failed", hostname));
      return false;
    }

    // Last attempt: no DNS Subject Alternative Name entries detected, try common name
    LdapName dn;
    try {
      dn = new LdapName(serverCert.getSubjectX500Principal().getName(X500Principal.RFC2253));
    } catch (InvalidNameException e) {
      LOGGER.log(Level.SEVERE,
          GT.tr("Server name validation failed: unable to extract common name"
              + " from X509Certificate for hostname {0}", hostname), e);
      return false;
    }

    List<String> commonNames = new ArrayList<>(1);
    for (Rdn rdn : dn.getRdns()) {
      if ("CN".equals(rdn.getType())) {
        commonNames.add((String) rdn.getValue());
      }
    }
    if (commonNames.isEmpty()) {
      LOGGER.log(Level.SEVERE,
          GT.tr("Server name validation failed: certificate for hostname {0} has no DNS subjectAltNames,"
                  + " and it CommonName is missing as well",
              hostname));
      return false;
    }
    if (commonNames.size() > 1) {
      /*
       * RFC2818, section 3.1
       * If a subjectAltName extension of type dNSName is present, that MUST
       * be used as the identity. Otherwise, the (most specific) Common Name
       * field in the Subject field of the certificate MUST be used
       *
       * The sort is from less specific to most specific.
       */
      Collections.sort(commonNames, HOSTNAME_PATTERN_COMPARATOR);
    }
    // After the sort the last entry is the most specific CN; only that one is matched.
    String commonName = commonNames.get(commonNames.size() - 1);
    boolean result = verifyHostName(canonicalHostname, commonName);
    if (!result) {
      LOGGER.log(Level.SEVERE,
          GT.tr("Server name validation failed: hostname {0} does not match common name {1}",
              hostname, commonName));
    }
    return result;
  }

  /**
   * Matches {@code hostname} against a certificate name {@code pattern}.
   * Supports a single leading {@code *} wildcard covering exactly one domain level
   * (so {@code *.c.com} matches {@code b.c.com} but not {@code a.b.c.com});
   * patterns like {@code foo*.com} are rejected. Comparison is case insensitive.
   *
   * @param hostname canonical host name
   * @param pattern certificate dNSName/CN pattern
   * @return true when the host name matches the pattern
   */
  public boolean verifyHostName(String hostname, String pattern) {
    if (hostname == null || pattern == null) {
      return false;
    }
    int lastStar = pattern.lastIndexOf('*');
    if (lastStar == -1) {
      // No wildcard => just compare hostnames
      return hostname.equalsIgnoreCase(pattern);
    }
    if (lastStar > 0) {
      // Wildcards like foo*.com are not supported yet
      return false;
    }
    if (pattern.indexOf('.') == -1) {
      // Wildcard certificates should contain at least one dot
      return false;
    }
    // pattern starts with *, so hostname should be at least (pattern.length-1) long
    if (hostname.length() < pattern.length() - 1) {
      return false;
    }
    // Use case insensitive comparison
    final boolean ignoreCase = true;
    // Below code is "hostname.endsWithIgnoreCase(pattern.withoutFirstStar())"

    // E.g. hostname==sub.host.com; pattern==*.host.com
    // We need to start the offset of ".host.com" in hostname
    // For this we take hostname.length() - pattern.length()
    // and +1 is required since pattern is known to start with *
    int toffset = hostname.length() - pattern.length() + 1;

    // Wildcard covers just one domain level
    // a.b.c.com should not be covered by *.c.com
    if (hostname.lastIndexOf('.', toffset - 1) >= 0) {
      // If there's a dot in between 0..toffset
      return false;
    }

    return hostname.regionMatches(ignoreCase, toffset,
        pattern, 1, pattern.length() - 1);
  }

}
/*
 * Copyright (c) 2019, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.ssl;

import org.postgresql.jdbc.ResourceLock;
import org.postgresql.util.GT;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;

import java.io.FileInputStream;
import java.net.Socket;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.Principal;
import java.security.PrivateKey;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;

import javax.net.ssl.X509KeyManager;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.x500.X500Principal;

/**
 * A {@link X509KeyManager} that lazily loads the client certificate and private key from a
 * PKCS#12 file. The keystore password is obtained through the supplied {@link CallbackHandler}.
 * The key entry is always looked up under the alias {@code "user"}.
 */
public class PKCS12KeyManager implements X509KeyManager {

  // Supplies the keystore password on demand (console prompt or fixed sslpassword).
  private final CallbackHandler cbh;
  // Deferred error from getCertificateChain/getPrivateKey; surfaced via throwKeyManagerException().
  private PSQLException error;
  // Path to the PKCS#12 file.
  private final String keyfile;
  private final KeyStore keyStore;
  // Guarded by `lock`; true once the keystore has been read from disk.
  boolean keystoreLoaded;
  private final ResourceLock lock = new ResourceLock();

  public PKCS12KeyManager(String pkcsFile, CallbackHandler cbh) throws PSQLException {
    try {
      keyStore = KeyStore.getInstance("pkcs12");
      keyfile = pkcsFile;
      this.cbh = cbh;
    } catch (KeyStoreException kse) {
      throw new PSQLException(GT.tr(
          "Unable to find pkcs12 keystore."),
          PSQLState.CONNECTION_FAILURE, kse);
    }
  }

  /**
   * getCertificateChain and getPrivateKey cannot throw exceptions, therefore any exception is stored
   * in {@link #error} and can be raised by this method.
   *
   * @throws PSQLException if any exception is stored in {@link #error} and can be raised
   */
  public void throwKeyManagerException() throws PSQLException {
    if (error != null) {
      throw error;
    }
  }

  @Override
  public String [] getClientAliases(String keyType, Principal [] principals) {
    String alias = chooseClientAlias(new String[]{keyType}, principals, (Socket) null);
    return alias == null ? null : new String[]{alias};
  }

  /**
   * Returns {@code "user"} when our certificate is usable for the server's requested key types
   * and issuers, otherwise {@code null}.
   */
  @Override
  public String chooseClientAlias(String[] keyType, Principal [] principals,
      Socket socket) {
    if (principals == null || principals.length == 0) {
      // Postgres 8.4 and earlier do not send the list of accepted certificate authorities
      // to the client. See BUG #5468. We only hope, that our certificate will be accepted.
      return "user";
    } else {
      // Sending a wrong certificate makes the connection rejected, even, if clientcert=0 in
      // pg_hba.conf.
      // therefore we only send our certificate, if the issuer is listed in issuers
      X509Certificate[] certchain = getCertificateChain("user");
      if (certchain == null) {
        return null;
      } else {
        // The last certificate in the chain is checked against the server-provided issuers.
        X509Certificate cert = certchain[certchain.length - 1];
        X500Principal ourissuer = cert.getIssuerX500Principal();
        String certKeyType = cert.getPublicKey().getAlgorithm();
        boolean keyTypeFound = false;
        boolean found = false;
        if (keyType != null && keyType.length > 0) {
          for (String kt : keyType) {
            if (kt.equalsIgnoreCase(certKeyType)) {
              keyTypeFound = true;
            }
          }
        } else {
          // If no key types were passed in, assume we don't care
          // about checking that the cert uses a particular key type.
          keyTypeFound = true;
        }
        if (keyTypeFound) {
          for (Principal issuer : principals) {
            if (ourissuer.equals(issuer)) {
              found = keyTypeFound;
            }
          }
        }
        return found ? "user" : null;
      }
    }
  }

  @Override
  public String [] getServerAliases(String s, Principal [] principals) {
    return new String[]{};
  }

  @Override
  public String chooseServerAlias(String s, Principal [] principals,
      Socket socket) {
    // we are not a server
    return null;
  }

  /**
   * Returns the certificate chain stored under {@code alias}, or {@code null} on failure
   * (the failure is remembered in {@link #error}).
   */
  @Override
  public X509Certificate [] getCertificateChain(String alias) {
    try {
      loadKeyStore();
      Certificate[] certs = keyStore.getCertificateChain(alias);
      if (certs == null) {
        return null;
      }
      X509Certificate[] x509Certificates = new X509Certificate[certs.length];
      int i = 0;
      for (Certificate cert : certs) {
        x509Certificates[i++] = (X509Certificate) cert;
      }
      return x509Certificates;
    } catch (Exception kse) {
      error = new PSQLException(GT.tr(
          "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."),
          PSQLState.CONNECTION_FAILURE, kse);
    }
    return null;
  }

  /**
   * Returns the private key for the {@code "user"} entry, or {@code null} on failure
   * (the failure is remembered in {@link #error}).
   */
  @Override
  public PrivateKey getPrivateKey(String s) {
    try {
      loadKeyStore();
      PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
      cbh.handle(new Callback[]{pwdcb});

      KeyStore.ProtectionParameter protParam = new KeyStore.PasswordProtection(pwdcb.getPassword());
      KeyStore.PrivateKeyEntry pkEntry =
          (KeyStore.PrivateKeyEntry) keyStore.getEntry("user", protParam);
      if (pkEntry == null) {
        return null;
      }
      return pkEntry.getPrivateKey();
    } catch (Exception ioex) {
      error = new PSQLException(GT.tr("Could not read SSL key file {0}.", keyfile),
          PSQLState.CONNECTION_FAILURE, ioex);
    }
    return null;
  }

  // Loads the PKCS#12 file exactly once; subsequent calls are no-ops. Thread-safe via `lock`.
  @SuppressWarnings("try")
  private void loadKeyStore() throws Exception {
    try (ResourceLock ignore = lock.obtain()) {
      if (keystoreLoaded) {
        return;
      }
      // We call back for the password
      PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
      try {
        cbh.handle(new Callback[]{pwdcb});
      } catch (UnsupportedCallbackException ucex) {
        if ((cbh instanceof LibPQFactory.ConsoleCallbackHandler)
            && ("Console is not available".equals(ucex.getMessage()))) {
          error = new PSQLException(GT
              .tr("Could not read password for SSL key file, console is not available."),
              PSQLState.CONNECTION_FAILURE, ucex);
        } else {
          error =
              new PSQLException(
                  GT.tr("Could not read password for SSL key file by callbackhandler {0}.",
                      cbh.getClass().getName()),
                  PSQLState.CONNECTION_FAILURE, ucex);
        }

      }

      // try-with-resources closes the stream; the previous code leaked the FileInputStream.
      try (FileInputStream fis = new FileInputStream(keyfile)) {
        keyStore.load(fis, pwdcb.getPassword());
      }
      keystoreLoaded = true;
    }
  }

}

Provides a SSLSocketFactory that authenticates the remote server against an explicit pre-shared + * SSL certificate. This is more secure than using the NonValidatingFactory as it prevents "man in + * the middle" attacks. It is also more secure than relying on a central CA signing your server's + * certificate as it pins the server's certificate.

+ * + *

This class requires a single String parameter specified by setting the connection property + * sslfactoryarg. The value of this property is the PEM-encoded remote server's SSL + * certificate.

+ * + *

Where the certificate is loaded from is based upon the prefix of the sslfactoryarg property. + * The following table lists the valid set of prefixes.

+ * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
Valid prefixes for sslfactoryarg
PrefixExampleExplanation
classpath:classpath:ssl/server.crtLoaded from the classpath.
file:file:/foo/bar/server.crtLoaded from the filesystem.
env:env:mydb_certLoaded from string value of the mydb_cert environment variable.
sys:sys:mydb_certLoaded from string value of the mydb_cert system property.
-----BEGIN CERTIFICATE------
+ *
+ * -----BEGIN CERTIFICATE-----
+ * MIIDQzCCAqygAwIBAgIJAOd1tlfiGoEoMA0GCSqGSIb3DQEBBQUAMHUxCzAJBgNV
+ * [... truncated ...]
+ * UCmmYqgiVkAGWRETVo+byOSDZ4swb10=
+ * -----END CERTIFICATE-----
+ *         
+*
Loaded from string value of the argument.
+ */ + +public class SingleCertValidatingFactory extends WrappedFactory { + private static final String FILE_PREFIX = "file:"; + private static final String CLASSPATH_PREFIX = "classpath:"; + private static final String ENV_PREFIX = "env:"; + private static final String SYS_PROP_PREFIX = "sys:"; + + public SingleCertValidatingFactory(String sslFactoryArg) throws GeneralSecurityException { + if (sslFactoryArg == null || "".equals(sslFactoryArg)) { + throw new GeneralSecurityException(GT.tr("The sslfactoryarg property may not be empty.")); + } + InputStream in = null; + try { + if (sslFactoryArg.startsWith(FILE_PREFIX)) { + String path = sslFactoryArg.substring(FILE_PREFIX.length()); + in = new BufferedInputStream(new FileInputStream(path)); + } else if (sslFactoryArg.startsWith(CLASSPATH_PREFIX)) { + String path = sslFactoryArg.substring(CLASSPATH_PREFIX.length()); + ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); + InputStream inputStream; + if (classLoader != null) { + inputStream = classLoader.getResourceAsStream(path); + if (inputStream == null) { + throw new IllegalArgumentException( + GT.tr("Unable to find resource {0} via Thread contextClassLoader {1}", + path, classLoader) + ); + } + } else { + inputStream = getClass().getResourceAsStream(path); + if (inputStream == null) { + throw new IllegalArgumentException( + GT.tr("Unable to find resource {0} via class {1} ClassLoader {2}", + path, getClass(), getClass().getClassLoader()) + ); + } + } + in = new BufferedInputStream(inputStream); + } else if (sslFactoryArg.startsWith(ENV_PREFIX)) { + String name = sslFactoryArg.substring(ENV_PREFIX.length()); + String cert = System.getenv(name); + if (cert == null || "".equals(cert)) { + throw new GeneralSecurityException(GT.tr( + "The environment variable containing the server's SSL certificate must not be empty.")); + } + in = new ByteArrayInputStream(cert.getBytes(StandardCharsets.UTF_8)); + } else if 
(sslFactoryArg.startsWith(SYS_PROP_PREFIX)) { + String name = sslFactoryArg.substring(SYS_PROP_PREFIX.length()); + String cert = System.getProperty(name); + if (cert == null || "".equals(cert)) { + throw new GeneralSecurityException(GT.tr( + "The system property containing the server's SSL certificate must not be empty.")); + } + in = new ByteArrayInputStream(cert.getBytes(StandardCharsets.UTF_8)); + } else if (sslFactoryArg.startsWith("-----BEGIN CERTIFICATE-----")) { + in = new ByteArrayInputStream(sslFactoryArg.getBytes(StandardCharsets.UTF_8)); + } else { + throw new GeneralSecurityException(GT.tr( + "The sslfactoryarg property must start with the prefix file:, classpath:, env:, sys:, or -----BEGIN CERTIFICATE-----.")); + } + + SSLContext ctx = SSLContext.getInstance("TLS"); + ctx.init(null, new TrustManager[]{new SingleCertTrustManager(in)}, null); + factory = ctx.getSocketFactory(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + if (e instanceof GeneralSecurityException) { + throw (GeneralSecurityException) e; + } + throw new GeneralSecurityException(GT.tr("An error occurred reading the certificate"), e); + } finally { + if (in != null) { + try { + in.close(); + } catch (Exception e2) { + // ignore + } + } + } + } + + public static class SingleCertTrustManager implements X509TrustManager { + X509Certificate cert; + X509TrustManager trustManager; + + public SingleCertTrustManager(InputStream in) throws IOException, GeneralSecurityException { + KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); + // Note: KeyStore requires it be loaded even if you don't load anything into it: + ks.load(null); + CertificateFactory cf = CertificateFactory.getInstance("X509"); + cert = (X509Certificate) cf.generateCertificate(in); + ks.setCertificateEntry(UUID.randomUUID().toString(), cert); + TrustManagerFactory tmf = + TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ks); + for (TrustManager tm : 
tmf.getTrustManagers()) { + if (tm instanceof X509TrustManager) { + trustManager = (X509TrustManager) tm; + break; + } + } + if (trustManager == null) { + throw new GeneralSecurityException(GT.tr("No X509TrustManager found")); + } + } + + @Override + public void checkClientTrusted(X509Certificate[] chain, String authType) + throws CertificateException { + } + + @Override + public void checkServerTrusted(X509Certificate[] chain, String authType) + throws CertificateException { + trustManager.checkServerTrusted(chain, authType); + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + return new X509Certificate[]{cert}; + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/WrappedFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/WrappedFactory.java new file mode 100644 index 0000000..66e940c --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ssl/WrappedFactory.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.ssl; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.Socket; + +import javax.net.ssl.SSLSocketFactory; + +/** + * Provide a wrapper to a real SSLSocketFactory delegating all calls to the contained instance. A + * subclass needs only provide a constructor for the wrapped SSLSocketFactory. + */ +public abstract class WrappedFactory extends SSLSocketFactory { + + // The field is indeed not initialized in this class, however it is a part of public API, + // so it is hard to fix. 
+ @SuppressWarnings("initialization.field.uninitialized") + protected SSLSocketFactory factory; + + @Override + public Socket createSocket(InetAddress host, int port) throws IOException { + return factory.createSocket(host, port); + } + + @Override + public Socket createSocket(String host, int port) throws IOException { + return factory.createSocket(host, port); + } + + @Override + public Socket createSocket(String host, int port, InetAddress localHost, int localPort) + throws IOException { + return factory.createSocket(host, port, localHost, localPort); + } + + @Override + public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort) + throws IOException { + return factory.createSocket(address, port, localAddress, localPort); + } + + @Override + public Socket createSocket(Socket socket, String host, int port, boolean autoClose) + throws IOException { + return factory.createSocket(socket, host, port, autoClose); + } + + @Override + public String[] getDefaultCipherSuites() { + return factory.getDefaultCipherSuites(); + } + + @Override + public String[] getSupportedCipherSuites() { + return factory.getSupportedCipherSuites(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/jdbc4/LibPQFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/jdbc4/LibPQFactory.java new file mode 100644 index 0000000..40e5139 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/ssl/jdbc4/LibPQFactory.java @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.ssl.jdbc4; + +import org.postgresql.jdbc.SslMode; +import org.postgresql.ssl.PGjdbcHostnameVerifier; +import org.postgresql.util.PSQLException; + +import java.net.IDN; +import java.util.Properties; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLSession; + +/** + * @deprecated prefer {@link org.postgresql.ssl.LibPQFactory} + */ +@Deprecated +public class LibPQFactory extends org.postgresql.ssl.LibPQFactory implements HostnameVerifier { + private final SslMode sslMode; + + /** + * @param info the connection parameters The following parameters are used: + * sslmode,sslcert,sslkey,sslrootcert,sslhostnameverifier,sslpasswordcallback,sslpassword + * @throws PSQLException if security error appears when initializing factory + * @deprecated prefer {@link org.postgresql.ssl.LibPQFactory} + */ + @Deprecated + public LibPQFactory(Properties info) throws PSQLException { + super(info); + + sslMode = SslMode.of(info); + } + + /** + * Verifies if given hostname matches pattern. + * + * @param hostname input hostname + * @param pattern domain name pattern + * @return true when domain matches pattern + * @deprecated use {@link PGjdbcHostnameVerifier} + */ + @Deprecated + public static boolean verifyHostName(String hostname, String pattern) { + String canonicalHostname; + if (hostname.startsWith("[") && hostname.endsWith("]")) { + // IPv6 address like [2001:db8:0:1:1:1:1:1] + canonicalHostname = hostname.substring(1, hostname.length() - 1); + } else { + // This converts unicode domain name to ASCII + try { + canonicalHostname = IDN.toASCII(hostname); + } catch (IllegalArgumentException e) { + // e.g. hostname is invalid + return false; + } + } + return PGjdbcHostnameVerifier.INSTANCE.verifyHostName(canonicalHostname, pattern); + } + + /** + * Verifies the server certificate according to the libpq rules. The cn attribute of the + * certificate is matched against the hostname. 
If the cn attribute starts with an asterisk (*), + * it will be treated as a wildcard, and will match all characters except a dot (.). This means + * the certificate will not match subdomains. If the connection is made using an IP address + * instead of a hostname, the IP address will be matched (without doing any DNS lookups). + * + * @param hostname Hostname or IP address of the server. + * @param session The SSL session. + * @return true if the certificate belongs to the server, false otherwise. + * @see PGjdbcHostnameVerifier + * @deprecated use PgjdbcHostnameVerifier + */ + @Deprecated + @Override + public boolean verify(String hostname, SSLSession session) { + if (!sslMode.verifyPeerName()) { + return true; + } + return PGjdbcHostnameVerifier.INSTANCE.verify(hostname, session); + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_bg.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_bg.java new file mode 100644 index 0000000..5c70c93 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_bg.java @@ -0,0 +1,458 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_bg extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[890]; + t[0] = ""; + t[1] = "Project-Id-Version: JDBC Driver for PostgreSQL 8.x\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-12-28 00:01+0100\nLast-Translator: \nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Bulgarian\nX-Poedit-Country: BULGARIA\n"; + t[2] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; + t[3] = "CallableStatement функция бе обработена и изходния параметър {0} бе от тип {1}, обаче тип {2} бе използван."; + t[6] = "Too many update 
results were returned."; + t[7] = "Твърде много резултати бяха получени при актуализацията."; + t[10] = "There are no rows in this ResultSet."; + t[11] = "В този ResultSet няма редове."; + t[14] = "Detail: {0}"; + t[15] = "Подробност: {0}"; + t[20] = "Invalid fetch direction constant: {0}."; + t[21] = "Невалидна константа за fetch посоката: {0}."; + t[22] = "No function outputs were registered."; + t[23] = "Резултати от функцията не бяха регистрирани."; + t[24] = "The array index is out of range: {0}"; + t[25] = "Индексът на масив е извън обхвата: {0}"; + t[26] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[27] = "Тип на удостоверяване {0} не се поддържа. Проверете дали сте конфигурирали pg_hba.conf файла, да включва IP адреса на клиента или подмрежата, и че се използва схема за удостоверяване, поддържана от драйвъра."; + t[28] = "The server requested password-based authentication, but no password was provided."; + t[29] = "Сървърът изисква идентифициране с парола, но парола не бе въведена."; + t[40] = "Large Objects may not be used in auto-commit mode."; + t[41] = "Големи обекти LOB не могат да се използват в auto-commit модус."; + t[46] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; + t[47] = "Операцията изисква резултатите да са scrollable, но този ResultSet е FORWARD_ONLY."; + t[48] = "Zero bytes may not occur in string parameters."; + t[49] = "Не може да има нула байта в низ параметрите."; + t[50] = "The JVM claims not to support the encoding: {0}"; + t[51] = "JVM не поддържа тази кодова таблица за момента: {0}"; + t[54] = "Your security policy has prevented the connection from being attempted. 
You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; + t[55] = "Връзката не бе осъществена, поради вашите настройки за сигурност. Може би трябва да предоставите java.net.SocketPermission права на сървъра и порта с базата данни, към който искате да се свържете."; + t[62] = "Database connection failed when canceling copy operation"; + t[63] = "Неосъществена връзка към базата данни при прекъсване на копирането"; + t[78] = "Error loading default settings from driverconfig.properties"; + t[79] = "Грешка при зареждане на настройките по подразбиране от файла driverconfig.properties"; + t[82] = "Returning autogenerated keys is not supported."; + t[83] = "Автоматично генерирани ключове не се поддържат."; + t[92] = "Unable to find name datatype in the system catalogs."; + t[93] = "Не може да се намери името на типа данни в системните каталози."; + t[94] = "Tried to read from inactive copy"; + t[95] = "Опит за четене при неактивно копиране"; + t[96] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[97] = "ResultSet не може да се обновява. Заявката генерираща този резултат трябва да селектира само една таблица, както и всички първични ключове в нея. За повече информация, вижте раздел 5.6 на JDBC 2.1 API Specification."; + t[98] = "Cannot cast an instance of {0} to type {1}"; + t[99] = "Не може да преобразува инстанция на {0} към тип {1}"; + t[102] = "Requested CopyOut but got {0}"; + t[103] = "Зададено CopyOut но получено {0}"; + t[106] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; + t[107] = "Невъзможна комбинация: Prepare трябва да бъде издадено чрез използване на същата връзка, при която е започната транзакцията. 
currentXid={0}, prepare xid={1}"; + t[108] = "Can''t use query methods that take a query string on a PreparedStatement."; + t[109] = "Не може да се употребяват методи за заявка, които ползват низове на PreparedStatement."; + t[114] = "Conversion of money failed."; + t[115] = "Неуспешно валутно преобразуване."; + t[118] = "Tried to obtain lock while already holding it"; + t[119] = "Опит за получаване на заключване/резервация докато вече е получено"; + t[120] = "This SQLXML object has not been initialized, so you cannot retrieve data from it."; + t[121] = "Този SQLXML обект не е инициализиран, така че не могат да се извличат данни от него."; + t[122] = "This SQLXML object has already been freed."; + t[123] = "Този SQLXML обект вече е освободен."; + t[124] = "Invalid stream length {0}."; + t[125] = "Невалидна дължина {0} на потока данни."; + t[130] = "Position: {0}"; + t[131] = "Позиция: {0}"; + t[132] = "The server does not support SSL."; + t[133] = "Сървърът не поддържа SSL."; + t[134] = "Got {0} error responses to single copy cancel request"; + t[135] = "Получени {0} отговори за грешка при единствено искане да се прекъсне копирането"; + t[136] = "DataSource has been closed."; + t[137] = "Източникът на данни е прекъснат."; + t[138] = "Unable to convert DOMResult SQLXML data to a string."; + t[139] = "Не може да преобразува DOMResult SQLXML данни в низ."; + t[144] = "Invalid UUID data."; + t[145] = "Невалидни UUID данни."; + t[148] = "The fastpath function {0} is unknown."; + t[149] = "Функцията {0} е неизвестна."; + t[154] = "Connection has been closed."; + t[155] = "Връзката бе прекъсната."; + t[156] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; + t[157] = "Тази заявка не декларира изходен параметър. Ползвайте '{' ?= call ... 
'}' за да декларирате такъв."; + t[158] = "A connection could not be made using the requested protocol {0}."; + t[159] = "Не може да осъществи връзка, ползвайки искания протокол {0}."; + t[162] = "The maximum field size must be a value greater than or equal to 0."; + t[163] = "Максималният размер на полето трябва да бъде стойност по-голяма или равна на 0."; + t[166] = "GSS Authentication failed"; + t[167] = "GSS удостоверяването бе неуспешно"; + t[176] = "Unknown XML Result class: {0}"; + t[177] = "Неизвестен XML изходящ клас: {0}"; + t[180] = "Server SQLState: {0}"; + t[181] = "SQL статус на сървъра: {0}"; + t[182] = "Unknown Response Type {0}."; + t[183] = "Неизвестен тип на отговор {0}."; + t[186] = "Tried to cancel an inactive copy operation"; + t[187] = "Опит за прекъсване на неактивно копиране"; + t[190] = "This PooledConnection has already been closed."; + t[191] = "Тази PooledConnection връзка бе вече прекъсната."; + t[200] = "Multiple ResultSets were returned by the query."; + t[201] = "Заявката върна няколко ResultSets."; + t[202] = "Finalizing a Connection that was never closed:"; + t[203] = "Приключване на връзка, която не бе прекъсната:"; + t[204] = "Unsupported Types value: {0}"; + t[205] = "Неподдържана стойност за тип: {0}"; + t[206] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[207] = "CallableStatement функция бе декларирана, но обработена като registerOutParameter(1, ) "; + t[208] = "Cannot retrieve the name of an unnamed savepoint."; + t[209] = "Не може да определи името на неупомената savepoint."; + t[220] = "Cannot change transaction read-only property in the middle of a transaction."; + t[221] = "Не може да променяте правата на транзакцията по време на нейното извършване."; + t[222] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; + t[223] = "Прекалено голяма дължина {0} на съобщението. 
Това може да е причинено от прекалено голяма или неправилно зададена дължина на InputStream параметри."; + t[224] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[225] = "Параметърният индекс е извън обхват: {0}, брой параметри: {1}."; + t[226] = "Transaction isolation level {0} not supported."; + t[227] = "Изолационно ниво на транзакциите {0} не се поддържа."; + t[234] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; + t[235] = "Не може да се обнови ResultSet, когато се намираме преди началото или след края на резултатите."; + t[238] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[239] = "опита да извика end без съответстващо извикване на start. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[242] = "This SQLXML object has already been initialized, so you cannot manipulate it further."; + t[243] = "Този SQLXML обект вече е инициализиран и не може да бъде променен."; + t[250] = "Conversion to type {0} failed: {1}."; + t[251] = "Неуспешно преобразуване към тип {0}: {1}."; + t[252] = "The SSLSocketFactory class provided {0} could not be instantiated."; + t[253] = "Класът SSLSocketFactory връща {0} и не може да бъде инстанцииран."; + t[254] = "Unable to create SAXResult for SQLXML."; + t[255] = "Не може да се създаде SAXResult за SQLXML."; + t[256] = "Interrupted while attempting to connect."; + t[257] = "Опита за осъществяване на връзка бе своевременно прекъснат. "; + t[260] = "Protocol error. Session setup failed."; + t[261] = "Грешка в протокола. 
Неуспешна настройка на сесията."; + t[264] = "Database connection failed when starting copy"; + t[265] = "Неосъществена връзка към базата данни при започване на копирането"; + t[272] = "Cannot call cancelRowUpdates() when on the insert row."; + t[273] = "Не може да се изпълни cancelRowUpdates() метода, когато се намираме при редицата на въвеждане."; + t[274] = "Unable to bind parameter values for statement."; + t[275] = "Не може да подготви параметрите на командата."; + t[280] = "A result was returned when none was expected."; + t[281] = "Бе получен резултат, когато такъв не бе очакван."; + t[282] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; + t[283] = "Параметърът standard_conforming_strings при сървъра бе докладван като {0}. JDBC драйвъра очаква този параметър да бъде on или off."; + t[284] = "Unable to translate data into the desired encoding."; + t[285] = "Невъзможно преобразуване на данни в желаното кодиране."; + t[292] = "PostgreSQL LOBs can only index to: {0}"; + t[293] = "PostgreSQL индексира големи обекти LOB само до: {0}"; + t[294] = "Provided InputStream failed."; + t[295] = "Зададения InputStream поток е неуспешен."; + t[296] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}"; + t[297] = "Транзакция в транзакция не се поддържа за момента. 
xid={0}, currentXid={1}, state={2}, flags={3}"; + t[304] = "{0} function takes four and only four argument."; + t[305] = "Функцията {0} може да приеме четири и само четири аргумента."; + t[306] = "{0} function doesn''t take any argument."; + t[307] = "Функцията {0} не може да приема аргументи."; + t[310] = "Got CopyOutResponse from server during an active {0}"; + t[311] = "Получен CopyOutResponse отговор от сървъра при активно {0}"; + t[322] = "No value specified for parameter {0}."; + t[323] = "Няма стойност, определена за параметър {0}."; + t[324] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; + t[325] = "Невалидна UTF-8 последователност: първоначален байт е {0}: {1}"; + t[326] = "Error disabling autocommit"; + t[327] = "Грешка при изключване на autocommit"; + t[328] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; + t[329] = "Невалидна UTF-8 последователност: байта {0} от байтова последователност {1} не е 10xxxxxx: {2}"; + t[330] = "Received CommandComplete ''{0}'' without an active copy operation"; + t[331] = "Получено командно допълнение ''{0}'' без активна команда за копиране"; + t[332] = "Illegal UTF-8 sequence: final value is out of range: {0}"; + t[333] = "Невалидна UTF-8 последователност: крайната стойност е извън стойностните граници: {0}"; + t[336] = "Cannot change transaction isolation level in the middle of a transaction."; + t[337] = "Не може да променяте изолационното ниво на транзакцията по време на нейното извършване."; + t[340] = "An unexpected result was returned by a query."; + t[341] = "Заявката върна неочакван резултат."; + t[346] = "Conversion of interval failed"; + t[347] = "Неуспешно преобразуване на интервал"; + t[350] = "This ResultSet is closed."; + t[351] = "Операциите по този ResultSet са били прекратени."; + t[352] = "Read from copy failed."; + t[353] = "Четене от копието неуспешно."; + t[354] = "Unable to load the class {0} responsible for the datatype {1}"; + t[355] = "Невъзможно е зареждането 
на клас {0}, отговарящ за типа данни {1}"; + t[356] = "Failed to convert binary xml data to encoding: {0}."; + t[357] = "Неуспешно преобразуване на двоични XML данни за кодиране съгласно: {0}."; + t[362] = "Connection attempt timed out."; + t[363] = "Времето за осъществяване на връзката изтече (таймаут)."; + t[364] = "Expected command status BEGIN, got {0}."; + t[365] = "Очаквана команда BEGIN, получена {0}."; + t[372] = "This copy stream is closed."; + t[373] = "Потока за копиране на данните е затворен."; + t[376] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; + t[377] = "Не може да се определи SQL тип, който да се използва за инстанцията на {0}. Ползвайте метода setObject() с точни стойности, за да определите типа."; + t[378] = "Can''t refresh the insert row."; + t[379] = "Не може да обнови въведения ред."; + t[382] = "You must specify at least one column value to insert a row."; + t[383] = "Трябва да посочите поне една стойност за колона, за да вмъкнете ред."; + t[388] = "Connection is busy with another transaction"; + t[389] = "Връзката е заета с друга транзакция"; + t[392] = "Bad value for type {0} : {1}"; + t[393] = "Невалидна стойност за тип {0} : {1}"; + t[396] = "This statement has been closed."; + t[397] = "Командата е извършена."; + t[404] = "No primary key found for table {0}."; + t[405] = "Няма първичен ключ за таблица {0}."; + t[406] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[407] = "В момента се намираме преди края на ResultSet. 
Тук не може да се изпълни deleteRow() метода."; + t[414] = "{0} function takes two or three arguments."; + t[415] = "Функцията {0} може да приеме два или три аргумента."; + t[416] = "{0} function takes three and only three arguments."; + t[417] = "Функцията {0} може да приеме три и само три аргумента."; + t[418] = "Unable to find server array type for provided name {0}."; + t[419] = "Не може да се намери типа на сървърен масив за зададеното име {0}."; + t[420] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[421] = "Извикване на {0} - няма резултати и а бе очаквано цяло число."; + t[426] = "Database connection failed when ending copy"; + t[427] = "Неосъществена връзка към базата данни при завършване на копирането"; + t[428] = "Cannot write to copy a byte of value {0}"; + t[429] = "Няма пишещи права, за да копира байтова стойност {0}"; + t[430] = "Results cannot be retrieved from a CallableStatement before it is executed."; + t[431] = "Резултати от CallableStatement функция не могат да бъдат получени, преди тя да бъде обработена."; + t[432] = "Cannot reference a savepoint after it has been released."; + t[433] = "Не може да референцира savepoint, след като е била освободена."; + t[434] = "Failed to create object for: {0}."; + t[435] = "Неуспешно създаване на обект за: {0}."; + t[438] = "Unexpected packet type during copy: {0}"; + t[439] = "Неочакван тип пакет при копиране: {0}"; + t[442] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; + t[443] = "Невъзможно е да се определи стойността за MaxIndexKeys поради липса на системния каталог с данни."; + t[444] = "Tried to end inactive copy"; + t[445] = "Опит за прекъсване на неактивно копиране"; + t[450] = "Unexpected copydata from server for {0}"; + t[451] = "Неочаквано CopyData от сървъра за {0}"; + t[460] = "Zero bytes may not occur in identifiers."; + t[461] = "Не може да има нула байта в идентификаторите."; + t[462] = "Error during one-phase commit. 
commit xid={0}"; + t[463] = "Грешка при едно-фазов commit. commit xid={0}"; + t[464] = "Ran out of memory retrieving query results."; + t[465] = "Недостатъчна памет при представяна на резултатите от заявката."; + t[468] = "Unable to create StAXResult for SQLXML"; + t[469] = "Не може да се създаде StAXResult за SQLXML."; + t[470] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[471] = "Местоположение: Файл: {0}, Функция: {1}, Ред: {2}"; + t[482] = "A CallableStatement was executed with an invalid number of parameters"; + t[483] = "CallableStatement функция бе обработена, но с непозволен брой параметри."; + t[486] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; + t[487] = "Невалидна UTF-8 последователност: {0} байта използвани за кодирането на {1} байтова стойност: {2}"; + t[496] = "Interrupted while waiting to obtain lock on database connection"; + t[497] = "Прекъсване при чакане да получи заключване/резервация при връзка към базата данни"; + t[502] = "LOB positioning offsets start at 1."; + t[503] = "Позиционалният офсет при големи обекти LOB започва от 1."; + t[506] = "Returning autogenerated keys by column index is not supported."; + t[507] = "Автоматично генерирани ключове спрямо индекс на колона не се поддържат."; + t[510] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; + t[511] = "В момента се намираме в началото на ResultSet. Тук не може да се изпълни deleteRow() метода."; + t[524] = "Truncation of large objects is only implemented in 8.3 and later servers."; + t[525] = "Скъсяване на големи обекти LOB е осъществено само във версии след 8.3."; + t[526] = "Statement has been closed."; + t[527] = "Командата е завършена."; + t[540] = "Database connection failed when writing to copy"; + t[541] = "Неосъществена връзка към базата данни при опит за копиране"; + t[544] = "The server''s DateStyle parameter was changed to {0}. 
The JDBC driver requires DateStyle to begin with ISO for correct operation."; + t[545] = "Параметърът DateStyle при сървъра бе променен на {0}. JDBC драйвъра изисква DateStyle започва с ISO за да функционира правилно."; + t[546] = "Provided Reader failed."; + t[547] = "Грешка с ползвания четец."; + t[550] = "Not on the insert row."; + t[551] = "Не сме в редицата на въвеждане."; + t[566] = "Unable to decode xml data."; + t[567] = "Не може да декодира XML данните."; + t[570] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[571] = "Невъзможна комбинация: втората фаза на commit задължително трябва да бъде издадена при свободна връзка. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[596] = "Tried to write to an inactive copy operation"; + t[597] = "Опит за писане при неактивна операция за копиране"; + t[606] = "An error occurred while setting up the SSL connection."; + t[607] = "Възникна грешка при осъществяване на SSL връзката."; + t[614] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[615] = "Възникна неочаквана грешка с драйвъра. Моля докадвайте това изключение. "; + t[618] = "No results were returned by the query."; + t[619] = "Няма намерени резултати за заявката."; + t[620] = "ClientInfo property not supported."; + t[621] = "Информацията за ClientInfo не се поддържа."; + t[622] = "Unexpected error writing large object to database."; + t[623] = "Неочаквана грешка при записване на голям обект LOB в базата данни."; + t[628] = "The JVM claims not to support the {0} encoding."; + t[629] = "JVM не поддържа за момента {0} кодовата таблица."; + t[630] = "Unknown XML Source class: {0}"; + t[631] = "Неизвестен XML входящ клас: {0}"; + t[632] = "Interval {0} not yet implemented"; + t[633] = "Интервалът {0} не е валиден все още."; + t[636] = "commit called before end. 
commit xid={0}, state={1}"; + t[637] = "commit извикан преди end. commit xid={0}, state={1}"; + t[638] = "Tried to break lock on database connection"; + t[639] = "Опит за премахване на заключването/резервацията при връзка към базата данни"; + t[642] = "Missing expected error response to copy cancel request"; + t[643] = "Липсва очакван отговор при грешка да прекъсне копирането"; + t[644] = "Maximum number of rows must be a value grater than or equal to 0."; + t[645] = "Максималният брой редове трябва да бъде стойност по-голяма или равна на 0."; + t[652] = "Requested CopyIn but got {0}"; + t[653] = "Зададено CopyIn но получено {0}"; + t[656] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; + t[657] = "Отчетен параметър от тип {0}, но обработено като get{1} (sqltype={2}). "; + t[662] = "Unsupported value for stringtype parameter: {0}"; + t[663] = "Непозволена стойност за StringType параметър: {0}"; + t[664] = "Fetch size must be a value greater to or equal to 0."; + t[665] = "Размера за fetch size трябва да бъде по-голям или равен на 0."; + t[670] = "Cannot tell if path is open or closed: {0}."; + t[671] = "Не може да определи дали адреса е отворен или затворен: {0}."; + t[672] = "Expected an EOF from server, got: {0}"; + t[673] = "Очакван край на файла от сървъра, но получено: {0}"; + t[680] = "Copying from database failed: {0}"; + t[681] = "Копирането от базата данни бе неуспешно: {0}"; + t[682] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[683] = "Връзката бе автоматично прекъсната, защото нова връзка за същата беше осъществена или PooledConnection връзката е вече прекъсната."; + t[698] = "Custom type maps are not supported."; + t[699] = "Специфични типови съответствия не се поддържат."; + t[700] = "xid must not be null"; + t[701] = "xid не може да бъде null"; + t[706] = "Internal Position: {0}"; + t[707] = "Вътрешна 
позиция: {0}"; + t[708] = "Error during recover"; + t[709] = "Грешка при възстановяване"; + t[712] = "Method {0} is not yet implemented."; + t[713] = "Методът {0} все още не е функционален."; + t[714] = "Unexpected command status: {0}."; + t[715] = "Неочакван статус на команда: {0}."; + t[718] = "The column index is out of range: {0}, number of columns: {1}."; + t[719] = "Индексът на колоната е извън стойностен обхват: {0}, брой колони: {1}."; + t[730] = "Unknown ResultSet holdability setting: {0}."; + t[731] = "Неизвестна ResultSet holdability настройка: {0}."; + t[734] = "Cannot call deleteRow() when on the insert row."; + t[735] = "Не може да се изпълни deleteRow() метода, когато се намираме при редицата на въвеждане."; + t[740] = "ResultSet not positioned properly, perhaps you need to call next."; + t[741] = "ResultSet не е референциран правилно. Вероятно трябва да придвижите курсора посредством next."; + t[742] = "wasNull cannot be call before fetching a result."; + t[743] = "wasNull не може да бьде изпълнен, преди наличието на резултата."; + t[746] = "{0} function takes two and only two arguments."; + t[747] = "Функцията {0} може да приеме два и само два аргумента."; + t[750] = "Malformed function or procedure escape syntax at offset {0}."; + t[751] = "Непозволен синтаксис на функция или процедура при офсет {0}."; + t[752] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[753] = "Преждевременен край на входящ поток на данни, очаквани {0} байта, но прочетени само {1}."; + t[756] = "Got CopyData without an active copy operation"; + t[757] = "Получено CopyData без наличие на активна операция за копиране"; + t[758] = "Cannot retrieve the id of a named savepoint."; + t[759] = "Не може да определи ID на спомената savepoint."; + t[770] = "Where: {0}"; + t[771] = "Където: {0}"; + t[778] = "Got CopyInResponse from server during an active {0}"; + t[779] = "Получен CopyInResponse отговор от сървъра при активно {0}"; + t[780] = "Cannot 
convert an instance of {0} to type {1}"; + t[781] = "Не може да преобразува инстанцията на {0} във вида {1}"; + t[784] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; + t[785] = "Невъзможна комбинация: едно-фазов commit трябва да бъде издаден чрез използване на същата връзка, при която е започнал"; + t[790] = "Invalid flags {0}"; + t[791] = "Невалидни флагове {0}"; + t[798] = "Query timeout must be a value greater than or equals to 0."; + t[799] = "Времето за изпълнение на заявката трябва да бъде стойност по-голяма или равна на 0."; + t[802] = "Hint: {0}"; + t[803] = "Забележка: {0}"; + t[810] = "The array index is out of range: {0}, number of elements: {1}."; + t[811] = "Индексът на масив е извън обхвата: {0}, брой елементи: {1}."; + t[812] = "Internal Query: {0}"; + t[813] = "Вътрешна заявка: {0}"; + t[816] = "CommandComplete expected COPY but got: "; + t[817] = "Очаквано командно допълнение COPY но получено: "; + t[824] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; + t[825] = "Невалидна UTF-8 последователност: крайната стойност е заместителна стойност: {0}"; + t[826] = "Unknown type {0}."; + t[827] = "Неизвестен тип {0}."; + t[828] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[829] = "ResultSets с concurrency CONCUR_READ_ONLY не могат да бъдат актуализирани."; + t[830] = "The connection attempt failed."; + t[831] = "Опита за връзка бе неуспешен."; + t[834] = "{0} function takes one and only one argument."; + t[835] = "Функцията {0} може да приеме само един единствен аргумент."; + t[838] = "suspend/resume not implemented"; + t[839] = "спиране / започване не се поддържа за момента"; + t[840] = "Error preparing transaction. prepare xid={0}"; + t[841] = "Грешка при подготвяне на транзакция. 
prepare xid={0}"; + t[842] = "The driver currently does not support COPY operations."; + t[843] = "За момента драйвъра не поддържа COPY команди."; + t[852] = "Heuristic commit/rollback not supported. forget xid={0}"; + t[853] = "Евристичен commit или rollback не се поддържа. forget xid={0}"; + t[856] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[857] = "Бяха намерени невалидни данни. Това най-вероятно се дължи на съхранявани данни, съдържащи символи, които са невалидни за набора от знаци при създаване на базата данни. Чест пример за това е съхраняване на 8bit данни в SQL_ASCII бази данни."; + t[858] = "Cannot establish a savepoint in auto-commit mode."; + t[859] = "Не може да се установи savepoint в auto-commit модус."; + t[862] = "The column name {0} was not found in this ResultSet."; + t[863] = "Името на колоната {0} не бе намерено в този ResultSet."; + t[864] = "Prepare called before end. prepare xid={0}, state={1}"; + t[865] = "Prepare извикано преди края. prepare xid={0}, state={1}"; + t[866] = "Unknown Types value."; + t[867] = "Стойност от неизвестен тип."; + t[870] = "Cannot call updateRow() when on the insert row."; + t[871] = "Не може да се изпълни updateRow() метода, когато се намираме при редицата на въвеждане."; + t[876] = "Database connection failed when reading from copy"; + t[877] = "Неосъществена връзка към базата данни при четене от копие"; + t[880] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}"; + t[881] = "Грешка при възстановяване на състоянието преди подготвена транзакция. 
rollback xid={0}, preparedXid={1}, currentXid={2}"; + t[882] = "Can''t use relative move methods while on the insert row."; + t[883] = "Не може да се използват относителни методи за движение, когато се намираме при редицата на въвеждане."; + t[884] = "free() was called on this LOB previously"; + t[885] = "Функцията free() бе вече извикана за този голям обект LOB"; + t[888] = "A CallableStatement was executed with nothing returned."; + t[889] = "CallableStatement функция бе обработена, но няма резултати."; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 445) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 443) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 890) + idx -= 890; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + @Override + public Enumeration getKeys () { + return + new Enumeration<>() { + private int idx = 0; + { while (idx < 890 && table[idx] == null) idx += 2; } + @Override + public boolean hasMoreElements () { + return (idx < 890); + } + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; + while (idx < 890 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_cs.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_cs.java new file mode 100644 index 0000000..f617acf --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_cs.java @@ -0,0 +1,236 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_cs 
extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[314]; + t[0] = ""; + t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2005-08-21 20:00+0200\nLast-Translator: Petr Dittrich \nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n"; + t[2] = "A connection could not be made using the requested protocol {0}."; + t[3] = "Spojení nelze vytvořit s použitím žádaného protokolu {0}."; + t[4] = "Malformed function or procedure escape syntax at offset {0}."; + t[5] = "Poškozená funkce nebo opuštění procedury na pozici {0}."; + t[8] = "Cannot cast an instance of {0} to type {1}"; + t[9] = "Nemohu přetypovat instanci {0} na typ {1}"; + t[12] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[13] = "ResultSet není aktualizavatelný. Dotaz musí vybírat pouze z jedné tabulky a musí obsahovat všechny primární klíče tabulky. 
Koukni do JDBC 2.1 API Specifikace, sekce 5.6 pro více podrobností."; + t[14] = "The JVM claims not to support the {0} encoding."; + t[15] = "JVM tvrdí, že nepodporuje kodování {0}."; + t[16] = "An I/O error occurred while sending to the backend."; + t[17] = "Vystupně/výstupní chyba při odesílání k backend."; + t[18] = "Statement has been closed."; + t[19] = "Statement byl uzavřen."; + t[20] = "Unknown Types value."; + t[21] = "Neznámá hodnota typu."; + t[22] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[23] = "ResultSets se souběžností CONCUR_READ_ONLY nemůže být aktualizováno"; + t[26] = "You must specify at least one column value to insert a row."; + t[27] = "Musíte vyplnit alespoň jeden sloupec pro vložení řádku."; + t[32] = "No primary key found for table {0}."; + t[33] = "Nenalezen primární klíč pro tabulku {0}."; + t[34] = "Cannot establish a savepoint in auto-commit mode."; + t[35] = "Nemohu vytvořit savepoint v auto-commit modu."; + t[38] = "Can''t use relative move methods while on the insert row."; + t[39] = "Nemůžete používat relativní přesuny při vkládání řádku."; + t[44] = "The column name {0} was not found in this ResultSet."; + t[45] = "Sloupec pojmenovaný {0} nebyl nalezen v ResultSet."; + t[46] = "This statement has been closed."; + t[47] = "Příkaz byl uzavřen."; + t[48] = "The SSLSocketFactory class provided {0} could not be instantiated."; + t[49] = "Třída SSLSocketFactory poskytla {0} což nemůže být instancionizováno."; + t[50] = "Multiple ResultSets were returned by the query."; + t[51] = "Vícenásobný ResultSet byl vrácen dotazem."; + t[52] = "DataSource has been closed."; + t[53] = "DataSource byl uzavřen."; + t[56] = "Error loading default settings from driverconfig.properties"; + t[57] = "Chyba načítání standardního nastavení z driverconfig.properties"; + t[62] = "Bad value for type {0} : {1}"; + t[63] = "Špatná hodnota pro typ {0} : {1}"; + t[66] = "Method {0} is not yet implemented."; + t[67] = "Metoda {0} není 
implementována."; + t[68] = "The array index is out of range: {0}"; + t[69] = "Index pole mimo rozsah: {0}"; + t[70] = "Unexpected command status: {0}."; + t[71] = "Neočekávaný stav příkazu: {0}."; + t[74] = "Expected command status BEGIN, got {0}."; + t[75] = "Očekáván příkaz BEGIN, obdržen {0}."; + t[76] = "Cannot retrieve the id of a named savepoint."; + t[77] = "Nemohu získat id nepojmenovaného savepointu."; + t[78] = "Unexpected error writing large object to database."; + t[79] = "Neočekávaná chyba při zapisování velkého objektu do databáze."; + t[84] = "Not on the insert row."; + t[85] = "Ne na vkládaném řádku."; + t[86] = "Returning autogenerated keys is not supported."; + t[87] = "Vrácení automaticky generovaných klíčů není podporováno."; + t[88] = "The server requested password-based authentication, but no password was provided."; + t[89] = "Server vyžaduje ověření heslem, ale žádné nebylo posláno."; + t[98] = "Unable to load the class {0} responsible for the datatype {1}"; + t[99] = "Nemohu načíst třídu {0} odpovědnou za typ {1}"; + t[100] = "Invalid fetch direction constant: {0}."; + t[101] = "Špatný směr čtení: {0}."; + t[102] = "Conversion of money failed."; + t[103] = "Převod peněz selhal."; + t[104] = "Connection has been closed."; + t[105] = "Spojeni bylo uzavřeno."; + t[106] = "Cannot retrieve the name of an unnamed savepoint."; + t[107] = "Nemohu získat název nepojmenovaného savepointu."; + t[108] = "Large Objects may not be used in auto-commit mode."; + t[109] = "Velké objecky nemohou být použity v auto-commit modu."; + t[110] = "This ResultSet is closed."; + t[111] = "Tento ResultSet je uzavřený."; + t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[117] = "Něco neobvyklého přinutilo ovladač selhat. 
Prosím nahlaste tuto vyjímku."; + t[118] = "The server does not support SSL."; + t[119] = "Server nepodporuje SSL."; + t[120] = "Invalid stream length {0}."; + t[121] = "Vadná délka proudu {0}."; + t[126] = "The maximum field size must be a value greater than or equal to 0."; + t[127] = "Maximální velikost pole musí být nezáporné číslo."; + t[130] = "Cannot call updateRow() when on the insert row."; + t[131] = "Nemohu volat updateRow() na vlkádaném řádku."; + t[132] = "A CallableStatement was executed with nothing returned."; + t[133] = "CallableStatement byl spuštěn, leč nic nebylo vráceno."; + t[134] = "Provided Reader failed."; + t[135] = "Selhal poskytnutý Reader."; + t[146] = "Cannot call deleteRow() when on the insert row."; + t[147] = "Nemůžete volat deleteRow() při vkládání řádku."; + t[156] = "Where: {0}"; + t[157] = "Kde: {0}"; + t[158] = "An unexpected result was returned by a query."; + t[159] = "Obdržen neočekávaný výsledek dotazu."; + t[160] = "The connection attempt failed."; + t[161] = "Pokus o připojení selhal."; + t[162] = "Too many update results were returned."; + t[163] = "Bylo vráceno příliš mnoho výsledků aktualizací."; + t[164] = "Unknown type {0}."; + t[165] = "Neznámý typ {0}."; + t[166] = "{0} function takes two and only two arguments."; + t[167] = "Funkce {0} bere právě dva argumenty."; + t[168] = "{0} function doesn''t take any argument."; + t[169] = "Funkce {0} nebere žádný argument."; + t[172] = "Unable to find name datatype in the system catalogs."; + t[173] = "Nemohu najít název typu v systémovém katalogu."; + t[174] = "Protocol error. Session setup failed."; + t[175] = "Chyba protokolu. Nastavení relace selhalo."; + t[176] = "{0} function takes one and only one argument."; + t[177] = "Funkce {0} bere jeden argument."; + t[186] = "The driver currently does not support COPY operations."; + t[187] = "Ovladač nyní nepodporuje příkaz COPY."; + t[190] = "Invalid character data was found. 
This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[191] = "Nalezena vada ve znakových datech. Toto může být způsobeno uloženými daty obsahujícími znaky, které jsou závadné pro znakovou sadu nastavenou při zakládání databáze. Nejznámejší příklad je ukládání 8bitových dat vSQL_ASCII databázi."; + t[196] = "Fetch size must be a value greater to or equal to 0."; + t[197] = "Nabraná velikost musí být nezáporná."; + t[204] = "Unsupported Types value: {0}"; + t[205] = "Nepodporovaná hodnota typu: {0}"; + t[206] = "Can''t refresh the insert row."; + t[207] = "Nemohu obnovit vkládaný řádek."; + t[210] = "Maximum number of rows must be a value grater than or equal to 0."; + t[211] = "Maximální počet řádek musí být nezáporné číslo."; + t[216] = "No value specified for parameter {0}."; + t[217] = "Nespecifikována hodnota parametru {0}."; + t[218] = "The array index is out of range: {0}, number of elements: {1}."; + t[219] = "Index pole mimo rozsah: {0}, počet prvků: {1}."; + t[220] = "Provided InputStream failed."; + t[221] = "Selhal poskytnutý InputStream."; + t[228] = "Cannot reference a savepoint after it has been released."; + t[229] = "Nemohu získat odkaz na savepoint, když byl uvolněn."; + t[232] = "An error occurred while setting up the SSL connection."; + t[233] = "Nastala chyba při nastavení SSL spojení."; + t[246] = "Detail: {0}"; + t[247] = "Detail: {0}"; + t[248] = "This PooledConnection has already been closed."; + t[249] = "Tento PooledConnection byl uzavřen."; + t[250] = "A result was returned when none was expected."; + t[251] = "Obdržen výsledek, ikdyž žádný nebyl očekáván."; + t[254] = "The JVM claims not to support the encoding: {0}"; + t[255] = "JVM tvrdí, že nepodporuje kodování: {0}"; + t[256] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[257] = "Index parametru 
mimo rozsah: {0}, počet parametrů {1}."; + t[258] = "LOB positioning offsets start at 1."; + t[259] = "Začátek pozicování LOB začína na 1."; + t[260] = "{0} function takes two or three arguments."; + t[261] = "Funkce {0} bere dva nebo tři argumenty."; + t[262] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[263] = "Právě jste za pozicí konce ResultSetu. Zde nemůžete volat deleteRow().s"; + t[266] = "Server SQLState: {0}"; + t[267] = "Server SQLState: {0}"; + t[270] = "{0} function takes four and only four argument."; + t[271] = "Funkce {0} bere přesně čtyři argumenty."; + t[272] = "Failed to create object for: {0}."; + t[273] = "Selhalo vytvoření objektu: {0}."; + t[274] = "No results were returned by the query."; + t[275] = "Neobdržen žádný výsledek dotazu."; + t[276] = "Position: {0}"; + t[277] = "Pozice: {0}"; + t[278] = "The column index is out of range: {0}, number of columns: {1}."; + t[279] = "Index sloupece je mimo rozsah: {0}, počet sloupců: {1}."; + t[280] = "Unknown Response Type {0}."; + t[281] = "Neznámý typ odpovědi {0}."; + t[284] = "Hint: {0}"; + t[285] = "Rada: {0}"; + t[286] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[287] = "Poloha: Soubor: {0}, Rutina: {1}, Řádek: {2}"; + t[288] = "Query timeout must be a value greater than or equals to 0."; + t[289] = "Časový limit dotazu musí být nezáporné číslo."; + t[292] = "Unable to translate data into the desired encoding."; + t[293] = "Nemohu přeložit data do požadovaného kódování."; + t[296] = "Cannot call cancelRowUpdates() when on the insert row."; + t[297] = "Nemůžete volat cancelRowUpdates() při vkládání řádku."; + t[298] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[299] = "Ověření typu {0} není podporováno. 
Zkontrolujte zda konfigurační soubor pg_hba.conf obsahuje klientskou IP adresu či podsíť a zda je použité ověřenovací schéma podporováno ovladačem."; + t[308] = "There are no rows in this ResultSet."; + t[309] = "Žádný řádek v ResultSet."; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 157) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 155) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 314) + idx -= 314; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return + new Enumeration<>() { + private int idx = 0; + { while (idx < 314 && table[idx] == null) idx += 2; } + @Override + public boolean hasMoreElements () { + return (idx < 314); + } + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 314 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_de.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_de.java new file mode 100644 index 0000000..c268e85 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_de.java @@ -0,0 +1,343 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_de extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[794]; + t[0] = ""; + t[1] = "Project-Id-Version: head-de\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-09-12 14:22+0200\nLast-Translator: Andre Bialojahn \nLanguage-Team: 
Deutsch\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.0.2\nX-Poedit-Language: German\nX-Poedit-Country: GERMANY\n"; + t[4] = "DataSource has been closed."; + t[5] = "Die Datenquelle wurde geschlossen."; + t[18] = "Where: {0}"; + t[19] = "Wobei: {0}"; + t[26] = "The connection attempt failed."; + t[27] = "Der Verbindungsversuch schlug fehl."; + t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[29] = "Die augenblickliche Position ist hinter dem Ende des ResultSets. Dort kann ''deleteRow()'' nicht aufgerufen werden."; + t[36] = "Multiple ResultSets were returned by the query."; + t[37] = "Die Abfrage ergab mehrere ResultSets."; + t[50] = "Too many update results were returned."; + t[51] = "Zu viele Updateergebnisse wurden zurückgegeben."; + t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; + t[59] = "Ungültige UTF-8-Sequenz: das erste Byte ist {0}: {1}"; + t[66] = "The column name {0} was not found in this ResultSet."; + t[67] = "Der Spaltenname {0} wurde in diesem ResultSet nicht gefunden."; + t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[71] = "Der Fastpath-Aufruf {0} gab kein Ergebnis zurück, jedoch wurde ein Integer erwartet."; + t[74] = "Protocol error. Session setup failed."; + t[75] = "Protokollfehler. 
Die Sitzung konnte nicht gestartet werden."; + t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[77] = "Ein CallableStatement wurde deklariert, aber kein Aufruf von ''registerOutParameter(1, )'' erfolgte."; + t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[79] = "ResultSets, deren Zugriffsart CONCUR_READ_ONLY ist, können nicht aktualisiert werden."; + t[90] = "LOB positioning offsets start at 1."; + t[91] = "Positionsoffsets für LOBs beginnen bei 1."; + t[92] = "Internal Position: {0}"; + t[93] = "Interne Position: {0}"; + t[96] = "free() was called on this LOB previously"; + t[97] = "free() wurde bereits für dieses LOB aufgerufen."; + t[100] = "Cannot change transaction read-only property in the middle of a transaction."; + t[101] = "Die Nur-Lesen-Eigenschaft einer Transaktion kann nicht während der Transaktion verändert werden."; + t[102] = "The JVM claims not to support the {0} encoding."; + t[103] = "Die JVM behauptet, die Zeichenkodierung {0} nicht zu unterstützen."; + t[108] = "{0} function doesn''t take any argument."; + t[109] = "Die {0}-Funktion akzeptiert kein Argument."; + t[112] = "xid must not be null"; + t[113] = "Die xid darf nicht null sein."; + t[114] = "Connection has been closed."; + t[115] = "Die Verbindung wurde geschlossen."; + t[122] = "The server does not support SSL."; + t[123] = "Der Server unterstützt SSL nicht."; + t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; + t[141] = "Ungültige UTF-8-Sequenz: Byte {0} der {1} Bytesequenz ist nicht 10xxxxxx: {2}"; + t[148] = "Hint: {0}"; + t[149] = "Hinweis: {0}"; + t[152] = "Unable to find name datatype in the system catalogs."; + t[153] = "In den Systemkatalogen konnte der Namensdatentyp nicht gefunden werden."; + t[156] = "Unsupported Types value: {0}"; + t[157] = "Unbekannter Typ: {0}."; + t[158] = "Unknown type {0}."; + t[159] = "Unbekannter Typ {0}."; + t[166] = "{0} function 
takes two and only two arguments."; + t[167] = "Die {0}-Funktion erwartet genau zwei Argumente."; + t[170] = "Finalizing a Connection that was never closed:"; + t[171] = "Eine Connection wurde finalisiert, die nie geschlossen wurde:"; + t[180] = "The maximum field size must be a value greater than or equal to 0."; + t[181] = "Die maximale Feldgröße muss ein Wert größer oder gleich Null sein."; + t[186] = "PostgreSQL LOBs can only index to: {0}"; + t[187] = "LOBs in PostgreSQL können nur auf {0} verweisen."; + t[194] = "Method {0} is not yet implemented."; + t[195] = "Die Methode {0} ist noch nicht implementiert."; + t[198] = "Error loading default settings from driverconfig.properties"; + t[199] = "Fehler beim Laden der Voreinstellungen aus driverconfig.properties"; + t[200] = "Results cannot be retrieved from a CallableStatement before it is executed."; + t[201] = "Ergebnisse können nicht von einem CallableStatement abgerufen werden, bevor es ausgeführt wurde."; + t[202] = "Large Objects may not be used in auto-commit mode."; + t[203] = "LargeObjects (LOB) dürfen im Modus ''auto-commit'' nicht verwendet werden."; + t[208] = "Expected command status BEGIN, got {0}."; + t[209] = "Statt des erwarteten Befehlsstatus BEGIN, wurde {0} empfangen."; + t[218] = "Invalid fetch direction constant: {0}."; + t[219] = "Unzulässige Richtungskonstante bei fetch: {0}."; + t[222] = "{0} function takes three and only three arguments."; + t[223] = "Die {0}-Funktion erwartet genau drei Argumente."; + t[226] = "Error during recover"; + t[227] = "Beim Wiederherstellen trat ein Fehler auf."; + t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; + t[229] = "Das ResultSet kann nicht aktualisiert werden, da es entweder vor oder nach dem Ende der Ergebnisse ist."; + t[230] = "The JVM claims not to support the encoding: {0}"; + t[231] = "Die JVM behauptet, die Zeichenkodierung {0} nicht zu unterstützen."; + t[232] = "Parameter of type 
{0} was registered, but call to get{1} (sqltype={2}) was made."; + t[233] = "Ein Parameter des Typs {0} wurde registriert, jedoch erfolgte ein Aufruf get{1} (sqltype={2})."; + t[240] = "Cannot establish a savepoint in auto-commit mode."; + t[241] = "Ein Rettungspunkt kann im Modus ''auto-commit'' nicht erstellt werden."; + t[242] = "Cannot retrieve the id of a named savepoint."; + t[243] = "Die ID eines benamten Rettungspunktes kann nicht ermittelt werden."; + t[244] = "The column index is out of range: {0}, number of columns: {1}."; + t[245] = "Der Spaltenindex {0} ist außerhalb des gültigen Bereichs. Anzahl Spalten: {1}."; + t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[251] = "Etwas Ungewöhnliches ist passiert, das den Treiber fehlschlagen ließ. Bitte teilen Sie diesen Fehler mit."; + t[260] = "Cannot cast an instance of {0} to type {1}"; + t[261] = "Die Typwandlung für eine Instanz von {0} nach {1} ist nicht möglich."; + t[264] = "Unknown Types value."; + t[265] = "Unbekannter Typ."; + t[266] = "Invalid stream length {0}."; + t[267] = "Ungültige Länge des Datenstroms: {0}."; + t[272] = "Cannot retrieve the name of an unnamed savepoint."; + t[273] = "Der Name eines namenlosen Rettungpunktes kann nicht ermittelt werden."; + t[274] = "Unable to translate data into the desired encoding."; + t[275] = "Die Daten konnten nicht in die gewünschte Kodierung gewandelt werden."; + t[276] = "Expected an EOF from server, got: {0}"; + t[277] = "Vom Server wurde ein EOF erwartet, jedoch {0} gelesen."; + t[278] = "Bad value for type {0} : {1}"; + t[279] = "Unzulässiger Wert für den Typ {0} : {1}."; + t[280] = "The server requested password-based authentication, but no password was provided."; + t[281] = "Der Server verlangt passwortbasierte Authentifizierung, jedoch wurde kein Passwort angegeben."; + t[296] = "Truncation of large objects is only implemented in 8.3 and later servers."; + t[297] = "Das Abschneiden großer 
Objekte ist nur in Versionen nach 8.3 implementiert."; + t[298] = "This PooledConnection has already been closed."; + t[299] = "Diese PooledConnection ist bereits geschlossen worden."; + t[302] = "ClientInfo property not supported."; + t[303] = "Die ClientInfo-Eigenschaft ist nicht unterstützt."; + t[306] = "Fetch size must be a value greater to or equal to 0."; + t[307] = "Die Fetch-Größe muss ein Wert größer oder gleich Null sein."; + t[312] = "A connection could not be made using the requested protocol {0}."; + t[313] = "Es konnte keine Verbindung unter Verwendung des Protokolls {0} hergestellt werden."; + t[322] = "There are no rows in this ResultSet."; + t[323] = "Es gibt keine Zeilen in diesem ResultSet."; + t[324] = "Unexpected command status: {0}."; + t[325] = "Unerwarteter Befehlsstatus: {0}."; + t[334] = "Not on the insert row."; + t[335] = "Nicht in der Einfügezeile."; + t[344] = "Server SQLState: {0}"; + t[345] = "Server SQLState: {0}"; + t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; + t[349] = "Der standard_conforming_strings Parameter des Servers steht auf {0}. Der JDBC-Treiber erwartete on oder off."; + t[360] = "The driver currently does not support COPY operations."; + t[361] = "Der Treiber unterstützt derzeit keine COPY-Operationen."; + t[364] = "The array index is out of range: {0}, number of elements: {1}."; + t[365] = "Der Arrayindex {0} ist außerhalb des gültigen Bereichs. 
Vorhandene Elemente: {1}."; + t[374] = "suspend/resume not implemented"; + t[375] = "Anhalten/Fortsetzen ist nicht implementiert."; + t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; + t[379] = "Nicht implementiert: Die einphasige Bestätigung muss über die selbe Verbindung abgewickelt werden, die verwendet wurde, um sie zu beginnen."; + t[398] = "Cannot call cancelRowUpdates() when on the insert row."; + t[399] = "''cancelRowUpdates()'' kann in der Einfügezeile nicht aufgerufen werden."; + t[400] = "Cannot reference a savepoint after it has been released."; + t[401] = "Ein Rettungspunkt kann nicht angesprochen werden, nach dem er entfernt wurde."; + t[402] = "You must specify at least one column value to insert a row."; + t[403] = "Sie müssen mindestens einen Spaltenwert angeben, um eine Zeile einzufügen."; + t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; + t[405] = "Es konnte kein Wert für MaxIndexKeys gefunden werden, da die Systemkatalogdaten fehlen."; + t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}"; + t[413] = "Ungültige UTF-8-Sequenz: Der letzte Wert ist außerhalb des zulässigen Bereichs: {0}"; + t[414] = "{0} function takes two or three arguments."; + t[415] = "Die {0}-Funktion erwartet zwei oder drei Argumente."; + t[440] = "Unexpected error writing large object to database."; + t[441] = "Beim Schreiben eines LargeObjects (LOB) in die Datenbank trat ein unerwarteter Fehler auf."; + t[442] = "Zero bytes may not occur in string parameters."; + t[443] = "Stringparameter dürfen keine Nullbytes enthalten."; + t[444] = "A result was returned when none was expected."; + t[445] = "Die Anweisung lieferte ein Ergebnis obwohl keines erwartet wurde."; + t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. 
See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[451] = "Das ResultSet kann nicht aktualisiert werden. Die Abfrage, die es erzeugte, darf nur eine Tabelle und muss darin alle Primärschlüssel auswählen. Siehe JDBC 2.1 API-Spezifikation, Abschnitt 5.6 für mehr Details."; + t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; + t[455] = "Die Nachrichtenlänge {0} ist zu groß. Das kann von sehr großen oder inkorrekten Längenangaben eines InputStream-Parameters herrühren."; + t[460] = "Statement has been closed."; + t[461] = "Die Anweisung wurde geschlossen."; + t[462] = "No value specified for parameter {0}."; + t[463] = "Für den Parameter {0} wurde kein Wert angegeben."; + t[468] = "The array index is out of range: {0}"; + t[469] = "Der Arrayindex ist außerhalb des gültigen Bereichs: {0}."; + t[474] = "Unable to bind parameter values for statement."; + t[475] = "Der Anweisung konnten keine Parameterwerte zugewiesen werden."; + t[476] = "Can''t refresh the insert row."; + t[477] = "Die Einfügezeile kann nicht aufgefrischt werden."; + t[480] = "No primary key found for table {0}."; + t[481] = "Für die Tabelle {0} konnte kein Primärschlüssel gefunden werden."; + t[482] = "Cannot change transaction isolation level in the middle of a transaction."; + t[483] = "Die Transaktions-Trennungsstufe kann nicht während einer Transaktion verändert werden."; + t[498] = "Provided InputStream failed."; + t[499] = "Der bereitgestellte InputStream scheiterte."; + t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[501] = "Der Parameterindex {0} ist außerhalb des gültigen Bereichs. Es gibt {1} Parameter."; + t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; + t[503] = "Der Parameter ''Date Style'' wurde auf dem Server auf {0} verändert. 
Der JDBC-Treiber setzt für korrekte Funktion voraus, dass ''Date Style'' mit ''ISO'' beginnt."; + t[508] = "Connection attempt timed out."; + t[509] = "Keine Verbindung innerhalb des Zeitintervalls möglich."; + t[512] = "Internal Query: {0}"; + t[513] = "Interne Abfrage: {0}"; + t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[519] = "Der Authentifizierungstyp {0} wird nicht unterstützt. Stellen Sie sicher, dass die Datei ''pg_hba.conf'' die IP-Adresse oder das Subnetz des Clients enthält und dass der Client ein Authentifizierungsschema nutzt, das vom Treiber unterstützt wird."; + t[526] = "Interval {0} not yet implemented"; + t[527] = "Intervall {0} ist noch nicht implementiert."; + t[532] = "Conversion of interval failed"; + t[533] = "Die Umwandlung eines Intervalls schlug fehl."; + t[540] = "Query timeout must be a value greater than or equals to 0."; + t[541] = "Das Abfragetimeout muss ein Wert größer oder gleich Null sein."; + t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[543] = "Die Verbindung wurde automatisch geschlossen, da entweder eine neue Verbindung für die gleiche PooledConnection geöffnet wurde, oder die PooledConnection geschlossen worden ist.."; + t[544] = "ResultSet not positioned properly, perhaps you need to call next."; + t[545] = "Das ResultSet ist nicht richtig positioniert. Eventuell muss ''next'' aufgerufen werden."; + t[550] = "This statement has been closed."; + t[551] = "Die Anweisung wurde geschlossen."; + t[552] = "Can''t infer the SQL type to use for an instance of {0}. 
Use setObject() with an explicit Types value to specify the type to use."; + t[553] = "Der in SQL für eine Instanz von {0} zu verwendende Datentyp kann nicht abgeleitet werden. Benutzen Sie ''setObject()'' mit einem expliziten Typ, um ihn festzulegen."; + t[554] = "Cannot call updateRow() when on the insert row."; + t[555] = "''updateRow()'' kann in der Einfügezeile nicht aufgerufen werden."; + t[562] = "Detail: {0}"; + t[563] = "Detail: {0}"; + t[566] = "Cannot call deleteRow() when on the insert row."; + t[567] = "''deleteRow()'' kann in der Einfügezeile nicht aufgerufen werden."; + t[568] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; + t[569] = "Die augenblickliche Position ist vor dem Beginn des ResultSets. Dort kann ''deleteRow()'' nicht aufgerufen werden."; + t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; + t[577] = "Ungültige UTF-8-Sequenz: der letzte Wert ist ein Ersatzwert: {0}"; + t[578] = "Unknown Response Type {0}."; + t[579] = "Die Antwort weist einen unbekannten Typ auf: {0}."; + t[582] = "Unsupported value for stringtype parameter: {0}"; + t[583] = "Nichtunterstützter Wert für den Stringparameter: {0}"; + t[584] = "Conversion to type {0} failed: {1}."; + t[585] = "Die Umwandlung in den Typ {0} schlug fehl: {1}."; + t[586] = "Conversion of money failed."; + t[587] = "Die Umwandlung eines Währungsbetrags schlug fehl."; + t[600] = "Unable to load the class {0} responsible for the datatype {1}"; + t[601] = "Die für den Datentyp {1} verantwortliche Klasse {0} konnte nicht geladen werden."; + t[604] = "The fastpath function {0} is unknown."; + t[605] = "Die Fastpath-Funktion {0} ist unbekannt."; + t[608] = "Malformed function or procedure escape syntax at offset {0}."; + t[609] = "Unzulässige Syntax für ein Funktions- oder Prozedur-Escape an Offset {0}."; + t[612] = "Provided Reader failed."; + t[613] = "Der bereitgestellte Reader scheiterte."; + t[614] = "Maximum number of rows 
must be a value grater than or equal to 0."; + t[615] = "Die maximale Zeilenzahl muss ein Wert größer oder gleich Null sein."; + t[616] = "Failed to create object for: {0}."; + t[617] = "Erstellung des Objektes schlug fehl für: {0}."; + t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[623] = "Vorzeitiges Ende des Eingabedatenstroms. Es wurden {0} Bytes erwartet, jedoch nur {1} gelesen."; + t[626] = "An unexpected result was returned by a query."; + t[627] = "Eine Abfrage lieferte ein unerwartetes Resultat."; + t[646] = "An error occurred while setting up the SSL connection."; + t[647] = "Beim Aufbau der SSL-Verbindung trat ein Fehler auf."; + t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; + t[655] = "Ungültige UTF-8-Sequenz: {0} Bytes wurden verwendet um einen {1} Bytewert zu kodieren: {2}"; + t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; + t[659] = "Die von {0} bereitgestellte SSLSocketFactory-Klasse konnte nicht instanziiert werden."; + t[670] = "Position: {0}"; + t[671] = "Position: {0}"; + t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[677] = "Ort: Datei: {0}, Routine: {1}, Zeile: {2}."; + t[684] = "Cannot tell if path is open or closed: {0}."; + t[685] = "Es konnte nicht ermittelt werden, ob der Pfad offen oder geschlossen ist: {0}."; + t[700] = "Cannot convert an instance of {0} to type {1}"; + t[701] = "Die Typwandlung für eine Instanz von {0} nach {1} ist nicht möglich."; + t[710] = "{0} function takes four and only four argument."; + t[711] = "Die {0}-Funktion erwartet genau vier Argumente."; + t[718] = "Interrupted while attempting to connect."; + t[719] = "Beim Verbindungsversuch trat eine Unterbrechung auf."; + t[722] = "Your security policy has prevented the connection from being attempted. 
You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; + t[723] = "Ihre Sicherheitsrichtlinie hat den Versuch des Verbindungsaufbaus verhindert. Sie müssen wahrscheinlich der Verbindung zum Datenbankrechner java.net.SocketPermission gewähren, um den Rechner auf dem gewählten Port zu erreichen."; + t[736] = "{0} function takes one and only one argument."; + t[737] = "Die {0}-Funktion erwartet nur genau ein Argument."; + t[744] = "This ResultSet is closed."; + t[745] = "Dieses ResultSet ist geschlossen."; + t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[747] = "Ungültige Zeichendaten. Das ist höchstwahrscheinlich von in der Datenbank gespeicherten Zeichen hervorgerufen, die in einer anderen Kodierung vorliegen, als die, in der die Datenbank erstellt wurde. Das häufigste Beispiel dafür ist es, 8Bit-Daten in SQL_ASCII-Datenbanken abzulegen."; + t[752] = "Error disabling autocommit"; + t[753] = "Fehler beim Abschalten von Autocommit."; + t[754] = "Ran out of memory retrieving query results."; + t[755] = "Nicht genügend Speicher beim Abholen der Abfrageergebnisse."; + t[756] = "Returning autogenerated keys is not supported."; + t[757] = "Die Rückgabe automatisch generierter Schlüssel wird nicht unterstützt,"; + t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; + t[761] = "Die Operation erfordert ein scrollbares ResultSet, dieses jedoch ist FORWARD_ONLY."; + t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; + t[763] = "Eine CallableStatement-Funktion wurde ausgeführt und der Rückgabewert {0} war vom Typ {1}. 
Jedoch wurde der Typ {2} dafür registriert."; + t[768] = "Unknown ResultSet holdability setting: {0}."; + t[769] = "Unbekannte Einstellung für die Haltbarkeit des ResultSets: {0}."; + t[772] = "Transaction isolation level {0} not supported."; + t[773] = "Die Transaktions-Trennungsstufe {0} ist nicht unterstützt."; + t[774] = "Zero bytes may not occur in identifiers."; + t[775] = "Nullbytes dürfen in Bezeichnern nicht vorkommen."; + t[776] = "No results were returned by the query."; + t[777] = "Die Abfrage lieferte kein Ergebnis."; + t[778] = "A CallableStatement was executed with nothing returned."; + t[779] = "Ein CallableStatement wurde ausgeführt ohne etwas zurückzugeben."; + t[780] = "wasNull cannot be call before fetching a result."; + t[781] = "wasNull kann nicht aufgerufen werden, bevor ein Ergebnis abgefragt wurde."; + t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; + t[787] = "Diese Anweisung deklariert keinen OUT-Parameter. Benutzen Sie '{' ?= call ... 
'}' um das zu tun."; + t[788] = "Can''t use relative move methods while on the insert row."; + t[789] = "Relative Bewegungen können in der Einfügezeile nicht durchgeführt werden."; + t[790] = "A CallableStatement was executed with an invalid number of parameters"; + t[791] = "Ein CallableStatement wurde mit einer falschen Anzahl Parameter ausgeführt."; + t[792] = "Connection is busy with another transaction"; + t[793] = "Die Verbindung ist derzeit mit einer anderen Transaktion beschäftigt."; + table = t; + } + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 397) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 395) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 794) + idx -= 794; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + public Enumeration getKeys () { + return + new Enumeration<>() { + private int idx = 0; + { while (idx < 794 && table[idx] == null) idx += 2; } + + public boolean hasMoreElements () { + return (idx < 794); + } + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 794 && table[idx] == null); + return key.toString(); + } + }; + } + public java.util.ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_es.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_es.java new file mode 100644 index 0000000..eb28e05 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_es.java @@ -0,0 +1,93 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_es extends ResourceBundle { + private static final 
String[] table; + static { + String[] t = new String[74]; + t[0] = ""; + t[1] = "Project-Id-Version: JDBC PostgreSQL Driver\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-22 16:51-0300\nLast-Translator: Diego Gil \nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Spanish\n"; + t[4] = "The column index is out of range: {0}, number of columns: {1}."; + t[5] = "El índice de la columna está fuera de rango: {0}, número de columnas: {1}."; + t[12] = "Unknown Response Type {0}."; + t[13] = "Tipo de respuesta desconocida {0}."; + t[16] = "Protocol error. Session setup failed."; + t[17] = "Error de protocolo. Falló el inicio de la sesión."; + t[20] = "The server requested password-based authentication, but no password was provided."; + t[21] = "El servidor requiere autenticación basada en contraseña, pero no se ha provisto ninguna contraseña."; + t[26] = "A result was returned when none was expected."; + t[27] = "Se retornó un resultado cuando no se esperaba ninguno."; + t[28] = "Server SQLState: {0}"; + t[29] = "SQLState del servidor: {0}."; + t[30] = "The array index is out of range: {0}, number of elements: {1}."; + t[31] = "El índice del arreglo esta fuera de rango: {0}, número de elementos: {1}."; + t[32] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[33] = "Final prematuro del flujo de entrada, se esperaban {0} bytes, pero solo se leyeron {1}."; + t[36] = "The connection attempt failed."; + t[37] = "El intento de conexión falló."; + t[38] = "Failed to create object for: {0}."; + t[39] = "Fallo al crear objeto: {0}."; + t[42] = "An error occurred while setting up the SSL connection."; + t[43] = "Ha ocorrido un error mientras se establecía la conexión SSL."; + t[48] = "No value specified for parameter {0}."; + t[49] = "No se ha especificado un valor para el parámetro {0}."; + t[50] = "The server does not support SSL."; + t[51] = "Este servidor 
no soporta SSL."; + t[52] = "An unexpected result was returned by a query."; + t[53] = "Una consulta retornó un resultado inesperado."; + t[60] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[61] = "Algo inusual ha ocurrido que provocó un fallo en el controlador. Por favor reporte esta excepción."; + t[64] = "No results were returned by the query."; + t[65] = "La consulta no retornó ningún resultado."; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 37) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 35) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 74) + idx -= 74; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return new Enumeration<>() { + private int idx = 0; + { while (idx < 74 && table[idx] == null) idx += 2; } + @Override + public boolean hasMoreElements () { + return (idx < 74); + } + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 74 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_fr.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_fr.java new file mode 100644 index 0000000..a9e5e63 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_fr.java @@ -0,0 +1,347 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_fr extends ResourceBundle { + private static final String[] table; + 
static { + String[] t = new String[794]; + t[0] = ""; + t[1] = "Project-Id-Version: head-fr\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2007-07-27 12:27+0200\nLast-Translator: \nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.11.4\nPlural-Forms: nplurals=2; plural=(n > 1);\n"; + t[4] = "DataSource has been closed."; + t[5] = "DataSource a été fermée."; + t[18] = "Where: {0}"; + t[19] = "Où : {0}"; + t[26] = "The connection attempt failed."; + t[27] = "La tentative de connexion a échoué."; + t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[29] = "Actuellement positionné après la fin du ResultSet. Vous ne pouvez pas appeler deleteRow() ici."; + t[32] = "Can''t use query methods that take a query string on a PreparedStatement."; + t[33] = "Impossible d''utiliser les fonctions de requête qui utilisent une chaîne de caractères sur un PreparedStatement."; + t[36] = "Multiple ResultSets were returned by the query."; + t[37] = "Plusieurs ResultSets ont été retournés par la requête."; + t[50] = "Too many update results were returned."; + t[51] = "Trop de résultats de mise à jour ont été retournés."; + t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; + t[59] = "Séquence UTF-8 illégale: le premier octet est {0}: {1}"; + t[66] = "The column name {0} was not found in this ResultSet."; + t[67] = "Le nom de colonne {0} n''a pas été trouvé dans ce ResultSet."; + t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[71] = "Appel Fastpath {0} - Aucun résultat n''a été retourné et nous attendions un entier."; + t[74] = "Protocol error. Session setup failed."; + t[75] = "Erreur de protocole. 
Ouverture de la session en échec."; + t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[77] = "Un CallableStatement a été déclaré, mais aucun appel à registerOutParameter(1, ) n''a été fait."; + t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[79] = "Les ResultSets avec la concurrence CONCUR_READ_ONLY ne peuvent être mis à jour."; + t[90] = "LOB positioning offsets start at 1."; + t[91] = "Les décalages de position des LOB commencent à 1."; + t[92] = "Internal Position: {0}"; + t[93] = "Position interne : {0}"; + t[96] = "free() was called on this LOB previously"; + t[97] = "free() a été appelée auparavant sur ce LOB"; + t[100] = "Cannot change transaction read-only property in the middle of a transaction."; + t[101] = "Impossible de changer la propriété read-only d''une transaction au milieu d''une transaction."; + t[102] = "The JVM claims not to support the {0} encoding."; + t[103] = "La JVM prétend ne pas supporter l''encodage {0}."; + t[108] = "{0} function doesn''t take any argument."; + t[109] = "La fonction {0} n''accepte aucun argument."; + t[112] = "xid must not be null"; + t[113] = "xid ne doit pas être nul"; + t[114] = "Connection has been closed."; + t[115] = "La connexion a été fermée."; + t[122] = "The server does not support SSL."; + t[123] = "Le serveur ne supporte pas SSL."; + t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; + t[141] = "Séquence UTF-8 illégale: l''octet {0} de la séquence d''octet {1} n''est pas 10xxxxxx: {2}"; + t[148] = "Hint: {0}"; + t[149] = "Indice : {0}"; + t[152] = "Unable to find name datatype in the system catalogs."; + t[153] = "Incapable de trouver le type de donnée name dans les catalogues systèmes."; + t[156] = "Unsupported Types value: {0}"; + t[157] = "Valeur de type non supportée : {0}"; + t[158] = "Unknown type {0}."; + t[159] = "Type inconnu : {0}."; + t[166] = "{0} function takes two and only two 
arguments."; + t[167] = "La fonction {0} n''accepte que deux et seulement deux arguments."; + t[170] = "Finalizing a Connection that was never closed:"; + t[171] = "Destruction d''une connection qui n''a jamais été fermée:"; + t[180] = "The maximum field size must be a value greater than or equal to 0."; + t[181] = "La taille maximum des champs doit être une valeur supérieure ou égale à 0."; + t[186] = "PostgreSQL LOBs can only index to: {0}"; + t[187] = "Les LOB PostgreSQL peuvent seulement s''indicer à: {0}"; + t[194] = "Method {0} is not yet implemented."; + t[195] = "La fonction {0} n''est pas encore implémentée."; + t[198] = "Error loading default settings from driverconfig.properties"; + t[199] = "Erreur de chargement des valeurs par défaut depuis driverconfig.properties"; + t[200] = "Results cannot be retrieved from a CallableStatement before it is executed."; + t[201] = "Les résultats ne peuvent être récupérés à partir d''un CallableStatement avant qu''il ne soit exécuté."; + t[202] = "Large Objects may not be used in auto-commit mode."; + t[203] = "Les Large Objects ne devraient pas être utilisés en mode auto-commit."; + t[208] = "Expected command status BEGIN, got {0}."; + t[209] = "Attendait le statut de commande BEGIN, obtenu {0}."; + t[218] = "Invalid fetch direction constant: {0}."; + t[219] = "Constante de direction pour la récupération invalide : {0}."; + t[222] = "{0} function takes three and only three arguments."; + t[223] = "La fonction {0} n''accepte que trois et seulement trois arguments."; + t[226] = "Error during recover"; + t[227] = "Erreur durant la restauration"; + t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; + t[229] = "Impossible de mettre à jour le ResultSet car c''est soit avant le début ou après la fin des résultats."; + t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; + t[233] = "Un paramètre de type {0} a été enregistré, 
mais un appel à get{1} (sqltype={2}) a été fait."; + t[240] = "Cannot establish a savepoint in auto-commit mode."; + t[241] = "Impossible d''établir un savepoint en mode auto-commit."; + t[242] = "Cannot retrieve the id of a named savepoint."; + t[243] = "Impossible de retrouver l''identifiant d''un savepoint nommé."; + t[244] = "The column index is out of range: {0}, number of columns: {1}."; + t[245] = "L''indice de la colonne est hors limite : {0}, nombre de colonnes : {1}."; + t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[251] = "Quelque chose d''inhabituel a provoqué l''échec du pilote. Veuillez faire un rapport sur cette erreur."; + t[260] = "Cannot cast an instance of {0} to type {1}"; + t[261] = "Impossible de convertir une instance de {0} vers le type {1}"; + t[264] = "Unknown Types value."; + t[265] = "Valeur de Types inconnue."; + t[266] = "Invalid stream length {0}."; + t[267] = "Longueur de flux invalide {0}."; + t[272] = "Cannot retrieve the name of an unnamed savepoint."; + t[273] = "Impossible de retrouver le nom d''un savepoint sans nom."; + t[274] = "Unable to translate data into the desired encoding."; + t[275] = "Impossible de traduire les données dans l''encodage désiré."; + t[276] = "Expected an EOF from server, got: {0}"; + t[277] = "Attendait une fin de fichier du serveur, reçu: {0}"; + t[278] = "Bad value for type {0} : {1}"; + t[279] = "Mauvaise valeur pour le type {0} : {1}"; + t[280] = "The server requested password-based authentication, but no password was provided."; + t[281] = "Le serveur a demandé une authentification par mots de passe, mais aucun mot de passe n''a été fourni."; + t[296] = "Truncation of large objects is only implemented in 8.3 and later servers."; + t[297] = "Le troncage des large objects n''est implémenté que dans les serveurs 8.3 et supérieurs."; + t[298] = "This PooledConnection has already been closed."; + t[299] = "Cette PooledConnection a déjà été 
fermée."; + t[306] = "Fetch size must be a value greater to or equal to 0."; + t[307] = "Fetch size doit être une valeur supérieur ou égal à 0."; + t[312] = "A connection could not be made using the requested protocol {0}."; + t[313] = "Aucune connexion n''a pu être établie en utilisant le protocole demandé {0}. "; + t[322] = "There are no rows in this ResultSet."; + t[323] = "Il n''y pas pas de lignes dans ce ResultSet."; + t[324] = "Unexpected command status: {0}."; + t[325] = "Statut de commande inattendu : {0}."; + t[334] = "Not on the insert row."; + t[335] = "Pas sur la ligne en insertion."; + t[344] = "Server SQLState: {0}"; + t[345] = "SQLState serveur : {0}"; + t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; + t[349] = "Le paramètre serveur standard_conforming_strings a pour valeur {0}. Le driver JDBC attend on ou off."; + t[360] = "The driver currently does not support COPY operations."; + t[361] = "Le pilote ne supporte pas actuellement les opérations COPY."; + t[364] = "The array index is out of range: {0}, number of elements: {1}."; + t[365] = "L''indice du tableau est hors limites : {0}, nombre d''éléments : {1}."; + t[374] = "suspend/resume not implemented"; + t[375] = "suspend/resume pas implémenté"; + t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; + t[379] = "Pas implémenté: le commit à une phase doit avoir lieu en utilisant la même connection que celle où il a commencé"; + t[398] = "Cannot call cancelRowUpdates() when on the insert row."; + t[399] = "Impossible d''appeler cancelRowUpdates() pendant l''insertion d''une ligne."; + t[400] = "Cannot reference a savepoint after it has been released."; + t[401] = "Impossible de référencer un savepoint après qu''il ait été libéré."; + t[402] = "You must specify at least one column value to insert a row."; + t[403] = "Vous devez spécifier au moins une valeur de colonne 
pour insérer une ligne."; + t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; + t[405] = "Incapable de déterminer la valeur de MaxIndexKeys en raison de données manquante dans lecatalogue système."; + t[412] = "The JVM claims not to support the encoding: {0}"; + t[413] = "La JVM prétend ne pas supporter l''encodage: {0}"; + t[414] = "{0} function takes two or three arguments."; + t[415] = "La fonction {0} n''accepte que deux ou trois arguments."; + t[440] = "Unexpected error writing large object to database."; + t[441] = "Erreur inattendue pendant l''écriture de large object dans la base."; + t[442] = "Zero bytes may not occur in string parameters."; + t[443] = "Zéro octets ne devrait pas se produire dans les paramètres de type chaîne de caractères."; + t[444] = "A result was returned when none was expected."; + t[445] = "Un résultat a été retourné alors qu''aucun n''était attendu."; + t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[451] = "Le ResultSet n''est pas modifiable. La requête qui a généré ce résultat doit sélectionner seulement une table, et doit sélectionner toutes les clés primaires de cette table. Voir la spécification de l''API JDBC 2.1, section 5.6 pour plus de détails."; + t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; + t[455] = "La longueur du message de liaison {0} est trop grande. 
Cela peut être causé par des spécification de longueur très grandes ou incorrectes pour les paramètres de type InputStream."; + t[460] = "Statement has been closed."; + t[461] = "Statement a été fermé."; + t[462] = "No value specified for parameter {0}."; + t[463] = "Pas de valeur spécifiée pour le paramètre {0}."; + t[468] = "The array index is out of range: {0}"; + t[469] = "L''indice du tableau est hors limites : {0}"; + t[474] = "Unable to bind parameter values for statement."; + t[475] = "Incapable de lier les valeurs des paramètres pour la commande."; + t[476] = "Can''t refresh the insert row."; + t[477] = "Impossible de rafraîchir la ligne insérée."; + t[480] = "No primary key found for table {0}."; + t[481] = "Pas de clé primaire trouvée pour la table {0}."; + t[482] = "Cannot change transaction isolation level in the middle of a transaction."; + t[483] = "Impossible de changer le niveau d''isolation des transactions au milieu d''une transaction."; + t[498] = "Provided InputStream failed."; + t[499] = "L''InputStream fourni a échoué."; + t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[501] = "L''indice du paramètre est hors limites : {0}, nombre de paramètres : {1}."; + t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; + t[503] = "Le paramètre DateStyle du serveur a été changé pour {0}. Le pilote JDBC nécessite que DateStyle commence par ISO pour un fonctionnement correct."; + t[508] = "Connection attempt timed out."; + t[509] = "La tentative de connexion a échoué dans le délai imparti."; + t[512] = "Internal Query: {0}"; + t[513] = "Requête interne: {0}"; + t[518] = "The authentication type {0} is not supported. 
Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[519] = "Le type d''authentification {0} n''est pas supporté. Vérifiez que vous avez configuré le fichier pg_hba.conf pour inclure l''adresse IP du client ou le sous-réseau et qu''il utilise un schéma d''authentification supporté par le pilote."; + t[526] = "Interval {0} not yet implemented"; + t[527] = "L''interval {0} n''est pas encore implémenté"; + t[532] = "Conversion of interval failed"; + t[533] = "La conversion de l''intervalle a échoué"; + t[540] = "Query timeout must be a value greater than or equals to 0."; + t[541] = "Query timeout doit être une valeur supérieure ou égale à 0."; + t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[543] = "La connexion a été fermée automatiquement car une nouvelle connexion a été ouverte pour la même PooledConnection ou la PooledConnection a été fermée."; + t[544] = "ResultSet not positioned properly, perhaps you need to call next."; + t[545] = "Le ResultSet n''est pas positionné correctement, vous devez peut-être appeler next()."; + t[550] = "This statement has been closed."; + t[551] = "Ce statement a été fermé."; + t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; + t[553] = "Impossible de déduire le type SQL à utiliser pour une instance de {0}. 
Utilisez setObject() avec une valeur de type explicite pour spécifier le type à utiliser."; + t[554] = "Cannot call updateRow() when on the insert row."; + t[555] = "Impossible d''appeler updateRow() tant que l''on est sur la ligne insérée."; + t[562] = "Detail: {0}"; + t[563] = "Détail : {0}"; + t[566] = "Cannot call deleteRow() when on the insert row."; + t[567] = "Impossible d''appeler deleteRow() pendant l''insertion d''une ligne."; + t[568] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; + t[569] = "Actuellement positionné avant le début du ResultSet. Vous ne pouvez pas appeler deleteRow() ici."; + t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; + t[577] = "Séquence UTF-8 illégale: la valeur finale est une valeur de remplacement: {0}"; + t[578] = "Unknown Response Type {0}."; + t[579] = "Type de réponse inconnu {0}."; + t[582] = "Unsupported value for stringtype parameter: {0}"; + t[583] = "Valeur non supportée pour les paramètre de type chaîne de caractères : {0}"; + t[584] = "Conversion to type {0} failed: {1}."; + t[585] = "La conversion vers le type {0} a échoué : {1}."; + t[586] = "Conversion of money failed."; + t[587] = "La conversion de money a échoué."; + t[600] = "Unable to load the class {0} responsible for the datatype {1}"; + t[601] = "Incapable de charger la classe {0} responsable du type de données {1}"; + t[604] = "The fastpath function {0} is unknown."; + t[605] = "La fonction fastpath {0} est inconnue."; + t[608] = "Malformed function or procedure escape syntax at offset {0}."; + t[609] = "Syntaxe de fonction ou d''échappement de procédure malformée à l''indice {0}."; + t[612] = "Provided Reader failed."; + t[613] = "Le Reader fourni a échoué."; + t[614] = "Maximum number of rows must be a value grater than or equal to 0."; + t[615] = "Le nombre maximum de lignes doit être une valeur supérieure ou égale à 0."; + t[616] = "Failed to create object for: {0}."; + t[617] = 
"Échec à la création de l''objet pour : {0}."; + t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[623] = "Fin prématurée du flux en entrée, {0} octets attendus, mais seulement {1} lus."; + t[626] = "An unexpected result was returned by a query."; + t[627] = "Un résultat inattendu a été retourné par une requête."; + t[646] = "An error occurred while setting up the SSL connection."; + t[647] = "Une erreur s''est produite pendant l''établissement de la connexion SSL."; + t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; + t[655] = "Séquence UTF-8 illégale: {0} octets utilisé pour encoder une valeur à {1} octets: {2}"; + t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; + t[659] = "La classe SSLSocketFactory fournie {0} n''a pas pu être instanciée."; + t[670] = "Position: {0}"; + t[671] = "Position : {0}"; + t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[677] = "Localisation : Fichier : {0}, Routine : {1}, Ligne : {2}"; + t[684] = "Cannot tell if path is open or closed: {0}."; + t[685] = "Impossible de dire si path est fermé ou ouvert : {0}."; + t[700] = "Cannot convert an instance of {0} to type {1}"; + t[701] = "Impossible de convertir une instance de type {0} vers le type {1}"; + t[710] = "{0} function takes four and only four argument."; + t[711] = "La fonction {0} n''accepte que quatre et seulement quatre arguments."; + t[718] = "Interrupted while attempting to connect."; + t[719] = "Interrompu pendant l''établissement de la connexion."; + t[722] = "Illegal UTF-8 sequence: final value is out of range: {0}"; + t[723] = "Séquence UTF-8 illégale: la valeur finale est en dehors des limites: {0}"; + t[734] = "No function outputs were registered."; + t[735] = "Aucune fonction outputs n''a été enregistrée."; + t[736] = "{0} function takes one and only one argument."; + t[737] = "La fonction {0} n''accepte qu''un et un seul argument."; + t[744] = "This 
ResultSet is closed."; + t[745] = "Ce ResultSet est fermé."; + t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[747] = "Des données de caractères invalides ont été trouvées. C''est probablement causé par le stockage de caractères invalides pour le jeu de caractères de création de la base. L''exemple le plus courant est le stockage de données 8bit dans une base SQL_ASCII."; + t[750] = "An I/O error occurred while sending to the backend."; + t[751] = "Une erreur d''entrée/sortie a eu lieu lors d''envoi vers le serveur."; + t[752] = "Error disabling autocommit"; + t[753] = "Erreur en désactivant autocommit"; + t[754] = "Ran out of memory retrieving query results."; + t[755] = "Ai manqué de mémoire en récupérant les résultats de la requête."; + t[756] = "Returning autogenerated keys is not supported."; + t[757] = "Le renvoi des clés automatiquement générées n''est pas supporté."; + t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; + t[761] = "L''opération nécessite un scrollable ResultSet, mais ce ResultSet est FORWARD_ONLY."; + t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; + t[763] = "Une fonction CallableStatement a été exécutée et le paramètre en sortie {0} était du type {1} alors que le type {2} était prévu."; + t[768] = "Unknown ResultSet holdability setting: {0}."; + t[769] = "Paramètre holdability du ResultSet inconnu : {0}."; + t[772] = "Transaction isolation level {0} not supported."; + t[773] = "Le niveau d''isolation de transaction {0} n''est pas supporté."; + t[774] = "Zero bytes may not occur in identifiers."; + t[775] = "Des octects à 0 ne devraient pas apparaître dans les identifiants."; + t[776] = "No results were returned 
by the query."; + t[777] = "Aucun résultat retourné par la requête."; + t[778] = "A CallableStatement was executed with nothing returned."; + t[779] = "Un CallableStatement a été exécuté mais n''a rien retourné."; + t[780] = "wasNull cannot be call before fetching a result."; + t[781] = "wasNull ne peut pas être appelé avant la récupération d''un résultat."; + t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; + t[787] = "Cette requête ne déclare pas de paramètre OUT. Utilisez '{' ?= call ... '}' pour en déclarer un."; + t[788] = "Can''t use relative move methods while on the insert row."; + t[789] = "Impossible d''utiliser les fonctions de déplacement relatif pendant l''insertion d''une ligne."; + t[792] = "Connection is busy with another transaction"; + t[793] = "La connection est occupée avec une autre transaction"; + table = t; + } + + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 397) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 395) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 794) + idx -= 794; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return + new Enumeration<>() { + private int idx = 0; + { while (idx < 794 && table[idx] == null) idx += 2; } + @Override + public boolean hasMoreElements () { + return (idx < 794); + } + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 794 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_it.java 
b/pgjdbc/src/main/java/org/postgresql/translation/messages_it.java new file mode 100644 index 0000000..4498eb8 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_it.java @@ -0,0 +1,333 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_it extends ResourceBundle { + private static final String[] table; + static { + java.lang.String[] t = new java.lang.String[794]; + t[0] = ""; + t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.2\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2006-06-23 17:25+0200\nLast-Translator: Giuseppe Sacco \nLanguage-Team: Italian \nLanguage: it\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n"; + t[4] = "DataSource has been closed."; + t[5] = "Questo «DataSource» è stato chiuso."; + t[18] = "Where: {0}"; + t[19] = "Dove: {0}"; + t[26] = "The connection attempt failed."; + t[27] = "Il tentativo di connessione è fallito."; + t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[29] = "La posizione attuale è successiva alla fine del ResultSet. 
Non è possibile invocare «deleteRow()» qui."; + t[32] = "Can''t use query methods that take a query string on a PreparedStatement."; + t[33] = "Non si possono utilizzare i metodi \"query\" che hanno come argomento una stringa nel caso di «PreparedStatement»."; + t[36] = "Multiple ResultSets were returned by the query."; + t[37] = "La query ha restituito «ResultSet» multipli."; + t[50] = "Too many update results were returned."; + t[51] = "Sono stati restituiti troppi aggiornamenti."; + t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; + t[59] = "Sequenza UTF-8 illegale: il byte iniziale è {0}: {1}"; + t[66] = "The column name {0} was not found in this ResultSet."; + t[67] = "Colonna denominata «{0}» non è presente in questo «ResultSet»."; + t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[71] = "Chiamata Fastpath «{0}»: Nessun risultato restituito mentre ci si aspettava un intero."; + t[74] = "Protocol error. Session setup failed."; + t[75] = "Errore di protocollo. 
Impostazione della sessione fallita."; + t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[77] = "È stato definito un «CallableStatement» ma non è stato invocato il metodo «registerOutParameter(1, )»."; + t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[79] = "I «ResultSet» in modalità CONCUR_READ_ONLY non possono essere aggiornati."; + t[90] = "LOB positioning offsets start at 1."; + t[91] = "L''offset per la posizione dei LOB comincia da 1."; + t[92] = "Internal Position: {0}"; + t[93] = "Posizione interna: {0}"; + t[100] = "Cannot change transaction read-only property in the middle of a transaction."; + t[101] = "Non è possibile modificare la proprietà «read-only» delle transazioni nel mezzo di una transazione."; + t[102] = "The JVM claims not to support the {0} encoding."; + t[103] = "La JVM sostiene di non supportare la codifica {0}."; + t[108] = "{0} function doesn''t take any argument."; + t[109] = "Il metodo «{0}» non accetta argomenti."; + t[112] = "xid must not be null"; + t[113] = "xid non può essere NULL"; + t[114] = "Connection has been closed."; + t[115] = "Questo «Connection» è stato chiuso."; + t[122] = "The server does not support SSL."; + t[123] = "Il server non supporta SSL."; + t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; + t[141] = "Sequenza UTF-8 illegale: il byte {0} di una sequenza di {1} byte non è 10xxxxxx: {2}"; + t[148] = "Hint: {0}"; + t[149] = "Suggerimento: {0}"; + t[152] = "Unable to find name datatype in the system catalogs."; + t[153] = "Non è possibile trovare il datatype «name» nel catalogo di sistema."; + t[156] = "Unsupported Types value: {0}"; + t[157] = "Valore di tipo «{0}» non supportato."; + t[158] = "Unknown type {0}."; + t[159] = "Tipo sconosciuto {0}."; + t[166] = "{0} function takes two and only two arguments."; + t[167] = "Il metodo «{0}» accetta due e solo due argomenti."; + t[170] = "Finalizing a 
Connection that was never closed:"; + t[171] = "Finalizzazione di una «Connection» che non è stata chiusa."; + t[186] = "PostgreSQL LOBs can only index to: {0}"; + t[187] = "Il massimo valore per l''indice dei LOB di PostgreSQL è {0}. "; + t[194] = "Method {0} is not yet implemented."; + t[195] = "Il metodo «{0}» non è stato ancora implementato."; + t[198] = "Error loading default settings from driverconfig.properties"; + t[199] = "Si è verificato un errore caricando le impostazioni predefinite da «driverconfig.properties»."; + t[202] = "Large Objects may not be used in auto-commit mode."; + t[203] = "Non è possibile impostare i «Large Object» in modalità «auto-commit»."; + t[208] = "Expected command status BEGIN, got {0}."; + t[209] = "Lo stato del comando avrebbe dovuto essere BEGIN, mentre invece è {0}."; + t[218] = "Invalid fetch direction constant: {0}."; + t[219] = "Costante per la direzione dell''estrazione non valida: {0}."; + t[222] = "{0} function takes three and only three arguments."; + t[223] = "Il metodo «{0}» accetta tre e solo tre argomenti."; + t[226] = "Error during recover"; + t[227] = "Errore durante il ripristino"; + t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; + t[229] = "Non è possibile aggiornare il «ResultSet» perché la posizione attuale è precedente all''inizio o successiva alla file dei risultati."; + t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; + t[233] = "È stato definito il parametro di tipo «{0}», ma poi è stato invocato il metodo «get{1}()» (sqltype={2})."; + t[240] = "Cannot establish a savepoint in auto-commit mode."; + t[241] = "Non è possibile impostare i punti di ripristino in modalità «auto-commit»."; + t[242] = "Cannot retrieve the id of a named savepoint."; + t[243] = "Non è possibile trovare l''id del punto di ripristino indicato."; + t[244] = "The column index is out of range: {0}, number of columns: {1}."; + 
t[245] = "Indice di colonna, {0}, è maggiore del numero di colonne {1}."; + t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[251] = "Qualcosa di insolito si è verificato causando il fallimento del driver. Per favore riferire all''autore del driver questa eccezione."; + t[260] = "Cannot cast an instance of {0} to type {1}"; + t[261] = "Non è possibile fare il cast di una istanza di «{0}» al tipo «{1}»."; + t[264] = "Unknown Types value."; + t[265] = "Valore di tipo sconosciuto."; + t[266] = "Invalid stream length {0}."; + t[267] = "La dimensione specificata, {0}, per lo «stream» non è valida."; + t[272] = "Cannot retrieve the name of an unnamed savepoint."; + t[273] = "Non è possibile trovare il nome di un punto di ripristino anonimo."; + t[274] = "Unable to translate data into the desired encoding."; + t[275] = "Impossibile tradurre i dati nella codifica richiesta."; + t[276] = "Expected an EOF from server, got: {0}"; + t[277] = "Ricevuto dal server «{0}» mentre era atteso un EOF"; + t[278] = "Bad value for type {0} : {1}"; + t[279] = "Il valore «{1}» non è adeguato al tipo «{0}»."; + t[280] = "The server requested password-based authentication, but no password was provided."; + t[281] = "Il server ha richiesto l''autenticazione con password, ma tale password non è stata fornita."; + t[298] = "This PooledConnection has already been closed."; + t[299] = "Questo «PooledConnection» è stato chiuso."; + t[306] = "Fetch size must be a value greater to or equal to 0."; + t[307] = "La dimensione dell''area di «fetch» deve essere maggiore o eguale a 0."; + t[312] = "A connection could not be made using the requested protocol {0}."; + t[313] = "Non è stato possibile attivare la connessione utilizzando il protocollo richiesto {0}."; + t[322] = "There are no rows in this ResultSet."; + t[323] = "Non ci sono righe in questo «ResultSet»."; + t[324] = "Unexpected command status: {0}."; + t[325] = "Stato del comando non 
previsto: {0}."; + t[334] = "Not on the insert row."; + t[335] = "Non si è in una nuova riga."; + t[344] = "Server SQLState: {0}"; + t[345] = "SQLState del server: {0}"; + t[360] = "The driver currently does not support COPY operations."; + t[361] = "Il driver non supporta al momento l''operazione «COPY»."; + t[364] = "The array index is out of range: {0}, number of elements: {1}."; + t[365] = "L''indice dell''array è fuori intervallo: {0}, numero di elementi: {1}."; + t[374] = "suspend/resume not implemented"; + t[375] = "«suspend»/«resume» non implementato"; + t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; + t[379] = "Non implementato: il commit \"one-phase\" deve essere invocato sulla stessa connessione che ha iniziato la transazione."; + t[398] = "Cannot call cancelRowUpdates() when on the insert row."; + t[399] = "Non è possibile invocare «cancelRowUpdates()» durante l''inserimento di una riga."; + t[400] = "Cannot reference a savepoint after it has been released."; + t[401] = "Non è possibile utilizzare un punto di ripristino successivamente al suo rilascio."; + t[402] = "You must specify at least one column value to insert a row."; + t[403] = "Per inserire un record si deve specificare almeno il valore di una colonna."; + t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; + t[405] = "Non è possibile trovare il valore di «MaxIndexKeys» nel catalogo si sistema."; + t[412] = "The JVM claims not to support the encoding: {0}"; + t[413] = "La JVM sostiene di non supportare la codifica: {0}."; + t[414] = "{0} function takes two or three arguments."; + t[415] = "Il metodo «{0}» accetta due o tre argomenti."; + t[440] = "Unexpected error writing large object to database."; + t[441] = "Errore inatteso inviando un «large object» al database."; + t[442] = "Zero bytes may not occur in string parameters."; + t[443] = "Byte con valore zero non possono essere 
contenuti nei parametri stringa."; + t[444] = "A result was returned when none was expected."; + t[445] = "È stato restituito un valore nonostante non ne fosse atteso nessuno."; + t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[451] = "Il «ResultSet» non è aggiornabile. La query che lo genera deve selezionare una sola tabella e deve selezionarne tutti i campi che ne compongono la chiave primaria. Si vedano le specifiche dell''API JDBC 2.1, sezione 5.6, per ulteriori dettagli."; + t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; + t[455] = "Il messaggio di «bind» è troppo lungo ({0}). Questo può essere causato da una dimensione eccessiva o non corretta dei parametri dell''«InputStream»."; + t[460] = "Statement has been closed."; + t[461] = "Questo «Statement» è stato chiuso."; + t[462] = "No value specified for parameter {0}."; + t[463] = "Nessun valore specificato come parametro {0}."; + t[468] = "The array index is out of range: {0}"; + t[469] = "Indice di colonna fuori dall''intervallo ammissibile: {0}"; + t[474] = "Unable to bind parameter values for statement."; + t[475] = "Impossibile fare il «bind» dei valori passati come parametri per lo statement."; + t[476] = "Can''t refresh the insert row."; + t[477] = "Non è possibile aggiornare la riga in inserimento."; + t[480] = "No primary key found for table {0}."; + t[481] = "Non è stata trovata la chiave primaria della tabella «{0}»."; + t[482] = "Cannot change transaction isolation level in the middle of a transaction."; + t[483] = "Non è possibile cambiare il livello di isolamento delle transazioni nel mezzo di una transazione."; + t[498] = "Provided InputStream failed."; + t[499] = "L''«InputStream» fornito è fallito."; + t[500] = "The 
parameter index is out of range: {0}, number of parameters: {1}."; + t[501] = "Il parametro indice è fuori intervallo: {0}, numero di elementi: {1}."; + t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; + t[503] = "Il parametro del server «DateStyle» è stato cambiato in {0}. Il driver JDBC richiede che «DateStyle» cominci con «ISO» per un corretto funzionamento."; + t[508] = "Connection attempt timed out."; + t[509] = "Il tentativo di connessione è scaduto."; + t[512] = "Internal Query: {0}"; + t[513] = "Query interna: {0}"; + t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[519] = "L''autenticazione di tipo {0} non è supportata. Verificare che nel file di configurazione pg_hba.conf sia presente l''indirizzo IP o la sottorete del client, e che lo schema di autenticazione utilizzato sia supportato dal driver."; + t[526] = "Interval {0} not yet implemented"; + t[527] = "L''intervallo «{0}» non è stato ancora implementato."; + t[532] = "Conversion of interval failed"; + t[533] = "Fallita la conversione di un «interval»."; + t[540] = "Query timeout must be a value greater than or equals to 0."; + t[541] = "Il timeout relativo alle query deve essere maggiore o eguale a 0."; + t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[543] = "La «Connection» è stata chiusa automaticamente perché una nuova l''ha sostituita nello stesso «PooledConnection», oppure il «PooledConnection» è stato chiuso."; + t[544] = "ResultSet not positioned properly, perhaps you need to call next."; + t[545] = "Il «ResultSet» non è correttamente posizionato; forse è necessario invocare «next()»."; + t[550] = "This 
statement has been closed."; + t[551] = "Questo statement è stato chiuso."; + t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; + t[553] = "Non è possibile identificare il tipo SQL da usare per l''istanza di tipo «{0}». Usare «setObject()» specificando esplicitamente il tipo da usare per questo valore."; + t[554] = "Cannot call updateRow() when on the insert row."; + t[555] = "Non è possibile invocare «updateRow()» durante l''inserimento di una riga."; + t[562] = "Detail: {0}"; + t[563] = "Dettaglio: {0}"; + t[566] = "Cannot call deleteRow() when on the insert row."; + t[567] = "Non è possibile invocare «deleteRow()» durante l''inserimento di una riga."; + t[568] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; + t[569] = "La posizione attuale è precedente all''inizio del ResultSet. Non è possibile invocare «deleteRow()» qui."; + t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; + t[577] = "Sequenza UTF-8 illegale: il valore è finale è un surrogato: {0}"; + t[578] = "Unknown Response Type {0}."; + t[579] = "Risposta di tipo sconosciuto {0}."; + t[582] = "Unsupported value for stringtype parameter: {0}"; + t[583] = "Il valore per il parametro di tipo string «{0}» non è supportato."; + t[584] = "Conversion to type {0} failed: {1}."; + t[585] = "Conversione al tipo {0} fallita: {1}."; + t[586] = "Conversion of money failed."; + t[587] = "Fallita la conversione di un «money»."; + t[600] = "Unable to load the class {0} responsible for the datatype {1}"; + t[601] = "Non è possibile caricare la class «{0}» per gestire il tipo «{1}»."; + t[604] = "The fastpath function {0} is unknown."; + t[605] = "La funzione fastpath «{0}» è sconosciuta."; + t[608] = "Malformed function or procedure escape syntax at offset {0}."; + t[609] = "Sequenza di escape definita erroneamente nella funzione o procedura all''offset {0}."; + 
t[612] = "Provided Reader failed."; + t[613] = "Il «Reader» fornito è fallito."; + t[614] = "Maximum number of rows must be a value grater than or equal to 0."; + t[615] = "Il numero massimo di righe deve essere maggiore o eguale a 0."; + t[616] = "Failed to create object for: {0}."; + t[617] = "Fallita la creazione dell''oggetto per: {0}."; + t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[623] = "Il flusso di input è stato interrotto, sono arrivati {1} byte al posto dei {0} attesi."; + t[626] = "An unexpected result was returned by a query."; + t[627] = "Un risultato inaspettato è stato ricevuto dalla query."; + t[646] = "An error occurred while setting up the SSL connection."; + t[647] = "Si è verificato un errore impostando la connessione SSL."; + t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; + t[655] = "Sequenza UTF-8 illegale: {0} byte utilizzati per codificare un valore di {1} byte: {2}"; + t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; + t[659] = "La classe «SSLSocketFactory» specificata, «{0}», non può essere istanziata."; + t[670] = "Position: {0}"; + t[671] = "Posizione: {0}"; + t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[677] = "Individuazione: file: \"{0}\", routine: {1}, linea: {2}"; + t[684] = "Cannot tell if path is open or closed: {0}."; + t[685] = "Impossibile stabilire se il percorso è aperto o chiuso: {0}."; + t[700] = "Cannot convert an instance of {0} to type {1}"; + t[701] = "Non è possibile convertire una istanza di «{0}» nel tipo «{1}»"; + t[710] = "{0} function takes four and only four argument."; + t[711] = "Il metodo «{0}» accetta quattro e solo quattro argomenti."; + t[718] = "Interrupted while attempting to connect."; + t[719] = "Si è verificata una interruzione durante il tentativo di connessione."; + t[722] = "Illegal UTF-8 sequence: final value is out of range: {0}"; + t[723] = "Sequenza UTF-8 illegale: il 
valore finale è fuori dall''intervallo permesso: {0}"; + t[736] = "{0} function takes one and only one argument."; + t[737] = "Il metodo «{0}» accetta un ed un solo argomento."; + t[744] = "This ResultSet is closed."; + t[745] = "Questo «ResultSet» è chiuso."; + t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[747] = "Sono stati trovati caratteri non validi tra i dati. Molto probabilmente sono stati memorizzati dei caratteri che non sono validi per la codifica dei caratteri impostata alla creazione del database. Il caso più diffuso è quello nel quale si memorizzano caratteri a 8bit in un database con codifica SQL_ASCII."; + t[750] = "An I/O error occurred while sending to the backend."; + t[751] = "Si è verificato un errore di I/O nella spedizione di dati al server."; + t[754] = "Ran out of memory retrieving query results."; + t[755] = "Fine memoria scaricando i risultati della query."; + t[756] = "Returning autogenerated keys is not supported."; + t[757] = "La restituzione di chiavi autogenerate non è supportata."; + t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; + t[761] = "L''operazione richiete un «ResultSet» scorribile mentre questo è «FORWARD_ONLY»."; + t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; + t[763] = "È stato eseguito un «CallableStatement» ma il parametro in uscita «{0}» era di tipo «{1}» al posto di «{2}», che era stato dichiarato."; + t[768] = "Unknown ResultSet holdability setting: {0}."; + t[769] = "Il parametro «holdability» per il «ResultSet» è sconosciuto: {0}."; + t[772] = "Transaction isolation level {0} not supported."; + t[773] = "Il livello di isolamento delle transazioni «{0}» non è supportato."; + t[776] 
= "No results were returned by the query."; + t[777] = "Nessun risultato è stato restituito dalla query."; + t[778] = "A CallableStatement was executed with nothing returned."; + t[779] = "Un «CallableStatement» è stato eseguito senza produrre alcun risultato. "; + t[780] = "The maximum field size must be a value greater than or equal to 0."; + t[781] = "La dimensione massima del campo deve essere maggiore o eguale a 0."; + t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; + t[787] = "Questo statement non dichiara il parametro in uscita. Usare «{ ?= call ... }» per farlo."; + t[788] = "Can''t use relative move methods while on the insert row."; + t[789] = "Non è possibile utilizzare gli spostamenti relativi durante l''inserimento di una riga."; + t[792] = "Connection is busy with another transaction"; + t[793] = "La connessione è utilizzata da un''altra transazione"; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 397) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 395) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 794) + idx -= 794; + java.lang.Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return + new Enumeration<>() { + private int idx = 0; + { while (idx < 794 && table[idx] == null) idx += 2; } + @Override + public boolean hasMoreElements () { + return (idx < 794); + } + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; + while (idx < 794 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git 
a/pgjdbc/src/main/java/org/postgresql/translation/messages_ja.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_ja.java new file mode 100644 index 0000000..6346a35 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_ja.java @@ -0,0 +1,631 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_ja extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[1426]; + t[0] = ""; + t[1] = "Project-Id-Version: head-ja\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2018-07-23 11:10+0900\nLast-Translator: Kyotaro Horiguchi \nLanguage-Team: PostgreSQL \nLanguage: ja_JP\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: Poedit 1.5.4\n"; + t[2] = "Method {0} is not yet implemented."; + t[3] = "{0} メソッドはまだ実装されていません。"; + t[10] = "Got {0} error responses to single copy cancel request"; + t[11] = "一つのコピー中断要求にたいして {0} 個のエラー応答が返されました"; + t[20] = "The array index is out of range: {0}, number of elements: {1}."; + t[21] = "配列インデックスが範囲外です: {0} 、要素の数: {1}"; + t[26] = "Tried to obtain lock while already holding it"; + t[27] = "すでに取得中のロックを取得しようとしました"; + t[28] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. 
xid={0}, currentXid={1}, state={2}, flags={3}"; + t[29] = "不正なプロトコル状態が要求されました。Transaction interleaving を試みましたが実装されていません。xid={0}, currentXid={1}, state={2}, flags={3}"; + t[34] = "Unsupported property name: {0}"; + t[35] = "サポートされていないプロパティ名: {0}"; + t[36] = "Unsupported Types value: {0}"; + t[37] = "サポートされない Types の値: {0}."; + t[44] = "The hostname {0} could not be verified by hostnameverifier {1}."; + t[45] = "ホスト名 {0} は、hostnameverifier {1} で検証できませんでした。"; + t[52] = "Invalid UUID data."; + t[53] = "不正なUUIDデータです。"; + t[54] = "{0} parameter value must be an integer but was: {1}"; + t[55] = "パラメータ {0} の値は整数でなければなりませんが指定された値は {1} でした"; + t[56] = "Copying from database failed: {0}"; + t[57] = "データベースからのコピーに失敗しました: {0}"; + t[58] = "Requested CopyDual but got {0}"; + t[59] = "CopyDualを要求しましたが {0} が返却されました。"; + t[64] = "Multiple ResultSets were returned by the query."; + t[65] = "クエリの実行により、複数のResultSetが返されました。"; + t[76] = "Too many update results were returned."; + t[77] = "返却された更新結果が多すぎます。"; + t[84] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; + t[85] = "システムカタログにデータがないため MaxIndexKeys の値を決定できません。"; + t[90] = "Database connection failed when starting copy"; + t[91] = "コピー開始時のデータベース接続に失敗しました"; + t[94] = "Unknown XML Result class: {0}"; + t[95] = "未知のXML結果クラス: {0}"; + t[100] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; + t[101] = "サーバのstandard_conforming_stringsパラメータは、{0}であると報告されました。JDBCドライバは、on または off を想定しています。"; + t[102] = "Batch entry {0} {1} was aborted: {2} Call getNextException to see other errors in the batch."; + t[103] = "バッチ {0} {1} はアボートしました: {2} このバッチの他のエラーは getNextException を呼び出すことで確認できます。"; + t[104] = "Protocol error. 
Session setup failed."; + t[105] = "プロトコルエラー。セッションは準備できませんでした。"; + t[106] = "This SQLXML object has not been initialized, so you cannot retrieve data from it."; + t[107] = "このSQLXMLオブジェクトは初期化されてなかったため、そこからデータを取得できません。"; + t[116] = "Bad value for type {0} : {1}"; + t[117] = "型 {0} に対する不正な値 : {1}"; + t[120] = "A CallableStatement was executed with an invalid number of parameters"; + t[121] = "CallableStatement は不正な数のパラメータで実行されました。"; + t[124] = "Error preparing transaction. prepare xid={0}"; + t[125] = "トランザクションの準備エラー。prepare xid={0}"; + t[126] = "Can''t use relative move methods while on the insert row."; + t[127] = "行挿入中に相対移動メソッドは使えません。"; + t[130] = "Failed to create object for: {0}."; + t[131] = "{0} のオブジェクトの生成に失敗しました。"; + t[138] = "Cannot change transaction read-only property in the middle of a transaction."; + t[139] = "トランザクションの中で read-only プロパティは変更できません。"; + t[154] = "{0} function takes three and only three arguments."; + t[155] = "{0} 関数はちょうど3個の引数を取ります。"; + t[158] = "One-phase commit called for xid {0} but connection was prepared with xid {1}"; + t[159] = "単相コミットが xid {0} に対してよびだされましたが、コネクションは xid {1} と関連付けられています"; + t[160] = "Validating connection."; + t[161] = "コネクションを検証しています"; + t[166] = "This replication stream has been closed."; + t[167] = "このレプリケーション接続は既にクローズされています。"; + t[168] = "An error occurred while trying to get the socket timeout."; + t[169] = "ソケットタイムアウト取得中にエラーが発生しました。"; + t[170] = "Conversion of money failed."; + t[171] = "貨幣金額の変換に失敗しました。"; + t[172] = "Provided Reader failed."; + t[173] = "渡された Reader で異常が発生しました。"; + t[174] = "tried to call end without corresponding start call. 
state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[175] = "対応する start の呼び出しなしで、end を呼び出しました。state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[178] = "Got CopyBothResponse from server during an active {0}"; + t[179] = "{0} を実行中のサーバから CopyOutResponse を受け取りました"; + t[186] = "Unknown ResultSet holdability setting: {0}."; + t[187] = "ResultSet の holdability に対する未知の設定値です: {0}"; + t[188] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[189] = "実装されていません: 第二フェーズの COMMIT は、待機接続で使わなくてはなりません。xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[190] = "Invalid server SCRAM signature"; + t[191] = "不正なサーバSCRAM署名です"; + t[192] = "The server''s client_encoding parameter was changed to {0}. The JDBC driver requires client_encoding to be UTF8 for correct operation."; + t[193] = "サーバの client_encoding パラメータが {0} に変わりました。JDBCドライバが正しく動作するためには、 client_encoding は UTF8 である必要があります。"; + t[198] = "Detail: {0}"; + t[199] = "詳細: {0}"; + t[200] = "Unexpected packet type during copy: {0}"; + t[201] = "コピー中の想定外のパケット型です: {0}"; + t[206] = "Transaction isolation level {0} not supported."; + t[207] = "トランザクション分離レベル{0} はサポートされていません。"; + t[210] = "The server requested password-based authentication, but no password was provided."; + t[211] = "サーバはパスワード・ベースの認証を要求しましたが、パスワードが渡されませんでした。"; + t[214] = "Interrupted while attempting to connect."; + t[215] = "接続試行中に割り込みがありました。"; + t[216] = "Fetch size must be a value greater to or equal to 0."; + t[217] = "フェッチサイズは、0または、より大きな値でなくてはなりません。"; + t[228] = "Added parameters index out of range: {0}, number of columns: {1}."; + t[229] = "パラメータ・インデックスは範囲外です: {0} , カラム数: {1}"; + t[230] = "Could not decrypt SSL key file {0}."; + t[231] = "SSL keyファイル {0} を復号できませんでした。"; + t[242] = "Could not initialize SSL context."; + t[243] = "SSLコンテクストを初期化できませんでした。"; + t[244] = "{0} function takes one and only one argument."; + t[245] = "{0} 
関数はちょうど1個の引数を取ります。"; + t[248] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; + t[249] = "{0} 型のパラメータが登録されましたが、get{1} (sqltype={2}) が呼び出されました。"; + t[258] = "Conversion of interval failed"; + t[259] = "時間間隔の変換に失敗しました。"; + t[262] = "xid must not be null"; + t[263] = "xidはnullではいけません。"; + t[264] = "Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; + t[265] = "セキュリティ・ポリシーにより、接続が妨げられました。おそらく、接続先のデータベースサーバのホストとポートに対して java.net.SocketPermission の connect 権限を許可する必要があります。"; + t[270] = "ClientInfo property not supported."; + t[271] = "ClientInfo プロパティはサポートされていません。"; + t[272] = "LOB positioning offsets start at 1."; + t[273] = "LOB 位置指定のオフセット値は 1 以上です。"; + t[276] = "Tried to write to an inactive copy operation"; + t[277] = "実行中ではないコピー操作に書き込もうとしました"; + t[278] = "suspend/resume not implemented"; + t[279] = "停止/再開 は実装されていません。"; + t[290] = "Transaction control methods setAutoCommit(true), commit, rollback and setSavePoint not allowed while an XA transaction is active."; + t[291] = "トランザクション制御メソッド setAutoCommit(true), commit, rollback, setSavePoint は、XAトランザクションが有効である間は利用できません。"; + t[292] = "Unable to find server array type for provided name {0}."; + t[293] = "指定された名前 {0} のサーバ配列型はありません。"; + t[300] = "Statement has been closed."; + t[301] = "ステートメントはクローズされました。"; + t[302] = "The fastpath function {0} is unknown."; + t[303] = "{0} は未知の fastpath 関数です。"; + t[306] = "The server''s DateStyle parameter was changed to {0}. 
The JDBC driver requires DateStyle to begin with ISO for correct operation."; + t[307] = "サーバのDateStyleパラメータは、{0} に変わりました。JDBCドライバが正しく動作するためには、DateStyle が ISO で始まる値である必要があります。"; + t[308] = "Invalid flags {0}"; + t[309] = "不正なフラグ {0}"; + t[324] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[325] = "CallableStatementは宣言されましたが、registerOutParameter(1, ) は呼び出されませんでした。"; + t[328] = "Cannot commit when autoCommit is enabled."; + t[329] = "autoCommit有効時に、明示的なコミットはできません。"; + t[330] = "Database connection failed when writing to copy"; + t[331] = "コピーへの書き込み中にデータベース接続で異常が発生しました"; + t[334] = "Hint: {0}"; + t[335] = "ヒント: {0}"; + t[336] = "Interval {0} not yet implemented"; + t[337] = "時間間隔 {0} は実装されていません"; + t[338] = "No X509TrustManager found"; + t[339] = "X509TrustManager が見つかりません"; + t[346] = "No results were returned by the query."; + t[347] = "クエリは結果を返却しませんでした。"; + t[354] = "Heuristic commit/rollback not supported. forget xid={0}"; + t[355] = "ヒューリスティック commit/rollback はサポートされません。forget xid={0}"; + t[362] = "Fastpath call {0} - No result was returned or wrong size while expecting an integer."; + t[363] = "Fastpath 呼び出し {0} - integer を想定していましたが、結果は返却されないかまたは間違った大きさでした。"; + t[364] = "Cannot cast an instance of {0} to type {1}"; + t[365] = "{0} のインスタンスは {1} 型へキャストできません"; + t[366] = "ResultSet not positioned properly, perhaps you need to call next."; + t[367] = "適切な位置にいない ResultSetです。おそらく、nextを呼ぶ必要があります。"; + t[372] = "Cannot establish a savepoint in auto-commit mode."; + t[373] = "自動コミットモードでsavepointを作成できません。"; + t[374] = "Prepare called before end. 
prepare xid={0}, state={1}"; + t[375] = "end より前に prepare が呼ばれました prepare xid={0}, state={1}"; + t[382] = "You must specify at least one column value to insert a row."; + t[383] = "行挿入には、最低でも1つの列の値が必要です。"; + t[388] = "Query timeout must be a value greater than or equals to 0."; + t[389] = "クエリタイムアウトは、0またはより大きな値でなくてはなりません。"; + t[394] = "The SSLSocketFactory class provided {0} could not be instantiated."; + t[395] = "渡された SSLSocketFactoryクラス {0} はインスタンス化できませんでした。"; + t[396] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[397] = "パラメータのインデックスが範囲外です: {0} , パラメータ数: {1}"; + t[400] = "This ResultSet is closed."; + t[401] = "この ResultSet はクローズされています。"; + t[402] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; + t[403] = "開始位置より前もしくは終了位置より後ろであるため、ResultSetを更新することができません。"; + t[404] = "SSL error: {0}"; + t[405] = "SSL エラー: {0}"; + t[408] = "The column name {0} was not found in this ResultSet."; + t[409] = "この ResultSet に列名 {0} ありません。"; + t[412] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[413] = "認証タイプ {0} はサポートされません。pg_hba.confでクライアントのIPアドレスまたはサブネットの指定があり、そのエントリでこのドライバがサポートする認証機構を使うように設定されていることを確認してください。"; + t[440] = "The driver currently does not support COPY operations."; + t[441] = "ドライバはコピー操作をサポートしていません。"; + t[442] = "This statement has been closed."; + t[443] = "このステートメントはクローズされています。"; + t[444] = "Object is too large to send over the protocol."; + t[445] = "オブジェクトが大きすぎてこのプロトコルでは送信できません。"; + t[448] = "oid type {0} not known and not a number"; + t[449] = "OID型 {0} は未知でかつ数値でもありません"; + t[452] = "No hstore extension installed."; + t[453] = "hstore 拡張がインストールされてません。"; + t[454] = "Currently positioned after the end of the ResultSet. 
You cannot call deleteRow() here."; + t[455] = "ResultSet の最後尾より後ろにいるため、deleteRow() を呼ぶことはできません。"; + t[462] = "The column index is out of range: {0}, number of columns: {1}."; + t[463] = "列インデックスは範囲外です: {0} , 列の数: {1}"; + t[468] = "Got CopyInResponse from server during an active {0}"; + t[469] = "{0} を実行中のサーバから CopyInResponse を受け取りました"; + t[474] = "Fastpath call {0} - No result was returned and we expected a numeric."; + t[475] = "Fastpath 呼び出し {0} - numeric を想定していましたが、結果は返却されませんでした。"; + t[482] = "An error occurred while setting up the SSL connection."; + t[483] = "SSL接続のセットアップ中に、エラーが起こりました。"; + t[484] = "Could not open SSL certificate file {0}."; + t[485] = "SSL証明書ファイル {0} を開けませんでした。"; + t[490] = "free() was called on this LOB previously"; + t[491] = "このLOBに対して free() はすでに呼び出し済みです"; + t[492] = "Finalizing a Connection that was never closed:"; + t[493] = "クローズされていないコネクションの終了処理を行います: "; + t[494] = "Unsupported properties: {0}"; + t[495] = "サポートされないプロパティ: {0}"; + t[498] = "Interrupted while waiting to obtain lock on database connection"; + t[499] = "データベース接続のロック待ちの最中に割り込みがありました"; + t[504] = "The HostnameVerifier class provided {0} could not be instantiated."; + t[505] = "与えれた HostnameVerifier クラス {0} はインスタンス化できませんした。"; + t[506] = "Unable to create SAXResult for SQLXML."; + t[507] = "SQLXMLに対するSAXResultを生成できません。"; + t[510] = "The server does not support SSL."; + t[511] = "サーバはSSLをサポートしていません。"; + t[516] = "Got CopyData without an active copy operation"; + t[517] = "実行中のコピー操作がないにもかかわらず CopyData を受け取りました"; + t[518] = "Error during one-phase commit. 
commit xid={0}"; + t[519] = "単一フェーズのCOMMITの処理中のエラー commit xid={0}"; + t[522] = "Network timeout must be a value greater than or equal to 0."; + t[523] = "ネットワークタイムアウトは、0またはより大きな値でなくてはなりません。"; + t[532] = "Unsupported type conversion to {1}."; + t[533] = "{1} への型変換はサポートされていません。"; + t[534] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[535] = "入力ストリームが途中で終了しました、{0} バイトを読み込もうとしましたが、 {1} バイトしかありませんでした。"; + t[536] = "Zero bytes may not occur in string parameters."; + t[537] = "バイト値0を文字列ラメータに含めることはできません。"; + t[538] = "This connection has been closed."; + t[539] = "このコネクションは既にクローズされています。"; + t[540] = "Cannot call deleteRow() when on the insert row."; + t[541] = "行挿入時に deleteRow() を呼び出せません。"; + t[544] = "Unable to bind parameter values for statement."; + t[545] = "ステートメントのパラメータ値をバインドできませんでした。"; + t[552] = "Cannot convert an instance of {0} to type {1}"; + t[553] = "{0} のインスタンスは {1} 型に変換できません"; + t[554] = "Conversion to type {0} failed: {1}."; + t[555] = "{0} への型変換に失敗しました: {1}"; + t[556] = "Error loading default settings from driverconfig.properties"; + t[557] = "driverconfig.properties からの初期設定ロード中のエラー"; + t[558] = "Expected command status BEGIN, got {0}."; + t[559] = "BEGINコマンドステータスを想定しましたが、{0} が返却されました。"; + t[564] = "An unexpected result was returned by a query."; + t[565] = "クエリが想定外の結果を返却しました。"; + t[568] = "Something unusual has occurred to cause the driver to fail. 
Please report this exception."; + t[569] = "何らかの異常によりドライバが動作できません。この例外を報告して下さい。"; + t[576] = "One or more ClientInfo failed."; + t[577] = "1つ以上の ClinentInfo で問題が発生しました。"; + t[578] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[579] = "場所: ファイル: {0}, ルーチン: {1},行: {2}"; + t[582] = "Unknown type {0}."; + t[583] = "未知の型 {0}."; + t[590] = "This SQLXML object has already been freed."; + t[591] = "このSQLXMLオブジェクトはすでに解放されています。"; + t[594] = "Unexpected copydata from server for {0}"; + t[595] = "{0} を実行中のサーバからのあり得ない CopyData"; + t[596] = "{0} function takes two or three arguments."; + t[597] = "{0} 関数は2個、または3個の引数を取ります。"; + t[602] = "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections."; + t[603] = "{0} への接続が拒絶されました。ホスト名とポート番号が正しいことと、postmaster がTCP/IP接続を受け付けていることを確認してください。"; + t[612] = "Unsupported binary encoding of {0}."; + t[613] = "{0} 型に対するサポートされないバイナリエンコーディング。"; + t[616] = "Returning autogenerated keys is not supported."; + t[617] = "自動生成キーを返すことはサポートされていません。"; + t[620] = "Provided InputStream failed."; + t[621] = "渡された InputStream で異常が発生しました。"; + t[626] = "No IOException expected from StringBuffer or StringBuilder"; + t[627] = "StringBuffer または StringBuilder からの IOException は想定されていません"; + t[638] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; + t[639] = "実装されていません: 単一フェーズのCOMMITは、開始時と同じ接続で発行されなければなりません。"; + t[640] = "Cannot reference a savepoint after it has been released."; + t[641] = "解放された savepoint は参照できません。"; + t[642] = "Ran out of memory retrieving query results."; + t[643] = "クエリの結果取得中にメモリ不足が起きました。"; + t[654] = "No primary key found for table {0}."; + t[655] = "テーブル {0} には主キーがありません。"; + t[658] = "Error during recover"; + t[659] = "recover 処理中のエラー"; + t[666] = "This copy stream is closed."; + t[667] = "このコピーストリームはクローズされています。"; + t[668] = "Could not open SSL root certificate file {0}."; + t[669] = "SSLルート証明書ファイル {0} 
をオープンできませんでした。"; + t[676] = "Invalid sslmode value: {0}"; + t[677] = "不正な sslmode 値: {0}"; + t[678] = "Cannot tell if path is open or closed: {0}."; + t[679] = "経路が開いているか、閉じているか判別できません: {0}"; + t[682] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; + t[683] = "不正なUTF-8シーケンス: {1} バイトの値のエンコードに{0} バイト使用しています: {2}"; + t[684] = "Unknown XML Source class: {0}"; + t[685] = "未知のXMLソースクラス: {0}"; + t[686] = "Internal Query: {0}"; + t[687] = "内部クエリ: {0}"; + t[702] = "Could not find a java cryptographic algorithm: {0}."; + t[703] = "javaの暗号化アルゴリズム {0} を見つけることができませんでした。"; + t[706] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[707] = "同じ PooledConnection に対して新しい接続をオープンしたか、この PooledConnection がクローズされたため、接続が自動的にクローズされました。"; + t[708] = "Invalid fetch direction constant: {0}."; + t[709] = "不正なフェッチ方向の定数です: {0}"; + t[714] = "Can''t use query methods that take a query string on a PreparedStatement."; + t[715] = "PreparedStatement でクエリ文字列を取るクエリメソッドは使えません。"; + t[716] = "SCRAM authentication failed, server returned error: {0}"; + t[717] = "スクラム認証が失敗しました、サーバはエラーを返却しました: {0}"; + t[722] = "Invalid elements {0}"; + t[723] = "不正な要素です: {0}"; + t[738] = "Not on the insert row."; + t[739] = "挿入行上にいません。"; + t[740] = "Unable to load the class {0} responsible for the datatype {1}"; + t[741] = "データ型 {1} に対応するクラス{0} をロードできません。"; + t[752] = "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."; + t[753] = "javaの暗号化アルゴリズムを見つけることができませんでした。X.509 CertificateFactory は利用できません。"; + t[756] = "Can''t infer the SQL type to use for an instance of {0}. 
Use setObject() with an explicit Types value to specify the type to use."; + t[757] = "{0} のインスタンスに対して使うべきSQL型を推測できません。明示的な Types 引数をとる setObject() で使うべき型を指定してください。"; + t[760] = "Invalid server-first-message: {0}"; + t[761] = "不正な server-first-message: {0}"; + t[762] = "No value specified for parameter {0}."; + t[763] = "パラメータ {0} に値が設定されてません。"; + t[766] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[767] = "Fastpath 呼び出し {0} - integer を想定していましたが、結果は返却されませんでした。"; + t[774] = "Unable to create StAXResult for SQLXML"; + t[775] = "SQLXMLに対するStAXResultを生成できません。"; + t[798] = "CommandComplete expected COPY but got: "; + t[799] = "CommandComplete はCOPYを想定しましたが、次の結果が返却されました:"; + t[800] = "Enter SSL password: "; + t[801] = "SSLパスワード入力: "; + t[802] = "Failed to convert binary xml data to encoding: {0}."; + t[803] = "バイナリxmlデータのエンコード: {0} への変換に失敗しました。"; + t[804] = "No SCRAM mechanism(s) advertised by the server"; + t[805] = "サーバは SCRAM認証機構を広告していません"; + t[818] = "Custom type maps are not supported."; + t[819] = "カスタム型マップはサポートされません。"; + t[822] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; + t[823] = "不正なUTF-8シーケンス: 変換後の値がサロゲート値です: {0}"; + t[824] = "The SocketFactory class provided {0} could not be instantiated."; + t[825] = "渡された SocketFactoryクラス {0} はインスタンス化できませんでした。"; + t[832] = "Large Objects may not be used in auto-commit mode."; + t[833] = "ラージオブジェクトは、自動コミットモードで使うことができません。"; + t[834] = "Fastpath call {0} - No result was returned or wrong size while expecting a long."; + t[835] = "Fastpath 呼び出し {0} - long を想定していましたが、結果は返却されないかまたは間違った大きさでした。"; + t[844] = "Invalid stream length {0}."; + t[845] = "不正なストリーム長 {0}。"; + t[850] = "The sslfactoryarg property must start with the prefix file:, classpath:, env:, sys:, or -----BEGIN CERTIFICATE-----."; + t[851] = "プロパティ sslfactoryarg の先頭はプリフィクス file:, classpath:, env:, sys: もしくは -----BEGIN CERTIFICATE----- のいずれかでなければなりません。"; + t[852] = "Can''t use executeWithFlags(int) on a 
Statement."; + t[853] = "executeWithFlags(int) は Statement インスタンスでは使えません。"; + t[856] = "Cannot retrieve the id of a named savepoint."; + t[857] = "名前付き savepoint の id は取得できません。"; + t[860] = "Could not read password for SSL key file by callbackhandler {0}."; + t[861] = "callbackhandler {0} で、SSL keyファイルを読めませんでした。"; + t[874] = "Tried to break lock on database connection"; + t[875] = "データベース接続のロックを破壊しようとしました"; + t[878] = "Unexpected error writing large object to database."; + t[879] = "データベースへのラージオブジェクト書き込み中に想定外のエラーが起きました。"; + t[880] = "Expected an EOF from server, got: {0}"; + t[881] = "サーバからの EOF を期待していましたが、{0} が送られてきました"; + t[886] = "Could not read SSL root certificate file {0}."; + t[887] = "SSLルート証明書ファイル {0} を読めませんでした。"; + t[888] = "This SQLXML object has already been initialized, so you cannot manipulate it further."; + t[889] = "このSQLXMLオブジェクトは既に初期化済みであるため、これ以上操作できません。"; + t[896] = "The array index is out of range: {0}"; + t[897] = "配列インデックスが範囲外です: {0}"; + t[898] = "Unable to set network timeout."; + t[899] = "ネットワークタイムアウトが設定できません。"; + t[900] = "{0} function takes four and only four argument."; + t[901] = "{0} 関数はちょうど4個の引数を取ります。"; + t[904] = "Unable to decode xml data."; + t[905] = "xmlデータをデコードできません。"; + t[916] = "Bad value for type timestamp/date/time: {1}"; + t[917] = "timestamp/date/time 型に対する不正な値: {1}"; + t[928] = "Illegal UTF-8 sequence: final value is out of range: {0}"; + t[929] = "不正なUTF-8シーケンス: 変換後の値が範囲外です: {0}"; + t[932] = "Unable to parse the count in command completion tag: {0}."; + t[933] = "コマンド完了タグのカウントをパースできません: {0}"; + t[942] = "Read from copy failed."; + t[943] = "コピーストリームからの読み取りに失敗しました。"; + t[944] = "Maximum number of rows must be a value grater than or equal to 0."; + t[945] = "行数の制限値は 0またはより大きな値でなくてはなりません。"; + t[958] = "The password callback class provided {0} could not be instantiated."; + t[959] = "渡されたパスワードコールバッククラス {0} はインスタンス化できませんでした。"; + t[960] = "Returning autogenerated keys by column index is not supported."; + t[961] = 
"列インデックスで自動生成キーを返すことはサポートされていません。"; + t[966] = "Properties for the driver contains a non-string value for the key "; + t[967] = "このドライバのプロパティでは以下のキーに対して文字列ではない値が設定されています: "; + t[974] = "Database connection failed when canceling copy operation"; + t[975] = "コピー操作中断のためのデータベース接続に失敗しました"; + t[976] = "DataSource has been closed."; + t[977] = "データソースはクローズされました。"; + t[996] = "Unable to get network timeout."; + t[997] = "ネットワークタイムアウトが取得できません。"; + t[1000] = "A CallableStatement was executed with nothing returned."; + t[1001] = "CallableStatement が実行されましたがなにも返却されませんでした。"; + t[1002] = "Can''t refresh the insert row."; + t[1003] = "挿入行を再フェッチすることはできません。"; + t[1004] = "Could not find a server with specified targetServerType: {0}"; + t[1005] = "指定された targetServerType のサーバーが見つかりません: {0}"; + t[1006] = "This PooledConnection has already been closed."; + t[1007] = "この PooledConnectionは、すでに閉じられています。"; + t[1010] = "Cannot call cancelRowUpdates() when on the insert row."; + t[1011] = "行挿入時に cancelRowUpdates() を呼び出せません。"; + t[1012] = "Preparing already prepared transaction, the prepared xid {0}, prepare xid={1}"; + t[1013] = "すでにプリペアされているトランザクションをプリペアしようとしました、プリペアされている xid={0}, プリペアしようとした xid={1}"; + t[1018] = "CopyIn copy direction can't receive data"; + t[1019] = "コピー方向 CopyIn はデータを受信できません"; + t[1024] = "conversion to {0} from {1} not supported"; + t[1025] = "{1} から {0} への変換はサポートされていません。"; + t[1030] = "An error occurred reading the certificate"; + t[1031] = "証明書の読み込み中にエラーが起きました"; + t[1032] = "Invalid or unsupported by client SCRAM mechanisms"; + t[1033] = "不正であるかクライアントのSCRAM機構でサポートされていません"; + t[1034] = "Malformed function or procedure escape syntax at offset {0}."; + t[1035] = "関数またはプロシージャの間違ったエスケープ構文が位置{0}で見つかりました。"; + t[1038] = "Bind message length {0} too long. 
This can be caused by very large or incorrect length specifications on InputStream parameters."; + t[1039] = "バインドメッセージ長 {0} は長すぎます。InputStreamのパラメータにとても大きな長さ、あるいは不正確な長さが設定されている可能性があります。"; + t[1050] = "Cannot change transaction isolation level in the middle of a transaction."; + t[1051] = "トランザクションの中でトランザクション分離レベルは変更できません。"; + t[1058] = "Internal Position: {0}"; + t[1059] = "内部位置: {0}"; + t[1062] = "No function outputs were registered."; + t[1063] = "関数出力は登録されていません。"; + t[1072] = "Unexpected packet type during replication: {0}"; + t[1073] = "レプリケーション中に想定外のパケット型: {0}"; + t[1076] = "Error disabling autocommit"; + t[1077] = "自動コミットの無効化処理中のエラー"; + t[1080] = "Requested CopyOut but got {0}"; + t[1081] = "CopyOut を要求しましたが {0} が返却されました"; + t[1084] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}"; + t[1085] = "プリペアドトランザクションのロールバック中のエラー rollback xid={0}, preparedXid={1}, currentXid={2}"; + t[1086] = "Database connection failed when ending copy"; + t[1087] = "コピー操作の終了中にデータベース接続で異常が発生しました"; + t[1090] = "Unsupported value for stringtype parameter: {0}"; + t[1091] = "サポートされないstringtypeパラメータ値です: {0}"; + t[1094] = "The sslfactoryarg property may not be empty."; + t[1095] = "プロパティ sslfactoryarg は空であってはなりません。"; + t[1102] = "Loading the SSL root certificate {0} into a TrustManager failed."; + t[1103] = "SSLルート証明書 {0} をTrustManagerへ読み込めませんでした。"; + t[1104] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; + t[1105] = "不正なUTF-8シーケンス: 先頭バイトが {0}: {1}"; + t[1116] = "The environment variable containing the server's SSL certificate must not be empty."; + t[1117] = "サーバのSSL証明書を指定する環境変数は空であってはなりません。"; + t[1118] = "Connection attempt timed out."; + t[1119] = "接続試行がタイムアウトしました。"; + t[1130] = "Cannot write to copy a byte of value {0}"; + t[1131] = "バイト値{0}はコピーストリームへの書き込みはできません"; + t[1132] = "Connection has been closed."; + t[1133] = "接続はクローズされました。"; + t[1136] = "Could not read password for SSL key file, console is not available."; + t[1137] = "SSL 
keyファイルのパスワードを読めませんでした。コンソールは利用できません。"; + t[1140] = "The JVM claims not to support the encoding: {0}"; + t[1141] = "JVMでサポートされないエンコーディングです: {0}"; + t[1146] = "Unexpected command status: {0}."; + t[1147] = "想定外のコマンドステータス: {0}。"; + t[1154] = "Cannot rollback when autoCommit is enabled."; + t[1155] = "autoCommit有効時に、明示的なロールバックはできません。"; + t[1158] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; + t[1159] = "実装されていません: Prepareは、トランザクションを開始したものと同じコネクションで発行しなくてはなりません。currentXid={0}, prepare xid={1}"; + t[1162] = "The connection attempt failed."; + t[1163] = "接続試行は失敗しました。"; + t[1166] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; + t[1167] = "不正なUTF-8シーケンス: {1} バイトのシーケンス中 {0} バイト目が、10xxxxxx ではありません: {2}"; + t[1178] = "A connection could not be made using the requested protocol {0}."; + t[1179] = "要求されたプロトコル {0} で接続することができませんでした。"; + t[1182] = "The system property containing the server's SSL certificate must not be empty."; + t[1183] = "サーバーのSSL証明書を指定するシステムプロパティは空であってはなりません。"; + t[1188] = "Cannot call updateRow() when on the insert row."; + t[1189] = "挿入行上では updateRow() を呼び出すことができません。"; + t[1192] = "Fastpath call {0} - No result was returned and we expected a long."; + t[1193] = "Fastpath 呼び出し {0} - long を想定していましたが、結果は返却されませんでした。"; + t[1198] = "Truncation of large objects is only implemented in 8.3 and later servers."; + t[1199] = "ラージオブジェクトの切り詰めは、バージョン8.3 以降のサーバでのみ実装されています。"; + t[1200] = "Cannot convert the column of type {0} to requested type {1}."; + t[1201] = "{0}型のカラムの値を指定の型 {1} に変換できませんでした。"; + t[1204] = "Requested CopyIn but got {0}"; + t[1205] = "CopyIn を要求しましたが {0} が返却されました"; + t[1206] = "Cannot cast to boolean: \"{0}\""; + t[1207] = "boolean へのキャストはできません: \"{0}\""; + t[1212] = "Invalid server-final-message: {0}"; + t[1213] = "不正な server-final-message: {0}."; + t[1214] = "This statement does not declare an OUT parameter. Use '{' ?= call ... 
'}' to declare one."; + t[1215] = "このステートメントは、OUTパラメータを宣言していません。'{' ?= call ... '}' を使って宣言して下さい。"; + t[1218] = "Cannot truncate LOB to a negative length."; + t[1219] = "LOBを負の長さに切り詰めることはできません。"; + t[1220] = "Zero bytes may not occur in identifiers."; + t[1221] = "バイト値0を識別子に含めることはできません。"; + t[1222] = "Unable to convert DOMResult SQLXML data to a string."; + t[1223] = "DOMResult SQLXMLデータを文字列に変換することができません。"; + t[1224] = "Missing expected error response to copy cancel request"; + t[1225] = "予期していたコピーの中断要求へのエラー応答がありませんでした"; + t[1234] = "SCRAM authentication is not supported by this driver. You need JDK >= 8 and pgjdbc >= 42.2.0 (not \".jre\" versions)"; + t[1235] = "SCRAM認証はこのドライバではサポートされません。JDK8 以降かつ pgjdbc 42.2.0 以降(\".jre\"のバージョンではありません)が必要です。"; + t[1240] = "Tried to end inactive copy"; + t[1241] = "実行中ではないコピー操作を終了しようとしました"; + t[1246] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; + t[1247] = "CallableStatement 関数が実行され、出力パラメータ {0} は {1} 型 でした。しかし、{2} 型 が登録されました。"; + t[1250] = "Failed to setup DataSource."; + t[1251] = "データソースのセットアップに失敗しました。"; + t[1252] = "Loading the SSL certificate {0} into a KeyManager failed."; + t[1253] = "SSL証明書 {0} をKeyManagerへ読み込めませんでした。"; + t[1254] = "Could not read SSL key file {0}."; + t[1255] = "SSL keyファイル {0} を読めませんでした。"; + t[1258] = "Tried to read from inactive copy"; + t[1259] = "実行中ではないコピーから読み取ろうとしました"; + t[1260] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. 
See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[1261] = "ResultSetは更新不可です。この結果セットを生成したクエリは、ただ一つのテーブルを選択して、そのテーブルの全ての主キーを選択する必要があります。詳細に関しては JDBC 2.1 API仕様、章 5.6 を参照して下さい。"; + t[1264] = "A result was returned when none was expected."; + t[1265] = "ないはずの結果が返却されました。"; + t[1266] = "Tried to cancel an inactive copy operation"; + t[1267] = "実行中ではないコピー操作の中断を試みました"; + t[1268] = "Server SQLState: {0}"; + t[1269] = "サーバ SQLState: {0}"; + t[1272] = "Unable to find keywords in the system catalogs."; + t[1273] = "キーワードはシステムカタログにありません。"; + t[1276] = "Connection is busy with another transaction"; + t[1277] = "接続は、別のトランザクションを処理中です"; + t[1280] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[1281] = "CONCUR_READ_ONLYに設定されている ResultSet は更新できません。"; + t[1296] = "commit called before end. commit xid={0}, state={1}"; + t[1297] = "end の前に COMMIT を呼びました commit xid={0}, state={1}"; + t[1308] = "PostgreSQL LOBs can only index to: {0}"; + t[1309] = "PostgreSQL LOB 上の位置指定は最大 {0} までです"; + t[1310] = "Where: {0}"; + t[1311] = "場所: {0}"; + t[1312] = "Unable to find name datatype in the system catalogs."; + t[1313] = "name データ型がシステムカタログにありません。"; + t[1314] = "Invalid targetServerType value: {0}"; + t[1315] = "不正な targetServerType 値です。{0}."; + t[1318] = "Cannot retrieve the name of an unnamed savepoint."; + t[1319] = "無名 savepoint の名前は取得できません。"; + t[1320] = "Error committing prepared transaction. 
commit xid={0}, preparedXid={1}, currentXid={2}"; + t[1321] = "プリペアドトランザクションの COMMIT 処理中のエラー。commit xid={0}, preparedXid={1}, currentXid={2}"; + t[1324] = "Invalid timeout ({0}<0)."; + t[1325] = "不正なタイムアウト値 ({0}<0)。"; + t[1328] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; + t[1329] = "操作は、スクロール可能なResultSetを必要としますが、このResultSetは、 FORWARD_ONLYです。"; + t[1330] = "Results cannot be retrieved from a CallableStatement before it is executed."; + t[1331] = "実行前の CallableStatement から結果の取得はできません。"; + t[1332] = "wasNull cannot be call before fetching a result."; + t[1333] = "wasNullは、結果フェッチ前に呼び出せません。"; + t[1336] = "{0} function doesn''t take any argument."; + t[1337] = "{0} 関数は引数を取りません。"; + t[1344] = "Unknown Response Type {0}."; + t[1345] = "未知の応答タイプ {0} です。"; + t[1346] = "The JVM claims not to support the {0} encoding."; + t[1347] = "JVMは、エンコーディング {0} をサポートしません。"; + t[1348] = "{0} function takes two and only two arguments."; + t[1349] = "{0} 関数はちょうど2個の引数を取ります。"; + t[1350] = "The maximum field size must be a value greater than or equal to 0."; + t[1351] = "最大の項目サイズは、0またはより大きな値でなくてはなりません。"; + t[1352] = "Received CommandComplete ''{0}'' without an active copy operation"; + t[1353] = "実行中のコピー操作がないにもかかわらず CommandComplete ''{0}'' を受信しました"; + t[1354] = "Unable to translate data into the desired encoding."; + t[1355] = "データを指定されたエンコーディングに変換することができません。"; + t[1368] = "Got CopyOutResponse from server during an active {0}"; + t[1369] = "{0} を実行中のサーバから CopyOutResponse を受け取りました"; + t[1370] = "Failed to set ClientInfo property: {0}"; + t[1371] = "ClientInfo のプロパティの設定に失敗しました: {0}"; + t[1372] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. 
The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[1373] = "不正な文字データが見つかりました。これはデータベース作成時の文字セットに対して不正な文字を含むデータが格納されているために起きている可能性が高いです。最も一般的な例は、SQL_ASCIIデータベースに8bitデータが保存されている場合です。"; + t[1374] = "Unknown Types value."; + t[1375] = "未知の Types の値です。"; + t[1376] = " (pgjdbc: autodetected server-encoding to be {0}, if the message is not readable, please check database logs and/or host, port, dbname, user, password, pg_hba.conf)"; + t[1377] = "(pgjdbc: server-encoding として {0} を自動検出しました、メッセージが読めない場合はデータベースログおよび host, port, dbname, user, password, pg_dba.conf を確認してください)"; + t[1386] = "GSS Authentication failed"; + t[1387] = "GSS認証は失敗しました。"; + t[1390] = "An error occurred while trying to reset the socket timeout."; + t[1391] = "ソケットタイムアウトのリセット中にエラーが発生しました。"; + t[1392] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; + t[1393] = "RsultSet の開始点より前にいるため、deleteRow() を呼ぶことはできません。"; + t[1394] = "Current connection does not have an associated xid. prepare xid={0}"; + t[1395] = "この接続は xid と関連付けられていません。プリペア xid={0}"; + t[1408] = "An I/O error occurred while sending to the backend."; + t[1409] = "バックエンドへの送信中に、入出力エラーが起こりました。"; + t[1416] = "One-phase commit with unknown xid. 
commit xid={0}, currentXid={1}"; + t[1417] = "未知の xid の単相コミット。 コミットxid={0}, 現在のxid={1}"; + t[1420] = "Position: {0}"; + t[1421] = "位置: {0}"; + t[1422] = "There are no rows in this ResultSet."; + t[1423] = "このResultSetに行がありません。"; + t[1424] = "Database connection failed when reading from copy"; + t[1425] = "コピーからの読み取り中にデータベース接続で異常が発生しました"; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 713) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 711) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 1426) + idx -= 1426; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return new Enumeration<>() { + private int idx = 0; + { while (idx < 1426 && table[idx] == null) idx += 2; } + @Override + public boolean hasMoreElements () { + return (idx < 1426); + } + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 1426 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_nl.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_nl.java new file mode 100644 index 0000000..59fdbf4 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_nl.java @@ -0,0 +1,59 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_nl extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[36]; + t[0] = ""; + t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 
8.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-11 23:55-0700\nLast-Translator: Arnout Kuiper \nLanguage-Team: Dutch \nLanguage: nl\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n"; + t[2] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[3] = "Iets ongewoons is opgetreden, wat deze driver doet falen. Rapporteer deze fout AUB: {0}"; + t[8] = "Unknown Types value."; + t[9] = "Onbekende Types waarde."; + t[12] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[13] = "Fastpath aanroep {0} - Geen resultaat werd teruggegeven, terwijl we een integer verwacht hadden."; + t[20] = "The fastpath function {0} is unknown."; + t[21] = "De fastpath functie {0} is onbekend."; + t[22] = "No results were returned by the query."; + t[23] = "Geen resultaten werden teruggegeven door de query."; + t[26] = "An unexpected result was returned by a query."; + t[27] = "Een onverwacht resultaat werd teruggegeven door een query"; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 18) << 1; + Object found = table[idx]; + if (found != null && msgid.equals(found)) + return table[idx + 1]; + return null; + } + + @Override + public Enumeration getKeys () { + return new Enumeration<>() { + private int idx = 0; + { while (idx < 36 && table[idx] == null) idx += 2; } + @Override + public boolean hasMoreElements () { + return (idx < 36); + } + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 36 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_pl.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_pl.java new file mode 100644 index 
0000000..097627c --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_pl.java @@ -0,0 +1,195 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_pl extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[346]; + t[0] = ""; + t[1] = "Project-Id-Version: head-pl\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2005-05-22 03:01+0200\nLast-Translator: Jarosław Jan Pyszny \nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.10\nPlural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"; + t[2] = "The driver currently does not support COPY operations."; + t[3] = "Sterownik nie obsługuje aktualnie operacji COPY."; + t[4] = "Internal Query: {0}"; + t[5] = "Wewnętrzne Zapytanie: {0}"; + t[6] = "There are no rows in this ResultSet."; + t[7] = "Nie ma żadnych wierszy w tym ResultSet."; + t[8] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[9] = "Znaleziono nieprawidłowy znak. Najprawdopodobniej jest to spowodowane przechowywaniem w bazie znaków, które nie pasują do zestawu znaków wybranego podczas tworzenia bazy danych. 
Najczęstszy przykład to przechowywanie 8-bitowych znaków w bazie o kodowaniu SQL_ASCII."; + t[12] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[13] = "Wywołanie fastpath {0} - Nie otrzymano żadnego wyniku, a oczekiwano liczby całkowitej."; + t[14] = "An error occurred while setting up the SSL connection."; + t[15] = "Wystąpił błąd podczas ustanawiania połączenia SSL."; + t[20] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[21] = "Funkcja CallableStatement została zadeklarowana, ale nie wywołano registerOutParameter (1, )."; + t[24] = "Unexpected command status: {0}."; + t[25] = "Nieoczekiwany status komendy: {0}."; + t[32] = "A connection could not be made using the requested protocol {0}."; + t[33] = "Nie można było nawiązać połączenia stosując żądany protokołu {0}."; + t[38] = "Bad value for type {0} : {1}"; + t[39] = "Zła wartość dla typu {0}: {1}"; + t[40] = "Not on the insert row."; + t[41] = "Nie na wstawianym rekordzie."; + t[42] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[43] = "Przedwczesny koniec strumienia wejściowego, oczekiwano {0} bajtów, odczytano tylko {1}."; + t[48] = "Unknown type {0}."; + t[49] = "Nieznany typ {0}."; + t[52] = "The server does not support SSL."; + t[53] = "Serwer nie obsługuje SSL."; + t[60] = "Cannot call updateRow() when on the insert row."; + t[61] = "Nie można wywołać updateRow() na wstawianym rekordzie."; + t[62] = "Where: {0}"; + t[63] = "Gdzie: {0}"; + t[72] = "Cannot call cancelRowUpdates() when on the insert row."; + t[73] = "Nie można wywołać cancelRowUpdates() na wstawianym rekordzie."; + t[82] = "Server SQLState: {0}"; + t[83] = "Serwer SQLState: {0}"; + t[92] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. 
See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[93] = "ResultSet nie jest modyfikowalny (not updateable). Zapytanie, które zwróciło ten wynik musi dotyczyć tylko jednej tabeli oraz musi pobierać wszystkie klucze główne tej tabeli. Zobacz Specyfikację JDBC 2.1 API, rozdział 5.6, by uzyskać więcej szczegółów."; + t[102] = "Cannot tell if path is open or closed: {0}."; + t[103] = "Nie można stwierdzić, czy ścieżka jest otwarta czy zamknięta: {0}."; + t[108] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[109] = "Indeks parametru jest poza zakresem: {0}, liczba parametrów: {1}."; + t[110] = "Unsupported Types value: {0}"; + t[111] = "Nieznana wartość Types: {0}"; + t[112] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[113] = "Aktualna pozycja za końcem ResultSet. Nie można wywołać deleteRow()."; + t[114] = "This ResultSet is closed."; + t[115] = "Ten ResultSet jest zamknięty."; + t[120] = "Conversion of interval failed"; + t[121] = "Konwersja typu interval nie powiodła się"; + t[122] = "Unable to load the class {0} responsible for the datatype {1}"; + t[123] = "Nie jest możliwe załadowanie klasy {0} odpowiedzialnej za typ danych {1}"; + t[138] = "Error loading default settings from driverconfig.properties"; + t[139] = "Błąd podczas wczytywania ustawień domyślnych z driverconfig.properties"; + t[142] = "The array index is out of range: {0}"; + t[143] = "Indeks tablicy jest poza zakresem: {0}"; + t[146] = "Unknown Types value."; + t[147] = "Nieznana wartość Types."; + t[154] = "The maximum field size must be a value greater than or equal to 0."; + t[155] = "Maksymalny rozmiar pola musi być wartością dodatnią lub 0."; + t[168] = "Detail: {0}"; + t[169] = "Szczegóły: {0}"; + t[170] = "Unknown Response Type {0}."; + t[171] = "Nieznany typ odpowiedzi {0}."; + t[172] = "Maximum number of rows must be a value grater than or equal to 0."; + t[173] = "Maksymalna liczba rekordów 
musi być wartością dodatnią lub 0."; + t[184] = "Query timeout must be a value greater than or equals to 0."; + t[185] = "Timeout zapytania musi być wartością dodatnią lub 0."; + t[186] = "Too many update results were returned."; + t[187] = "Zapytanie nie zwróciło żadnych wyników."; + t[190] = "The connection attempt failed."; + t[191] = "Próba nawiązania połączenia nie powiodła się."; + t[198] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[199] = "Połączenie zostało zamknięte automatycznie, ponieważ nowe połączenie zostało otwarte dla tego samego PooledConnection lub PooledConnection zostało zamknięte."; + t[204] = "Protocol error. Session setup failed."; + t[205] = "Błąd protokołu. Nie udało się utworzyć sesji."; + t[206] = "This PooledConnection has already been closed."; + t[207] = "To PooledConnection zostało już zamknięte."; + t[208] = "DataSource has been closed."; + t[209] = "DataSource zostało zamknięte."; + t[212] = "Method {0} is not yet implemented."; + t[213] = "Metoda {0}nie jest jeszcze obsługiwana."; + t[216] = "Hint: {0}"; + t[217] = "Wskazówka: {0}"; + t[218] = "No value specified for parameter {0}."; + t[219] = "Nie podano wartości dla parametru {0}."; + t[222] = "Position: {0}"; + t[223] = "Pozycja: {0}"; + t[226] = "Cannot call deleteRow() when on the insert row."; + t[227] = "Nie można wywołać deleteRow() na wstawianym rekordzie."; + t[240] = "Conversion of money failed."; + t[241] = "Konwersja typu money nie powiodła się."; + t[244] = "Internal Position: {0}"; + t[245] = "Wewnętrzna Pozycja: {0}"; + t[248] = "Connection has been closed."; + t[249] = "Połączenie zostało zamknięte."; + t[254] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; + t[255] = "Aktualna pozycja przed początkiem ResultSet. 
Nie można wywołać deleteRow()."; + t[258] = "Failed to create object for: {0}."; + t[259] = "Nie powiodło się utworzenie obiektu dla: {0}."; + t[262] = "Fetch size must be a value greater to or equal to 0."; + t[263] = "Rozmiar pobierania musi być wartością dodatnią lub 0."; + t[270] = "No results were returned by the query."; + t[271] = "Zapytanie nie zwróciło żadnych wyników."; + t[276] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[277] = "Uwierzytelnienie typu {0} nie jest obsługiwane. Upewnij się, że skonfigurowałeś plik pg_hba.conf tak, że zawiera on adres IP lub podsieć klienta oraz że użyta metoda uwierzytelnienia jest wspierana przez ten sterownik."; + t[280] = "Conversion to type {0} failed: {1}."; + t[281] = "Konwersja do typu {0} nie powiodła się: {1}."; + t[282] = "A result was returned when none was expected."; + t[283] = "Zwrócono wynik zapytania, choć nie był on oczekiwany."; + t[292] = "Transaction isolation level {0} not supported."; + t[293] = "Poziom izolacji transakcji {0} nie jest obsługiwany."; + t[306] = "ResultSet not positioned properly, perhaps you need to call next."; + t[307] = "Zła pozycja w ResultSet, może musisz wywołać next."; + t[308] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[309] = "Lokalizacja: Plik: {0}, Procedura: {1}, Linia: {2}"; + t[314] = "An unexpected result was returned by a query."; + t[315] = "Zapytanie zwróciło nieoczekiwany wynik."; + t[316] = "The column index is out of range: {0}, number of columns: {1}."; + t[317] = "Indeks kolumny jest poza zakresem: {0}, liczba kolumn: {1}."; + t[318] = "Expected command status BEGIN, got {0}."; + t[319] = "Spodziewano się statusu komendy BEGIN, otrzymano {0}."; + t[320] = "The fastpath function {0} is unknown."; + t[321] = "Funkcja fastpath {0} jest nieznana."; + t[324] = "The server 
requested password-based authentication, but no password was provided."; + t[325] = "Serwer zażądał uwierzytelnienia opartego na haśle, ale żadne hasło nie zostało dostarczone."; + t[332] = "The array index is out of range: {0}, number of elements: {1}."; + t[333] = "Indeks tablicy jest poza zakresem: {0}, liczba elementów: {1}."; + t[338] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[339] = "Coś niezwykłego spowodowało pad sterownika. Proszę, zgłoś ten wyjątek."; + t[342] = "Zero bytes may not occur in string parameters."; + t[343] = "Zerowe bajty nie mogą pojawiać się w parametrach typu łańcuch znakowy."; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 173) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 171) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 346) + idx -= 346; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return new Enumeration<>() { + private int idx = 0; + { while (idx < 346 && table[idx] == null) idx += 2; } + + @Override + public boolean hasMoreElements () { + return (idx < 346); + } + + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 346 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_pt_BR.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_pt_BR.java new file mode 100644 index 0000000..1ee4680 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_pt_BR.java @@ -0,0 +1,399 @@ +package 
org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_pt_BR extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[794]; + t[0] = ""; + t[1] = "Project-Id-Version: PostgreSQL 8.4\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-31 20:48-0300\nLast-Translator: Euler Taveira de Oliveira \nLanguage-Team: Brazilian Portuguese \nLanguage: pt_BR\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n"; + t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[3] = "Não está implementado: efetivação da segunda fase deve ser executada utilizado uma conexão ociosa. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[4] = "DataSource has been closed."; + t[5] = "DataSource foi fechado."; + t[8] = "Invalid flags {0}"; + t[9] = "Marcadores={0} inválidos"; + t[18] = "Where: {0}"; + t[19] = "Onde: {0}"; + t[24] = "Unknown XML Source class: {0}"; + t[25] = "Classe XML Source desconhecida: {0}"; + t[26] = "The connection attempt failed."; + t[27] = "A tentativa de conexão falhou."; + t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[29] = "Posicionado depois do fim do ResultSet. 
Você não pode chamar deleteRow() aqui."; + t[32] = "Can''t use query methods that take a query string on a PreparedStatement."; + t[33] = "Não pode utilizar métodos de consulta que pegam uma consulta de um comando preparado."; + t[36] = "Multiple ResultSets were returned by the query."; + t[37] = "ResultSets múltiplos foram retornados pela consulta."; + t[50] = "Too many update results were returned."; + t[51] = "Muitos resultados de atualização foram retornados."; + t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; + t[59] = "Sequência UTF-8 ilegal: byte inicial é {0}: {1}"; + t[66] = "The column name {0} was not found in this ResultSet."; + t[67] = "A nome da coluna {0} não foi encontrado neste ResultSet."; + t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[71] = "Chamada ao Fastpath {0} - Nenhum resultado foi retornado e nós esperávamos um inteiro."; + t[74] = "Protocol error. Session setup failed."; + t[75] = "Erro de Protocolo. Configuração da sessão falhou."; + t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[77] = "Uma função foi declarada mas nenhuma chamada a registerOutParameter (1, ) foi feita."; + t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[79] = "ResultSets com CONCUR_READ_ONLY concorrentes não podem ser atualizados."; + t[90] = "LOB positioning offsets start at 1."; + t[91] = "Deslocamentos da posição de LOB começam em 1."; + t[92] = "Internal Position: {0}"; + t[93] = "Posição Interna: {0}"; + t[96] = "free() was called on this LOB previously"; + t[97] = "free() já foi chamado neste LOB"; + t[100] = "Cannot change transaction read-only property in the middle of a transaction."; + t[101] = "Não pode mudar propriedade somente-leitura da transação no meio de uma transação."; + t[102] = "The JVM claims not to support the {0} encoding."; + t[103] = "A JVM reclamou que não suporta a codificação {0}."; + t[108] = "{0} function 
doesn''t take any argument."; + t[109] = "função {0} não recebe nenhum argumento."; + t[112] = "xid must not be null"; + t[113] = "xid não deve ser nulo"; + t[114] = "Connection has been closed."; + t[115] = "Conexão foi fechada."; + t[122] = "The server does not support SSL."; + t[123] = "O servidor não suporta SSL."; + t[124] = "Custom type maps are not supported."; + t[125] = "Mapeamento de tipos personalizados não são suportados."; + t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; + t[141] = "Sequência UTF-8 ilegal: byte {0} da sequência de bytes {1} não é 10xxxxxx: {2}"; + t[148] = "Hint: {0}"; + t[149] = "Dica: {0}"; + t[152] = "Unable to find name datatype in the system catalogs."; + t[153] = "Não foi possível encontrar tipo de dado name nos catálogos do sistema."; + t[156] = "Unsupported Types value: {0}"; + t[157] = "Valor de Types não é suportado: {0}"; + t[158] = "Unknown type {0}."; + t[159] = "Tipo desconhecido {0}."; + t[166] = "{0} function takes two and only two arguments."; + t[167] = "função {0} recebe somente dois argumentos."; + t[170] = "Finalizing a Connection that was never closed:"; + t[171] = "Fechando uma Conexão que não foi fechada:"; + t[180] = "The maximum field size must be a value greater than or equal to 0."; + t[181] = "O tamanho máximo de um campo deve ser um valor maior ou igual a 0."; + t[186] = "PostgreSQL LOBs can only index to: {0}"; + t[187] = "LOBs do PostgreSQL só podem indexar até: {0}"; + t[194] = "Method {0} is not yet implemented."; + t[195] = "Método {0} ainda não foi implementado."; + t[198] = "Error loading default settings from driverconfig.properties"; + t[199] = "Erro ao carregar configurações padrão do driverconfig.properties"; + t[200] = "Results cannot be retrieved from a CallableStatement before it is executed."; + t[201] = "Resultados não podem ser recuperados de uma função antes dela ser executada."; + t[202] = "Large Objects may not be used in auto-commit mode."; + 
t[203] = "Objetos Grandes não podem ser usados no modo de efetivação automática (auto-commit)."; + t[208] = "Expected command status BEGIN, got {0}."; + t[209] = "Status do comando BEGIN esperado, recebeu {0}."; + t[218] = "Invalid fetch direction constant: {0}."; + t[219] = "Constante de direção da busca é inválida: {0}."; + t[222] = "{0} function takes three and only three arguments."; + t[223] = "função {0} recebe três e somente três argumentos."; + t[226] = "This SQLXML object has already been freed."; + t[227] = "Este objeto SQLXML já foi liberado."; + t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; + t[229] = "Não pode atualizar o ResultSet porque ele está antes do início ou depois do fim dos resultados."; + t[230] = "The JVM claims not to support the encoding: {0}"; + t[231] = "A JVM reclamou que não suporta a codificação: {0}"; + t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; + t[233] = "Parâmetro do tipo {0} foi registrado, mas uma chamada a get{1} (tiposql={2}) foi feita."; + t[240] = "Cannot establish a savepoint in auto-commit mode."; + t[241] = "Não pode estabelecer um savepoint no modo de efetivação automática (auto-commit)."; + t[242] = "Cannot retrieve the id of a named savepoint."; + t[243] = "Não pode recuperar o id de um savepoint com nome."; + t[244] = "The column index is out of range: {0}, number of columns: {1}."; + t[245] = "O índice da coluna está fora do intervalo: {0}, número de colunas: {1}."; + t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[251] = "Alguma coisa não usual ocorreu para causar a falha do driver. 
Por favor reporte esta exceção."; + t[260] = "Cannot cast an instance of {0} to type {1}"; + t[261] = "Não pode converter uma instância de {0} para tipo {1}"; + t[264] = "Unknown Types value."; + t[265] = "Valor de Types desconhecido."; + t[266] = "Invalid stream length {0}."; + t[267] = "Tamanho de dado {0} é inválido."; + t[272] = "Cannot retrieve the name of an unnamed savepoint."; + t[273] = "Não pode recuperar o nome de um savepoint sem nome."; + t[274] = "Unable to translate data into the desired encoding."; + t[275] = "Não foi possível traduzir dado para codificação desejada."; + t[276] = "Expected an EOF from server, got: {0}"; + t[277] = "Esperado um EOF do servidor, recebido: {0}"; + t[278] = "Bad value for type {0} : {1}"; + t[279] = "Valor inválido para tipo {0} : {1}"; + t[280] = "The server requested password-based authentication, but no password was provided."; + t[281] = "O servidor pediu autenticação baseada em senha, mas nenhuma senha foi fornecida."; + t[286] = "Unable to create SAXResult for SQLXML."; + t[287] = "Não foi possível criar SAXResult para SQLXML."; + t[292] = "Error during recover"; + t[293] = "Erro durante recuperação"; + t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[295] = "tentou executar end sem a chamada ao start correspondente. 
state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[296] = "Truncation of large objects is only implemented in 8.3 and later servers."; + t[297] = "Truncar objetos grandes só é implementado por servidores 8.3 ou superiores."; + t[298] = "This PooledConnection has already been closed."; + t[299] = "Este PooledConnection já foi fechado."; + t[302] = "ClientInfo property not supported."; + t[303] = "propriedade ClientInfo não é suportada."; + t[306] = "Fetch size must be a value greater to or equal to 0."; + t[307] = "Tamanho da busca deve ser um valor maior ou igual a 0."; + t[312] = "A connection could not be made using the requested protocol {0}."; + t[313] = "A conexão não pode ser feita usando protocolo informado {0}."; + t[318] = "Unknown XML Result class: {0}"; + t[319] = "Classe XML Result desconhecida: {0}"; + t[322] = "There are no rows in this ResultSet."; + t[323] = "Não há nenhum registro neste ResultSet."; + t[324] = "Unexpected command status: {0}."; + t[325] = "Status do comando inesperado: {0}."; + t[330] = "Heuristic commit/rollback not supported. forget xid={0}"; + t[331] = "Efetivação/Cancelamento heurístico não é suportado. forget xid={0}"; + t[334] = "Not on the insert row."; + t[335] = "Não está inserindo um registro."; + t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further."; + t[337] = "Este objeto SQLXML já foi inicializado, então você não pode manipulá-lo depois."; + t[344] = "Server SQLState: {0}"; + t[345] = "SQLState: {0}"; + t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; + t[349] = "O parâmetro do servidor standard_conforming_strings foi definido como {0}. 
O driver JDBC espera que seja on ou off."; + t[360] = "The driver currently does not support COPY operations."; + t[361] = "O driver atualmente não suporta operações COPY."; + t[364] = "The array index is out of range: {0}, number of elements: {1}."; + t[365] = "O índice da matriz está fora do intervalo: {0}, número de elementos: {1}."; + t[374] = "suspend/resume not implemented"; + t[375] = "suspender/recomeçar não está implementado"; + t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; + t[379] = "Não está implementado: efetivada da primeira fase deve ser executada utilizando a mesma conexão que foi utilizada para iniciá-la"; + t[380] = "Error during one-phase commit. commit xid={0}"; + t[381] = "Erro durante efetivação de uma fase. commit xid={0}"; + t[398] = "Cannot call cancelRowUpdates() when on the insert row."; + t[399] = "Não pode chamar cancelRowUpdates() quando estiver inserindo registro."; + t[400] = "Cannot reference a savepoint after it has been released."; + t[401] = "Não pode referenciar um savepoint após ele ser descartado."; + t[402] = "You must specify at least one column value to insert a row."; + t[403] = "Você deve especificar pelo menos uma coluna para inserir um registro."; + t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; + t[405] = "Não foi possível determinar um valor para MaxIndexKeys por causa de falta de dados no catálogo do sistema."; + t[410] = "commit called before end. commit xid={0}, state={1}"; + t[411] = "commit executado antes do end. 
commit xid={0}, state={1}"; + t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}"; + t[413] = "Sequência UTF-8 ilegal: valor final está fora do intervalo: {0}"; + t[414] = "{0} function takes two or three arguments."; + t[415] = "função {0} recebe dois ou três argumentos."; + t[428] = "Unable to convert DOMResult SQLXML data to a string."; + t[429] = "Não foi possível converter dado SQLXML do DOMResult para uma cadeia de caracteres."; + t[434] = "Unable to decode xml data."; + t[435] = "Não foi possível decodificar dado xml."; + t[440] = "Unexpected error writing large object to database."; + t[441] = "Erro inesperado ao escrever objeto grande no banco de dados."; + t[442] = "Zero bytes may not occur in string parameters."; + t[443] = "Zero bytes não podem ocorrer em parâmetros de cadeia de caracteres."; + t[444] = "A result was returned when none was expected."; + t[445] = "Um resultado foi retornado quando nenhum era esperado."; + t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[451] = "ResultSet não é atualizável. A consulta que gerou esse conjunto de resultados deve selecionar somente uma tabela, e deve selecionar todas as chaves primárias daquela tabela. Veja a especificação na API do JDBC 2.1, seção 5.6 para obter mais detalhes."; + t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; + t[455] = "Tamanho de mensagem de ligação {0} é muito longo. 
Isso pode ser causado por especificações de tamanho incorretas ou muito grandes nos parâmetros do InputStream."; + t[460] = "Statement has been closed."; + t[461] = "Comando foi fechado."; + t[462] = "No value specified for parameter {0}."; + t[463] = "Nenhum valor especificado para parâmetro {0}."; + t[468] = "The array index is out of range: {0}"; + t[469] = "O índice da matriz está fora do intervalo: {0}"; + t[474] = "Unable to bind parameter values for statement."; + t[475] = "Não foi possível ligar valores de parâmetro ao comando."; + t[476] = "Can''t refresh the insert row."; + t[477] = "Não pode renovar um registro inserido."; + t[480] = "No primary key found for table {0}."; + t[481] = "Nenhuma chave primária foi encontrada para tabela {0}."; + t[482] = "Cannot change transaction isolation level in the middle of a transaction."; + t[483] = "Não pode mudar nível de isolamento da transação no meio de uma transação."; + t[498] = "Provided InputStream failed."; + t[499] = "InputStream fornecido falhou."; + t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[501] = "O índice de parâmetro está fora do intervalo: {0}, número de parâmetros: {1}."; + t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; + t[503] = "O parâmetro do servidor DateStyle foi alterado para {0}. O driver JDBC requer que o DateStyle começe com ISO para operação normal."; + t[508] = "Connection attempt timed out."; + t[509] = "Tentativa de conexão falhou."; + t[512] = "Internal Query: {0}"; + t[513] = "Consulta Interna: {0}"; + t[514] = "Error preparing transaction. prepare xid={0}"; + t[515] = "Erro ao preparar transação. prepare xid={0}"; + t[518] = "The authentication type {0} is not supported. 
Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[519] = "O tipo de autenticação {0} não é suportado. Verifique se você configurou o arquivo pg_hba.conf incluindo a subrede ou endereço IP do cliente, e se está utilizando o esquema de autenticação suportado pelo driver."; + t[526] = "Interval {0} not yet implemented"; + t[527] = "Intervalo {0} ainda não foi implementado"; + t[532] = "Conversion of interval failed"; + t[533] = "Conversão de interval falhou"; + t[540] = "Query timeout must be a value greater than or equals to 0."; + t[541] = "Tempo de espera da consulta deve ser um valor maior ou igual a 0."; + t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[543] = "Conexão foi fechada automaticamente porque uma nova conexão foi aberta pelo mesmo PooledConnection ou o PooledConnection foi fechado."; + t[544] = "ResultSet not positioned properly, perhaps you need to call next."; + t[545] = "ResultSet não está posicionado corretamente, talvez você precise chamar next."; + t[546] = "Prepare called before end. prepare xid={0}, state={1}"; + t[547] = "Prepare executado antes do end. prepare xid={0}, state={1}"; + t[548] = "Invalid UUID data."; + t[549] = "dado UUID é inválido."; + t[550] = "This statement has been closed."; + t[551] = "Este comando foi fechado."; + t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; + t[553] = "Não pode inferir um tipo SQL a ser usado para uma instância de {0}. 
Use setObject() com um valor de Types explícito para especificar o tipo a ser usado."; + t[554] = "Cannot call updateRow() when on the insert row."; + t[555] = "Não pode chamar updateRow() quando estiver inserindo registro."; + t[562] = "Detail: {0}"; + t[563] = "Detalhe: {0}"; + t[566] = "Cannot call deleteRow() when on the insert row."; + t[567] = "Não pode chamar deleteRow() quando estiver inserindo registro."; + t[568] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; + t[569] = "Posicionado antes do início do ResultSet. Você não pode chamar deleteRow() aqui."; + t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; + t[577] = "Sequência UTF-8 ilegal: valor final é um valor suplementar: {0}"; + t[578] = "Unknown Response Type {0}."; + t[579] = "Tipo de Resposta Desconhecido {0}."; + t[582] = "Unsupported value for stringtype parameter: {0}"; + t[583] = "Valor do parâmetro stringtype não é suportado: {0}"; + t[584] = "Conversion to type {0} failed: {1}."; + t[585] = "Conversão para tipo {0} falhou: {1}."; + t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it."; + t[587] = "Este objeto SQLXML não foi inicializado, então você não pode recuperar dados dele."; + t[600] = "Unable to load the class {0} responsible for the datatype {1}"; + t[601] = "Não foi possível carregar a classe {0} responsável pelo tipo de dado {1}"; + t[604] = "The fastpath function {0} is unknown."; + t[605] = "A função do fastpath {0} é desconhecida."; + t[608] = "Malformed function or procedure escape syntax at offset {0}."; + t[609] = "Sintaxe de escape mal formada da função ou do procedimento no deslocamento {0}."; + t[612] = "Provided Reader failed."; + t[613] = "Reader fornecido falhou."; + t[614] = "Maximum number of rows must be a value grater than or equal to 0."; + t[615] = "Número máximo de registros deve ser um valor maior ou igual a 0."; + t[616] = "Failed to create object for: 
{0}."; + t[617] = "Falhou ao criar objeto para: {0}."; + t[620] = "Conversion of money failed."; + t[621] = "Conversão de money falhou."; + t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[623] = "Fim de entrada prematuro, eram esperados {0} bytes, mas somente {1} foram lidos."; + t[626] = "An unexpected result was returned by a query."; + t[627] = "Um resultado inesperado foi retornado pela consulta."; + t[644] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}"; + t[645] = "Intercalação de transação não está implementado. xid={0}, currentXid={1}, state={2}, flags={3}"; + t[646] = "An error occurred while setting up the SSL connection."; + t[647] = "Um erro ocorreu ao estabelecer uma conexão SSL."; + t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; + t[655] = "Sequência UTF-8 ilegal: {0} bytes utilizados para codificar um valor de {1} bytes: {2}"; + t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; + t[657] = "Não está implementado: Prepare deve ser executado utilizando a mesma conexão que iniciou a transação. 
currentXid={0}, prepare xid={1}"; + t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; + t[659] = "A classe SSLSocketFactory forneceu {0} que não pôde ser instanciado."; + t[662] = "Failed to convert binary xml data to encoding: {0}."; + t[663] = "Falhou ao converter dados xml binários para codificação: {0}."; + t[670] = "Position: {0}"; + t[671] = "Posição: {0}"; + t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[677] = "Local: Arquivo: {0}, Rotina: {1}, Linha: {2}"; + t[684] = "Cannot tell if path is open or closed: {0}."; + t[685] = "Não pode dizer se caminho está aberto ou fechado: {0}."; + t[690] = "Unable to create StAXResult for SQLXML"; + t[691] = "Não foi possível criar StAXResult para SQLXML"; + t[700] = "Cannot convert an instance of {0} to type {1}"; + t[701] = "Não pode converter uma instância de {0} para tipo {1}"; + t[710] = "{0} function takes four and only four argument."; + t[711] = "função {0} recebe somente quatro argumentos."; + t[716] = "Error disabling autocommit"; + t[717] = "Erro ao desabilitar autocommit"; + t[718] = "Interrupted while attempting to connect."; + t[719] = "Interrompido ao tentar se conectar."; + t[722] = "Your security policy has prevented the connection from being attempted. You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; + t[723] = "Sua política de segurança impediu que a conexão pudesse ser estabelecida. 
Você provavelmente precisa conceder permissão em java.net.SocketPermission para a máquina e a porta do servidor de banco de dados que você deseja se conectar."; + t[734] = "No function outputs were registered."; + t[735] = "Nenhum saída de função foi registrada."; + t[736] = "{0} function takes one and only one argument."; + t[737] = "função {0} recebe somente um argumento."; + t[744] = "This ResultSet is closed."; + t[745] = "Este ResultSet está fechado."; + t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[747] = "Caracter inválido foi encontrado. Isso é mais comumente causado por dado armazenado que contém caracteres que são inválidos para a codificação que foi criado o banco de dados. O exemplo mais comum disso é armazenar dados de 8 bits em um banco de dados SQL_ASCII."; + t[752] = "GSS Authentication failed"; + t[753] = "Autenticação GSS falhou"; + t[754] = "Ran out of memory retrieving query results."; + t[755] = "Memória insuficiente ao recuperar resultados da consulta."; + t[756] = "Returning autogenerated keys is not supported."; + t[757] = "Retorno de chaves geradas automaticamente não é suportado."; + t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; + t[761] = "Operação requer um ResultSet rolável, mas este ResultSet é FORWARD_ONLY (somente para frente)."; + t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; + t[763] = "Uma função foi executada e o parâmetro de retorno {0} era do tipo {1} contudo tipo {2} foi registrado."; + t[764] = "Unable to find server array type for provided name {0}."; + t[765] = "Não foi possível encontrar tipo matriz para nome fornecido {0}."; + t[768] = "Unknown ResultSet holdability setting: {0}."; + 
t[769] = "Definição de durabilidade do ResultSet desconhecida: {0}."; + t[772] = "Transaction isolation level {0} not supported."; + t[773] = "Nível de isolamento da transação {0} não é suportado."; + t[774] = "Zero bytes may not occur in identifiers."; + t[775] = "Zero bytes não podem ocorrer em identificadores."; + t[776] = "No results were returned by the query."; + t[777] = "Nenhum resultado foi retornado pela consulta."; + t[778] = "A CallableStatement was executed with nothing returned."; + t[779] = "Uma função foi executada e nada foi retornado."; + t[780] = "wasNull cannot be call before fetching a result."; + t[781] = "wasNull não pode ser chamado antes de obter um resultado."; + t[784] = "Returning autogenerated keys by column index is not supported."; + t[785] = "Retorno de chaves geradas automaticamente por índice de coluna não é suportado."; + t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; + t[787] = "Este comando não declara um parâmetro de saída. Utilize '{' ?= chamada ... 
'}' para declarar um)"; + t[788] = "Can''t use relative move methods while on the insert row."; + t[789] = "Não pode utilizar métodos de movimentação relativos enquanto estiver inserindo registro."; + t[790] = "A CallableStatement was executed with an invalid number of parameters"; + t[791] = "Uma função foi executada com um número inválido de parâmetros"; + t[792] = "Connection is busy with another transaction"; + t[793] = "Conexão está ocupada com outra transação"; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 397) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 395) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 794) + idx -= 794; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return new Enumeration<>() { + private int idx = 0; + { while (idx < 794 && table[idx] == null) idx += 2; } + public boolean hasMoreElements () { + return (idx < 794); + } + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 794 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_ru.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_ru.java new file mode 100644 index 0000000..f70975a --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_ru.java @@ -0,0 +1,271 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_ru extends ResourceBundle { + private static final String[] table; + 
static { + String[] t = new String[538]; + t[0] = ""; + t[1] = "Project-Id-Version: JDBC Driver for PostgreSQL 8.x.x\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2016-01-07 15:09+0300\nLast-Translator: Vladimir Sitnikov \nLanguage-Team: pgsql-rus \nLanguage: ru_RU\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: Poedit 1.5.7\n"; + t[4] = "Server SQLState: {0}"; + t[5] = "SQLState сервера: {0}"; + t[14] = "suspend/resume not implemented"; + t[15] = "Операции XA suspend/resume не реализованы"; + t[18] = "The array index is out of range: {0}"; + t[19] = "Индекс массива вне диапазона: {0}"; + t[28] = "This PooledConnection has already been closed."; + t[29] = "Это соединение уже было закрыто"; + t[30] = "Malformed function or procedure escape syntax at offset {0}."; + t[31] = "Невозможно разобрать SQL команду. Ошибка на позиции {0}"; + t[32] = "The column index is out of range: {0}, number of columns: {1}."; + t[33] = "Индекс колонки вне диапазона: {0}. Допустимые значения: 1..{1}"; + t[34] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[35] = "Раннее завершение входного потока, ожидалось байт: {0}, но считано только {1}"; + t[44] = "An I/O error occurred while sending to the backend."; + t[45] = "Ошибка ввода/вывода при отправке бэкенду"; + t[46] = "Prepare called before end. prepare xid={0}, state={1}"; + t[47] = "Вызов prepare должен происходить только после вызова end. 
prepare xid={0}, state={1}"; + t[48] = "Transaction isolation level {0} not supported."; + t[49] = "Уровень изоляции транзакций {0} не поддерживается."; + t[50] = "Could not find a server with specified targetServerType: {0}"; + t[51] = "Не удалось найти сервер с указанным значением targetServerType: {0}"; + t[52] = "Conversion of interval failed"; + t[53] = "Невозможно обработать PGInterval: {0}"; + t[54] = "The array index is out of range: {0}, number of elements: {1}."; + t[55] = "Индекс массива вне диапазона: {0}. Допустимые значения: 1..{1}"; + t[62] = "Unsupported value for stringtype parameter: {0}"; + t[63] = "Неподдерживаемое значение для параметра stringtype: {0}"; + t[72] = "Invalid stream length {0}."; + t[73] = "Неверная длина потока {0}."; + t[80] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}"; + t[81] = "Ошибка при откате подготовленной транзакции. rollback xid={0}, preparedXid={1}, currentXid={2}"; + t[84] = "The driver currently does not support COPY operations."; + t[85] = "Драйвер в данный момент не поддерживате операции COPY."; + t[94] = "DataSource has been closed."; + t[95] = "DataSource закрыт."; + t[96] = "Cannot write to copy a byte of value {0}"; + t[97] = "Значение byte должно быть в диапазоне 0..255, переданное значение: {0}"; + t[98] = "Fastpath call {0} - No result was returned and we expected a long."; + t[99] = "Вызов fastpath {0} ничего не вернул, а ожидалось long"; + t[100] = "Connection attempt timed out."; + t[101] = "Закончилось время ожидания"; + t[102] = "Detail: {0}"; + t[103] = "Подробности: {0}"; + t[104] = "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections."; + t[105] = "Подсоединение по адресу {0} отклонено. 
Проверьте что хост и порт указаны правильно и что postmaster принимает TCP/IP-подсоединения."; + t[108] = "This statement has been closed."; + t[109] = "Этот statement был закрыт."; + t[110] = "Error committing prepared transaction. commit xid={0}, preparedXid={1}, currentXid={2}"; + t[111] = "Ошибка при фиксации подготовленной транзакции. commit xid={0}, preparedXid={1}, currentXid={2}"; + t[114] = "Position: {0}"; + t[115] = "Позиция: {0}"; + t[116] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; + t[117] = "В каком соединении транзакцию начинали, в таком и вызывайте prepare. По-другому не работает. currentXid={0}, prepare xid={1}"; + t[118] = "The connection attempt failed."; + t[119] = "Ошибка при попытке подсоединения."; + t[120] = "Unexpected copydata from server for {0}"; + t[121] = "Неожиданный статус команды COPY: {0}"; + t[124] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; + t[125] = "Неверная последовательность UTF-8: начальное значеие {0}: {1}"; + t[128] = "This ResultSet is closed."; + t[129] = "ResultSet закрыт."; + t[142] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[143] = "Духфазная фиксация работает только, если соединение неактивно (state=idle и транзакцция отсутствует). commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[146] = "Too many update results were returned."; + t[147] = "Возвращено слишком много результатов обновления."; + t[148] = "An error occurred reading the certificate"; + t[149] = "Ошибка при чтении сертификата"; + t[160] = "Unknown type {0}."; + t[161] = "Неизвестный тип {0}."; + t[172] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; + t[173] = "Неверная последовательность UTF-8: {0} bytes used to encode a {1} byte value: {2}"; + t[182] = "Protocol error. 
Session setup failed."; + t[183] = "Ошибка протокола. Установление сессии не удалось."; + t[184] = "Connection has been closed."; + t[185] = "Это соединение уже было закрыто"; + t[188] = "This copy stream is closed."; + t[189] = "Поток уже был закрыт"; + t[196] = "Statement has been closed."; + t[197] = "Statement закрыт."; + t[200] = "Failed to set ClientInfo property: {0}"; + t[201] = "Невозможно установить свойство ClientInfo: {0}"; + t[204] = "Where: {0}"; + t[205] = "Где: {0}"; + t[212] = "Expected command status BEGIN, got {0}."; + t[213] = "Ожидался статус команды BEGIN, но получен {0}"; + t[216] = "The HostnameVerifier class provided {0} could not be instantiated."; + t[217] = "Невозможно создать HostnameVerifier с помощью указанного класса {0}"; + t[220] = "Unsupported properties: {0}"; + t[221] = "Указанные свойства не поддерживаются: {0}"; + t[222] = "Failed to create object for: {0}."; + t[223] = "Ошибка при создании объект для: {0}."; + t[230] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[231] = "Случилось что-то необычное, что заставило драйвер произвести ошибку. Пожалуйста сообщите это исключение."; + t[236] = "Finalizing a Connection that was never closed:"; + t[237] = "Соединение «утекло». Проверьте, что в коде приложения вызывается connection.close(). Далее следует стектрейс того места, где создавалось проблемное соединение"; + t[238] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[239] = "Найдены неверные символьные данные. Причиной этого скорее всего являются хранимые данные содержащие символы не соответствующие набору символов базы. 
Типичным примером этого является хранение 8-битных данных в базе SQL_ASCII."; + t[252] = "Unable to create SAXResult for SQLXML."; + t[253] = "Невозможно создать SAXResult для SQLXML"; + t[260] = "The SSLSocketFactory class provided {0} could not be instantiated."; + t[261] = "Невозможно создать SSLSocketFactory с помощью указанного класса {0}"; + t[266] = "No IOException expected from StringBuffer or StringBuilder"; + t[267] = "Что-то пошло не так: из классов StringBuffer и StringBuilder исключений не ожидалось"; + t[280] = "Interrupted while waiting to obtain lock on database connection"; + t[281] = "Ожидание COPY блокировки прервано получением interrupt"; + t[284] = "Zero bytes may not occur in identifiers."; + t[285] = "Символ с кодом 0 в идентификаторах не допустим"; + t[286] = "There are no rows in this ResultSet."; + t[287] = "Невозможно удалить строку, т.к. в текущем ResultSet’е строк вообще нет"; + t[288] = "Expected an EOF from server, got: {0}"; + t[289] = "Неожиданный ответ от сервера. Ожидалось окончание потока, получен байт {0}"; + t[304] = "No results were returned by the query."; + t[305] = "Запрос не вернул результатов."; + t[306] = "Invalid targetServerType value: {0}"; + t[307] = "Неверное значение targetServerType: {0}"; + t[310] = "Requested CopyOut but got {0}"; + t[311] = "Ожидался ответ CopyOut, а получен {0}"; + t[318] = "Invalid flags {0}"; + t[319] = "Неверные флаги {0}"; + t[324] = "Unsupported Types value: {0}"; + t[325] = "Неподдерживаемый java.sql.Types тип: {0}"; + t[326] = "Invalid timeout ({0}<0)."; + t[327] = "Значение таймаута должно быть неотрицательным: {0}"; + t[328] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[329] = "Невозможно завершить транзакцию, т.к. транзакция не была начата. 
state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[350] = "A result was returned when none was expected."; + t[351] = "Результат возвращён когда его не ожидалось."; + t[352] = "Unsupported binary encoding of {0}."; + t[353] = "Бинарная передача не поддерживается для типа {0}"; + t[354] = "Zero bytes may not occur in string parameters."; + t[355] = "Байт с кодом 0 не может втречаться в строковых параметрах"; + t[360] = "Requested CopyIn but got {0}"; + t[361] = "Ожидался ответ CopyIn, а получен {0}"; + t[364] = "Error during one-phase commit. commit xid={0}"; + t[365] = "Ошибка при однофазной фиксации транзакции. commit xid={0}"; + t[372] = "Unable to bind parameter values for statement."; + t[373] = "Не в состоянии ассоциировать значения параметров для команды (PGBindException)"; + t[374] = "Interrupted while attempting to connect."; + t[375] = "Подключение прервано получаением interrupt"; + t[380] = "An unexpected result was returned by a query."; + t[381] = "Запрос вернул неожиданный результат."; + t[384] = "Method {0} is not yet implemented."; + t[385] = "Метод {0} ещё не реализован"; + t[386] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[387] = "Местонахождение: Файл {0}, Процедура: {1}, Строка: {2}"; + t[388] = "The server does not support SSL."; + t[389] = "Сервер не поддерживает SSL."; + t[392] = "The password callback class provided {0} could not be instantiated."; + t[393] = "Невозможно создать password callback с помощью указанного класса {0}"; + t[396] = "Unknown Types value."; + t[397] = "Неизвестное значение Types."; + t[400] = "Unknown Response Type {0}."; + t[401] = "Неизвестный тип ответа {0}."; + t[406] = "commit called before end. commit xid={0}, state={1}"; + t[407] = "Операция commit должна вызываться только после операции end. 
commit xid={0}, state={1}"; + t[420] = "An error occurred while setting up the SSL connection."; + t[421] = "Ошибка при установке SSL-подсоединения."; + t[424] = "Invalid sslmode value: {0}"; + t[425] = "Неверное значение sslmode: {0}"; + t[436] = "Copying from database failed: {0}"; + t[437] = "Ошибка при обработке ответа команды COPY: {0}"; + t[438] = "Illegal UTF-8 sequence: final value is out of range: {0}"; + t[439] = "Неверная последовательность UTF-8: финальное значение вне области допустимых: {0}"; + t[442] = "Error preparing transaction. prepare xid={0}"; + t[443] = "Ошибка при выполнении prepare для транзакции {0}"; + t[450] = "A connection could not be made using the requested protocol {0}."; + t[451] = "Невозможно установить соединение с помощью протокола {0}"; + t[460] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}"; + t[461] = "Чередование транзакций в одном соединении не поддерживается. Предыдущую транзакцию нужно завершить xid={0}, currentXid={1}, state={2}, flags={3}"; + t[462] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; + t[463] = "Неверная последовательность UTF-8: финальное значение является surrogate значением: {0}"; + t[466] = "The column name {0} was not found in this ResultSet."; + t[467] = "Колонки {0} не найдено в этом ResultSet’’е."; + t[468] = "oid type {0} not known and not a number"; + t[469] = "Oid {0} не известен или не является числом"; + t[476] = "Hint: {0}"; + t[477] = "Подсказка: {0}"; + t[478] = "Unsupported property name: {0}"; + t[479] = "Свойство {0} не поддерживается"; + t[480] = "Ran out of memory retrieving query results."; + t[481] = "Недостаточно памяти для обработки результатов запроса. 
Попробуйте увеличить -Xmx или проверьте размеры обрабатываемых данных"; + t[484] = "Interval {0} not yet implemented"; + t[485] = "Интеврвал {0} ещё не реализован"; + t[486] = "This connection has been closed."; + t[487] = "Соединение уже было закрыто"; + t[488] = "The SocketFactory class provided {0} could not be instantiated."; + t[489] = "Невозможно создать SSLSocketFactory с помощью указанного класса {0}"; + t[490] = "This SQLXML object has already been freed."; + t[491] = "Этот объект SQLXML уже был закрыт"; + t[494] = "Unexpected command status: {0}."; + t[495] = "Неожиданный статус команды: {0}."; + t[502] = "Large Objects may not be used in auto-commit mode."; + t[503] = "Большие объекты не могут использоваться в режиме авто-подтверждения (auto-commit)."; + t[504] = "Conversion of money failed."; + t[505] = "Ошибка при преобразовании типа money."; + t[512] = "No value specified for parameter {0}."; + t[513] = "Не указано значение для параметра {0}."; + t[514] = "The server requested password-based authentication, but no password was provided."; + t[515] = "Сервер запросил парольную аутентификацию, но пароль не был указан."; + t[518] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; + t[519] = "Неверная последовательность UTF-8: байт {0} из {1} не подходит к маске 10xxxxxx: {2}"; + t[522] = "Conversion to type {0} failed: {1}."; + t[523] = "Ошибка при преобразовании к типу {0}: {1}"; + t[528] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[529] = "Тип аутентификации {0} не поддерживается. Проверьте если вы сконфигурировали файл pg_hba.conf чтобы включить IP-адреса клиентов или подсеть. 
Также удостовертесь что он использует схему аутентификации поддерживаемую драйвером."; + t[534] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[535] = "Индекс параметра вне диапазона: {0}. Допустимые значения: 1..{1}"; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 269) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 267) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 538) + idx -= 538; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return new Enumeration<>() { + private int idx = 0; + { while (idx < 538 && table[idx] == null) idx += 2; } + + @Override + public boolean hasMoreElements () { + return (idx < 538); + } + + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 538 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_sr.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_sr.java new file mode 100644 index 0000000..2fcac2e --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_sr.java @@ -0,0 +1,401 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_sr extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[794]; + t[0] = ""; + t[1] = "Project-Id-Version: PostgreSQL 8.1\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-05-26 11:13+0100\nLast-Translator: Bojan Škaljac 
\nLanguage-Team: Srpski \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Serbian\nX-Poedit-Country: YUGOSLAVIA\n"; + t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[3] = "Nije implementirano: Dvofazni commit mora biti izdat uz korištenje besposlene konekcije. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[4] = "DataSource has been closed."; + t[5] = "DataSource je zatvoren."; + t[8] = "Invalid flags {0}"; + t[9] = "Nevažeće zastavice {0}"; + t[18] = "Where: {0}"; + t[19] = "Gde: {0}"; + t[24] = "Unknown XML Source class: {0}"; + t[25] = "Nepoznata XML ulazna klasa: {0}"; + t[26] = "The connection attempt failed."; + t[27] = "Pokušaj konektovanja propao."; + t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[29] = "Trenutna pozicija posle kraja ResultSet-a. Ne možete pozvati deleteRow() na toj poziciji."; + t[32] = "Can''t use query methods that take a query string on a PreparedStatement."; + t[33] = "Ne možete da koristite metode za upit koji uzimaju string iz upita u PreparedStatement-u."; + t[36] = "Multiple ResultSets were returned by the query."; + t[37] = "Višestruki ResultSet-vi su vraćeni od strane upita."; + t[50] = "Too many update results were returned."; + t[51] = "Previše rezultata za ažuriranje je vraćeno."; + t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; + t[59] = "Ilegalna UTF-8 sekvenca: inicijalni bajt je {0}: {1}"; + t[66] = "The column name {0} was not found in this ResultSet."; + t[67] = "Ime kolone {0} nije pronadjeno u ResultSet."; + t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[71] = "Fastpath poziv {0} - Nikakav rezultat nije vraćen a očekivan je integer."; + t[74] = "Protocol error. Session setup failed."; + t[75] = "Greška protokola. 
Zakazivanje sesije propalo."; + t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[77] = "CallableStatement jedeklarisan ali nije bilo poziva registerOutParameter (1, )."; + t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[79] = "ResultSets sa osobinom CONCUR_READ_ONLY ne moeže biti ažuriran."; + t[90] = "LOB positioning offsets start at 1."; + t[91] = "LOB pozicija ofset počinje kod 1."; + t[92] = "Internal Position: {0}"; + t[93] = "Interna pozicija: {0}"; + t[96] = "free() was called on this LOB previously"; + t[97] = "free() je pozvan na ovom LOB-u prethodno"; + t[100] = "Cannot change transaction read-only property in the middle of a transaction."; + t[101] = "Nije moguće izmeniti read-only osobinu transakcije u sred izvršavanja transakcije."; + t[102] = "The JVM claims not to support the {0} encoding."; + t[103] = "JVM tvrdi da ne podržava {0} encoding."; + t[108] = "{0} function doesn''t take any argument."; + t[109] = "Funkcija {0} nema parametara."; + t[112] = "xid must not be null"; + t[113] = "xid ne sme biti null"; + t[114] = "Connection has been closed."; + t[115] = "Konekcija je već zatvorena."; + t[122] = "The server does not support SSL."; + t[123] = "Server ne podržava SSL."; + t[124] = "Custom type maps are not supported."; + t[125] = "Mape sa korisnički definisanim tipovima nisu podržane."; + t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; + t[141] = "Ilegalna UTF-8 sekvenca: bajt {0} od {1} bajtova sekvence nije 10xxxxxx: {2}"; + t[148] = "Hint: {0}"; + t[149] = "Nagovest: {0}"; + t[152] = "Unable to find name datatype in the system catalogs."; + t[153] = "Nije moguće pronaći ime tipa podatka u sistemskom katalogu."; + t[156] = "Unsupported Types value: {0}"; + t[157] = "Za tip nije podržana vrednost: {0}"; + t[158] = "Unknown type {0}."; + t[159] = "Nepoznat tip {0}."; + t[166] = "{0} function takes two and only two arguments."; 
+ t[167] = "Funkcija {0} prima dva i samo dva parametra."; + t[170] = "Finalizing a Connection that was never closed:"; + t[171] = "Dovršavanje konekcije koja nikada nije zatvorena:"; + t[180] = "The maximum field size must be a value greater than or equal to 0."; + t[181] = "Maksimalna vrednost veličine polja mora biti vrednost veća ili jednaka 0."; + t[186] = "PostgreSQL LOBs can only index to: {0}"; + t[187] = "PostgreSQL LOB mogu jedino da označavaju: {0}"; + t[194] = "Method {0} is not yet implemented."; + t[195] = "Metod {0} nije još impelemtiran."; + t[198] = "Error loading default settings from driverconfig.properties"; + t[199] = "Greška u čitanju standardnih podešavanja iz driverconfig.properties"; + t[200] = "Results cannot be retrieved from a CallableStatement before it is executed."; + t[201] = "Razultat nemože da se primi iz CallableStatement pre nego što se on izvrši."; + t[202] = "Large Objects may not be used in auto-commit mode."; + t[203] = "Veliki objekti (Large Object) se nemogu koristiti u auto-commit modu."; + t[208] = "Expected command status BEGIN, got {0}."; + t[209] = "Očekivan status komande je BEGIN, a dobijeno je {0}."; + t[218] = "Invalid fetch direction constant: {0}."; + t[219] = "Pogrešna konstanta za direkciju donošenja: {0}."; + t[222] = "{0} function takes three and only three arguments."; + t[223] = "Funkcija {0} prima tri i samo tri parametra."; + t[226] = "This SQLXML object has already been freed."; + t[227] = "Ovaj SQLXML je već obrisan."; + t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; + t[229] = "Nije moguće ažurirati ResultSet zato što je ili početak ili kraj rezultata."; + t[230] = "The JVM claims not to support the encoding: {0}"; + t[231] = "JVM tvrdi da ne podržava encoding: {0}"; + t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; + t[233] = "Parametar tipa {0} je registrovan,ali poziv za get{1} (sql tip={2}) 
je izvršen."; + t[234] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}"; + t[235] = "Greška prilikom povratka na prethodo pripremljenu transakciju. rollback xid={0}, preparedXid={1}, currentXid={2}"; + t[240] = "Cannot establish a savepoint in auto-commit mode."; + t[241] = "U auto-commit modu nije moguće podešavanje tački snimanja."; + t[242] = "Cannot retrieve the id of a named savepoint."; + t[243] = "Nije moguće primiti id imena tačke snimanja."; + t[244] = "The column index is out of range: {0}, number of columns: {1}."; + t[245] = "Indeks kolone van osega: {0}, broj kolona: {1}."; + t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[251] = "Nešto neobično se dogodilo i drajver je zakazao. Molim prijavite ovaj izuzetak."; + t[260] = "Cannot cast an instance of {0} to type {1}"; + t[261] = "Nije moguće kastovati instancu {0} u tip {1}"; + t[264] = "Unknown Types value."; + t[265] = "Nepoznata vrednost za Types."; + t[266] = "Invalid stream length {0}."; + t[267] = "Nevažeća dužina toka {0}."; + t[272] = "Cannot retrieve the name of an unnamed savepoint."; + t[273] = "Nije moguće izvaditi ime tačke snimanja koja nema ime."; + t[274] = "Unable to translate data into the desired encoding."; + t[275] = "Nije moguće prevesti podatke u odabrani encoding format."; + t[276] = "Expected an EOF from server, got: {0}"; + t[277] = "Očekivan EOF od servera, a dobijeno: {0}"; + t[278] = "Bad value for type {0} : {1}"; + t[279] = "Pogrešna vrednost za tip {0} : {1}"; + t[280] = "The server requested password-based authentication, but no password was provided."; + t[281] = "Server zahteva autentifikaciju baziranu na šifri, ali šifra nije prosleđena."; + t[286] = "Unable to create SAXResult for SQLXML."; + t[287] = "Nije moguće kreirati SAXResult za SQLXML."; + t[292] = "Error during recover"; + t[293] = "Greška prilikom oporavljanja."; + t[294] = "tried to call end without 
corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[295] = "Pokušaj pozivanja kraja pre odgovarajućeg početka. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[296] = "Truncation of large objects is only implemented in 8.3 and later servers."; + t[297] = "Skraćivanje velikih objekata je implementirano samo u 8.3 i novijim serverima."; + t[298] = "This PooledConnection has already been closed."; + t[299] = "PooledConnection je već zatvoren."; + t[302] = "ClientInfo property not supported."; + t[303] = "ClientInfo property nije podržan."; + t[306] = "Fetch size must be a value greater to or equal to 0."; + t[307] = "Doneta veličina mora biti vrednost veća ili jednaka 0."; + t[312] = "A connection could not be made using the requested protocol {0}."; + t[313] = "Konekciju nije moguće kreirati uz pomoć protokola {0}."; + t[318] = "Unknown XML Result class: {0}"; + t[319] = "nepoznata XML klasa rezultata: {0}"; + t[322] = "There are no rows in this ResultSet."; + t[323] = "U ResultSet-u nema redova."; + t[324] = "Unexpected command status: {0}."; + t[325] = "Neočekivan komandni status: {0}."; + t[330] = "Heuristic commit/rollback not supported. forget xid={0}"; + t[331] = "Heuristički commit/rollback nije podržan. forget xid={0}"; + t[334] = "Not on the insert row."; + t[335] = "Nije mod ubacivanja redova."; + t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further."; + t[337] = "SQLXML objekat je već inicijalizovan, tako da ga nije moguće dodatno menjati."; + t[344] = "Server SQLState: {0}"; + t[345] = "SQLState servera: {0}"; + t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; + t[349] = "Serverov standard_conforming_strings parametar javlja {0}. 
JDBC drajver ocekuje on ili off."; + t[360] = "The driver currently does not support COPY operations."; + t[361] = "Drajver trenutno ne podržava COPY operacije."; + t[364] = "The array index is out of range: {0}, number of elements: {1}."; + t[365] = "Indeks niza je van opsega: {0}, broj elemenata: {1}."; + t[374] = "suspend/resume not implemented"; + t[375] = "obustavljanje/nastavljanje nije implementirano."; + t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; + t[379] = "Nije implementirano: Commit iz jedne faze mora biti izdat uz korištenje iste konekcije koja je korištena za startovanje."; + t[380] = "Error during one-phase commit. commit xid={0}"; + t[381] = "Kreška prilikom commit-a iz jedne faze. commit xid={0}"; + t[398] = "Cannot call cancelRowUpdates() when on the insert row."; + t[399] = "Nije moguće pozvati cancelRowUpdates() prilikom ubacivanja redova."; + t[400] = "Cannot reference a savepoint after it has been released."; + t[401] = "Nije moguće referenciranje tačke snimanja nakon njenog oslobađanja."; + t[402] = "You must specify at least one column value to insert a row."; + t[403] = "Morate specificirati barem jednu vrednost za kolonu da bi ste ubacili red."; + t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; + t[405] = "Nije moguće odrediti vrednost za MaxIndexKezs zbog nedostatka podataka u sistemskom katalogu."; + t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}"; + t[413] = "Ilegalna UTF-8 sekvenca: finalna vrednost je van opsega: {0}"; + t[414] = "{0} function takes two or three arguments."; + t[415] = "Funkcija {0} prima dva ili tri parametra."; + t[428] = "Unable to convert DOMResult SQLXML data to a string."; + t[429] = "Nije moguće konvertovati DOMResult SQLXML podatke u string."; + t[434] = "Unable to decode xml data."; + t[435] = "Neuspešno dekodiranje XML podataka."; + t[440] = "Unexpected error writing large 
object to database."; + t[441] = "Neočekivana greška prilikom upisa velikog objekta u bazu podataka."; + t[442] = "Zero bytes may not occur in string parameters."; + t[443] = "Nula bajtovji se ne smeju pojavljivati u string parametrima."; + t[444] = "A result was returned when none was expected."; + t[445] = "Rezultat vraćen ali nikakav rezultat nije očekivan."; + t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[451] = "ResultSet nije moguće ažurirati. Upit koji je generisao ovaj razultat mora selektoati jedino tabelu,i mora selektovati sve primrne ključeve iz te tabele. Pogledajte API specifikaciju za JDBC 2.1, sekciju 5.6 za više detalja."; + t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; + t[455] = "Dužina vezivne poruke {0} prevelika. 
Ovo je možda rezultat veoma velike ili pogrešne dužine specifikacije za InputStream parametre."; + t[460] = "Statement has been closed."; + t[461] = "Statemen je već zatvoren."; + t[462] = "No value specified for parameter {0}."; + t[463] = "Nije zadata vrednost za parametar {0}."; + t[468] = "The array index is out of range: {0}"; + t[469] = "Indeks niza je van opsega: {0}"; + t[474] = "Unable to bind parameter values for statement."; + t[475] = "Nije moguće naći vrednost vezivnog parametra za izjavu (statement)."; + t[476] = "Can''t refresh the insert row."; + t[477] = "Nije moguće osvežiti ubačeni red."; + t[480] = "No primary key found for table {0}."; + t[481] = "Nije pronađen ključ za tabelu {0}."; + t[482] = "Cannot change transaction isolation level in the middle of a transaction."; + t[483] = "Nije moguće izmeniti nivo izolacije transakcije u sred izvršavanja transakcije."; + t[498] = "Provided InputStream failed."; + t[499] = "Pribaljeni InputStream zakazao."; + t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[501] = "Index parametra je van opsega: {0}, broj parametara je: {1}."; + t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; + t[503] = "Serverov DataStyle parametar promenjen u {0}. JDBC zahteva da DateStyle počinje sa ISO za uspešno završavanje operacije."; + t[508] = "Connection attempt timed out."; + t[509] = "Isteklo je vreme za pokušaj konektovanja."; + t[512] = "Internal Query: {0}"; + t[513] = "Interni upit: {0}"; + t[514] = "Error preparing transaction. prepare xid={0}"; + t[515] = "Greška u pripremanju transakcije. prepare xid={0}"; + t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[519] = "Tip autentifikacije {0} nije podržan. 
Proverite dali imate podešen pg_hba.conf fajl koji uključuje klijentovu IP adresu ili podmrežu, i da ta mreža koristi šemu autentifikacije koja je podržana od strane ovog drajvera."; + t[526] = "Interval {0} not yet implemented"; + t[527] = "Interval {0} još nije implementiran."; + t[532] = "Conversion of interval failed"; + t[533] = "Konverzija intervala propala."; + t[540] = "Query timeout must be a value greater than or equals to 0."; + t[541] = "Tajm-aut mora biti vrednost veća ili jednaka 0."; + t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[543] = "Konekcija je zatvorena automatski zato što je nova konekcija otvorena za isti PooledConnection ili je PooledConnection zatvoren."; + t[544] = "ResultSet not positioned properly, perhaps you need to call next."; + t[545] = "ResultSet nije pravilno pozicioniran, možda je potrebno da pozovete next."; + t[546] = "Prepare called before end. prepare xid={0}, state={1}"; + t[547] = "Pripremanje poziva pre kraja. prepare xid={0}, state={1}"; + t[548] = "Invalid UUID data."; + t[549] = "Nevažeća UUID podatak."; + t[550] = "This statement has been closed."; + t[551] = "Statement je zatvoren."; + t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; + t[553] = "Nije moguće zaključiti SQL tip koji bi se koristio sa instancom {0}. Koristite setObject() sa zadatim eksplicitnim tipom vrednosti."; + t[554] = "Cannot call updateRow() when on the insert row."; + t[555] = "Nije moguće pozvati updateRow() prilikom ubacivanja redova."; + t[562] = "Detail: {0}"; + t[563] = "Detalji: {0}"; + t[566] = "Cannot call deleteRow() when on the insert row."; + t[567] = "Nije moguće pozvati deleteRow() prilikom ubacivanja redova."; + t[568] = "Currently positioned before the start of the ResultSet. 
You cannot call deleteRow() here."; + t[569] = "Trenutna pozicija pre početka ResultSet-a. Ne možete pozvati deleteRow() na toj poziciji."; + t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; + t[577] = "Ilegalna UTF-8 sekvenca: finalna vrednost je zamena vrednosti: {0}"; + t[578] = "Unknown Response Type {0}."; + t[579] = "Nepoznat tip odziva {0}."; + t[582] = "Unsupported value for stringtype parameter: {0}"; + t[583] = "Vrednost za parametar tipa string nije podržana: {0}"; + t[584] = "Conversion to type {0} failed: {1}."; + t[585] = "Konverzija u tip {0} propala: {1}."; + t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it."; + t[587] = "SQLXML objekat nije inicijalizovan tako da nije moguće preuzimati podatke iz njega."; + t[600] = "Unable to load the class {0} responsible for the datatype {1}"; + t[601] = "Nije moguće učitati kalsu {0} odgovornu za tip podataka {1}"; + t[604] = "The fastpath function {0} is unknown."; + t[605] = "Fastpath funkcija {0} je nepoznata."; + t[608] = "Malformed function or procedure escape syntax at offset {0}."; + t[609] = "Pogrešna sintaksa u funkciji ili proceduri na poziciji {0}."; + t[612] = "Provided Reader failed."; + t[613] = "Pribavljeni čitač (Reader) zakazao."; + t[614] = "Maximum number of rows must be a value grater than or equal to 0."; + t[615] = "Maksimalni broj redova mora biti vrednosti veće ili jednake 0."; + t[616] = "Failed to create object for: {0}."; + t[617] = "Propao pokušaj kreiranja objekta za: {0}."; + t[620] = "Conversion of money failed."; + t[621] = "Konverzija novca (money) propala."; + t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[623] = "Prevremen završetak ulaznog toka podataka,očekivano {0} bajtova, a pročitano samo {1}."; + t[626] = "An unexpected result was returned by a query."; + t[627] = "Nepredviđen rezultat je vraćen od strane upita."; + t[644] = "Invalid protocol state requested. 
Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}"; + t[645] = "Preplitanje transakcija nije implementirano. xid={0}, currentXid={1}, state={2}, flags={3}"; + t[646] = "An error occurred while setting up the SSL connection."; + t[647] = "Greška se dogodila prilikom podešavanja SSL konekcije."; + t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; + t[655] = "Ilegalna UTF-8 sekvenca: {0} bytes used to encode a {1} byte value: {2}"; + t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; + t[657] = "Nije implementirano: Spremanje mora biti pozvano uz korišćenje iste konekcije koja se koristi za startovanje transakcije. currentXid={0}, prepare xid={1}"; + t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; + t[659] = "SSLSocketFactory klasa koju pruža {0} se nemože instancirati."; + t[662] = "Failed to convert binary xml data to encoding: {0}."; + t[663] = "Neuspešno konvertovanje binarnih XML podataka u kodnu stranu: {0}."; + t[670] = "Position: {0}"; + t[671] = "Pozicija: {0}"; + t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[677] = "Lokacija: Fajl: {0}, Rutina: {1}, Linija: {2}"; + t[684] = "Cannot tell if path is open or closed: {0}."; + t[685] = "Nije moguće utvrditi dali je putanja otvorena ili zatvorena: {0}."; + t[690] = "Unable to create StAXResult for SQLXML"; + t[691] = "Nije moguće kreirati StAXResult za SQLXML"; + t[700] = "Cannot convert an instance of {0} to type {1}"; + t[701] = "Nije moguće konvertovati instancu {0} u tip {1}"; + t[710] = "{0} function takes four and only four argument."; + t[711] = "Funkcija {0} prima četiri i samo četiri parametra."; + t[718] = "Interrupted while attempting to connect."; + t[719] = "Prekinut pokušaj konektovanja."; + t[722] = "Your security policy has prevented the connection from being attempted. 
You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; + t[723] = "Sigurnosna podešavanja su sprečila konekciju. Verovatno je potrebno da dozvolite konekciju klasi java.net.SocketPermission na bazu na serveru."; + t[734] = "No function outputs were registered."; + t[735] = "Nije registrovan nikakv izlaz iz funkcije."; + t[736] = "{0} function takes one and only one argument."; + t[737] = "Funkcija {0} prima jedan i samo jedan parametar."; + t[744] = "This ResultSet is closed."; + t[745] = "ResultSet je zatvoren."; + t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[747] = "Pronađeni su nevažeći karakter podaci. Uzrok je najverovatnije to što pohranjeni podaci sadrže karaktere koji su nevažeći u setu karaktera sa kojima je baza kreirana. Npr. 
Čuvanje 8bit podataka u SQL_ASCII bazi podataka."; + t[752] = "Error disabling autocommit"; + t[753] = "Greška u isključivanju autokomita"; + t[754] = "Ran out of memory retrieving query results."; + t[755] = "Nestalo je memorije prilikom preuzimanja rezultata upita."; + t[756] = "Returning autogenerated keys is not supported."; + t[757] = "Vraćanje autogenerisanih ključeva nije podržano."; + t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; + t[761] = "Operacija zahteva skrolabilan ResultSet,ali ovaj ResultSet je FORWARD_ONLY."; + t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; + t[763] = "CallableStatement funkcija je izvršena dok je izlazni parametar {0} tipa {1} a tip {2} je registrovan kao izlazni parametar."; + t[764] = "Unable to find server array type for provided name {0}."; + t[765] = "Neuspešno nalaženje liste servera za zadato ime {0}."; + t[768] = "Unknown ResultSet holdability setting: {0}."; + t[769] = "Nepoznata ResultSet podešavanja za mogućnost držanja (holdability): {0}."; + t[772] = "Transaction isolation level {0} not supported."; + t[773] = "Nivo izolacije transakcije {0} nije podržan."; + t[774] = "Zero bytes may not occur in identifiers."; + t[775] = "Nula bajtovji se ne smeju pojavljivati u identifikatorima."; + t[776] = "No results were returned by the query."; + t[777] = "Nikakav rezultat nije vraćen od strane upita."; + t[778] = "A CallableStatement was executed with nothing returned."; + t[779] = "CallableStatement je izvršen ali ništa nije vrećeno kao rezultat."; + t[780] = "wasNull cannot be call before fetching a result."; + t[781] = "wasNull nemože biti pozvan pre zahvatanja rezultata."; + t[784] = "Returning autogenerated keys by column index is not supported."; + t[785] = "Vraćanje autogenerisanih ključeva po kloloni nije podržano."; + t[786] = "This statement does not declare an OUT parameter. 
Use '{' ?= call ... '}' to declare one."; + t[787] = "Izraz ne deklariše izlazni parametar. Koristite '{' ?= poziv ... '}' za deklarisanje."; + t[788] = "Can''t use relative move methods while on the insert row."; + t[789] = "Ne može se koristiti metod relativnog pomeranja prilikom ubacivanja redova."; + t[790] = "A CallableStatement was executed with an invalid number of parameters"; + t[791] = "CallableStatement je izvršen sa nevažećim brojem parametara"; + t[792] = "Connection is busy with another transaction"; + t[793] = "Konekcija je zauzeta sa drugom transakciom."; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 397) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 395) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 794) + idx -= 794; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return new Enumeration<>() { + private int idx = 0; + { while (idx < 794 && table[idx] == null) idx += 2; } + + @Override + public boolean hasMoreElements () { + return (idx < 794); + } + + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 794 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_tr.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_tr.java new file mode 100644 index 0000000..02222b2 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_tr.java @@ -0,0 +1,401 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; 
+import java.util.ResourceBundle; + +public class messages_tr extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[794]; + t[0] = ""; + t[1] = "Project-Id-Version: jdbc-tr\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-05-31 21:47+0200\nLast-Translator: Devrim GÜNDÜZ \nLanguage-Team: Turkish \nLanguage: tr\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.3.1\nX-Poedit-Language: Turkish\nX-Poedit-Country: TURKEY\n"; + t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[3] = "Desteklenmiyor: 2nd phase commit, atıl bir bağlantıdan başlatılmalıdır. commit xid={0}, currentXid={1}, state={2}, transactionState={3}"; + t[4] = "DataSource has been closed."; + t[5] = "DataSource kapatıldı."; + t[8] = "Invalid flags {0}"; + t[9] = "Geçersiz seçenekler {0}"; + t[18] = "Where: {0}"; + t[19] = "Where: {0}"; + t[24] = "Unknown XML Source class: {0}"; + t[25] = "Bilinmeyen XML Kaynak Sınıfı: {0}"; + t[26] = "The connection attempt failed."; + t[27] = "Bağlantı denemesi başarısız oldu."; + t[28] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[29] = "Şu an ResultSet sonucundan sonra konumlandı. 
deleteRow() burada çağırabilirsiniz."; + t[32] = "Can''t use query methods that take a query string on a PreparedStatement."; + t[33] = "PreparedStatement ile sorgu satırı alan sorgu yöntemleri kullanılamaz."; + t[36] = "Multiple ResultSets were returned by the query."; + t[37] = "Sorgu tarafından birden fazla ResultSet getirildi."; + t[50] = "Too many update results were returned."; + t[51] = "Çok fazla güncelleme sonucu döndürüldü."; + t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}"; + t[59] = "Geçersiz UTF-8 çoklu bayt karakteri: ilk bayt {0}: {1}"; + t[66] = "The column name {0} was not found in this ResultSet."; + t[67] = "Bu ResultSet içinde {0} sütun adı bulunamadı."; + t[70] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[71] = "Fastpath call {0} - Integer beklenirken hiçbir sonuç getirilmedi."; + t[74] = "Protocol error. Session setup failed."; + t[75] = "Protokol hatası. Oturum kurulumu başarısız oldu."; + t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[77] = "CallableStatement bildirildi ancak registerOutParameter(1, < bir tip>) tanıtımı yapılmadı."; + t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[79] = "Eş zamanlama CONCUR_READ_ONLY olan ResultSet''ler değiştirilemez"; + t[90] = "LOB positioning offsets start at 1."; + t[91] = "LOB bağlangıç adresi 1Den başlıyor"; + t[92] = "Internal Position: {0}"; + t[93] = "Internal Position: {0}"; + t[96] = "free() was called on this LOB previously"; + t[97] = "Bu LOB'da free() daha önce çağırıldı"; + t[100] = "Cannot change transaction read-only property in the middle of a transaction."; + t[101] = "Transaction ortasında geçerli transactionun read-only özellği değiştirilemez."; + t[102] = "The JVM claims not to support the {0} encoding."; + t[103] = "JVM, {0} dil kodlamasını desteklememektedir."; + t[108] = "{0} function doesn''t take any argument."; + t[109] = "{0} fonksiyonu parametre 
almaz."; + t[112] = "xid must not be null"; + t[113] = "xid null olamaz"; + t[114] = "Connection has been closed."; + t[115] = "Bağlantı kapatıldı."; + t[122] = "The server does not support SSL."; + t[123] = "Sunucu SSL desteklemiyor."; + t[124] = "Custom type maps are not supported."; + t[125] = "Özel tip eşleştirmeleri desteklenmiyor."; + t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}"; + t[141] = "Geçersiz UTF-8 çoklu bayt karakteri: {0}/{1} baytı 10xxxxxx değildir: {2}"; + t[148] = "Hint: {0}"; + t[149] = "İpucu: {0}"; + t[152] = "Unable to find name datatype in the system catalogs."; + t[153] = "Sistem kataloglarında name veri tipi bulunamıyor."; + t[156] = "Unsupported Types value: {0}"; + t[157] = "Geçersiz Types değeri: {0}"; + t[158] = "Unknown type {0}."; + t[159] = "Bilinmeyen tip {0}."; + t[166] = "{0} function takes two and only two arguments."; + t[167] = "{0} fonksiyonunu sadece iki parametre alabilir."; + t[170] = "Finalizing a Connection that was never closed:"; + t[171] = "Kapatılmamış bağlantı sonlandırılıyor."; + t[180] = "The maximum field size must be a value greater than or equal to 0."; + t[181] = "En büyük alan boyutu sıfır ya da sıfırdan büyük bir değer olmalı."; + t[186] = "PostgreSQL LOBs can only index to: {0}"; + t[187] = "PostgreSQL LOB göstergeleri sadece {0} referans edebilir"; + t[194] = "Method {0} is not yet implemented."; + t[195] = "{0} yöntemi henüz kodlanmadı."; + t[198] = "Error loading default settings from driverconfig.properties"; + t[199] = "driverconfig.properties dosyasından varsayılan ayarları yükleme hatası"; + t[200] = "Results cannot be retrieved from a CallableStatement before it is executed."; + t[201] = "CallableStatement çalıştırılmadan sonuçlar ondan alınamaz."; + t[202] = "Large Objects may not be used in auto-commit mode."; + t[203] = "Auto-commit biçimde large object kullanılamaz."; + t[208] = "Expected command status BEGIN, got {0}."; + t[209] = "BEGIN komut durumunu 
beklenirken {0} alındı."; + t[218] = "Invalid fetch direction constant: {0}."; + t[219] = "Getirme yönü değişmezi geçersiz: {0}."; + t[222] = "{0} function takes three and only three arguments."; + t[223] = "{0} fonksiyonunu sadece üç parametre alabilir."; + t[226] = "This SQLXML object has already been freed."; + t[227] = "Bu SQLXML nesnesi zaten boşaltılmış."; + t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; + t[229] = "ResultSet, sonuçların ilk kaydından önce veya son kaydından sonra olduğu için güncelleme yapılamamaktadır."; + t[230] = "The JVM claims not to support the encoding: {0}"; + t[231] = "JVM, {0} dil kodlamasını desteklememektedir."; + t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; + t[233] = "{0} tipinde parametre tanıtıldı, ancak {1} (sqltype={2}) tipinde geri getirmek için çağrı yapıldı."; + t[234] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}"; + t[235] = "Hazırlanmış transaction rollback hatası. rollback xid={0}, preparedXid={1}, currentXid={2}"; + t[240] = "Cannot establish a savepoint in auto-commit mode."; + t[241] = "Auto-commit biçimde savepoint oluşturulamıyor."; + t[242] = "Cannot retrieve the id of a named savepoint."; + t[243] = "Adlandırılmış savepointin id değerine erişilemiyor."; + t[244] = "The column index is out of range: {0}, number of columns: {1}."; + t[245] = "Sütun gçstergesi kapsam dışıdır: {0}, sütun sayısı: {1}."; + t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[251] = "Sıradışı bir durum sürücünün hata vermesine sebep oldu. 
Lütfen bu durumu geliştiricilere bildirin."; + t[260] = "Cannot cast an instance of {0} to type {1}"; + t[261] = "{0} tipi {1} tipine dönüştürülemiyor"; + t[264] = "Unknown Types value."; + t[265] = "Geçersiz Types değeri."; + t[266] = "Invalid stream length {0}."; + t[267] = "Geçersiz akım uzunluğu {0}."; + t[272] = "Cannot retrieve the name of an unnamed savepoint."; + t[273] = "Adı verilmemiş savepointin id değerine erişilemiyor."; + t[274] = "Unable to translate data into the desired encoding."; + t[275] = "Veri, istenilen dil kodlamasına çevrilemiyor."; + t[276] = "Expected an EOF from server, got: {0}"; + t[277] = "Sunucudan EOF beklendi; ama {0} alındı."; + t[278] = "Bad value for type {0} : {1}"; + t[279] = "{0} veri tipi için geçersiz değer : {1}"; + t[280] = "The server requested password-based authentication, but no password was provided."; + t[281] = "Sunucu şifre tabanlı yetkilendirme istedi; ancak bir şifre sağlanmadı."; + t[286] = "Unable to create SAXResult for SQLXML."; + t[287] = "SQLXML için SAXResult yaratılamadı."; + t[292] = "Error during recover"; + t[293] = "Kurtarma sırasında hata"; + t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[295] = "start çağırımı olmadan end çağırılmıştır. 
state={0}, start xid={1}, currentXid={2}, preparedXid={3}"; + t[296] = "Truncation of large objects is only implemented in 8.3 and later servers."; + t[297] = "Large objectlerin temizlenmesi 8.3 ve sonraki sürümlerde kodlanmıştır."; + t[298] = "This PooledConnection has already been closed."; + t[299] = "Geçerli PooledConnection zaten önceden kapatıldı."; + t[302] = "ClientInfo property not supported."; + t[303] = "Clientinfo property'si desteklenememktedir."; + t[306] = "Fetch size must be a value greater to or equal to 0."; + t[307] = "Fetch boyutu sıfır veya daha büyük bir değer olmalıdır."; + t[312] = "A connection could not be made using the requested protocol {0}."; + t[313] = "İstenilen protokol ile bağlantı kurulamadı {0}"; + t[318] = "Unknown XML Result class: {0}"; + t[319] = "Bilinmeyen XML Sonuç sınıfı: {0}."; + t[322] = "There are no rows in this ResultSet."; + t[323] = "Bu ResultSet içinde kayıt bulunamadı."; + t[324] = "Unexpected command status: {0}."; + t[325] = "Beklenmeyen komut durumu: {0}."; + t[330] = "Heuristic commit/rollback not supported. forget xid={0}"; + t[331] = "Heuristic commit/rollback desteklenmiyor. forget xid={0}"; + t[334] = "Not on the insert row."; + t[335] = "Insert kaydı değil."; + t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further."; + t[337] = "Bu SQLXML nesnesi daha önceden ilklendirilmiştir; o yüzden daha fazla müdahale edilemez."; + t[344] = "Server SQLState: {0}"; + t[345] = "Sunucu SQLState: {0}"; + t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; + t[349] = "İstemcinin client_standard_conforming_strings parametresi {0} olarak raporlandı. 
JDBC sürücüsü on ya da off olarak bekliyordu."; + t[360] = "The driver currently does not support COPY operations."; + t[361] = "Bu sunucu şu aşamada COPY işlemleri desteklememktedir."; + t[364] = "The array index is out of range: {0}, number of elements: {1}."; + t[365] = "Dizin göstergisi kapsam dışıdır: {0}, öğe sayısı: {1}."; + t[374] = "suspend/resume not implemented"; + t[375] = "suspend/resume desteklenmiyor"; + t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it"; + t[379] = "Desteklenmiyor: one-phase commit, işlevinde başlatan ve bitiren bağlantı aynı olmalıdır"; + t[380] = "Error during one-phase commit. commit xid={0}"; + t[381] = "One-phase commit sırasında hata. commit xid={0}"; + t[398] = "Cannot call cancelRowUpdates() when on the insert row."; + t[399] = "Insert edilmiş kaydın üzerindeyken cancelRowUpdates() çağırılamaz."; + t[400] = "Cannot reference a savepoint after it has been released."; + t[401] = "Bırakıldıktan sonra savepoint referans edilemez."; + t[402] = "You must specify at least one column value to insert a row."; + t[403] = "Bir satır eklemek için en az bir sütun değerini belirtmelisiniz."; + t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data."; + t[405] = "Sistem kataloğu olmadığından MaxIndexKeys değerini tespit edilememektedir."; + t[410] = "commit called before end. commit xid={0}, state={1}"; + t[411] = "commit, sondan önce çağırıldı. 
commit xid={0}, state={1}"; + t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}"; + t[413] = "Geçersiz UTF-8 çoklu bayt karakteri: son değer sıra dışıdır: {0}"; + t[414] = "{0} function takes two or three arguments."; + t[415] = "{0} fonksiyonu yalnız iki veya üç argüman alabilir."; + t[428] = "Unable to convert DOMResult SQLXML data to a string."; + t[429] = "DOMResult SQLXML verisini diziye dönüştürülemedi."; + t[434] = "Unable to decode xml data."; + t[435] = "XML verisinin kodu çözülemedi."; + t[440] = "Unexpected error writing large object to database."; + t[441] = "Large object veritabanına yazılırken beklenmeyan hata."; + t[442] = "Zero bytes may not occur in string parameters."; + t[443] = "String parametrelerinde sıfır bayt olamaz."; + t[444] = "A result was returned when none was expected."; + t[445] = "Hiçbir sonuç kebklenimezken sonuç getirildi."; + t[450] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[451] = "ResultSet değiştirilemez. Bu sonucu üreten sorgu tek bir tablodan sorgulamalı ve tablonun tüm primary key alanları belirtmelidir. Daha fazla bilgi için bk. JDBC 2.1 API Specification, section 5.6."; + t[454] = "Bind message length {0} too long. This can be caused by very large or incorrect length specifications on InputStream parameters."; + t[455] = "Bind mesaj uzunluğu ({0}) fazla uzun. 
Bu durum InputStream yalnış uzunluk belirtimlerden kaynaklanabilir."; + t[460] = "Statement has been closed."; + t[461] = "Komut kapatıldı."; + t[462] = "No value specified for parameter {0}."; + t[463] = "{0} parametresi için hiç bir değer belirtilmedi."; + t[468] = "The array index is out of range: {0}"; + t[469] = "Dizi göstergesi kapsam dışıdır: {0}"; + t[474] = "Unable to bind parameter values for statement."; + t[475] = "Komut için parametre değerlei bağlanamadı."; + t[476] = "Can''t refresh the insert row."; + t[477] = "Inser satırı yenilenemiyor."; + t[480] = "No primary key found for table {0}."; + t[481] = "{0} tablosunda primary key yok."; + t[482] = "Cannot change transaction isolation level in the middle of a transaction."; + t[483] = "Transaction ortasında geçerli transactionun transaction isolation level özellği değiştirilemez."; + t[498] = "Provided InputStream failed."; + t[499] = "Sağlanmış InputStream başarısız."; + t[500] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[501] = "Dizin göstergisi kapsam dışıdır: {0}, öğe sayısı: {1}."; + t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation."; + t[503] = "Sunucunun DateStyle parametresi {0} olarak değiştirildi. JDBC sürücüsü doğru işlemesi için DateStyle tanımının ISO işle başlamasını gerekir."; + t[508] = "Connection attempt timed out."; + t[509] = "Bağlantı denemesi zaman aşımına uğradı."; + t[512] = "Internal Query: {0}"; + t[513] = "Internal Query: {0}"; + t[514] = "Error preparing transaction. prepare xid={0}"; + t[515] = "Transaction hazırlama hatası. prepare xid={0}"; + t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[519] = "{0} yetkinlendirme tipi desteklenmemektedir. 
pg_hba.conf dosyanızı istemcinin IP adresini ya da subnetini içerecek şekilde ayarlayıp ayarlamadığınızı ve sürücü tarafından desteklenen yetkilendirme yöntemlerinden birisini kullanıp kullanmadığını kontrol ediniz."; + t[526] = "Interval {0} not yet implemented"; + t[527] = "{0} aralığı henüz kodlanmadı."; + t[532] = "Conversion of interval failed"; + t[533] = "Interval dönüştürmesi başarısız."; + t[540] = "Query timeout must be a value greater than or equals to 0."; + t[541] = "Sorgu zaman aşımı değer sıfır veya sıfırdan büyük bir sayı olmalıdır."; + t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[543] = "PooledConnection kapatıldığı için veya aynı PooledConnection için yeni bir bağlantı açıldığı için geçerli bağlantı otomatik kapatıldı."; + t[544] = "ResultSet not positioned properly, perhaps you need to call next."; + t[545] = "ResultSet doğru konumlanmamıştır, next işlemi çağırmanız gerekir."; + t[546] = "Prepare called before end. prepare xid={0}, state={1}"; + t[547] = "Sondan önce prepare çağırılmış. prepare xid={0}, state={1}"; + t[548] = "Invalid UUID data."; + t[549] = "Geçersiz UUID verisi."; + t[550] = "This statement has been closed."; + t[551] = "Bu komut kapatıldı."; + t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use."; + t[553] = "{0}''nin örneği ile kullanılacak SQL tip bulunamadı. Kullanılacak tip belirtmek için kesin Types değerleri ile setObject() kullanın."; + t[554] = "Cannot call updateRow() when on the insert row."; + t[555] = "Insert kaydı üzerinde updateRow() çağırılamaz."; + t[562] = "Detail: {0}"; + t[563] = "Ayrıntı: {0}"; + t[566] = "Cannot call deleteRow() when on the insert row."; + t[567] = "Insert kaydı üzerinde deleteRow() çağırılamaz."; + t[568] = "Currently positioned before the start of the ResultSet. 
You cannot call deleteRow() here."; + t[569] = "Şu an ResultSet başlangcıından önce konumlandı. deleteRow() burada çağırabilirsiniz."; + t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}"; + t[577] = "Geçersiz UTF-8 çoklu bayt karakteri: son değer yapay bir değerdir: {0}"; + t[578] = "Unknown Response Type {0}."; + t[579] = "Bilinmeyen yanıt tipi {0}"; + t[582] = "Unsupported value for stringtype parameter: {0}"; + t[583] = "strinftype parametresi için destekleneyen değer: {0}"; + t[584] = "Conversion to type {0} failed: {1}."; + t[585] = "{0} veri tipine dönüştürme hatası: {1}."; + t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it."; + t[587] = "Bu SQLXML nesnesi ilklendirilmemiş; o yüzden ondan veri alamazsınız."; + t[600] = "Unable to load the class {0} responsible for the datatype {1}"; + t[601] = "{1} veri tipinden sorumlu {0} sınıfı yüklenemedi"; + t[604] = "The fastpath function {0} is unknown."; + t[605] = "{0} fastpath fonksiyonu bilinmemektedir."; + t[608] = "Malformed function or procedure escape syntax at offset {0}."; + t[609] = "{0} adresinde fonksiyon veya yordamda kaçış söz dizimi geçersiz."; + t[612] = "Provided Reader failed."; + t[613] = "Sağlanmış InputStream başarısız."; + t[614] = "Maximum number of rows must be a value grater than or equal to 0."; + t[615] = "En büyük getirilecek satır sayısı sıfırdan büyük olmalıdır."; + t[616] = "Failed to create object for: {0}."; + t[617] = "{0} için nesne oluşturma hatası."; + t[620] = "Conversion of money failed."; + t[621] = "Money dönüştürmesi başarısız."; + t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}."; + t[623] = "Giriş akımında beklenmeyen dosya sonu, {0} bayt beklenirken sadece {1} bayt alındı."; + t[626] = "An unexpected result was returned by a query."; + t[627] = "Sorgu beklenmeyen bir sonuç döndürdü."; + t[644] = "Invalid protocol state requested. 
Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}"; + t[645] = "Transaction interleaving desteklenmiyor. xid={0}, currentXid={1}, state={2}, flags={3}"; + t[646] = "An error occurred while setting up the SSL connection."; + t[647] = "SSL bağlantısı ayarlanırken bir hata oluştu."; + t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}"; + t[655] = "Geçersiz UTF-8 çoklu bayt karakteri: {0} bayt, {1} bayt değeri kodlamak için kullanılmış: {2}"; + t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}"; + t[657] = "Desteklenmiyor: Prepare, transaction başlatran bağlantı tarafından çağırmalıdır. currentXid={0}, prepare xid={1}"; + t[658] = "The SSLSocketFactory class provided {0} could not be instantiated."; + t[659] = "SSLSocketFactory {0} ile örneklenmedi."; + t[662] = "Failed to convert binary xml data to encoding: {0}."; + t[663] = "xml verisinin şu dil kodlamasına çevirilmesi başarısız oldu: {0}"; + t[670] = "Position: {0}"; + t[671] = "Position: {0}"; + t[676] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[677] = "Yer: Dosya: {0}, Yordam: {1}, Satır: {2}"; + t[684] = "Cannot tell if path is open or closed: {0}."; + t[685] = "Pathın açık mı kapalı olduğunu tespit edilemiyor: {0}."; + t[690] = "Unable to create StAXResult for SQLXML"; + t[691] = "SQLXML için StAXResult yaratılamadı"; + t[700] = "Cannot convert an instance of {0} to type {1}"; + t[701] = "{0} instance, {1} tipine dönüştürülemiyor"; + t[710] = "{0} function takes four and only four argument."; + t[711] = "{0} fonksiyonunu yalnız dört parametre alabilir."; + t[718] = "Interrupted while attempting to connect."; + t[719] = "Bağlanırken kesildi."; + t[722] = "Your security policy has prevented the connection from being attempted. 
You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."; + t[723] = "Güvenlik politikanız bağlantının kurulmasını engelledi. java.net.SocketPermission'a veritabanına ve de bağlanacağı porta bağlantı izni vermelisiniz."; + t[734] = "No function outputs were registered."; + t[735] = "Hiçbir fonksiyon çıktısı kaydedilmedi."; + t[736] = "{0} function takes one and only one argument."; + t[737] = "{0} fonksiyonunu yalnız tek bir parametre alabilir."; + t[744] = "This ResultSet is closed."; + t[745] = "ResultSet kapalıdır."; + t[746] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[747] = "Geçersiz karakterler bulunmuştur. Bunun sebebi, verilerde veritabanın desteklediği dil kodlamadaki karakterlerin dışında bir karaktere rastlamasıdır. 
Bunun en yaygın örneği 8 bitlik veriyi SQL_ASCII veritabanında saklamasıdır."; + t[752] = "Error disabling autocommit"; + t[753] = "autocommit'i devre dışı bırakma sırasında hata"; + t[754] = "Ran out of memory retrieving query results."; + t[755] = "Sorgu sonuçları alınırken bellek yetersiz."; + t[756] = "Returning autogenerated keys is not supported."; + t[757] = "Otomatik üretilen değerlerin getirilmesi desteklenememktedir."; + t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; + t[761] = "İşlem, kaydırılabilen ResultSet gerektirir, ancak bu ResultSet FORWARD_ONLYdir."; + t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; + t[763] = "CallableStatement çalıştırıldı, ancak {2} tipi kaydedilmesine rağmen döndürme parametresi {0} ve tipi {1} idi."; + t[764] = "Unable to find server array type for provided name {0}."; + t[765] = "Belirtilen {0} adı için sunucu array tipi bulunamadı."; + t[768] = "Unknown ResultSet holdability setting: {0}."; + t[769] = "ResultSet tutabilme ayarı geçersiz: {0}."; + t[772] = "Transaction isolation level {0} not supported."; + t[773] = "Transaction isolation level {0} desteklenmiyor."; + t[774] = "Zero bytes may not occur in identifiers."; + t[775] = "Belirteçlerde sıfır bayt olamaz."; + t[776] = "No results were returned by the query."; + t[777] = "Sorgudan hiç bir sonuç dönmedi."; + t[778] = "A CallableStatement was executed with nothing returned."; + t[779] = "CallableStatement çalıştırma sonucunda veri getirilmedi."; + t[780] = "wasNull cannot be call before fetching a result."; + t[781] = "wasNull sonuç çekmeden önce çağırılamaz."; + t[784] = "Returning autogenerated keys by column index is not supported."; + t[785] = "Kolonların indexlenmesi ile otomatik olarak oluşturulan anahtarların döndürülmesi desteklenmiyor."; + t[786] = "This statement does not declare an OUT parameter. Use '{' ?= call ... 
'}' to declare one."; + t[787] = "Bu komut OUT parametresi bildirmemektedir. Bildirmek için '{' ?= call ... '}' kullanın."; + t[788] = "Can''t use relative move methods while on the insert row."; + t[789] = "Insert kaydı üzerinde relative move method kullanılamaz."; + t[790] = "A CallableStatement was executed with an invalid number of parameters"; + t[791] = "CallableStatement geçersiz sayıda parametre ile çalıştırıldı."; + t[792] = "Connection is busy with another transaction"; + t[793] = "Bağlantı, başka bir transaction tarafından meşgul ediliyor"; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 397) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 395) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 794) + idx -= 794; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return new Enumeration<>() { + private int idx = 0; + { while (idx < 794 && table[idx] == null) idx += 2; } + @Override + public boolean hasMoreElements () { + return (idx < 794); + } + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 794 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_CN.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_CN.java new file mode 100644 index 0000000..1694aaa --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_CN.java @@ -0,0 +1,287 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import 
java.util.ResourceBundle; + +public class messages_zh_CN extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[578]; + t[0] = ""; + t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.3\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-01-31 14:34+0800\nLast-Translator: 郭朝益(ChaoYi, Kuo) \nLanguage-Team: The PostgreSQL Development Team \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Chinese\nX-Poedit-Country: CHINA\nX-Poedit-SourceCharset: utf-8\n"; + t[6] = "Cannot call cancelRowUpdates() when on the insert row."; + t[7] = "不能在新增的数据列上呼叫 cancelRowUpdates()。"; + t[8] = "The server requested password-based authentication, but no password was provided."; + t[9] = "服务器要求使用密码验证,但是密码并未提供。"; + t[12] = "Detail: {0}"; + t[13] = "详细:{0}"; + t[16] = "Can''t refresh the insert row."; + t[17] = "无法重读新增的数据列。"; + t[18] = "Connection has been closed."; + t[19] = "Connection 已经被关闭。"; + t[24] = "Bad value for type {0} : {1}"; + t[25] = "不良的类型值 {0} : {1}"; + t[36] = "Truncation of large objects is only implemented in 8.3 and later servers."; + t[37] = "大型对象的截断(Truncation)仅被实作执行在 8.3 和后来的服务器。"; + t[40] = "Cannot retrieve the name of an unnamed savepoint."; + t[41] = "无法取得未命名储存点(Savepoint)的名称。"; + t[46] = "An error occurred while setting up the SSL connection."; + t[47] = "进行 SSL 连线时发生错误。"; + t[50] = "suspend/resume not implemented"; + t[51] = "暂停(suspend)/再继续(resume)尚未被实作。"; + t[60] = "{0} function takes one and only one argument."; + t[61] = "{0} 函式取得一个且仅有一个引数。"; + t[62] = "Conversion to type {0} failed: {1}."; + t[63] = "转换类型 {0} 失败:{1}。"; + t[66] = "Conversion of money failed."; + t[67] = "money 转换失败。"; + t[70] = "A result was returned when none was expected."; + t[71] = "传回预期之外的结果。"; + t[80] = "This PooledConnection has already been closed."; + t[81] = "这个 PooledConnection 已经被关闭。"; + t[84] = "Multiple ResultSets were returned by the query."; + t[85] = "查询传回多个 
ResultSet。"; + t[90] = "Not on the insert row."; + t[91] = "不在新增的数据列上。"; + t[94] = "An unexpected result was returned by a query."; + t[95] = "传回非预期的查询结果。"; + t[102] = "Internal Query: {0}"; + t[103] = "内部查询:{0}"; + t[106] = "The array index is out of range: {0}"; + t[107] = "阵列索引超过许可范围:{0}"; + t[112] = "Connection attempt timed out."; + t[113] = "Connection 尝试逾时。"; + t[114] = "Unable to find name datatype in the system catalogs."; + t[115] = "在系统 catalog 中找不到名称数据类型(datatype)。"; + t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[117] = "不明的原因导致驱动程序造成失败,请回报这个例外。"; + t[120] = "The array index is out of range: {0}, number of elements: {1}."; + t[121] = "阵列索引超过许可范围:{0},元素数量:{1}。"; + t[138] = "Invalid flags {0}"; + t[139] = "无效的旗标 flags {0}"; + t[146] = "Unexpected error writing large object to database."; + t[147] = "将大型对象(large object)写入数据库时发生不明错误。"; + t[162] = "Query timeout must be a value greater than or equals to 0."; + t[163] = "查询逾时等候时间必须大于或等于 0。"; + t[170] = "Unknown type {0}."; + t[171] = "不明的类型 {0}"; + t[174] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off."; + t[175] = "这服务器的 standard_conforming_strings 参数已回报为 {0},JDBC 驱动程序已预期开启或是关闭。"; + t[176] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. 
The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[177] = "发现不合法的字元,可能的原因是欲储存的数据中包含数据库的字元集不支援的字码,其中最常见例子的就是将 8 位元数据存入使用 SQL_ASCII 编码的数据库中。"; + t[178] = "The column index is out of range: {0}, number of columns: {1}."; + t[179] = "栏位索引超过许可范围:{0},栏位数:{1}。"; + t[180] = "The connection attempt failed."; + t[181] = "尝试连线已失败。"; + t[182] = "No value specified for parameter {0}."; + t[183] = "未设定参数值 {0} 的内容。"; + t[190] = "Provided Reader failed."; + t[191] = "提供的 Reader 已失败。"; + t[194] = "Unsupported value for stringtype parameter: {0}"; + t[195] = "字符类型参数值未被支持:{0}"; + t[198] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[199] = "已经宣告 CallableStatement 函式,但是尚未呼叫 registerOutParameter (1, ) 。"; + t[204] = "Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."; + t[205] = "不能在 ResultSet 的第一笔数据之前呼叫 deleteRow()。"; + t[214] = "The maximum field size must be a value greater than or equal to 0."; + t[215] = "最大栏位容量必须大于或等于 0。"; + t[216] = "Fetch size must be a value greater to or equal to 0."; + t[217] = "数据读取笔数(fetch size)必须大于或等于 0。"; + t[220] = "PostgreSQL LOBs can only index to: {0}"; + t[221] = "PostgreSQL LOBs 仅能索引到:{0}"; + t[224] = "The JVM claims not to support the encoding: {0}"; + t[225] = "JVM 声明并不支援编码:{0} 。"; + t[226] = "Interval {0} not yet implemented"; + t[227] = "隔绝 {0} 尚未被实作。"; + t[238] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[239] = "Fastpath 呼叫 {0} - 没有传回值,且应该传回一个整数。"; + t[246] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[247] = "ResultSets 与并发同作(Concurrency) CONCUR_READ_ONLY 不能被更新。"; + t[250] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; + t[251] = "这个 statement 未宣告 OUT 参数,使用 '{' ?= call ... 
'}' 宣告一个。"; + t[256] = "Cannot reference a savepoint after it has been released."; + t[257] = "无法参照已经被释放的储存点。"; + t[260] = "Unsupported Types value: {0}"; + t[261] = "未被支持的类型值:{0}"; + t[266] = "Protocol error. Session setup failed."; + t[267] = "通讯协定错误,Session 初始化失败。"; + t[274] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[275] = "不能在 ResultSet 的最后一笔数据之后呼叫 deleteRow()。"; + t[278] = "Internal Position: {0}"; + t[279] = "内部位置:{0}"; + t[280] = "Zero bytes may not occur in identifiers."; + t[281] = "在标识识别符中不存在零位元组。"; + t[288] = "{0} function doesn''t take any argument."; + t[289] = "{0} 函式无法取得任何的引数。"; + t[300] = "This statement has been closed."; + t[301] = "这个 statement 已经被关闭。"; + t[318] = "Cannot establish a savepoint in auto-commit mode."; + t[319] = "在自动确认事物交易模式无法建立储存点(Savepoint)。"; + t[320] = "Position: {0}"; + t[321] = "位置:{0}"; + t[322] = "ResultSet is not updateable. The query that generated this result set must select only one table, and must select all primary keys from that table. 
See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[323] = "不可更新的 ResultSet。用来产生这个 ResultSet 的 SQL 命令只能操作一个数据表,并且必需选择所有主键栏位,详细请参阅 JDBC 2.1 API 规格书 5.6 节。"; + t[330] = "This ResultSet is closed."; + t[331] = "这个 ResultSet 已经被关闭。"; + t[338] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; + t[339] = "已注册参数类型 {0},但是又呼叫了get{1}(sqltype={2})。"; + t[342] = "Transaction isolation level {0} not supported."; + t[343] = "不支援交易隔绝等级 {0} 。"; + t[344] = "Statement has been closed."; + t[345] = "Sstatement 已经被关闭。"; + t[352] = "Server SQLState: {0}"; + t[353] = "服务器 SQLState:{0}"; + t[354] = "No primary key found for table {0}."; + t[355] = "{0} 数据表中未找到主键(Primary key)。"; + t[362] = "Cannot convert an instance of {0} to type {1}"; + t[363] = "无法转换 {0} 到类型 {1} 的实例"; + t[364] = "DataSource has been closed."; + t[365] = "DataSource 已经被关闭。"; + t[368] = "The column name {0} was not found in this ResultSet."; + t[369] = "ResultSet 中找不到栏位名称 {0}。"; + t[372] = "ResultSet not positioned properly, perhaps you need to call next."; + t[373] = "查询结果指标位置不正确,您也许需要呼叫 ResultSet 的 next() 方法。"; + t[378] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; + t[379] = "无法更新 ResultSet,可能在第一笔数据之前或最未笔数据之后。"; + t[380] = "Method {0} is not yet implemented."; + t[381] = "这个 {0} 方法尚未被实作。"; + t[382] = "{0} function takes two or three arguments."; + t[383] = "{0} 函式取得二个或三个引数。"; + t[384] = "The JVM claims not to support the {0} encoding."; + t[385] = "JVM 声明并不支援 {0} 编码。"; + t[396] = "Unknown Response Type {0}."; + t[397] = "不明的回应类型 {0}。"; + t[398] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[399] = "参数索引超出许可范围:{0},参数总数:{1}。"; + t[400] = "Where: {0}"; + t[401] = "在位置:{0}"; + t[406] = "Cannot call deleteRow() when on the insert row."; + t[407] = "不能在新增的数据上呼叫 deleteRow()。"; + t[414] = "{0} function takes four and only four argument."; + t[415] = "{0} 函式取得四个且仅有四个引数。"; + t[416] = 
"Unable to translate data into the desired encoding."; + t[417] = "无法将数据转成目标编码。"; + t[424] = "Can''t use relative move methods while on the insert row."; + t[425] = "不能在新增的数据列上使用相对位置 move 方法。"; + t[434] = "Invalid stream length {0}."; + t[435] = "无效的串流长度 {0}."; + t[436] = "The driver currently does not support COPY operations."; + t[437] = "驱动程序目前不支援 COPY 操作。"; + t[440] = "Maximum number of rows must be a value grater than or equal to 0."; + t[441] = "最大数据读取笔数必须大于或等于 0。"; + t[446] = "Failed to create object for: {0}."; + t[447] = "为 {0} 建立对象失败。"; + t[448] = "{0} function takes three and only three arguments."; + t[449] = "{0} 函式取得三个且仅有三个引数。"; + t[450] = "Conversion of interval failed"; + t[451] = "隔绝(Interval)转换失败。"; + t[452] = "Cannot tell if path is open or closed: {0}."; + t[453] = "无法得知 path 是开启或关闭:{0}。"; + t[460] = "Provided InputStream failed."; + t[461] = "提供的 InputStream 已失败。"; + t[462] = "Invalid fetch direction constant: {0}."; + t[463] = "无效的 fetch 方向常数:{0}。"; + t[472] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. 
xid={0}, currentXid={1}, state={2}, flags={3}"; + t[473] = "事物交易隔绝(Transaction interleaving)未被实作。xid={0}, currentXid={1}, state={2}, flags={3}"; + t[474] = "{0} function takes two and only two arguments."; + t[475] = "{0} 函式取得二个且仅有二个引数。"; + t[476] = "There are no rows in this ResultSet."; + t[477] = "ResultSet 中找不到数据列。"; + t[478] = "Zero bytes may not occur in string parameters."; + t[479] = "字符参数不能有 0 个位元组。"; + t[480] = "Cannot call updateRow() when on the insert row."; + t[481] = "不能在新增的数据列上呼叫 deleteRow()。"; + t[482] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[483] = "Connection 已自动结束,因为一个新的 PooledConnection 连线被开启或者或 PooledConnection 已被关闭。"; + t[488] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; + t[489] = "一个 CallableStatement 执行函式后输出的参数类型为 {1} 值为 {0},但是已注册的类型是 {2}。"; + t[494] = "Cannot cast an instance of {0} to type {1}"; + t[495] = "不能转换一个 {0} 实例到类型 {1}"; + t[498] = "Cannot retrieve the id of a named savepoint."; + t[499] = "无法取得已命名储存点的 id。"; + t[500] = "Cannot change transaction read-only property in the middle of a transaction."; + t[501] = "不能在事物交易过程中改变事物交易唯读属性。"; + t[502] = "The server does not support SSL."; + t[503] = "服务器不支援 SSL 连线。"; + t[510] = "A connection could not be made using the requested protocol {0}."; + t[511] = "无法以要求的通讯协定 {0} 建立连线。"; + t[512] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[513] = "不支援 {0} 验证类型。请核对您已经组态 pg_hba.conf 文件包含客户端的IP位址或网路区段,以及驱动程序所支援的验证架构模式已被支援。"; + t[514] = "Malformed function or procedure escape syntax at offset {0}."; + t[515] = "不正确的函式或程序 escape 语法于 {0}。"; + t[516] = "The server''s DateStyle parameter was changed to {0}. 
The JDBC driver requires DateStyle to begin with ISO for correct operation."; + t[517] = "这服务器的 DateStyle 参数被更改成 {0},JDBC 驱动程序请求需要 DateStyle 以 ISO 开头以正确工作。"; + t[518] = "No results were returned by the query."; + t[519] = "查询没有传回任何结果。"; + t[520] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[521] = "位置:文件:{0},常式:{1},行:{2}"; + t[526] = "Hint: {0}"; + t[527] = "建议:{0}"; + t[528] = "A CallableStatement was executed with nothing returned."; + t[529] = "一个 CallableStatement 执行函式后没有传回值。"; + t[530] = "Unknown ResultSet holdability setting: {0}."; + t[531] = "未知的 ResultSet 可适用的设置:{0}。"; + t[540] = "Cannot change transaction isolation level in the middle of a transaction."; + t[541] = "不能在事务交易过程中改变事物交易隔绝等级。"; + t[544] = "The fastpath function {0} is unknown."; + t[545] = "不明的 fastpath 函式 {0}。"; + t[546] = "Can''t use query methods that take a query string on a PreparedStatement."; + t[547] = "在 PreparedStatement 上不能使用获取查询字符的查询方法。"; + t[556] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; + t[557] = "操作要求可卷动的 ResultSet,但此 ResultSet 是 FORWARD_ONLY。"; + t[564] = "Unknown Types value."; + t[565] = "不明的类型值。"; + t[570] = "Large Objects may not be used in auto-commit mode."; + t[571] = "大型对象无法被使用在自动确认事物交易模式。"; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 289) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 287) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 578) + idx -= 578; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + @Override + public Enumeration getKeys () { + return new Enumeration<>() { + private int idx = 0; + { while (idx < 578 && table[idx] == null) idx += 2; } + + @Override + public boolean 
hasMoreElements () { + return (idx < 578); + } + + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 578 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_TW.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_TW.java new file mode 100644 index 0000000..a010086 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_TW.java @@ -0,0 +1,286 @@ +package org.postgresql.translation; + +import java.util.Enumeration; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +public class messages_zh_TW extends ResourceBundle { + private static final String[] table; + static { + String[] t = new String[578]; + t[0] = ""; + t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.3\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-01-21 16:50+0800\nLast-Translator: 郭朝益(ChaoYi, Kuo) \nLanguage-Team: The PostgreSQL Development Team \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Chinese\nX-Poedit-Country: TAIWAN\nX-Poedit-SourceCharset: utf-8\n"; + t[6] = "Cannot call cancelRowUpdates() when on the insert row."; + t[7] = "不能在新增的資料列上呼叫 cancelRowUpdates()。"; + t[8] = "The server requested password-based authentication, but no password was provided."; + t[9] = "伺服器要求使用密碼驗證,但是密碼並未提供。"; + t[12] = "Detail: {0}"; + t[13] = "詳細:{0}"; + t[16] = "Can''t refresh the insert row."; + t[17] = "無法重讀新增的資料列。"; + t[18] = "Connection has been closed."; + t[19] = "Connection 已經被關閉。"; + t[24] = "Bad value for type {0} : {1}"; + t[25] = "不良的型別值 {0} : {1}"; + t[36] = "Truncation of large objects is only implemented in 8.3 and later servers."; + t[37] = "大型物件的截斷(Truncation)僅被實作執行在 8.3 和後來的伺服器。"; + t[40] = "Cannot retrieve the name of an unnamed savepoint."; + t[41] = 
"無法取得未命名儲存點(Savepoint)的名稱。"; + t[46] = "An error occurred while setting up the SSL connection."; + t[47] = "進行 SSL 連線時發生錯誤。"; + t[50] = "suspend/resume not implemented"; + t[51] = "暫停(suspend)/再繼續(resume)尚未被實作。"; + t[60] = "{0} function takes one and only one argument."; + t[61] = "{0} 函式取得一個且僅有一個引數。"; + t[62] = "Conversion to type {0} failed: {1}."; + t[63] = "轉換型別 {0} 失敗:{1}。"; + t[66] = "Conversion of money failed."; + t[67] = "money 轉換失敗。"; + t[70] = "A result was returned when none was expected."; + t[71] = "傳回預期之外的結果。"; + t[80] = "This PooledConnection has already been closed."; + t[81] = "這個 PooledConnection 已經被關閉。"; + t[84] = "Multiple ResultSets were returned by the query."; + t[85] = "查詢傳回多個 ResultSet。"; + t[90] = "Not on the insert row."; + t[91] = "不在新增的資料列上。"; + t[94] = "An unexpected result was returned by a query."; + t[95] = "傳回非預期的查詢結果。"; + t[102] = "Internal Query: {0}"; + t[103] = "內部查詢:{0}"; + t[106] = "The array index is out of range: {0}"; + t[107] = "陣列索引超過許可範圍:{0}"; + t[112] = "Connection attempt timed out."; + t[113] = "Connection 嘗試逾時。"; + t[114] = "Unable to find name datatype in the system catalogs."; + t[115] = "在系統 catalog 中找不到名稱資料類型(datatype)。"; + t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception."; + t[117] = "不明的原因導致驅動程式造成失敗,請回報這個例外。"; + t[120] = "The array index is out of range: {0}, number of elements: {1}."; + t[121] = "陣列索引超過許可範圍:{0},元素數量:{1}。"; + t[138] = "Invalid flags {0}"; + t[139] = "無效的旗標 {0}"; + t[146] = "Unexpected error writing large object to database."; + t[147] = "將大型物件(large object)寫入資料庫時發生不明錯誤。"; + t[162] = "Query timeout must be a value greater than or equals to 0."; + t[163] = "查詢逾時等候時間必須大於或等於 0。"; + t[170] = "Unknown type {0}."; + t[171] = "不明的型別 {0}"; + t[174] = "The server''s standard_conforming_strings parameter was reported as {0}. 
The JDBC driver expected on or off."; + t[175] = "這伺服器的 standard_conforming_strings 參數已回報為 {0},JDBC 驅動程式已預期開啟或是關閉。"; + t[176] = "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. The most common example of this is storing 8bit data in a SQL_ASCII database."; + t[177] = "發現不合法的字元,可能的原因是欲儲存的資料中包含資料庫的字元集不支援的字碼,其中最常見例子的就是將 8 位元資料存入使用 SQL_ASCII 編碼的資料庫中。"; + t[178] = "The column index is out of range: {0}, number of columns: {1}."; + t[179] = "欄位索引超過許可範圍:{0},欄位數:{1}。"; + t[180] = "The connection attempt failed."; + t[181] = "嘗試連線已失敗。"; + t[182] = "No value specified for parameter {0}."; + t[183] = "未設定參數值 {0} 的內容。"; + t[190] = "Provided Reader failed."; + t[191] = "提供的 Reader 已失敗。"; + t[194] = "Unsupported value for stringtype parameter: {0}"; + t[195] = "字串型別參數值未被支持:{0}"; + t[198] = "A CallableStatement was declared, but no call to registerOutParameter(1, ) was made."; + t[199] = "已經宣告 CallableStatement 函式,但是尚未呼叫 registerOutParameter (1, ) 。"; + t[204] = "Currently positioned before the start of the ResultSet. 
You cannot call deleteRow() here."; + t[205] = "不能在 ResultSet 的第一筆資料之前呼叫 deleteRow()。"; + t[214] = "The maximum field size must be a value greater than or equal to 0."; + t[215] = "最大欄位容量必須大於或等於 0。"; + t[216] = "Fetch size must be a value greater to or equal to 0."; + t[217] = "資料讀取筆數(fetch size)必須大於或等於 0。"; + t[220] = "PostgreSQL LOBs can only index to: {0}"; + t[221] = "PostgreSQL LOBs 僅能索引到:{0}"; + t[224] = "The JVM claims not to support the encoding: {0}"; + t[225] = "JVM 聲明並不支援編碼:{0} 。"; + t[226] = "Interval {0} not yet implemented"; + t[227] = "隔絕 {0} 尚未被實作。"; + t[238] = "Fastpath call {0} - No result was returned and we expected an integer."; + t[239] = "Fastpath 呼叫 {0} - 沒有傳回值,且應該傳回一個整數。"; + t[246] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."; + t[247] = "ResultSets 與並發同作(Concurrency) CONCUR_READ_ONLY 不能被更新。"; + t[250] = "This statement does not declare an OUT parameter. Use '{' ?= call ... '}' to declare one."; + t[251] = "這個 statement 未宣告 OUT 參數,使用 '{' ?= call ... '}' 宣告一個。"; + t[256] = "Cannot reference a savepoint after it has been released."; + t[257] = "無法參照已經被釋放的儲存點。"; + t[260] = "Unsupported Types value: {0}"; + t[261] = "未被支持的型別值:{0}"; + t[266] = "Protocol error. Session setup failed."; + t[267] = "通訊協定錯誤,Session 初始化失敗。"; + t[274] = "Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."; + t[275] = "不能在 ResultSet 的最後一筆資料之後呼叫 deleteRow()。"; + t[278] = "Internal Position: {0}"; + t[279] = "內部位置:{0}"; + t[280] = "Zero bytes may not occur in identifiers."; + t[281] = "在標識識別符中不存在零位元組。"; + t[288] = "{0} function doesn''t take any argument."; + t[289] = "{0} 函式無法取得任何的引數。"; + t[300] = "This statement has been closed."; + t[301] = "這個 statement 已經被關閉。"; + t[318] = "Cannot establish a savepoint in auto-commit mode."; + t[319] = "在自動確認事物交易模式無法建立儲存點(Savepoint)。"; + t[320] = "Position: {0}"; + t[321] = "位置:{0}"; + t[322] = "ResultSet is not updateable. 
The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."; + t[323] = "不可更新的 ResultSet。用來產生這個 ResultSet 的 SQL 命令只能操作一個資料表,並且必需選擇所有主鍵欄位,詳細請參閱 JDBC 2.1 API 規格書 5.6 節。"; + t[330] = "This ResultSet is closed."; + t[331] = "這個 ResultSet 已經被關閉。"; + t[338] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made."; + t[339] = "已註冊參數型別 {0},但是又呼叫了get{1}(sqltype={2})。"; + t[342] = "Transaction isolation level {0} not supported."; + t[343] = "不支援交易隔絕等級 {0} 。"; + t[344] = "Statement has been closed."; + t[345] = "Sstatement 已經被關閉。"; + t[352] = "Server SQLState: {0}"; + t[353] = "伺服器 SQLState:{0}"; + t[354] = "No primary key found for table {0}."; + t[355] = "{0} 資料表中未找到主鍵(Primary key)。"; + t[362] = "Cannot convert an instance of {0} to type {1}"; + t[363] = "無法轉換 {0} 到類型 {1} 的實例"; + t[364] = "DataSource has been closed."; + t[365] = "DataSource 已經被關閉。"; + t[368] = "The column name {0} was not found in this ResultSet."; + t[369] = "ResultSet 中找不到欄位名稱 {0}。"; + t[372] = "ResultSet not positioned properly, perhaps you need to call next."; + t[373] = "查詢結果指標位置不正確,您也許需要呼叫 ResultSet 的 next() 方法。"; + t[378] = "Cannot update the ResultSet because it is either before the start or after the end of the results."; + t[379] = "無法更新 ResultSet,可能在第一筆資料之前或最未筆資料之後。"; + t[380] = "Method {0} is not yet implemented."; + t[381] = "這個 {0} 方法尚未被實作。"; + t[382] = "{0} function takes two or three arguments."; + t[383] = "{0} 函式取得二個或三個引數。"; + t[384] = "The JVM claims not to support the {0} encoding."; + t[385] = "JVM 聲明並不支援 {0} 編碼。"; + t[396] = "Unknown Response Type {0}."; + t[397] = "不明的回應類型 {0}。"; + t[398] = "The parameter index is out of range: {0}, number of parameters: {1}."; + t[399] = "參數索引超出許可範圍:{0},參數總數:{1}。"; + t[400] = "Where: {0}"; + t[401] = "在位置:{0}"; + t[406] = "Cannot call deleteRow() when on the insert row."; + t[407] = "不能在新增的資料上呼叫 
deleteRow()。"; + t[414] = "{0} function takes four and only four argument."; + t[415] = "{0} 函式取得四個且僅有四個引數。"; + t[416] = "Unable to translate data into the desired encoding."; + t[417] = "無法將資料轉成目標編碼。"; + t[424] = "Can''t use relative move methods while on the insert row."; + t[425] = "不能在新增的資料列上使用相對位置 move 方法。"; + t[434] = "Invalid stream length {0}."; + t[435] = "無效的串流長度 {0}."; + t[436] = "The driver currently does not support COPY operations."; + t[437] = "驅動程式目前不支援 COPY 操作。"; + t[440] = "Maximum number of rows must be a value grater than or equal to 0."; + t[441] = "最大資料讀取筆數必須大於或等於 0。"; + t[446] = "Failed to create object for: {0}."; + t[447] = "為 {0} 建立物件失敗。"; + t[448] = "{0} function takes three and only three arguments."; + t[449] = "{0} 函式取得三個且僅有三個引數。"; + t[450] = "Conversion of interval failed"; + t[451] = "隔絕(Interval)轉換失敗。"; + t[452] = "Cannot tell if path is open or closed: {0}."; + t[453] = "無法得知 path 是開啟或關閉:{0}。"; + t[460] = "Provided InputStream failed."; + t[461] = "提供的 InputStream 已失敗。"; + t[462] = "Invalid fetch direction constant: {0}."; + t[463] = "無效的 fetch 方向常數:{0}。"; + t[472] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. 
xid={0}, currentXid={1}, state={2}, flags={3}"; + t[473] = "事物交易隔絕(Transaction interleaving)未被實作。xid={0}, currentXid={1}, state={2}, flags={3}"; + t[474] = "{0} function takes two and only two arguments."; + t[475] = "{0} 函式取得二個且僅有二個引數。"; + t[476] = "There are no rows in this ResultSet."; + t[477] = "ResultSet 中找不到資料列。"; + t[478] = "Zero bytes may not occur in string parameters."; + t[479] = "字串參數不能有 0 個位元組。"; + t[480] = "Cannot call updateRow() when on the insert row."; + t[481] = "不能在新增的資料列上呼叫 deleteRow()。"; + t[482] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed."; + t[483] = "Connection 已自動結束,因為一個新的 PooledConnection 連線被開啟或者或 PooledConnection 已被關閉。"; + t[488] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered."; + t[489] = "一個 CallableStatement 執行函式後輸出的參數型別為 {1} 值為 {0},但是已註冊的型別是 {2}。"; + t[494] = "Cannot cast an instance of {0} to type {1}"; + t[495] = "不能轉換一個 {0} 實例到型別 {1}"; + t[498] = "Cannot retrieve the id of a named savepoint."; + t[499] = "無法取得已命名儲存點的 id。"; + t[500] = "Cannot change transaction read-only property in the middle of a transaction."; + t[501] = "不能在事物交易過程中改變事物交易唯讀屬性。"; + t[502] = "The server does not support SSL."; + t[503] = "伺服器不支援 SSL 連線。"; + t[510] = "A connection could not be made using the requested protocol {0}."; + t[511] = "無法以要求的通訊協定 {0} 建立連線。"; + t[512] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver."; + t[513] = "不支援 {0} 驗證型別。請核對您已經組態 pg_hba.conf 檔案包含客戶端的IP位址或網路區段,以及驅動程式所支援的驗證架構模式已被支援。"; + t[514] = "Malformed function or procedure escape syntax at offset {0}."; + t[515] = "不正確的函式或程序 escape 語法於 {0}。"; + t[516] = "The server''s DateStyle parameter was changed to {0}. 
The JDBC driver requires DateStyle to begin with ISO for correct operation."; + t[517] = "這伺服器的 DateStyle 參數被更改成 {0},JDBC 驅動程式請求需要 DateStyle 以 ISO 開頭以正確工作。"; + t[518] = "No results were returned by the query."; + t[519] = "查詢沒有傳回任何結果。"; + t[520] = "Location: File: {0}, Routine: {1}, Line: {2}"; + t[521] = "位置:檔案:{0},常式:{1},行:{2}"; + t[526] = "Hint: {0}"; + t[527] = "建議:{0}"; + t[528] = "A CallableStatement was executed with nothing returned."; + t[529] = "一個 CallableStatement 執行函式後沒有傳回值。"; + t[530] = "Unknown ResultSet holdability setting: {0}."; + t[531] = "未知的 ResultSet 可適用的設置:{0}。"; + t[540] = "Cannot change transaction isolation level in the middle of a transaction."; + t[541] = "不能在事務交易過程中改變事物交易隔絕等級。"; + t[544] = "The fastpath function {0} is unknown."; + t[545] = "不明的 fastpath 函式 {0}。"; + t[546] = "Can''t use query methods that take a query string on a PreparedStatement."; + t[547] = "在 PreparedStatement 上不能使用獲取查詢字串的查詢方法。"; + t[556] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."; + t[557] = "操作要求可捲動的 ResultSet,但此 ResultSet 是 FORWARD_ONLY。"; + t[564] = "Unknown Types value."; + t[565] = "不明的型別值。"; + t[570] = "Large Objects may not be used in auto-commit mode."; + t[571] = "大型物件無法被使用在自動確認事物交易模式。"; + table = t; + } + + @Override + public Object handleGetObject (String msgid) throws MissingResourceException { + int hash_val = msgid.hashCode() & 0x7fffffff; + int idx = (hash_val % 289) << 1; + { + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + int incr = ((hash_val % 287) + 1) << 1; + for (;;) { + idx += incr; + if (idx >= 578) + idx -= 578; + Object found = table[idx]; + if (found == null) + return null; + if (msgid.equals(found)) + return table[idx + 1]; + } + } + + public Enumeration getKeys () { + return new Enumeration<>() { + private int idx = 0; + { while (idx < 578 && table[idx] == null) idx += 2; } + + @Override + public boolean hasMoreElements () 
{ + return (idx < 578); + } + + @Override + public String nextElement () { + Object key = table[idx]; + do idx += 2; while (idx < 578 && table[idx] == null); + return key.toString(); + } + }; + } + + public ResourceBundle getParent () { + return parent; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/ByteBufferByteStreamWriter.java b/pgjdbc/src/main/java/org/postgresql/util/ByteBufferByteStreamWriter.java new file mode 100644 index 0000000..f0a8aa0 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/ByteBufferByteStreamWriter.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.WritableByteChannel; + +/** + * A {@link ByteStreamWriter} that writes a {@link ByteBuffer java.nio.ByteBuffer} to a byte array + * parameter. + */ +public class ByteBufferByteStreamWriter implements ByteStreamWriter { + + private final ByteBuffer buf; + private final int length; + + /** + * Construct the writer with the given {@link ByteBuffer} + * + * @param buf the buffer to use. 
+ */ + public ByteBufferByteStreamWriter(ByteBuffer buf) { + this.buf = buf; + this.length = buf.remaining(); + } + + @Override + public int getLength() { + return length; + } + + @Override + public void writeTo(ByteStreamTarget target) throws IOException { + if (buf.hasArray()) { + // Avoid copying the array if possible + target.getOutputStream() + .write(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining()); + return; + } + + // this _does_ involve some copying to a temporary buffer, but that's unavoidable + // as OutputStream itself only accepts single bytes or heap allocated byte arrays + try (WritableByteChannel c = Channels.newChannel(target.getOutputStream())) { + c.write(buf); + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/ByteBuffersByteStreamWriter.java b/pgjdbc/src/main/java/org/postgresql/util/ByteBuffersByteStreamWriter.java new file mode 100644 index 0000000..9edde9f --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/ByteBuffersByteStreamWriter.java @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.WritableByteChannel; + +/** + * A {@link ByteStreamWriter} that writes a {@link ByteBuffer java.nio.ByteBuffer} to a byte array + * parameter. + */ +class ByteBuffersByteStreamWriter implements ByteStreamWriter { + + private final ByteBuffer[] buffers; + private final int length; + + /** + * Construct the writer with the given {@link ByteBuffer} + * + * @param buffers the buffer to use. + */ + ByteBuffersByteStreamWriter(ByteBuffer... 
buffers) { + this.buffers = buffers; + int length = 0; + for (ByteBuffer buffer : buffers) { + length += buffer.remaining(); + } + this.length = length; + } + + @Override + public int getLength() { + return length; + } + + @Override + public void writeTo(ByteStreamTarget target) throws IOException { + boolean allArraysAreAccessible = true; + for (ByteBuffer buffer : buffers) { + if (!buffer.hasArray()) { + allArraysAreAccessible = false; + break; + } + } + + OutputStream os = target.getOutputStream(); + if (allArraysAreAccessible) { + for (ByteBuffer buffer : buffers) { + os.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining()); + } + return; + } + // Channels.newChannel does not buffer writes, so we can mix writes to the channel with writes + // to the OutputStream + try (WritableByteChannel c = Channels.newChannel(os)) { + for (ByteBuffer buffer : buffers) { + if (buffer.hasArray()) { + os.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining()); + } else { + c.write(buffer); + } + } + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/ByteConverter.java b/pgjdbc/src/main/java/org/postgresql/util/ByteConverter.java new file mode 100644 index 0000000..1cddc45 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/ByteConverter.java @@ -0,0 +1,657 @@ +/* + * Copyright (c) 2011, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.Arrays; + +/** + * Helper methods to parse java base types from byte arrays. + * + * @author Mikko Tiihonen + * @author Brett Okken + */ +public class ByteConverter { + + /** + * Simple stack structure for non-negative {@code short} values. 
/**
 * Helper methods to parse java base types from byte arrays (big-endian, matching the
 * PostgreSQL binary wire format), and to encode them back.
 *
 * @author Mikko Tiihonen
 * @author Brett Okken
 */
public class ByteConverter {

  /**
   * Simple stack structure for non-negative {@code short} values.
   */
  private static final class PositiveShorts {
    private short[] shorts = new short[8];
    private int idx;

    PositiveShorts() {
    }

    public void push(short s) {
      if (s < 0) {
        throw new IllegalArgumentException("only non-negative values accepted: " + s);
      }
      if (idx == shorts.length) {
        grow();
      }
      shorts[idx++] = s;
    }

    public int size() {
      return idx;
    }

    public boolean isEmpty() {
      return idx == 0;
    }

    public short pop() {
      return idx > 0 ? shorts[--idx] : -1;
    }

    private void grow() {
      // Double until 2048 entries, then grow by 1.5x to limit over-allocation.
      final int newSize = shorts.length <= 1024 ? shorts.length << 1 : (int) (shorts.length * 1.5);
      shorts = Arrays.copyOf(shorts, newSize);
    }
  }

  private static final int NUMERIC_DSCALE_MASK = 0x00003FFF;
  private static final short NUMERIC_POS = 0x0000;
  private static final short NUMERIC_NEG = 0x4000;
  private static final short NUMERIC_NAN = (short) 0xC000;
  private static final int SHORT_BYTES = 2;
  // Renamed from LONG_BYTES: 4 is the width of an int4, not a long.
  private static final int INT_BYTES = 4;
  private static final int[] INT_TEN_POWERS = new int[6];
  private static final long[] LONG_TEN_POWERS = new long[19];
  private static final BigInteger[] BI_TEN_POWERS = new BigInteger[32];
  private static final BigInteger BI_TEN_THOUSAND = BigInteger.valueOf(10000);
  private static final BigInteger BI_MAX_LONG = BigInteger.valueOf(Long.MAX_VALUE);

  static {
    for (int i = 0; i < INT_TEN_POWERS.length; i++) {
      INT_TEN_POWERS[i] = (int) Math.pow(10, i);
    }
    for (int i = 0; i < LONG_TEN_POWERS.length; i++) {
      LONG_TEN_POWERS[i] = (long) Math.pow(10, i);
    }
    for (int i = 0; i < BI_TEN_POWERS.length; i++) {
      BI_TEN_POWERS[i] = BigInteger.TEN.pow(i);
    }
  }

  private ByteConverter() {
    // prevent instantiation of static helper class
  }

  /**
   * Convert a variable length array of bytes to an integer.
   *
   * @param bytes array of 1, 2 or 4 bytes that can be decoded as an integer
   * @return integer
   * @throws IllegalArgumentException if the array length is not 1, 2 or 4
   */
  public static int bytesToInt(byte[] bytes) {
    if (bytes.length == 1) {
      return (int) bytes[0];
    }
    if (bytes.length == SHORT_BYTES) {
      return int2(bytes, 0);
    }
    if (bytes.length == INT_BYTES) {
      return int4(bytes, 0);
    }
    // Fixed: previously reported "Argument bytes is empty" for any unsupported
    // length (e.g. 3 or 5), which was misleading.
    throw new IllegalArgumentException("unsupported byte array length: " + bytes.length);
  }

  /**
   * Convert a variable length array of bytes to a {@link Number}.
   *
   * @param bytes array of bytes that can be decoded as a binary numeric
   * @return BigDecimal representation of numeric or {@link Double#NaN}
   */
  public static Number numeric(byte[] bytes) {
    return numeric(bytes, 0, bytes.length);
  }

  /**
   * Convert a variable length array of bytes to a {@link Number}. The result will
   * always be a {@link BigDecimal} or {@link Double#NaN}.
   *
   * @param bytes array of bytes to be decoded from binary numeric representation.
   * @param pos index of the start position of the bytes array for number
   * @param numBytes number of bytes to use, length is already encoded
   *                 in the binary format but this is used for double checking
   * @return BigDecimal representation of numeric or {@link Double#NaN}.
   */
  public static Number numeric(byte[] bytes, int pos, int numBytes) {

    if (numBytes < 8) {
      throw new IllegalArgumentException("number of bytes should be at-least 8");
    }

    //number of 2-byte shorts representing 4 decimal digits - should be treated as unsigned
    int len = ByteConverter.int2(bytes, pos) & 0xFFFF;
    //0 based number of 4 decimal digits (i.e. 2-byte shorts) before the decimal
    //a value <= 0 indicates an absolute value < 1.
    short weight = ByteConverter.int2(bytes, pos + 2);
    //indicates positive, negative or NaN
    short sign = ByteConverter.int2(bytes, pos + 4);
    //number of digits after the decimal. This must be >= 0.
    //a value of 0 indicates a whole number (integer).
    short scale = ByteConverter.int2(bytes, pos + 6);

    //An integer should be built from the len number of 2 byte shorts, treating each
    //as 4 digits.
    //The weight, if > 0, indicates how many of those 4 digit chunks should be to the
    //"left" of the decimal. If the weight is 0, then all 4 digit chunks start immediately
    //to the "right" of the decimal. If the weight is < 0, the absolute distance from 0
    //indicates 4 leading "0" digits to the immediate "right" of the decimal, prior to the
    //digits from "len".
    //A weight which is positive, can be a number larger than what len defines. This means
    //there are trailing 0s after the "len" integer and before the decimal.
    //The scale indicates how many significant digits there are to the right of the decimal.
    //A value of 0 indicates a whole number (integer).
    //The combination of weight, len, and scale can result in either trimming digits provided
    //by len (only to the right of the decimal) or adding significant 0 values to the right
    //of len (on either side of the decimal).

    if (numBytes != (len * SHORT_BYTES + 8)) {
      throw new IllegalArgumentException("invalid length of bytes \"numeric\" value");
    }

    if (!(sign == NUMERIC_POS
        || sign == NUMERIC_NEG
        || sign == NUMERIC_NAN)) {
      throw new IllegalArgumentException("invalid sign in \"numeric\" value");
    }

    if (sign == NUMERIC_NAN) {
      return Double.NaN;
    }

    if ((scale & NUMERIC_DSCALE_MASK) != scale) {
      throw new IllegalArgumentException("invalid scale in \"numeric\" value");
    }

    if (len == 0) {
      return new BigDecimal(BigInteger.ZERO, scale);
    }

    int idx = pos + 8;

    short d = ByteConverter.int2(bytes, idx);

    //if the absolute value is (0, 1), then leading '0' values
    //do not matter for the unscaledInt, but trailing 0s do
    if (weight < 0) {
      assert scale > 0;
      int effectiveScale = scale;
      //adjust weight to determine how many leading 0s after the decimal
      //before the provided values/digits actually begin
      ++weight;
      if (weight < 0) {
        effectiveScale += 4 * weight;
      }

      int i = 1;
      //typically there should not be leading 0 short values, as it is more
      //efficient to represent that in the weight value
      for (; i < len && d == 0; i++) {
        //each leading 0 value removes 4 from the effective scale
        effectiveScale -= 4;
        idx += 2;
        d = ByteConverter.int2(bytes, idx);
      }

      assert effectiveScale > 0;
      if (effectiveScale >= 4) {
        effectiveScale -= 4;
      } else {
        //an effective scale of less than four means that the value d
        //has trailing 0s which are not significant
        //so we divide by the appropriate power of 10 to reduce those
        d = (short) (d / INT_TEN_POWERS[4 - effectiveScale]);
        effectiveScale = 0;
      }
      //defer moving to BigInteger as long as possible
      //operations on the long are much faster
      BigInteger unscaledBI = null;
      long unscaledInt = d;
      for (; i < len; i++) {
        if (i == 4 && effectiveScale > 2) {
          unscaledBI = BigInteger.valueOf(unscaledInt);
        }
        idx += 2;
        d = ByteConverter.int2(bytes, idx);
        //if effective scale is at least 4, then all 4 digits should be used
        //and the existing number needs to be shifted 4
        if (effectiveScale >= 4) {
          if (unscaledBI == null) {
            unscaledInt *= 10000;
          } else {
            unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
          }
          effectiveScale -= 4;
        } else {
          //if effective scale is less than 4, then only shift left based on remaining scale
          if (unscaledBI == null) {
            unscaledInt *= INT_TEN_POWERS[effectiveScale];
          } else {
            unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
          }
          //and d needs to be shifted to the right to only get correct number of
          //significant digits
          d = (short) (d / INT_TEN_POWERS[4 - effectiveScale]);
          effectiveScale = 0;
        }
        if (unscaledBI == null) {
          unscaledInt += d;
        } else {
          if (d != 0) {
            unscaledBI = unscaledBI.add(BigInteger.valueOf(d));
          }
        }
      }
      //now we need BigInteger to create BigDecimal
      if (unscaledBI == null) {
        unscaledBI = BigInteger.valueOf(unscaledInt);
      }
      //if there is remaining effective scale, apply it here
      if (effectiveScale > 0) {
        unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
      }
      if (sign == NUMERIC_NEG) {
        unscaledBI = unscaledBI.negate();
      }

      return new BigDecimal(unscaledBI, scale);
    }

    //if there is no scale, then shorts are the unscaled int
    if (scale == 0) {
      //defer moving to BigInteger as long as possible
      //operations on the long are much faster
      BigInteger unscaledBI = null;
      long unscaledInt = d;
      //loop over all of the len shorts to process as the unscaled int
      for (int i = 1; i < len; i++) {
        if (i == 4) {
          unscaledBI = BigInteger.valueOf(unscaledInt);
        }
        idx += 2;
        d = ByteConverter.int2(bytes, idx);
        if (unscaledBI == null) {
          unscaledInt *= 10000;
          unscaledInt += d;
        } else {
          unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
          if (d != 0) {
            unscaledBI = unscaledBI.add(BigInteger.valueOf(d));
          }
        }
      }
      //now we need BigInteger to create BigDecimal
      if (unscaledBI == null) {
        unscaledBI = BigInteger.valueOf(unscaledInt);
      }
      if (sign == NUMERIC_NEG) {
        unscaledBI = unscaledBI.negate();
      }
      //the difference between len and weight (adjusted from 0 based) becomes the scale for BigDecimal
      final int bigDecScale = (len - (weight + 1)) * 4;
      //string representation always results in a BigDecimal with scale of 0
      //the binary representation, where weight and len can infer trailing 0s, can result in a negative scale
      //to produce a consistent BigDecimal, we return the equivalent object with scale set to 0
      return bigDecScale == 0 ? new BigDecimal(unscaledBI)
          : new BigDecimal(unscaledBI, bigDecScale).setScale(0);
    }

    //defer moving to BigInteger as long as possible
    //operations on the long are much faster
    BigInteger unscaledBI = null;
    long unscaledInt = d;
    //weight and scale as defined by postgresql are a bit different than how BigDecimal treats scale
    //maintain the effective values to massage as we process through values
    int effectiveWeight = weight;
    int effectiveScale = scale;
    for (int i = 1; i < len; i++) {
      if (i == 4) {
        unscaledBI = BigInteger.valueOf(unscaledInt);
      }
      idx += 2;
      d = ByteConverter.int2(bytes, idx);
      //first process effective weight down to 0
      if (effectiveWeight > 0) {
        --effectiveWeight;
        if (unscaledBI == null) {
          unscaledInt *= 10000;
        } else {
          unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
        }
      } else if (effectiveScale >= 4) {
        //if effective scale is at least 4, then all 4 digits should be used
        //and the existing number needs to be shifted 4
        effectiveScale -= 4;
        if (unscaledBI == null) {
          unscaledInt *= 10000;
        } else {
          unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
        }
      } else {
        //if effective scale is less than 4, then only shift left based on remaining scale
        if (unscaledBI == null) {
          unscaledInt *= INT_TEN_POWERS[effectiveScale];
        } else {
          unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
        }
        //and d needs to be shifted to the right to only get correct number of
        //significant digits
        d = (short) (d / INT_TEN_POWERS[4 - effectiveScale]);
        effectiveScale = 0;
      }
      if (unscaledBI == null) {
        unscaledInt += d;
      } else {
        if (d != 0) {
          unscaledBI = unscaledBI.add(BigInteger.valueOf(d));
        }
      }
    }

    //now we need BigInteger to create BigDecimal
    if (unscaledBI == null) {
      unscaledBI = BigInteger.valueOf(unscaledInt);
    }
    //if there is remaining weight, apply it here
    if (effectiveWeight > 0) {
      unscaledBI = unscaledBI.multiply(tenPower(effectiveWeight * 4));
    }
    //if there is remaining effective scale, apply it here
    if (effectiveScale > 0) {
      unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
    }
    if (sign == NUMERIC_NEG) {
      unscaledBI = unscaledBI.negate();
    }

    return new BigDecimal(unscaledBI, scale);
  }

  /**
   * Converts a non-null {@link BigDecimal} to the binary format used for NUMERIC.
   *
   * @param nbr The instance to represent in binary.
   * @return The binary representation of nbr.
   */
  public static byte[] numeric(BigDecimal nbr) {
    final PositiveShorts shorts = new PositiveShorts();
    BigInteger unscaled = nbr.unscaledValue().abs();
    int scale = nbr.scale();
    if (unscaled.equals(BigInteger.ZERO)) {
      //zero encodes as len=0, weight=-1; only the dscale carries information
      final byte[] bytes = new byte[]{0, 0, -1, -1, 0, 0, 0, 0};
      ByteConverter.int2(bytes, 6, Math.max(0, scale));
      return bytes;
    }
    int weight = -1;
    if (scale <= 0) {
      //this means we have an integer
      //adjust unscaled and weight
      if (scale < 0) {
        scale = Math.abs(scale);
        //weight value covers 4 digits
        weight += scale / 4;
        //whatever remains needs to be incorporated to the unscaled value
        int mod = scale % 4;
        unscaled = unscaled.multiply(tenPower(mod));
        scale = 0;
      }

      while (unscaled.compareTo(BI_MAX_LONG) > 0) {
        final BigInteger[] pair = unscaled.divideAndRemainder(BI_TEN_THOUSAND);
        unscaled = pair[0];
        final short shortValue = pair[1].shortValue();
        if (shortValue != 0 || !shorts.isEmpty()) {
          shorts.push(shortValue);
        }
        ++weight;
      }
      long unscaledLong = unscaled.longValueExact();
      do {
        final short shortValue = (short) (unscaledLong % 10000);
        if (shortValue != 0 || !shorts.isEmpty()) {
          shorts.push(shortValue);
        }
        unscaledLong = unscaledLong / 10000L;
        ++weight;
      } while (unscaledLong != 0);
    } else {
      final BigInteger[] split = unscaled.divideAndRemainder(tenPower(scale));
      BigInteger decimal = split[1];
      BigInteger wholes = split[0];
      weight = -1;
      if (!BigInteger.ZERO.equals(decimal)) {
        int mod = scale % 4;
        int segments = scale / 4;
        if (mod != 0) {
          decimal = decimal.multiply(tenPower(4 - mod));
          ++segments;
        }
        do {
          final BigInteger[] pair = decimal.divideAndRemainder(BI_TEN_THOUSAND);
          decimal = pair[0];
          final short shortValue = pair[1].shortValue();
          if (shortValue != 0 || !shorts.isEmpty()) {
            shorts.push(shortValue);
          }
          --segments;
        } while (!BigInteger.ZERO.equals(decimal));

        //for the leading 0 shorts we either adjust weight (if no wholes)
        // or push shorts
        if (BigInteger.ZERO.equals(wholes)) {
          weight -= segments;
        } else {
          //now add leading 0 shorts
          for (int i = 0; i < segments; i++) {
            shorts.push((short) 0);
          }
        }
      }

      while (!BigInteger.ZERO.equals(wholes)) {
        ++weight;
        final BigInteger[] pair = wholes.divideAndRemainder(BI_TEN_THOUSAND);
        wholes = pair[0];
        final short shortValue = pair[1].shortValue();
        if (shortValue != 0 || !shorts.isEmpty()) {
          shorts.push(shortValue);
        }
      }
    }

    //8 bytes for "header" and then 2 for each short
    final byte[] bytes = new byte[8 + (2 * shorts.size())];
    int idx = 0;

    //number of 2-byte shorts representing 4 decimal digits
    ByteConverter.int2(bytes, idx, shorts.size());
    idx += 2;
    //0 based number of 4 decimal digits (i.e. 2-byte shorts) before the decimal
    ByteConverter.int2(bytes, idx, weight);
    idx += 2;
    //indicates positive, negative or NaN
    ByteConverter.int2(bytes, idx, nbr.signum() == -1 ? NUMERIC_NEG : NUMERIC_POS);
    idx += 2;
    //number of digits after the decimal
    ByteConverter.int2(bytes, idx, Math.max(0, scale));
    idx += 2;

    short s;
    while ((s = shorts.pop()) != -1) {
      ByteConverter.int2(bytes, idx, s);
      idx += 2;
    }

    return bytes;
  }

  /** Returns 10^exponent, using a cached value when available. */
  private static BigInteger tenPower(int exponent) {
    return BI_TEN_POWERS.length > exponent ? BI_TEN_POWERS[exponent] : BigInteger.TEN.pow(exponent);
  }

  /**
   * Parses a long value from the byte array.
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index of the parse in the byte array.
   * @return parsed long value.
   */
  public static long int8(byte[] bytes, int idx) {
    return
        ((long) (bytes[idx + 0] & 255) << 56)
            + ((long) (bytes[idx + 1] & 255) << 48)
            + ((long) (bytes[idx + 2] & 255) << 40)
            + ((long) (bytes[idx + 3] & 255) << 32)
            + ((long) (bytes[idx + 4] & 255) << 24)
            + ((long) (bytes[idx + 5] & 255) << 16)
            + ((long) (bytes[idx + 6] & 255) << 8)
            + (bytes[idx + 7] & 255);
  }

  /**
   * Parses an int value from the byte array.
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index of the parse in the byte array.
   * @return parsed int value.
   */
  public static int int4(byte[] bytes, int idx) {
    return
        ((bytes[idx] & 255) << 24)
            + ((bytes[idx + 1] & 255) << 16)
            + ((bytes[idx + 2] & 255) << 8)
            + (bytes[idx + 3] & 255);
  }

  /**
   * Parses a short value from the byte array.
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index of the parse in the byte array.
   * @return parsed short value.
   */
  public static short int2(byte[] bytes, int idx) {
    return (short) (((bytes[idx] & 255) << 8) + (bytes[idx + 1] & 255));
  }

  /**
   * Parses a boolean value from the byte array.
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index to read from bytes.
   * @return parsed boolean value.
   */
  public static boolean bool(byte[] bytes, int idx) {
    return bytes[idx] == 1;
  }

  /**
   * Parses a float value from the byte array.
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index of the parse in the byte array.
   * @return parsed float value.
   */
  public static float float4(byte[] bytes, int idx) {
    return Float.intBitsToFloat(int4(bytes, idx));
  }

  /**
   * Parses a double value from the byte array.
   *
   * @param bytes The byte array to parse.
   * @param idx The starting index of the parse in the byte array.
   * @return parsed double value.
   */
  public static double float8(byte[] bytes, int idx) {
    return Double.longBitsToDouble(int8(bytes, idx));
  }

  /**
   * Encodes a long value to the byte array.
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void int8(byte[] target, int idx, long value) {
    target[idx + 0] = (byte) (value >>> 56);
    target[idx + 1] = (byte) (value >>> 48);
    target[idx + 2] = (byte) (value >>> 40);
    target[idx + 3] = (byte) (value >>> 32);
    target[idx + 4] = (byte) (value >>> 24);
    target[idx + 5] = (byte) (value >>> 16);
    target[idx + 6] = (byte) (value >>> 8);
    target[idx + 7] = (byte) value;
  }

  /**
   * Encodes an int value to the byte array.
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void int4(byte[] target, int idx, int value) {
    target[idx + 0] = (byte) (value >>> 24);
    target[idx + 1] = (byte) (value >>> 16);
    target[idx + 2] = (byte) (value >>> 8);
    target[idx + 3] = (byte) value;
  }

  /**
   * Encodes a short value (passed as int) to the byte array.
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void int2(byte[] target, int idx, int value) {
    target[idx + 0] = (byte) (value >>> 8);
    target[idx + 1] = (byte) value;
  }

  /**
   * Encodes a boolean value to the byte array.
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void bool(byte[] target, int idx, boolean value) {
    target[idx] = value ? (byte) 1 : (byte) 0;
  }

  /**
   * Encodes a float value to the byte array.
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void float4(byte[] target, int idx, float value) {
    int4(target, idx, Float.floatToRawIntBits(value));
  }

  /**
   * Encodes a double value to the byte array.
   *
   * @param target The byte array to encode to.
   * @param idx The starting index in the byte array.
   * @param value The value to encode.
   */
  public static void float8(byte[] target, int idx, double value) {
    int8(target, idx, Double.doubleToRawLongBits(value));
  }
}

The intended use case is wanting to write data to a byte array parameter that is stored off + * heap in a direct memory pool or in some other form that is inconvenient to assemble into a single + * heap-allocated buffer.

+ *

Users should write their own implementation depending on the + * original data source. The driver provides a built-in implementation supporting the {@link + * java.nio.ByteBuffer} class, see {@link ByteBufferByteStreamWriter}.

+ *

Intended usage is to simply pass in an instance using + * {@link java.sql.PreparedStatement#setObject(int, Object)}:

+ *
+ *     int bufLength = someBufferObject.length();
+ *     preparedStatement.setObject(1, new MyByteStreamWriter(bufLength, someBufferObject));
+ * 
+ *

The length must be known ahead of the stream being written to.

+ *

This provides the application more control over memory management than calling + * {@link java.sql.PreparedStatement#setBinaryStream(int, InputStream)} as with the latter the + * caller has no control over the buffering strategy.

+ */ +public interface ByteStreamWriter { + + /** + * Returns the length of the stream. + * + *

This must be known ahead of calling {@link #writeTo(ByteStreamTarget)}.

+ * + * @return the number of bytes in the stream. + */ + int getLength(); + + /** + * Write the data to the provided {@link OutputStream}. + * + *

Should not write more than {@link #getLength()} bytes. If attempted, the provided stream + * will throw an {@link java.io.IOException}.

+ * + * @param target the stream to write the data to + * @throws IOException if the underlying stream throws or there is some other error. + */ + void writeTo(ByteStreamTarget target) throws IOException; + + static ByteStreamWriter of(ByteBuffer... buf) { + return buf.length == 1 + ? new ByteBufferByteStreamWriter(buf[0]) + : new ByteBuffersByteStreamWriter(buf); + } + + /** + * Provides a target to write bytes to. + */ + interface ByteStreamTarget { + + /** + * Provides an output stream to write bytes to. + * + * @return an output stream + */ + OutputStream getOutputStream(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/CanEstimateSize.java b/pgjdbc/src/main/java/org/postgresql/util/CanEstimateSize.java new file mode 100644 index 0000000..74cba88 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/CanEstimateSize.java @@ -0,0 +1,10 @@ +/* + * Copyright (c) 2015, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +public interface CanEstimateSize { + long getSize(); +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/DriverInfo.java b/pgjdbc/src/main/java/org/postgresql/util/DriverInfo.java new file mode 100644 index 0000000..1c95166 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/DriverInfo.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +/** + * Utility class with constants of Driver information. 
+ */ +public final class DriverInfo { + + private DriverInfo() { + } + + // Driver name + public static final String DRIVER_NAME = "PostgreSQL JDBC Driver"; + public static final String DRIVER_SHORT_NAME = "PgJDBC"; + public static final String DRIVER_VERSION = "/*$version$*/"; + public static final String DRIVER_FULL_NAME = DRIVER_NAME + " " + DRIVER_VERSION; + + // Driver version + public static final int MAJOR_VERSION = /*$version.major+";"$*//*-*/42; + public static final int MINOR_VERSION = /*$version.minor+";"$*//*-*/0; + public static final int PATCH_VERSION = /*$version.patch+";"$*//*-*/0; + + // JDBC specification + public static final String JDBC_VERSION = "/*$jdbc.specification.version$*/"; + public static final int JDBC_MAJOR_VERSION = JDBC_VERSION.charAt(0) - '0'; + public static final int JDBC_MINOR_VERSION = JDBC_VERSION.charAt(2) - '0'; + +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/ExpressionProperties.java b/pgjdbc/src/main/java/org/postgresql/util/ExpressionProperties.java new file mode 100644 index 0000000..fa50f76 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/ExpressionProperties.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.util.Properties; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +@SuppressWarnings("serial") +public class ExpressionProperties extends Properties { + + private static final Pattern EXPRESSION = Pattern.compile("\\$\\{([^}]+)\\}"); + + private final Properties[] defaults; + + /** + * Creates an empty property list with the specified defaults. + * + * @param defaults java.util.Properties + */ + public ExpressionProperties(Properties ...defaults) { + this.defaults = defaults; + } + + /** + *

/**
 * A {@link Properties} subclass that resolves {@code ${propKey}} style references inside
 * property values, consulting optional fallback {@link Properties} instances (in order)
 * when a key is not defined locally.
 */
@SuppressWarnings("serial")
public class ExpressionProperties extends Properties {

  /** Matches a single {@code ${...}} reference; group 1 is the referenced property key. */
  private static final Pattern EXPRESSION = Pattern.compile("\\$\\{([^}]+)\\}");

  /** Fallback property sets, searched in order after this instance. */
  private final Properties[] defaults;

  /**
   * Creates an empty property list with the specified defaults.
   *
   * @param defaults java.util.Properties
   */
  public ExpressionProperties(Properties ...defaults) {
    this.defaults = defaults;
  }

  /**
   * <p>Returns property value with all {@code ${propKey}} like references replaced with the
   * value of the relevant property with recursive resolution.</p>
   *
   * <p>The method returns null if the property is not found.</p>
   *
   * @param key the property key.
   *
   * @return the value in this property list with
   *         the specified key value.
   */
  @Override
  public String getProperty(String key) {
    String value = getRawPropertyValue(key);
    return replaceProperties(value);
  }

  /**
   * Returns the resolved property value, or {@code defaultValue} (also resolved) when the
   * property is not found.
   *
   * @param key the property key
   * @param defaultValue value used when the property is absent
   * @return resolved property value or resolved default
   */
  @Override
  public String getProperty(String key, String defaultValue) {
    String value = getRawPropertyValue(key);
    if (value == null) {
      value = defaultValue;
    }
    return replaceProperties(value);
  }

  /**
   * Returns raw value of a property without any replacements.
   *
   * @param key property name
   * @return raw property value
   */
  public String getRawPropertyValue(String key) {
    String value = super.getProperty(key);
    if (value != null) {
      return value;
    }
    for (Properties properties : defaults) {
      value = properties.getProperty(key);
      if (value != null) {
        return value;
      }
    }
    return null;
  }

  /**
   * Expands every {@code ${propKey}} reference in the given value; unresolvable references
   * are left as-is.
   */
  private String replaceProperties(String value) {
    if (value == null) {
      return null;
    }
    Matcher matcher = EXPRESSION.matcher(value);
    // Allocated lazily: most values contain no references at all.
    // Modernized from StringBuffer: Matcher accepts StringBuilder since Java 9 and this
    // fork targets Java 21+.
    StringBuilder sb = null;
    while (matcher.find()) {
      if (sb == null) {
        sb = new StringBuilder();
      }
      // Recursive resolution: the referenced value may itself contain references.
      // NOTE(review): a self-referencing property (e.g. a=${a}) recurses without bound.
      String propValue = getProperty(matcher.group(1));
      if (propValue == null) {
        // Use original content like ${propKey} if property is not found
        propValue = matcher.group();
      }
      matcher.appendReplacement(sb, Matcher.quoteReplacement(propValue));
    }
    if (sb == null) {
      return value;
    }
    matcher.appendTail(sb);
    return sb.toString();
  }
}
+ */ + +package org.postgresql.util; + +import java.text.MessageFormat; +import java.util.Locale; +import java.util.MissingResourceException; +import java.util.ResourceBundle; + +/** + * This class provides a wrapper around a gettext message catalog that can provide a localized + * version of error messages. The caller provides a message String in the standard + * java.text.MessageFormat syntax and any arguments it may need. The returned String is the + * localized version if available or the original if not. + */ +public class GT { + + private static final GT _gt = new GT(); + private static final Object[] noargs = new Object[0]; + + public static String tr(String message, Object... args) { + return _gt.translate(message, args); + } + + private ResourceBundle bundle; + + private GT() { + try { + bundle = ResourceBundle.getBundle("org.postgresql.translation.messages", Locale.getDefault(Locale.Category.DISPLAY)); + } catch (MissingResourceException mre) { + // translation files have not been installed + bundle = null; + } + } + + private String translate(String message, Object[] args) { + if (bundle != null && message != null) { + try { + message = bundle.getString(message); + } catch (MissingResourceException mre) { + // If we can't find a translation, just + // use the untranslated message. + } + } + + // If we don't have any parameters we still need to run + // this through the MessageFormat(ter) to allow the same + // quoting and escaping rules to be used for all messages. 
+ // + if (args == null) { + args = noargs; + } + + // Replace placeholders with arguments + // + if (message != null) { + message = MessageFormat.format(message, args); + } + + return message; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/Gettable.java b/pgjdbc/src/main/java/org/postgresql/util/Gettable.java new file mode 100644 index 0000000..7ff1c3d --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/Gettable.java @@ -0,0 +1,10 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +public interface Gettable { + V get(K key); +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/GettableHashMap.java b/pgjdbc/src/main/java/org/postgresql/util/GettableHashMap.java new file mode 100644 index 0000000..1f9eb16 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/GettableHashMap.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.util.HashMap; + +@SuppressWarnings("serial") +public class GettableHashMap + extends HashMap + implements Gettable { + + public GettableHashMap() { + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/HStoreConverter.java b/pgjdbc/src/main/java/org/postgresql/util/HStoreConverter.java new file mode 100644 index 0000000..98bee9c --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/HStoreConverter.java @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.util; + +import org.postgresql.core.Encoding; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; + +public class HStoreConverter { + + public HStoreConverter() { + } + + public static Map fromBytes(byte[] b, Encoding encoding) + throws SQLException { + Map m = new HashMap(); + int pos = 0; + int numElements = ByteConverter.int4(b, pos); + pos += 4; + try { + for (int i = 0; i < numElements; i++) { + int keyLen = ByteConverter.int4(b, pos); + pos += 4; + String key = encoding.decode(b, pos, keyLen); + pos += keyLen; + int valLen = ByteConverter.int4(b, pos); + pos += 4; + String val; + if (valLen == -1) { + val = null; + } else { + val = encoding.decode(b, pos, valLen); + pos += valLen; + } + m.put(key, val); + } + } catch (IOException ioe) { + throw new PSQLException( + GT.tr( + "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. 
The most common example of this is storing 8bit data in a SQL_ASCII database."), + PSQLState.DATA_ERROR, ioe); + } + return m; + } + + public static byte[] toBytes(Map m, Encoding encoding) throws SQLException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(4 + 10 * m.size()); + byte[] lenBuf = new byte[4]; + try { + ByteConverter.int4(lenBuf, 0, m.size()); + baos.write(lenBuf); + for (Entry e : m.entrySet()) { + Object mapKey = e.getKey(); + if (mapKey == null) { + throw new PSQLException(GT.tr("hstore key must not be null"), + PSQLState.INVALID_PARAMETER_VALUE); + } + byte[] key = encoding.encode(mapKey.toString()); + ByteConverter.int4(lenBuf, 0, key.length); + baos.write(lenBuf); + baos.write(key); + + if (e.getValue() == null) { + ByteConverter.int4(lenBuf, 0, -1); + baos.write(lenBuf); + } else { + byte[] val = encoding.encode(e.getValue().toString()); + ByteConverter.int4(lenBuf, 0, val.length); + baos.write(lenBuf); + baos.write(val); + } + } + } catch (IOException ioe) { + throw new PSQLException( + GT.tr( + "Invalid character data was found. This is most likely caused by stored data containing characters that are invalid for the character set the database was created in. 
The most common example of this is storing 8bit data in a SQL_ASCII database."), + PSQLState.DATA_ERROR, ioe); + } + return baos.toByteArray(); + } + + public static String toString(Map map) { + if (map.isEmpty()) { + return ""; + } + StringBuilder sb = new StringBuilder(map.size() * 8); + for (Entry e : map.entrySet()) { + appendEscaped(sb, e.getKey()); + sb.append("=>"); + appendEscaped(sb, e.getValue()); + sb.append(", "); + } + sb.setLength(sb.length() - 2); + return sb.toString(); + } + + private static void appendEscaped(StringBuilder sb, Object val) { + if (val != null) { + sb.append('"'); + String s = val.toString(); + for (int pos = 0; pos < s.length(); pos++) { + char ch = s.charAt(pos); + if (ch == '"' || ch == '\\') { + sb.append('\\'); + } + sb.append(ch); + } + sb.append('"'); + } else { + sb.append("NULL"); + } + } + + public static Map fromString(String s) { + Map m = new HashMap(); + int pos = 0; + StringBuilder sb = new StringBuilder(); + while (pos < s.length()) { + sb.setLength(0); + int start = s.indexOf('"', pos); + int end = appendUntilQuote(sb, s, start); + String key = sb.toString(); + pos = end + 3; + + String val; + if (s.charAt(pos) == 'N') { + val = null; + pos += 4; + } else { + sb.setLength(0); + end = appendUntilQuote(sb, s, pos); + val = sb.toString(); + pos = end; + } + pos++; + m.put(key, val); + } + return m; + } + + private static int appendUntilQuote(StringBuilder sb, String s, int pos) { + for (pos += 1; pos < s.length(); pos++) { + char ch = s.charAt(pos); + if (ch == '"') { + break; + } + if (ch == '\\') { + pos++; + ch = s.charAt(pos); + } + sb.append(ch); + } + return pos; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/HostSpec.java b/pgjdbc/src/main/java/org/postgresql/util/HostSpec.java new file mode 100644 index 0000000..08f7d6c --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/HostSpec.java @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2012, PostgreSQL Global Development Group + * See the LICENSE 
file in the project root for more information. + */ + +package org.postgresql.util; + +import static java.util.regex.Pattern.compile; + +import java.util.Locale; +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Simple container for host and port. + */ +public class HostSpec { + public static final String DEFAULT_NON_PROXY_HOSTS = "localhost|127.*|[::1]|0.0.0.0|[::0]"; + + protected final String localSocketAddress; + protected final String host; + protected final int port; + + public HostSpec(String host, int port) { + this(host, port, null); + } + + public HostSpec(String host, int port, String localSocketAddress) { + this.host = host; + this.port = port; + this.localSocketAddress = localSocketAddress; + } + + public String getHost() { + return host; + } + + public int getPort() { + return port; + } + + @Override + public String toString() { + return host + ":" + port; + } + + @Override + public boolean equals(Object obj) { + return obj instanceof HostSpec && port == ((HostSpec) obj).port + && host.equals(((HostSpec) obj).host) && Objects.equals(localSocketAddress, ((HostSpec) obj).localSocketAddress); + } + + @Override + public int hashCode() { + return Objects.hash(localSocketAddress, host, port); + } + + public String getLocalSocketAddress() { + return localSocketAddress; + } + + public Boolean shouldResolve() { + String socksProxy = System.getProperty("socksProxyHost"); + if (socksProxy == null || socksProxy.trim().isEmpty()) { + return true; + } + return matchesNonProxyHosts(); + } + + private Boolean matchesNonProxyHosts() { + String nonProxyHosts = System.getProperty("socksNonProxyHosts", DEFAULT_NON_PROXY_HOSTS); + if (nonProxyHosts == null || this.host.isEmpty()) { + return false; + } + + Pattern pattern = toPattern(nonProxyHosts); + Matcher matcher = pattern == null ? 
null : pattern.matcher(this.host); + return matcher != null && matcher.matches(); + } + + @SuppressWarnings("regex") + private Pattern toPattern(String mask) { + StringBuilder joiner = new StringBuilder(); + String separator = ""; + for (String disjunct : mask.split("\\|")) { + if (!disjunct.isEmpty()) { + String regex = disjunctToRegex(disjunct.toLowerCase(Locale.ROOT)); + joiner.append(separator).append(regex); + separator = "|"; + } + } + + return joiner.length() == 0 ? null : compile(joiner.toString()); + } + + private String disjunctToRegex(String disjunct) { + String regex; + + if (disjunct.startsWith("*")) { + regex = ".*" + Pattern.quote(disjunct.substring(1)); + } else if (disjunct.endsWith("*")) { + regex = Pattern.quote(disjunct.substring(0, disjunct.length() - 1)) + ".*"; + } else { + regex = Pattern.quote(disjunct); + } + + return regex; + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/IntList.java b/pgjdbc/src/main/java/org/postgresql/util/IntList.java new file mode 100644 index 0000000..52dd577 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/IntList.java @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2023, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.util.Arrays; + +/** + * A specialized class to store a list of {@code int} values, so it does not need auto-boxing. Note: + * this is a driver-internal class, and it is not intended to be used outside the driver. 
/**
 * A specialized growable list of {@code int} values that avoids auto-boxing.
 * Note: this is a driver-internal class, and it is not intended to be used
 * outside the driver.
 */
public final class IntList {
  private static final int[] EMPTY_INT_ARRAY = new int[0];

  // Backing storage; only the first `size` slots are live.
  private int[] ints = EMPTY_INT_ARRAY;
  private int size;

  public IntList() {
  }

  /**
   * Appends a value to the end of the list, growing the storage if required.
   *
   * @param i value to append
   */
  public void add(int i) {
    grow(size);
    ints[size] = i;
    size++;
  }

  // Ensures slot `minIndex` is writable: doubles capacity up to 1024,
  // then grows by 1.5x (same policy as before, different shape).
  private void grow(int minIndex) {
    int capacity = ints.length;
    if (minIndex < capacity) {
      return;
    }
    int newCapacity;
    if (capacity == 0) {
      newCapacity = 8;
    } else if (capacity < 1024) {
      newCapacity = capacity << 1;
    } else {
      newCapacity = capacity + (capacity >> 1);
    }
    ints = Arrays.copyOf(ints, newCapacity);
  }

  /**
   * @return the number of values stored
   */
  public int size() {
    return size;
  }

  /**
   * Returns the value at the given index.
   *
   * @param i index, 0-based
   * @throws ArrayIndexOutOfBoundsException when i is negative or not less than size()
   */
  public int get(int i) {
    if (i < 0 || i >= size) {
      throw new ArrayIndexOutOfBoundsException("Index: " + i + ", Size: " + size);
    }
    return ints[i];
  }

  /** Logically empties the list; the backing storage is retained for reuse. */
  public void clear() {
    size = 0;
  }

  /**
   * Returns an array containing all the elements in this list. The modifications of the returned
   * array will not affect this list.
   *
   * @return an array containing all the elements in this list
   */
  public int[] toArray() {
    return size == 0 ? EMPTY_INT_ARRAY : Arrays.copyOf(ints, size);
  }

  @Override
  public String toString() {
    StringBuilder buf = new StringBuilder("[");
    String separator = "";
    for (int i = 0; i < size; i++) {
      buf.append(separator).append(ints[i]);
      separator = ", ";
    }
    return buf.append(']').toString();
  }
}
/**
 * Closes JDBC resources while swallowing any {@link SQLException} raised in the
 * process. Null arguments are tolerated and ignored, which makes these helpers
 * safe to call from cleanup paths.
 */
public class JdbcBlackHole {

  public JdbcBlackHole() {
  }

  public static void close(Connection con) {
    if (con == null) {
      return;
    }
    try {
      con.close();
    } catch (SQLException ignored) {
      // Deliberately swallowed: failure to close must not mask the original error.
    }
  }

  public static void close(Statement s) {
    if (s == null) {
      return;
    }
    try {
      s.close();
    } catch (SQLException ignored) {
      // Deliberately swallowed: failure to close must not mask the original error.
    }
  }

  public static void close(ResultSet rs) {
    if (rs == null) {
      return;
    }
    try {
      rs.close();
    } catch (SQLException ignored) {
      // Deliberately swallowed: failure to close must not mask the original error.
    }
  }
}
/**
 * Probes for an existing Kerberos credential (ticket) cache by performing a
 * non-interactive JAAS login against a temporary configuration.
 */
public class KerberosTicket {

  private static final String CONFIG_ITEM_NAME = "ticketCache";
  private static final String KRBLOGIN_MODULE = "com.sun.security.auth.module.Krb5LoginModule";

  /**
   * JAAS configuration that reads the Kerberos ticket cache without prompting
   * the user and without initiating new tickets.
   */
  static class CustomKrbConfig extends Configuration {

    public CustomKrbConfig() {
    }

    @Override
    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
      if (CONFIG_ITEM_NAME.equals(name)) {
        Map<String, String> options = new HashMap<>();
        options.put("refreshKrb5Config", Boolean.FALSE.toString());
        options.put("useTicketCache", Boolean.TRUE.toString());
        options.put("doNotPrompt", Boolean.TRUE.toString());
        options.put("useKeyTab", Boolean.TRUE.toString());
        options.put("isInitiator", Boolean.FALSE.toString());
        options.put("renewTGT", Boolean.FALSE.toString());
        options.put("debug", Boolean.FALSE.toString());
        return new AppConfigurationEntry[]{
            new AppConfigurationEntry(KRBLOGIN_MODULE,
                AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options)};
      }
      return null;
    }

  }

  public KerberosTicket() {
  }

  /**
   * Checks whether a usable Kerberos credential cache exists by attempting a
   * silent JAAS login.
   *
   * @param info connection properties (currently unused)
   * @return true when the login succeeds and yields a Subject; false when the
   *     login fails (e.g. no ticket cache present)
   */
  public static boolean credentialCacheExists(Properties info) {
    // In the event that the user has specified a jaas.conf file we want to remember
    // it and restore it afterwards.
    Configuration existingConfiguration = Configuration.getConfiguration();
    Configuration.setConfiguration(new CustomKrbConfig());

    try {
      LoginContext lc = new LoginContext(CONFIG_ITEM_NAME, new CallbackHandler() {

        @Override
        public void handle(Callback[] callbacks)
            throws IOException, UnsupportedCallbackException {
          // if the user has not configured jaasLogin correctly this can happen
          throw new RuntimeException("This is an error, you should set doNotPrompt to false in jaas.config");
        }
      });
      lc.login();
      Subject sub = lc.getSubject();
      return sub != null;
    } catch (LoginException e) {
      // No usable credential cache
      return false;
    } finally {
      // Restore the saved configuration on every exit path. The previous code
      // skipped the restore when a RuntimeException escaped (e.g. from the
      // callback handler), leaking the temporary configuration process-wide.
      if (existingConfiguration != null) {
        Configuration.setConfiguration(existingConfiguration);
      }
    }
  }
}
/**
 * LazyCleaner is a utility class that allows to register objects for deferred cleanup.
 * The cleanup thread starts lazily on first registration and terminates itself once
 * the registration list is empty and no work arrives within the configured TTL.
 *
 * <p>Note: this is a driver-internal class.</p>
 */
public class LazyCleaner {
  private static final Logger LOGGER = Logger.getLogger(LazyCleaner.class.getName());
  private static final LazyCleaner instance =
      new LazyCleaner(
          Duration.ofMillis(Long.getLong("pgjdbc.config.cleanup.thread.ttl", 30000)),
          "PostgreSQL-JDBC-Cleaner"
      );

  /**
   * Handle returned from {@link #register} that allows explicit, early cleanup.
   *
   * @param <T> exception type the cleaning action may throw
   */
  public interface Cleanable<T extends Throwable> {
    void clean() throws T;
  }

  /**
   * Action executed when the referent becomes phantom reachable (leak == true)
   * or when {@link Cleanable#clean()} is invoked explicitly (leak == false).
   *
   * @param <T> exception type the action may throw
   */
  public interface CleaningAction<T extends Throwable> {
    void onClean(boolean leak) throws T;
  }

  private final ReferenceQueue<Object> queue = new ReferenceQueue<>();
  private final long threadTtl;
  private final ThreadFactory threadFactory;
  // All three fields below are guarded by `synchronized (this)`.
  private boolean threadRunning;
  private int watchedCount;
  private Node<?> first;

  /**
   * Returns a default cleaner instance.
   *
   * <p>Note: this is driver-internal API.</p>
   *
   * @return the instance of LazyCleaner
   */
  public static LazyCleaner getInstance() {
    return instance;
  }

  public LazyCleaner(Duration threadTtl, final String threadName) {
    this(threadTtl, runnable -> {
      Thread thread = new Thread(runnable, threadName);
      thread.setDaemon(true);
      return thread;
    });
  }

  private LazyCleaner(Duration threadTtl, ThreadFactory threadFactory) {
    this.threadTtl = threadTtl.toMillis();
    this.threadFactory = threadFactory;
  }

  /**
   * Registers an object for deferred cleanup.
   *
   * @param obj the object whose reachability is tracked
   * @param action the action to run once, either on collection or on explicit clean
   * @param <T> exception type the action may throw
   * @return a handle that can trigger the action early via {@link Cleanable#clean()}
   */
  public <T extends Throwable> Cleanable<T> register(Object obj, CleaningAction<T> action) {
    assert obj != action : "object handle should not be the same as cleaning action, otherwise"
        + " the object will never become phantom reachable, so the action will never trigger";
    return add(new Node<T>(obj, action));
  }

  public synchronized int getWatchedCount() {
    return watchedCount;
  }

  public synchronized boolean isThreadRunning() {
    return threadRunning;
  }

  // Returns true (and marks the thread as stopped) when nothing is watched anymore.
  private synchronized boolean checkEmpty() {
    if (first == null) {
      threadRunning = false;
      return true;
    }
    return false;
  }

  // Links the node at the head of the watch list and lazily starts the thread.
  private synchronized <T extends Throwable> Node<T> add(Node<T> node) {
    if (first != null) {
      node.next = first;
      first.prev = node;
    }
    first = node;
    watchedCount++;

    if (!threadRunning) {
      threadRunning = startThread();
    }
    return node;
  }

  private boolean startThread() {
    Thread thread = threadFactory.newThread(new Runnable() {
      @Override
      public void run() {
        while (true) {
          try {
            // Clear setContextClassLoader to avoid leaking the classloader
            Thread.currentThread().setContextClassLoader(null);
            Thread.currentThread().setUncaughtExceptionHandler(null);
            // Node extends PhantomReference, so this cast is safe
            Node<?> ref = (Node<?>) queue.remove(threadTtl);
            if (ref == null) {
              // Timed out with no work: exit if nothing is watched anymore
              if (checkEmpty()) {
                break;
              }
              continue;
            }
            try {
              ref.onClean(true);
            } catch (Throwable e) {
              if (e instanceof InterruptedException) {
                // This could happen if onClean uses sneaky-throws
                LOGGER.log(Level.WARNING, "Unexpected interrupt while executing onClean", e);
                throw e;
              }
              // Should not happen if cleaners are well-behaved
              LOGGER.log(Level.WARNING, "Unexpected exception while executing onClean", e);
            }
          } catch (InterruptedException e) {
            if (LazyCleaner.this.checkEmpty()) {
              LOGGER.log(
                  Level.FINE,
                  "Cleanup queue is empty, and got interrupt, will terminate the cleanup thread"
              );
              break;
            }
            LOGGER.log(Level.FINE, "Ignoring interrupt since the cleanup queue is non-empty");
          } catch (Throwable e) {
            // Ignore exceptions from the cleanup action
            LOGGER.log(Level.WARNING, "Unexpected exception in cleaner thread main loop", e);
          }
        }
      }
    });
    if (thread != null) {
      thread.start();
      return true;
    }
    LOGGER.log(Level.WARNING, "Unable to create cleanup thread");
    return false;
  }

  // Unlinks a node; returns false when the node has already been removed.
  private synchronized boolean remove(Node<?> node) {
    // A node whose next points at itself has already been removed
    if (node.next == node) {
      return false;
    }

    // Update list
    if (first == node) {
      first = node.next;
    }
    if (node.next != null) {
      node.next.prev = node.prev;
    }
    if (node.prev != null) {
      node.prev.next = node.next;
    }

    // Indicate removal by pointing the cleaner to itself
    node.next = node;
    node.prev = node;

    watchedCount--;
    return true;
  }

  private class Node<T extends Throwable> extends PhantomReference<Object>
      implements Cleanable<T>, CleaningAction<T> {
    private final CleaningAction<T> action;
    private Node<?> prev;
    private Node<?> next;

    Node(Object referent, CleaningAction<T> action) {
      super(referent, queue);
      this.action = action;
    }

    @Override
    public void clean() throws T {
      onClean(false);
    }

    @Override
    public void onClean(boolean leak) throws T {
      // remove() is the at-most-once gate: only the first caller runs the action
      if (!remove(this)) {
        return;
      }
      if (action != null) {
        action.onClean(leak);
      }
    }
  }
}
0000000..4b9aea6 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/LogWriterHandler.java @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import org.postgresql.jdbc.ResourceLock; + +import java.io.Writer; +import java.util.logging.ErrorManager; +import java.util.logging.Formatter; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.LogRecord; +import java.util.logging.SimpleFormatter; + +@SuppressWarnings("try") +public class LogWriterHandler extends Handler { + + private Writer writer; + private final ResourceLock lock = new ResourceLock(); + + @SuppressWarnings("this-escape") + public LogWriterHandler(Writer inWriter) { + super(); + setLevel(Level.INFO); + setFilter(null); + setFormatter(new SimpleFormatter()); + setWriter(inWriter); + } + + @Override + public void publish(LogRecord record) { + final String formatted; + final Formatter formatter = getFormatter(); + + try { + formatted = formatter.format(record); + } catch (Exception ex) { + reportError("Error Formatting record", ex, ErrorManager.FORMAT_FAILURE); + return; + } + + if (formatted.length() == 0) { + return; + } + try { + try (ResourceLock ignore = lock.obtain()) { + Writer writer = this.writer; + if (writer != null) { + writer.write(formatted); + } + } + } catch (Exception ex) { + reportError("Error writing message", ex, ErrorManager.WRITE_FAILURE); + } + } + + @Override + public void flush() { + try (ResourceLock ignore = lock.obtain()) { + Writer writer = this.writer; + if (writer != null) { + writer.flush(); + } + } catch ( Exception ex ) { + reportError("Error on flush", ex, ErrorManager.WRITE_FAILURE); + } + } + + @Override + public void close() throws SecurityException { + try (ResourceLock ignore = lock.obtain()) { + Writer writer = this.writer; + if (writer != null) { + writer.close(); + } + } catch ( 
Exception ex ) { + reportError("Error closing writer", ex, ErrorManager.WRITE_FAILURE); + } + } + + private void setWriter(Writer writer) throws IllegalArgumentException { + try (ResourceLock ignore = lock.obtain()) { + if (writer == null) { + throw new IllegalArgumentException("Writer cannot be null"); + } + this.writer = writer; + + try { + writer.write(getFormatter().getHead(this)); + } catch (Exception ex) { + reportError("Error writing head section", ex, ErrorManager.WRITE_FAILURE); + } + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/LruCache.java b/pgjdbc/src/main/java/org/postgresql/util/LruCache.java new file mode 100644 index 0000000..6360591 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/LruCache.java @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2015, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import org.postgresql.jdbc.ResourceLock; + +import java.sql.SQLException; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * Caches values in simple least-recently-accessed order. + */ +@SuppressWarnings("try") +public class LruCache + implements Gettable { + /** + * Action that is invoked when the entry is removed from the cache. + * + * @param type of the cache entry + */ + public interface EvictAction { + void evict(Value value) throws SQLException; + } + + /** + * When the entry is not present in cache, this create action is used to create one. 
+ * + * @param type of the cache entry + */ + public interface CreateAction { + Value create(Key key) throws SQLException; + } + + private final EvictAction onEvict; + private final CreateAction createAction; + private final int maxSizeEntries; + private final long maxSizeBytes; + private long currentSize; + private final Map cache; + private final ResourceLock lock = new ResourceLock(); + + @SuppressWarnings("serial") + private class LimitedMap extends LinkedHashMap { + LimitedMap(int initialCapacity, float loadFactor, boolean accessOrder) { + super(initialCapacity, loadFactor, accessOrder); + } + + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + // Avoid creating iterators if size constraints not violated + if (size() <= maxSizeEntries && currentSize <= maxSizeBytes) { + return false; + } + + Iterator> it = entrySet().iterator(); + while (it.hasNext()) { + if (size() <= maxSizeEntries && currentSize <= maxSizeBytes) { + return false; + } + + Map.Entry entry = it.next(); + evictValue(entry.getValue()); + long valueSize = entry.getValue().getSize(); + if (valueSize > 0) { + // just in case + currentSize -= valueSize; + } + it.remove(); + } + return false; + } + } + + private void evictValue(Value value) { + try { + if (onEvict != null) { + onEvict.evict(value); + } + } catch (SQLException e) { + /* ignore */ + } + } + + public LruCache(int maxSizeEntries, long maxSizeBytes, boolean accessOrder) { + this(maxSizeEntries, maxSizeBytes, accessOrder, null, null); + } + + public LruCache(int maxSizeEntries, long maxSizeBytes, boolean accessOrder, + CreateAction createAction, + EvictAction onEvict) { + this.maxSizeEntries = maxSizeEntries; + this.maxSizeBytes = maxSizeBytes; + this.createAction = createAction; + this.onEvict = onEvict; + this.cache = new LimitedMap(16, 0.75f, accessOrder); + } + + /** + * Returns an entry from the cache. + * + * @param key cache key + * @return entry from cache or null if cache does not contain given key. 
+ */ + @Override + public Value get(Key key) { + try (ResourceLock ignore = lock.obtain()) { + return cache.get(key); + } + } + + /** + * Borrows an entry from the cache. + * + * @param key cache key + * @return entry from cache or newly created entry if cache does not contain given key. + * @throws SQLException if entry creation fails + */ + public Value borrow(Key key) throws SQLException { + try (ResourceLock ignore = lock.obtain()) { + Value value = cache.remove(key); + if (value == null) { + if (createAction == null) { + throw new UnsupportedOperationException("createAction == null, so can't create object"); + } + return createAction.create(key); + } + currentSize -= value.getSize(); + return value; + } + } + + /** + * Returns given value to the cache. + * + * @param key key + * @param value value + */ + public void put(Key key, Value value) { + try (ResourceLock ignore = lock.obtain()) { + long valueSize = value.getSize(); + if (maxSizeBytes == 0 || maxSizeEntries == 0 || valueSize * 2 > maxSizeBytes) { + // Just destroy the value if cache is disabled or if entry would consume more than a half of + // the cache + evictValue(value); + return; + } + currentSize += valueSize; + Value prev = cache.put(key, value); + if (prev == null) { + return; + } + // This should be a rare case + currentSize -= prev.getSize(); + if (prev != value) { + evictValue(prev); + } + } + } + + /** + * Puts all the values from the given map into the cache. 
+ * + * @param m The map containing entries to put into the cache + */ + public void putAll(Map m) { + try (ResourceLock ignore = lock.obtain()) { + for (Map.Entry entry : m.entrySet()) { + this.put(entry.getKey(), entry.getValue()); + } + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/MD5Digest.java b/pgjdbc/src/main/java/org/postgresql/util/MD5Digest.java new file mode 100644 index 0000000..f0e586c --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/MD5Digest.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +/** + * MD5-based utility function to obfuscate passwords before network transmission. + * + * @author Jeremy Wohl + */ +public class MD5Digest { + + private static final byte[] HEX_BYTES = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; + + private MD5Digest() { + } + + /** + * Encodes user/password/salt information in the following way: MD5(MD5(password + user) + salt). + * + * @param user The connecting user. + * @param password The connecting user's password. + * @param salt A four-salt sent by the server. + * @return A 35-byte array, comprising the string "md5" and an MD5 digest. 
/**
 * MD5-based utility function to obfuscate passwords before network transmission.
 *
 * @author Jeremy Wohl
 */
public class MD5Digest {

  private static final byte[] HEX_BYTES = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};

  private MD5Digest() {
  }

  /**
   * Encodes user/password/salt information in the following way: MD5(MD5(password + user) + salt).
   *
   * @param user The connecting user.
   * @param password The connecting user's password.
   * @param salt A four-salt sent by the server.
   * @return A 35-byte array, comprising the string "md5" and an MD5 digest.
   */
  public static byte[] encode(byte[] user, byte[] password, byte[] salt) {
    try {
      MessageDigest md = MessageDigest.getInstance("MD5");

      // Inner digest: MD5(password + user), rendered as 32 hex characters.
      md.update(password);
      md.update(user);
      byte[] inner = md.digest();

      byte[] result = new byte[35];
      bytesToHex(inner, result, 0);

      // Outer digest: MD5(hex(inner) + salt).
      md.update(result, 0, 32);
      md.update(salt);
      byte[] outer = md.digest();

      // Final layout: "md5" prefix followed by 32 hex characters.
      result[0] = (byte) 'm';
      result[1] = (byte) 'd';
      result[2] = (byte) '5';
      bytesToHex(outer, result, 3);

      return result;
    } catch (NoSuchAlgorithmException e) {
      // MD5 is a mandatory JDK algorithm; reaching here indicates a broken runtime.
      throw new IllegalStateException("Unable to encode password with MD5", e);
    }
  }

  /*
   * Turn 16-byte stream into a human-readable 32-byte hex string
   */
  public static void bytesToHex(byte[] bytes, byte[] hex, int offset) {
    int out = offset;
    for (int i = 0; i < 16; i++) {
      // Mask once so both nibble lookups operate on a non-negative int.
      int b = bytes[i] & 0xFF;
      hex[out++] = HEX_BYTES[b >>> 4];
      hex[out++] = HEX_BYTES[b & 0xF];
    }
  }
}
/**
 * Optimised byte[] to number parser.
 */
public class NumberParser {
  // Pre-allocated, stackless sentinel: signals "fall back to the slower parser".
  private static final NumberFormatException FAST_NUMBER_FAILED = new NumberFormatException() {
    @Override
    public Throwable fillInStackTrace() {
      // No stack trace: this exception is a shared singleton used for control flow.
      return this;
    }
  };

  private static final long MAX_LONG_DIV_TEN = Long.MAX_VALUE / 10;

  public NumberParser() {
  }

  /**
   * Optimised byte[] to number parser. This code does not handle null values, so the caller must
   * do checkResultSet and handle null values prior to calling this function. Fraction part is
   * discarded.
   *
   * @param bytes integer represented as a sequence of ASCII bytes
   * @param minVal smallest value the caller will accept
   * @param maxVal largest value the caller will accept
   * @return The parsed number.
   * @throws NumberFormatException If the number is invalid or the out of range for fast parsing.
   *     The value must then be parsed by another (less optimised) method.
   */
  public static long getFastLong(byte[] bytes, long minVal, long maxVal)
      throws NumberFormatException {
    final int length = bytes.length;
    if (length == 0) {
      throw FAST_NUMBER_FAILED;
    }

    final boolean negative = bytes[0] == '-';
    long result = 0;

    for (int i = negative ? 1 : 0; i < length; i++) {
      final byte ch = bytes[i];

      if (ch >= '0' && ch <= '9') {
        // Refuse to multiply once result exceeds MAX/10; a single overflow in the
        // add below is permitted — it is how Long.MIN_VALUE parses (checked later).
        if (result > MAX_LONG_DIV_TEN) {
          throw FAST_NUMBER_FAILED;
        }
        result = result * 10 + (ch - '0');
        continue;
      }

      if (ch != '.') {
        throw FAST_NUMBER_FAILED;
      }
      // Reject the bare strings "." and "-.".
      if (length == (negative ? 2 : 1)) {
        throw FAST_NUMBER_FAILED;
      }
      // The fraction is discarded, but it must consist solely of digits.
      for (int j = i + 1; j < length; j++) {
        final byte d = bytes[j];
        if (d < '0' || d > '9') {
          throw FAST_NUMBER_FAILED;
        }
      }
      break;
    }

    if (result < 0) {
      // A negative accumulator means the add overflowed. That is only legal for
      // exactly Long.MIN_VALUE (abs(MIN_VALUE) == MAX_VALUE + 1), where two's
      // complement wrap-around already yields the correct value.
      if (!(negative && result == Long.MIN_VALUE)) {
        throw FAST_NUMBER_FAILED;
      }
    } else if (negative) {
      result = -result;
    }

    if (result < minVal || result > maxVal) {
      throw FAST_NUMBER_FAILED;
    }
    return result;
  }
}
/**
 * Operating system specifics.
 */
public class OSUtil {

  public OSUtil() {
  }

  /**
   * @return true if OS is windows
   */
  public static boolean isWindows() {
    String osName = System.getProperty("os.name");
    return osName.toLowerCase(Locale.ROOT).contains("windows");
  }

  /**
   * @return OS specific root directory for user specific configurations
   */
  public static String getUserConfigRootDirectory() {
    if (!isWindows()) {
      // On Unix-like systems configuration lives directly under the home directory.
      return System.getProperty("user.home");
    }
    return System.getenv("APPDATA") + File.separator + "postgresql";
  }

}
+ */ + +package org.postgresql.util; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.Properties; + +/** + * Helper class to instantiate objects. Note: the class is NOT public API, so it is subject + * to change. + */ +public class ObjectFactory { + + public ObjectFactory() { + } + + /** + * Instantiates a class using the appropriate constructor. If a constructor with a single + * Propertiesparameter exists, it is used. Otherwise, if tryString is true a constructor with a + * single String argument is searched if it fails, or tryString is true a no argument constructor + * is tried. + * + * @param type of expected class + * @param expectedClass expected class of type T, if the classname instantiated doesn't match + * the expected type of this class this method will fail + * @param classname name of the class to instantiate + * @param info parameter to pass as Properties + * @param tryString whether to look for a single String argument constructor + * @param stringarg parameter to pass as String + * @return the instantiated class + * @throws ClassNotFoundException if something goes wrong + * @throws SecurityException if something goes wrong + * @throws NoSuchMethodException if something goes wrong + * @throws IllegalArgumentException if something goes wrong + * @throws InstantiationException if something goes wrong + * @throws IllegalAccessException if something goes wrong + * @throws InvocationTargetException if something goes wrong + */ + public static T instantiate(Class expectedClass, String classname, Properties info, + boolean tryString, + String stringarg) + throws ClassNotFoundException, SecurityException, NoSuchMethodException, + IllegalArgumentException, InstantiationException, IllegalAccessException, + InvocationTargetException { + Object[] args = {info}; + Constructor ctor = null; + Class cls = Class.forName(classname).asSubclass(expectedClass); + try { + ctor = 
cls.getConstructor(Properties.class); + } catch (NoSuchMethodException ignored) { + } + if (tryString && ctor == null) { + try { + ctor = cls.getConstructor(String.class); + args = new String[]{stringarg}; + } catch (NoSuchMethodException ignored) { + } + } + if (ctor == null) { + ctor = cls.getConstructor(); + args = new Object[0]; + } + return ctor.newInstance(args); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGBinaryObject.java b/pgjdbc/src/main/java/org/postgresql/util/PGBinaryObject.java new file mode 100644 index 0000000..f41e763 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/PGBinaryObject.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2011, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.sql.SQLException; + +/** + * PGBinaryObject is a interface that classes extending {@link PGobject} can use to take advantage + * of more optimal binary encoding of the data type. + */ +public interface PGBinaryObject { + /** + * This method is called to set the value of this object. + * + * @param value data containing the binary representation of the value of the object + * @param offset the offset in the byte array where object data starts + * @throws SQLException thrown if value is invalid for this type + */ + void setByteValue(byte[] value, int offset) throws SQLException; + + /** + * This method is called to return the number of bytes needed to store this object in the binary + * form required by org.postgresql. + * + * @return the number of bytes needed to store this object + */ + int lengthInBytes(); + + /** + * This method is called the to store the value of the object, in the binary form required by + * org.postgresql. + * + * @param bytes the array to store the value, it is guaranteed to be at lest + * {@link #lengthInBytes} in size. 
+ * @param offset the offset in the byte array where object must be stored + */ + void toBytes(byte[] bytes, int offset); +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGInterval.java b/pgjdbc/src/main/java/org/postgresql/util/PGInterval.java new file mode 100644 index 0000000..215ae3a --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/PGInterval.java @@ -0,0 +1,549 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.io.Serializable; +import java.sql.SQLException; +import java.text.DecimalFormat; +import java.text.NumberFormat; +import java.util.Calendar; +import java.util.Date; +import java.util.Locale; +import java.util.StringTokenizer; + +/** + * This implements a class that handles the PostgreSQL interval type. + */ +@SuppressWarnings("serial") +public class PGInterval extends PGobject implements Serializable, Cloneable { + + private static final int MICROS_IN_SECOND = 1000000; + + private int years; + private int months; + private int days; + private int hours; + private int minutes; + private int wholeSeconds; + private int microSeconds; + private boolean isNull; + + /** + * required by the driver. + */ + public PGInterval() { + type = "interval"; + } + + /** + * Initialize a interval with a given interval string representation. + * + * @param value String representated interval (e.g. 
'3 years 2 mons') + * @throws SQLException Is thrown if the string representation has an unknown format + * @see PGobject#setValue(String) + */ + @SuppressWarnings("this-escape") + public PGInterval(String value) throws SQLException { + this(); + setValue(value); + } + + private int lookAhead(String value, int position, String find) { + char [] tokens = find.toCharArray(); + int found = -1; + + for (int i = 0; i < tokens.length; i++) { + found = value.indexOf(tokens[i], position); + if (found > 0) { + return found; + } + } + return found; + } + + private void parseISO8601Format(String value) { + int number = 0; + String dateValue; + String timeValue = null; + + int hasTime = value.indexOf('T'); + if ( hasTime > 0 ) { + /* skip over the P */ + dateValue = value.substring(1, hasTime); + timeValue = value.substring(hasTime + 1); + } else { + /* skip over the P */ + dateValue = value.substring(1); + } + + for (int i = 0; i < dateValue.length(); i++) { + int lookAhead = lookAhead(dateValue, i, "YMD"); + if (lookAhead > 0) { + number = Integer.parseInt(dateValue.substring(i, lookAhead)); + if (dateValue.charAt(lookAhead) == 'Y') { + setYears(number); + } else if (dateValue.charAt(lookAhead) == 'M') { + setMonths(number); + } else if (dateValue.charAt(lookAhead) == 'D') { + setDays(number); + } + i = lookAhead; + } + } + if ( timeValue != null ) { + for (int i = 0; i < timeValue.length(); i++) { + int lookAhead = lookAhead(timeValue, i, "HMS"); + if (lookAhead > 0) { + if (timeValue.charAt(lookAhead) == 'H') { + setHours(Integer.parseInt(timeValue.substring(i, lookAhead))); + } else if (timeValue.charAt(lookAhead) == 'M') { + setMinutes(Integer.parseInt(timeValue.substring(i, lookAhead))); + } else if (timeValue.charAt(lookAhead) == 'S') { + setSeconds(Double.parseDouble(timeValue.substring(i, lookAhead))); + } + i = lookAhead; + } + } + } + } + + /** + * Initializes all values of this interval to the specified values. 
+ * + * @param years years + * @param months months + * @param days days + * @param hours hours + * @param minutes minutes + * @param seconds seconds + * @see #setValue(int, int, int, int, int, double) + */ + @SuppressWarnings("this-escape") + public PGInterval(int years, int months, int days, int hours, int minutes, double seconds) { + this(); + setValue(years, months, days, hours, minutes, seconds); + } + + /** + * Sets a interval string represented value to this instance. This method only recognize the + * format, that Postgres returns - not all input formats are supported (e.g. '1 yr 2 m 3 s'). + * + * @param value String representated interval (e.g. '3 years 2 mons') + * @throws SQLException Is thrown if the string representation has an unknown format + */ + @Override + public void setValue(String value) throws SQLException { + isNull = value == null; + if (value == null) { + setValue(0, 0, 0, 0, 0, 0); + isNull = true; + return; + } + final boolean postgresFormat = !value.startsWith("@"); + if (value.startsWith("P")) { + parseISO8601Format(value); + return; + } + // Just a simple '0' + if (!postgresFormat && value.length() == 3 && value.charAt(2) == '0') { + setValue(0, 0, 0, 0, 0, 0.0); + return; + } + + int years = 0; + int months = 0; + int days = 0; + int hours = 0; + int minutes = 0; + double seconds = 0; + + try { + String valueToken = null; + + value = value.replace('+', ' ').replace('@', ' '); + final StringTokenizer st = new StringTokenizer(value); + for (int i = 1; st.hasMoreTokens(); i++) { + String token = st.nextToken(); + + if ((i & 1) == 1) { + int endHours = token.indexOf(':'); + if (endHours == -1) { + valueToken = token; + continue; + } + + // This handles hours, minutes, seconds and microseconds for + // ISO intervals + int offset = token.charAt(0) == '-' ? 
1 : 0; + + hours = nullSafeIntGet(token.substring(offset + 0, endHours)); + minutes = nullSafeIntGet(token.substring(endHours + 1, endHours + 3)); + + // Pre 7.4 servers do not put second information into the results + // unless it is non-zero. + int endMinutes = token.indexOf(':', endHours + 1); + if (endMinutes != -1) { + seconds = nullSafeDoubleGet(token.substring(endMinutes + 1)); + } + + if (offset == 1) { + hours = -hours; + minutes = -minutes; + seconds = -seconds; + } + + valueToken = null; + } else { + // This handles years, months, days for both, ISO and + // Non-ISO intervals. Hours, minutes, seconds and microseconds + // are handled for Non-ISO intervals here. + + if (token.startsWith("year")) { + years = nullSafeIntGet(valueToken); + } else if (token.startsWith("mon")) { + months = nullSafeIntGet(valueToken); + } else if (token.startsWith("day")) { + days = nullSafeIntGet(valueToken); + } else if (token.startsWith("hour")) { + hours = nullSafeIntGet(valueToken); + } else if (token.startsWith("min")) { + minutes = nullSafeIntGet(valueToken); + } else if (token.startsWith("sec")) { + seconds = nullSafeDoubleGet(valueToken); + } + } + } + } catch (NumberFormatException e) { + throw new PSQLException(GT.tr("Conversion of interval failed"), + PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE, e); + } + + if (!postgresFormat && value.endsWith("ago")) { + // Inverse the leading sign + setValue(-years, -months, -days, -hours, -minutes, -seconds); + } else { + setValue(years, months, days, hours, minutes, seconds); + } + } + + /** + * Set all values of this interval to the specified values. 
+ * + * @param years years + * @param months months + * @param days days + * @param hours hours + * @param minutes minutes + * @param seconds seconds + */ + public void setValue(int years, int months, int days, int hours, int minutes, double seconds) { + setYears(years); + setMonths(months); + setDays(days); + setHours(hours); + setMinutes(minutes); + setSeconds(seconds); + } + + /** + * Returns the stored interval information as a string. + * + * @return String represented interval + */ + @Override + public String getValue() { + if (isNull) { + return null; + } + DecimalFormat df = (DecimalFormat) NumberFormat.getInstance(Locale.US); + df.applyPattern("0.0#####"); + + return String.format( + Locale.ROOT, + "%d years %d mons %d days %d hours %d mins %s secs", + years, + months, + days, + hours, + minutes, + df.format(getSeconds()) + ); + } + + /** + * Returns the years represented by this interval. + * + * @return years represented by this interval + */ + public int getYears() { + return years; + } + + /** + * Set the years of this interval to the specified value. + * + * @param years years to set + */ + public void setYears(int years) { + isNull = false; + this.years = years; + } + + /** + * Returns the months represented by this interval. + * + * @return months represented by this interval + */ + public int getMonths() { + return months; + } + + /** + * Set the months of this interval to the specified value. + * + * @param months months to set + */ + public void setMonths(int months) { + isNull = false; + this.months = months; + } + + /** + * Returns the days represented by this interval. + * + * @return days represented by this interval + */ + public int getDays() { + return days; + } + + /** + * Set the days of this interval to the specified value. + * + * @param days days to set + */ + public void setDays(int days) { + isNull = false; + this.days = days; + } + + /** + * Returns the hours represented by this interval. 
+ * + * @return hours represented by this interval + */ + public int getHours() { + return hours; + } + + /** + * Set the hours of this interval to the specified value. + * + * @param hours hours to set + */ + public void setHours(int hours) { + isNull = false; + this.hours = hours; + } + + /** + * Returns the minutes represented by this interval. + * + * @return minutes represented by this interval + */ + public int getMinutes() { + return minutes; + } + + /** + * Set the minutes of this interval to the specified value. + * + * @param minutes minutes to set + */ + public void setMinutes(int minutes) { + isNull = false; + this.minutes = minutes; + } + + /** + * Returns the seconds represented by this interval. + * + * @return seconds represented by this interval + */ + public double getSeconds() { + return wholeSeconds + (double) microSeconds / MICROS_IN_SECOND; + } + + public int getWholeSeconds() { + return wholeSeconds; + } + + public int getMicroSeconds() { + return microSeconds; + } + + /** + * Set the seconds of this interval to the specified value. + * + * @param seconds seconds to set + */ + public void setSeconds(double seconds) { + isNull = false; + wholeSeconds = (int) seconds; + microSeconds = (int) Math.round((seconds - wholeSeconds) * MICROS_IN_SECOND); + } + + /** + * Rolls this interval on a given calendar. + * + * @param cal Calendar instance to add to + */ + public void add(Calendar cal) { + if (isNull) { + return; + } + + final int milliseconds = (microSeconds + (microSeconds < 0 ? -500 : 500)) / 1000 + wholeSeconds * 1000; + + cal.add(Calendar.MILLISECOND, milliseconds); + cal.add(Calendar.MINUTE, getMinutes()); + cal.add(Calendar.HOUR, getHours()); + cal.add(Calendar.DAY_OF_MONTH, getDays()); + cal.add(Calendar.MONTH, getMonths()); + cal.add(Calendar.YEAR, getYears()); + } + + /** + * Rolls this interval on a given date. 
+ * + * @param date Date instance to add to + */ + public void add(Date date) { + if (isNull) { + return; + } + final Calendar cal = Calendar.getInstance(); + cal.setTime(date); + add(cal); + date.setTime(cal.getTime().getTime()); + } + + /** + * Add this interval's value to the passed interval. This is backwards to what I would expect, but + * this makes it match the other existing add methods. + * + * @param interval intval to add + */ + public void add(PGInterval interval) { + if (isNull || interval.isNull) { + return; + } + interval.setYears(interval.getYears() + getYears()); + interval.setMonths(interval.getMonths() + getMonths()); + interval.setDays(interval.getDays() + getDays()); + interval.setHours(interval.getHours() + getHours()); + interval.setMinutes(interval.getMinutes() + getMinutes()); + interval.setSeconds(interval.getSeconds() + getSeconds()); + } + + /** + * Scale this interval by an integer factor. The server can scale by arbitrary factors, but that + * would require adjusting the call signatures for all the existing methods like getDays() or + * providing our own justification of fractional intervals. Neither of these seem like a good idea + * without a strong use case. + * + * @param factor scale factor + */ + public void scale(int factor) { + if (isNull) { + return; + } + setYears(factor * getYears()); + setMonths(factor * getMonths()); + setDays(factor * getDays()); + setHours(factor * getHours()); + setMinutes(factor * getMinutes()); + setSeconds(factor * getSeconds()); + } + + /** + * Returns integer value of value or 0 if value is null. + * + * @param value integer as string value + * @return integer parsed from string value + * @throws NumberFormatException if the string contains invalid chars + */ + private static int nullSafeIntGet(String value) throws NumberFormatException { + return value == null ? 0 : Integer.parseInt(value); + } + + /** + * Returns double value of value or 0 if value is null. 
+ * + * @param value double as string value + * @return double parsed from string value + * @throws NumberFormatException if the string contains invalid chars + */ + private static double nullSafeDoubleGet(String value) throws NumberFormatException { + return value == null ? 0 : Double.parseDouble(value); + } + + /** + * Returns whether an object is equal to this one or not. + * + * @param obj Object to compare with + * @return true if the two intervals are identical + */ + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (obj == this) { + return true; + } + + if (!(obj instanceof PGInterval)) { + return false; + } + + final PGInterval pgi = (PGInterval) obj; + if (isNull) { + return pgi.isNull; + } else if (pgi.isNull) { + return false; + } + + return pgi.years == years + && pgi.months == months + && pgi.days == days + && pgi.hours == hours + && pgi.minutes == minutes + && pgi.wholeSeconds == wholeSeconds + && pgi.microSeconds == microSeconds; + } + + /** + * Returns a hashCode for this object. + * + * @return hashCode + */ + @Override + public int hashCode() { + if (isNull) { + return 0; + } + return (((((((8 * 31 + microSeconds) * 31 + wholeSeconds) * 31 + minutes) * 31 + hours) * 31 + + days) * 31 + months) * 31 + years) * 31; + } + + @Override + public Object clone() throws CloneNotSupportedException { + // squid:S2157 "Cloneables" should implement "clone + return super.clone(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGJDBCMain.java b/pgjdbc/src/main/java/org/postgresql/util/PGJDBCMain.java new file mode 100644 index 0000000..bf8fd2d --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/PGJDBCMain.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.util; + +import org.postgresql.Driver; + +import java.net.URL; + +public class PGJDBCMain { + + public PGJDBCMain() { + } + + public static void main(String[] args) { + + URL url = Driver.class.getResource("/org/postgresql/Driver.class"); + System.out.printf("%n%s%n", DriverInfo.DRIVER_FULL_NAME); + System.out.printf("Found in: %s%n%n", url); + + System.out.printf("The PgJDBC driver is not an executable Java program.%n%n" + + "You must install it according to the JDBC driver installation " + + "instructions for your application / container / appserver, " + + "then use it by specifying a JDBC URL of the form %n jdbc:postgresql://%n" + + "or using an application specific method.%n%n" + + "See the PgJDBC documentation: http://jdbc.postgresql.org/documentation/head/index.html%n%n" + + "This command has had no effect.%n"); + + System.exit(1); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGPropertyMaxResultBufferParser.java b/pgjdbc/src/main/java/org/postgresql/util/PGPropertyMaxResultBufferParser.java new file mode 100644 index 0000000..ca0ca3b --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/PGPropertyMaxResultBufferParser.java @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2019, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.lang.management.ManagementFactory; +import java.util.logging.Level; +import java.util.logging.Logger; + +public class PGPropertyMaxResultBufferParser { + + private static final Logger LOGGER = Logger.getLogger(PGPropertyMaxResultBufferParser.class.getName()); + + private static final String[] PERCENT_PHRASES = new String[]{ + "p", + "pct", + "percent" + }; + + public PGPropertyMaxResultBufferParser() { + } + + /** + * Method to parse value of max result buffer size. 
+ * + * @param value string containing size of bytes with optional multiplier (T, G, M or K) or percent + * value to declare max percent of heap memory to use. + * @return value of max result buffer size. + * @throws PSQLException Exception when given value can't be parsed. + */ + public static long parseProperty(String value) throws PSQLException { + long result = -1; + if (value == null) { + // default branch + } else if (checkIfValueContainsPercent(value)) { + result = parseBytePercentValue(value); + } else if (!value.isEmpty()) { + result = parseByteValue(value); + } + result = adjustResultSize(result); + return result; + } + + /** + * Method to check if given value can contain percent declaration of size of max result buffer. + * + * @param value Value to check. + * @return Result if value contains percent. + */ + private static boolean checkIfValueContainsPercent(String value) { + return getPercentPhraseLengthIfContains(value) != -1; + } + + /** + * Method to get percent value of max result buffer size dependable on actual free memory. This + * method doesn't check other possibilities of value declaration. + * + * @param value string containing percent used to define max result buffer. + * @return percent value of max result buffer size. + * @throws PSQLException Exception when given value can't be parsed. + */ + private static long parseBytePercentValue(String value) throws PSQLException { + long result = -1; + int length; + + if (!value.isEmpty()) { + length = getPercentPhraseLengthIfContains(value); + + if (length == -1) { + throwExceptionAboutParsingError( + "Received MaxResultBuffer parameter can't be parsed. Value received to parse: {0}", + value); + } + + result = calculatePercentOfMemory(value, length); + } + return result; + } + + /** + * Method to get length of percent phrase existing in given string, only if one of phrases exist + * on the length of string. + * + * @param valueToCheck String which is gonna be checked if contains percent phrase. 
+ * @return Length of phrase inside string, returns -1 when no phrase found. + */ + private static int getPercentPhraseLengthIfContains(String valueToCheck) { + int result = -1; + for (String phrase : PERCENT_PHRASES) { + int indx = getPhraseLengthIfContains(valueToCheck, phrase); + if (indx != -1) { + result = indx; + } + } + return result; + } + + /** + * Method to get length of given phrase in given string to check, method checks if phrase exist on + * the end of given string. + * + * @param valueToCheck String which gonna be checked if contains phrase. + * @param phrase Phrase to be looked for on the end of given string. + * @return Length of phrase inside string, returns -1 when phrase wasn't found. + */ + private static int getPhraseLengthIfContains(String valueToCheck, String phrase) { + int searchValueLength = phrase.length(); + + if (valueToCheck.length() > searchValueLength) { + String subValue = valueToCheck.substring(valueToCheck.length() - searchValueLength); + if (subValue.equals(phrase)) { + return searchValueLength; + } + } + return -1; + } + + /** + * Method to calculate percent of given max heap memory. + * + * @param value String which contains percent + percent phrase which gonna be used + * during calculations. + * @param percentPhraseLength Length of percent phrase inside given value. + * @return Size of byte buffer based on percent of max heap memory. + */ + private static long calculatePercentOfMemory(String value, int percentPhraseLength) { + String realValue = value.substring(0, value.length() - percentPhraseLength); + double percent = Double.parseDouble(realValue) / 100; + return (long) (percent * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()); + } + + /** + * Method to get size based on given string value. String can contains just a number or number + + * multiplier sign (like T, G, M or K). + * + * @param value Given string to be parsed. + * @return Size based on given string. 
+ * @throws PSQLException Exception when given value can't be parsed. + */ + @SuppressWarnings("fallthrough") + private static long parseByteValue(String value) throws PSQLException { + long result = -1; + long multiplier = 1; + long mul = 1000; + String realValue; + char sign = value.charAt(value.length() - 1); + + switch (sign) { + + case 'T': + case 't': + multiplier *= mul; + // fall through + + case 'G': + case 'g': + multiplier *= mul; + // fall through + + case 'M': + case 'm': + multiplier *= mul; + // fall through + + case 'K': + case 'k': + multiplier *= mul; + realValue = value.substring(0, value.length() - 1); + result = Integer.parseInt(realValue) * multiplier; + break; + + case '%': + return result; + + default: + if (sign >= '0' && sign <= '9') { + result = Long.parseLong(value); + } else { + throwExceptionAboutParsingError( + "Received MaxResultBuffer parameter can't be parsed. Value received to parse: {0}", + value); + } + break; + } + return result; + } + + /** + * Method to adjust result memory limit size. If given memory is larger than 90% of max heap + * memory then it gonna be reduced to 90% of max heap memory. + * + * @param value Size to be adjusted. + * @return Adjusted size (original size or 90% of max heap memory) + */ + private static long adjustResultSize(long value) { + if (value > 0.9 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()) { + long newResult = (long) (0.9 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()); + + LOGGER.log(Level.WARNING, GT.tr( + "WARNING! Required to allocate {0} bytes, which exceeded possible heap memory size. Assigned {1} bytes as limit.", + String.valueOf(value), String.valueOf(newResult))); + + value = newResult; + } + return value; + } + + /** + * Method to throw message for parsing MaxResultBuffer. + * + * @param message Message to be added to exception. + * @param values Values to be put inside exception message. 
+ * @throws PSQLException Exception when given value can't be parsed. + */ + private static void throwExceptionAboutParsingError(String message, Object... values) throws PSQLException { + throw new PSQLException(GT.tr( + message, + values), + PSQLState.SYNTAX_ERROR); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGPropertyUtil.java b/pgjdbc/src/main/java/org/postgresql/util/PGPropertyUtil.java new file mode 100644 index 0000000..8c1e344 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/PGPropertyUtil.java @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import org.postgresql.PGProperty; + +import java.util.Locale; +import java.util.Properties; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * routines to support PG properties + */ +public class PGPropertyUtil { + + private static final Logger LOGGER = Logger.getLogger(PGPropertyUtil.class.getName()); + + public PGPropertyUtil() { + } + + /** + * converts PGPORT String to Integer + * + * @param portStr value of port + * @return value of port or null + */ + private static Integer convertPgPortToInt(String portStr) { + try { + int port = Integer.parseInt(portStr); + if (port < 1 || port > 65535) { + LOGGER.log(Level.WARNING, "JDBC URL port: {0} not valid (1:65535) ", portStr); + return null; + } + return port; + } catch (NumberFormatException ignore) { + LOGGER.log(Level.WARNING, "JDBC URL invalid port number: {0}", portStr); + return null; + } + } + + /** + * Validate properties. 
Goal is to detect inconsistencies and report understandable messages + * + * @param properties properties + * @return false if errors found + */ + public static boolean propertiesConsistencyCheck(Properties properties) { + // + String hosts = PGProperty.PG_HOST.getOrDefault(properties); + if (hosts == null) { + LOGGER.log(Level.WARNING, "Property [{0}] can not be null", PGProperty.PG_HOST.getName()); + return false; + } + String ports = PGProperty.PG_PORT.getOrDefault(properties); + if (ports == null) { + LOGGER.log(Level.WARNING, "Property [{0}] can not be null", PGProperty.PG_PORT.getName()); + return false; + } + + // check port values + for (String portStr : ports.split(",")) { + if (PGPropertyUtil.convertPgPortToInt(portStr) == null) { + return false; + } + } + + // check count of hosts and count of ports + int hostCount = hosts.split(",").length; + int portCount = ports.split(",").length; + if (hostCount != portCount) { + LOGGER.log(Level.WARNING, "Properties [{0}] [{1}] must have same amount of values", + new Object[]{PGProperty.PG_HOST.getName(), PGProperty.PG_PORT.getName()}); + LOGGER.log(Level.WARNING, "Property [{0}] ; value [{1}] ; count [{2}]", + new Object[]{PGProperty.PG_HOST.getName(), hosts, hostCount}); + LOGGER.log(Level.WARNING, "Property [{0}] ; value [{1}] ; count [{2}]", + new Object[]{PGProperty.PG_PORT.getName(), ports, portCount}); + return false; + } + // + return true; + } + + /** + * translate PGSERVICEFILE keys host, port, dbname + * Example: "host" becomes "PGHOST" + * + * @param serviceKey key in pg_service.conf + * @return translated property or the same value if translation is not needed + */ + // translate PGSERVICEFILE keys host, port, dbname + public static String translatePGServiceToPGProperty(String serviceKey) { + String testKey = "PG" + serviceKey.toUpperCase(Locale.ROOT); + if ( + PGProperty.PG_HOST.getName().equals(testKey) + || (PGProperty.PG_PORT.getName().equals(testKey)) + || 
(PGProperty.PG_DBNAME.getName().equals(testKey))
    ) {
      return testKey;
    } else {
      return serviceKey;
    }
  }

  /**
   * translate PGSERVICEFILE keys host, port, dbname.
   * Example: "PGHOST" becomes "host"
   *
   * @param propertyKey postgres property
   * @return translated property or the same value if translation is not needed
   */
  public static String translatePGPropertyToPGService(String propertyKey) {
    if (
        PGProperty.PG_HOST.getName().equals(propertyKey)
        || (PGProperty.PG_PORT.getName().equals(propertyKey))
        || (PGProperty.PG_DBNAME.getName().equals(propertyKey))
    ) {
      // Drop the leading "PG" and lower-case the remainder, e.g. "PGHOST" -> "host".
      // Locale.ROOT keeps the case mapping locale-independent (e.g. Turkish dotless i).
      return propertyKey.substring(2).toLowerCase(Locale.ROOT);
    } else {
      return propertyKey;
    }
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGTime.java b/pgjdbc/src/main/java/org/postgresql/util/PGTime.java
new file mode 100644
index 0000000..ddc8ed4
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGTime.java
@@ -0,0 +1,103 @@
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import java.sql.PreparedStatement;
import java.sql.Time;
import java.util.Calendar;

/**
 * This class augments the Java built-in Time to allow for explicit setting of the time zone.
 */
@SuppressWarnings("serial")
public class PGTime extends Time {

  /**
   * The optional calendar for this time; null means the driver treats this as a
   * time without time zone.
   */
  private Calendar calendar;

  /**
   * Constructs a PGTime without a time zone.
   *
   * @param time milliseconds since January 1, 1970, 00:00:00 GMT; a negative number is milliseconds
   *     before January 1, 1970, 00:00:00 GMT.
   * @see Time#Time(long)
   */
  public PGTime(long time) {
    this(time, null);
  }

  /**
   * Constructs a PGTime with the given calendar object. The calendar object is
   * optional. If absent, the driver will treat the time as time without time zone.
   * When present, the driver will treat the time as a time with time zone using the
   * TimeZone in the calendar object. Furthermore, this calendar will be used instead
   * of the calendar object passed to {@link PreparedStatement#setTime(int, Time, Calendar)}.
   *
   * @param time milliseconds since January 1, 1970, 00:00:00 GMT; a negative number is milliseconds
   *     before January 1, 1970, 00:00:00 GMT.
   * @param calendar the calendar object containing the time zone or null.
   * @see Time#Time(long)
   */
  public PGTime(long time, Calendar calendar) {
    super(time);
    this.calendar = calendar;
  }

  /**
   * Sets the calendar object for this time.
   *
   * @param calendar the calendar object or null.
   */
  public void setCalendar(Calendar calendar) {
    this.calendar = calendar;
  }

  /**
   * Returns the calendar object for this time.
   *
   * @return the calendar or null.
   */
  public Calendar getCalendar() {
    return calendar;
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = super.hashCode();
    // Fold the optional calendar into the hash so that objects equal per equals()
    // below also hash equally.
    result = prime * result + (calendar == null ? 0 : calendar.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    if (!super.equals(o)) {
      return false;
    }

    PGTime pgTime = (PGTime) o;

    // Equal only when the optional calendars (time zones) also match.
    return calendar != null ? calendar.equals(pgTime.calendar) : pgTime.calendar == null;
  }

  @Override
  public Object clone() {
    PGTime clone = (PGTime) super.clone();
    Calendar calendar = getCalendar();
    if (calendar != null) {
      // Deep-copy the mutable calendar so the clone cannot be changed through the original.
      clone.setCalendar((Calendar) calendar.clone());
    }
    return clone;
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGTimestamp.java b/pgjdbc/src/main/java/org/postgresql/util/PGTimestamp.java
new file mode 100644
index 0000000..36deb55
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGTimestamp.java
@@ -0,0 +1,108 @@
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import java.sql.Timestamp;
import java.util.Calendar;

/**
 * This class augments the Java built-in Timestamp to allow for explicit setting of the time zone.
 */
@SuppressWarnings("serial")
public class PGTimestamp extends Timestamp {

  /**
   * The optional calendar for this timestamp; null means the driver treats this as a
   * timestamp without time zone.
   */
  private Calendar calendar;

  /**
   * Constructs a PGTimestamp without a time zone. The integral seconds are stored in
   * the underlying date value; the fractional seconds are stored in the nanos field of
   * the Timestamp object.
   *
   * @param time milliseconds since January 1, 1970, 00:00:00 GMT. A negative number is the number
   *     of milliseconds before January 1, 1970, 00:00:00 GMT.
   * @see Timestamp#Timestamp(long)
   */
  public PGTimestamp(long time) {
    this(time, null);
  }

  /**
   * <p>Constructs a PGTimestamp with the given time zone. The integral seconds are stored
   * in the underlying date value; the fractional seconds are stored in the nanos field
   * of the Timestamp object.</p>
   *
   * <p>The calendar object is optional. If absent, the driver will treat the timestamp as
   * timestamp without time zone. When present, the driver will treat the timestamp as
   * a timestamp with time zone using the TimeZone in the calendar object.
   * Furthermore, this calendar will be used instead of the calendar object passed to
   * {@link java.sql.PreparedStatement#setTimestamp(int, Timestamp, Calendar)}.</p>
   *
   * @param time milliseconds since January 1, 1970, 00:00:00 GMT. A negative number is the number
   *     of milliseconds before January 1, 1970, 00:00:00 GMT.
   * @param calendar the calendar object containing the time zone or null.
   * @see Timestamp#Timestamp(long)
   */
  public PGTimestamp(long time, Calendar calendar) {
    super(time);
    this.calendar = calendar;
  }

  /**
   * Sets the calendar object for this timestamp.
   *
   * @param calendar the calendar object or null.
   */
  public void setCalendar(Calendar calendar) {
    this.calendar = calendar;
  }

  /**
   * Returns the calendar object for this timestamp.
   *
   * @return the calendar object or null.
   */
  public Calendar getCalendar() {
    return calendar;
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = super.hashCode();
    // Fold the optional calendar into the hash so that objects equal per equals()
    // below also hash equally.
    result = prime * result + (calendar == null ? 0 : calendar.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    if (!super.equals(o)) {
      return false;
    }

    PGTimestamp that = (PGTimestamp) o;

    // Equal only when the optional calendars (time zones) also match.
    return calendar != null ? calendar.equals(that.calendar) : that.calendar == null;
  }

  @Override
  public Object clone() {
    PGTimestamp clone = (PGTimestamp) super.clone();
    Calendar calendar = getCalendar();
    if (calendar != null) {
      // Deep-copy the mutable calendar so the clone cannot be changed through the original.
      clone.setCalendar((Calendar) calendar.clone());
    }
    return clone;
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGbytea.java b/pgjdbc/src/main/java/org/postgresql/util/PGbytea.java
new file mode 100644
index 0000000..44cc543
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGbytea.java
@@ -0,0 +1,160 @@
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import java.sql.SQLException;

/**
 * Converts to and from the postgresql bytea datatype used by the backend.
 */
public class PGbytea {
  // Threshold above which the octal decoder pre-counts escapes to size the output
  // buffer exactly instead of over-allocating slength bytes.
  private static final int MAX_3_BUFF_SIZE = 2 * 1024 * 1024;

  /**
   * Lookup table for each of the valid ascii code points (offset by {@code '0'})
   * to the 4 bit numeric value.
   */
  private static final int[] HEX_VALS = new int['f' + 1 - '0'];

  static {
    for (int i = 0; i < 10; i++) {
      HEX_VALS[i] = (byte) i;
    }
    for (int i = 0; i < 6; i++) {
      HEX_VALS['A' + i - '0'] = (byte) (10 + i);
      HEX_VALS['a' + i - '0'] = (byte) (10 + i);
    }
  }

  public PGbytea() {
  }

  /*
   * Converts a PG bytea raw value (i.e. the raw binary representation of the bytea data type) into
   * a java byte[]
   */
  public static byte[] toBytes(byte[] s) throws SQLException {
    if (s == null) {
      return null;
    }

    // Starting with PG 9.0, a new hex format is supported
    // that starts with "\x". Figure out which format we're
    // dealing with here.
    //
    if (s.length < 2 || s[0] != '\\' || s[1] != 'x') {
      return toBytesOctalEscaped(s);
    }
    return toBytesHexEscaped(s);
  }

  private static byte[] toBytesHexEscaped(byte[] s) {
    // first 2 bytes of s indicate the byte[] is hex encoded
    // so they need to be ignored here
    final int realLength = s.length - 2;
    byte[] output = new byte[realLength >>> 1];
    for (int i = 0; i < realLength; i += 2) {
      // Two hex digits per output byte: high nibble then low nibble.
      int val = getHex(s[2 + i]) << 4;
      val |= getHex(s[3 + i]);
      output[i >>> 1] = (byte) val;
    }
    return output;
  }

  // NOTE(review): assumes b is a valid hex digit in '0'..'f'; no range check is
  // performed here, so malformed input would index outside the table — confirm
  // callers only pass server-produced hex.
  private static int getHex(byte b) {
    return HEX_VALS[b - '0'];
  }

  private static byte[] toBytesOctalEscaped(byte[] s) {
    final int slength = s.length;
    byte[] buf = null;
    int correctSize = slength;
    if (slength > MAX_3_BUFF_SIZE) {
      // count backslash escapes, they will be either
      // backslashes or an octal escape \\ or \003
      //
      for (int i = 0; i < slength; i++) {
        byte current = s[i];
        if (current == '\\') {
          byte next = s[++i];
          if (next == '\\') {
            --correctSize;
          } else {
            correctSize -= 3;
          }
        }
      }
      buf = new byte[correctSize];
    } else {
      buf = new byte[slength];
    }
    int bufpos = 0;
    int thebyte;
    byte nextbyte;
    byte secondbyte;
    for (int i = 0; i < slength; i++) {
      nextbyte = s[i];
      if (nextbyte == (byte) '\\') {
        secondbyte = s[++i];
        if (secondbyte == (byte) '\\') {
          // escaped \
          buf[bufpos++] = (byte) '\\';
        } else {
          // Three octal digits, e.g. \003 -> value 3 (digits offset by ASCII '0' == 48).
          thebyte = (secondbyte - 48) * 64 + (s[++i] - 48) * 8 + (s[++i] - 48);
          if (thebyte > 127) {
            // wrap into signed byte range
            thebyte -= 256;
          }
          buf[bufpos++] = (byte) thebyte;
        }
      } else {
        buf[bufpos++] = nextbyte;
      }
    }
    if (bufpos == correctSize) {
      return buf;
    }
    // Trim the over-allocated buffer down to the bytes actually written.
    byte[] result = new byte[bufpos];
    System.arraycopy(buf, 0, result, 0, bufpos);
    return result;
  }

  /*
   * Converts a java byte[] into a PG bytea string (i.e. the text representation of the bytea data
   * type)
   */
  public static String toPGString(byte[] buf) {
    if (buf == null) {
      return null;
    }
    StringBuilder stringBuilder = new StringBuilder(2 * buf.length);
    for (byte element : buf) {
      int elementAsInt = (int) element;
      if (elementAsInt < 0) {
        // normalize signed byte to 0..255
        elementAsInt = 256 + elementAsInt;
      }
      // we escape the same non-printable characters as the backend
      // we must escape all 8bit characters otherwise when converting
      // from java unicode to the db character set we may end up with
      // question marks if the character set is SQL_ASCII
      if (elementAsInt < 32 || elementAsInt > 126) {
        // escape character with the form \000, but need two \\ because of
        // the Java parser
        stringBuilder.append("\\");
        stringBuilder.append((char) (((elementAsInt >> 6) & 0x3) + 48));
        stringBuilder.append((char) (((elementAsInt >> 3) & 0x7) + 48));
        stringBuilder.append((char) ((elementAsInt & 0x07) + 48));
      } else if (element == (byte) '\\') {
        // escape the backslash character as \\, but need four \\\\ because
        // of the Java parser
        stringBuilder.append("\\\\");
      } else {
        // other characters are left alone
        stringBuilder.append((char) element);
      }
    }
    return stringBuilder.toString();
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGmoney.java b/pgjdbc/src/main/java/org/postgresql/util/PGmoney.java
new file mode 100644
index 0000000..a8c137f
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGmoney.java
@@ -0,0 +1,122 @@
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import java.io.Serializable;
import java.sql.SQLException;

/**
 * This implements a class that handles the PostgreSQL money and cash types.
 */
@SuppressWarnings("serial")
public class PGmoney extends PGobject implements Serializable, Cloneable {
  /*
   * The value of the field
   */
  public double val;

  /**
   * If the object represents {@code null::money}
   */
  public boolean isNull;

  /**
   * @param value of field
   */
  public PGmoney(double value) {
    this();
    val = value;
  }

  @SuppressWarnings("this-escape")
  public PGmoney(String value) throws SQLException {
    this();
    setValue(value);
  }

  /*
   * Required by the driver
   */
  public PGmoney() {
    type = "money";
  }

  @Override
  public void setValue(String s) throws SQLException {
    isNull = s == null;
    if (s == null) {
      return;
    }
    try {
      String s1;
      boolean negative;

      // Negative amounts come back parenthesized, e.g. "($1,234.56)".
      negative = s.charAt(0) == '(';

      // Remove any () (for negative) & currency symbol
      // NOTE(review): substring(1) assumes a single-character currency symbol — confirm.
      s1 = PGtokenizer.removePara(s).substring(1);

      // Strip out any , in currency
      int pos = s1.indexOf(',');
      while (pos != -1) {
        s1 = s1.substring(0, pos) + s1.substring(pos + 1);
        pos = s1.indexOf(',');
      }

      val = Double.parseDouble(s1);
      val = negative ? -val : val;

    } catch (NumberFormatException e) {
      throw new PSQLException(GT.tr("Conversion of money failed."),
          PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE, e);
    }
  }

  @Override
  public int hashCode() {
    // All null money values hash (and compare) alike.
    if (isNull) {
      return 0;
    }
    final int prime = 31;
    int result = super.hashCode();
    long temp;
    temp = Double.doubleToLongBits(val);
    result = prime * result + (int) (temp ^ (temp >>> 32));
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (obj instanceof PGmoney) {
      PGmoney p = (PGmoney) obj;
      if (isNull) {
        return p.isNull;
      } else if (p.isNull) {
        return false;
      }
      return val == p.val;
    }
    return false;
  }

  @Override
  public String getValue() {
    if (isNull) {
      return null;
    }
    if (val < 0) {
      // Render the sign outside the currency symbol, e.g. -$1.5
      return "-$" + (-val);
    } else {
      return "$" + val;
    }
  }

  @Override
  public Object clone() throws CloneNotSupportedException {
    // squid:S2157 "Cloneables" should implement "clone
    return super.clone();
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGobject.java b/pgjdbc/src/main/java/org/postgresql/util/PGobject.java
new file mode 100644
index 0000000..7c8f468
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGobject.java
@@ -0,0 +1,127 @@
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import java.io.Serializable;
import java.sql.SQLException;

/**
 * PGobject is a class used to describe unknown types An unknown type is any type that is unknown by
 * JDBC Standards.
 */
@SuppressWarnings("serial")
public class PGobject implements Serializable, Cloneable {
  // Backend type name (e.g. "money"); set once via setType.
  protected String type;
  // Text representation of the value; null represents SQL NULL.
  protected String value;

  /**
   * This is called by org.postgresql.Connection.getObject() to create the object.
   */
  public PGobject() {
  }

  /**
   *

   * <p>This method sets the type of this object.</p>
   *
   * <p>It should not be extended by subclasses, hence it is final</p>
   *
   * @param type a string describing the type of the object
   */
  public final void setType(String type) {
    this.type = type;
  }

  /**
   * This method sets the value of this object. It must be overridden.
   *
   * @param value a string representation of the value of the object
   * @throws SQLException thrown if value is invalid for this type
   */
  public void setValue(String value) throws SQLException {
    this.value = value;
  }

  /**
   * As this cannot change during the life of the object, it's final.
   *
   * @return the type name of this object
   */
  public final String getType() {
    return type;
  }

  /**
   * This must be overridden, to return the value of the object, in the form required by
   * org.postgresql.
   *
   * @return the value of this object
   */
  public String getValue() {
    return value;
  }

  /**
   * Returns true if the current object wraps `null` value.
   * This might be helpful
   *
   * @return true if the current object wraps `null` value.
   */
  public boolean isNull() {
    return getValue() == null;
  }

  /**
   * This must be overridden to allow comparisons of objects.
   *
   * @param obj Object to compare with
   * @return true if the two boxes are identical
   */
  @Override
  public boolean equals(Object obj) {
    // Equality is based solely on getValue(); subclasses override getValue()
    // rather than equals() itself.
    if (obj instanceof PGobject) {
      final Object otherValue = ((PGobject) obj).getValue();

      if (otherValue == null) {
        return getValue() == null;
      }
      return otherValue.equals(getValue());
    }
    return false;
  }

  /**
   * This must be overridden to allow the object to be cloned.
   */
  public Object clone() throws CloneNotSupportedException {
    return super.clone();
  }

  /**
   * This is defined here, so user code need not override it.
   *
   * @return the value of this object, in the syntax expected by org.postgresql
   */
  @Override
  @SuppressWarnings("nullness")
  public String toString() {
    return getValue();
  }

  /**
   * Compute hash. As equals() use only value. Return the same hash for the same value.
   *
   * @return Value hashcode, 0 if value is null {@link java.util.Objects#hashCode(Object)}
   */
  @Override
  public int hashCode() {
    String value = getValue();
    return value != null ? value.hashCode() : 0;
  }

  // Null-safe equality helper for subclasses (same contract as java.util.Objects.equals).
  protected static boolean equals(Object a, Object b) {
    return a == b || a != null && a.equals(b);
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGtokenizer.java b/pgjdbc/src/main/java/org/postgresql/util/PGtokenizer.java
new file mode 100644
index 0000000..d0030ce
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGtokenizer.java
@@ -0,0 +1,258 @@
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * This class is used to tokenize the text output of org.postgres. It's mainly used by the geometric
 * classes, but is useful in parsing any output from custom data types output from org.postgresql.
 *
 * @see org.postgresql.geometric.PGbox
 * @see org.postgresql.geometric.PGcircle
 * @see org.postgresql.geometric.PGlseg
 * @see org.postgresql.geometric.PGpath
 * @see org.postgresql.geometric.PGpoint
 * @see org.postgresql.geometric.PGpolygon
 */
public class PGtokenizer {

  // Maps each closing nesting character to its opening counterpart; '"' maps to itself.
  private static final Map<Character, Character> CLOSING_TO_OPENING_CHARACTER = new HashMap<>();

  static {
    CLOSING_TO_OPENING_CHARACTER.put(')', '(');

    CLOSING_TO_OPENING_CHARACTER.put(']', '[');

    CLOSING_TO_OPENING_CHARACTER.put('>', '<');

    CLOSING_TO_OPENING_CHARACTER.put('"', '"');
  }

  // Our tokens
  protected List<String> tokens = new ArrayList<>();

  /**
   *

   * <p>Create a tokeniser.</p>
   *
   * <p>We could have used StringTokenizer to do this, however, we needed to handle nesting of '('
   * ')' '[' ']' '<' and '>' as these are used by the geometric data types.</p>
   *
   * @param string containing tokens
   * @param delim single character to split the tokens
   */
  @SuppressWarnings("this-escape")
  public PGtokenizer(String string, char delim) {
    tokenize(string, delim);
  }

  /**
   * This resets this tokenizer with a new string and/or delimiter.
   *
   * @param string containing tokens
   * @param delim single character to split the tokens
   * @return number of tokens
   */
  public int tokenize(String string, char delim) {
    tokens.clear();

    final Deque<Character> stack = new ArrayDeque<>();

    // stack keeps track of the levels we are in the current token.
    // if stack.size is > 0 then we don't split a token when delim is matched.
    //
    // The Geometric datatypes use this, because often a type may have others
    // (usually PGpoint) embedded within a token.
    //
    // Peter 1998 Jan 6 - Added < and > to the nesting rules
    int p;
    int s;
    // skipChar is true when the previous character was a backslash escape;
    // nestedDoubleQuote is true while scanning inside a double-quoted section.
    boolean skipChar = false;
    boolean nestedDoubleQuote = false;
    char c = (char) 0;
    for (p = 0, s = 0; p < string.length(); p++) {
      c = string.charAt(p);

      // increase nesting if an open character is found
      if (c == '(' || c == '[' || c == '<' || (!nestedDoubleQuote && !skipChar && c == '"')) {
        stack.push(c);
        if (c == '"') {
          nestedDoubleQuote = true;
          skipChar = true;
        }
      }

      // decrease nesting if a close character is found
      if (c == ')' || c == ']' || c == '>' || (nestedDoubleQuote && !skipChar && c == '"')) {

        if (c == '"') {
          // A closing quote unwinds any openers pushed inside the quoted section,
          // then pops the quote itself.
          while (!stack.isEmpty() && !Character.valueOf('"').equals(stack.peek())) {
            stack.pop();
          }
          nestedDoubleQuote = false;
          stack.pop();
        } else {
          // Only pop when this closer matches the most recent opener.
          final Character ch = CLOSING_TO_OPENING_CHARACTER.get(c);
          if (!stack.isEmpty() && ch != null && ch.equals(stack.peek())) {
            stack.pop();
          }
        }
      }

      skipChar = c == '\\';

      // Split only at the top level, i.e. when not inside any nesting.
      if (stack.isEmpty() && c == delim) {
        tokens.add(string.substring(s, p));
        s = p + 1; // +1 to skip the delimiter
      }

    }

    // Don't forget the last token ;-)
    if (s < string.length()) {
      tokens.add(string.substring(s));
    }

    // check for last token empty
    if ( s == string.length() && c == delim) {
      tokens.add("");
    }

    return tokens.size();
  }

  /**
   * @return the number of tokens available
   */
  public int getSize() {
    return tokens.size();
  }

  /**
   * @param n Token number ( 0 ... getSize()-1 )
   * @return The token value
   */
  public String getToken(int n) {
    return tokens.get(n);
  }

  /**
   *

   * <p>This returns a new tokenizer based on one of our tokens.</p>
   *
   * <p>The geometric datatypes use this to process nested tokens (usually PGpoint).</p>
   *
   * @param n Token number ( 0 ... getSize()-1 )
   * @param delim The delimiter to use
   * @return A new instance of PGtokenizer based on the token
   */
  public PGtokenizer tokenizeToken(int n, char delim) {
    return new PGtokenizer(getToken(n), delim);
  }

  /**
   * This removes the lead/trailing strings from a string.
   *
   * @param s Source string
   * @param l Leading string to remove
   * @param t Trailing string to remove
   * @return String without the lead/trailing strings
   */
  public static String remove(String s, String l, String t) {
    if (s.startsWith(l)) {
      s = s.substring(l.length());
    }
    if (s.endsWith(t)) {
      s = s.substring(0, s.length() - t.length());
    }
    return s;
  }

  /**
   * This removes the lead/trailing strings from all tokens.
   *
   * @param l Leading string to remove
   * @param t Trailing string to remove
   */
  public void remove(String l, String t) {
    for (int i = 0; i < tokens.size(); i++) {
      tokens.set(i, remove(tokens.get(i), l, t));
    }
  }

  /**
   * Removes ( and ) from the beginning and end of a string.
   *
   * @param s String to remove from
   * @return String without the ( or )
   */
  public static String removePara(String s) {
    return remove(s, "(", ")");
  }

  /**
   * Removes ( and ) from the beginning and end of all tokens.
   */
  public void removePara() {
    remove("(", ")");
  }

  /**
   * Removes [ and ] from the beginning and end of a string.
   *
   * @param s String to remove from
   * @return String without the [ or ]
   */
  public static String removeBox(String s) {
    return remove(s, "[", "]");
  }

  /**
   * Removes [ and ] from the beginning and end of all tokens.
   */
  public void removeBox() {
    remove("[", "]");
  }

  /**
   * Removes &lt; and &gt; from the beginning and end of a string.
   *
   * @param s String to remove from
   * @return String without the &lt; or &gt;
   */
  public static String removeAngle(String s) {
    return remove(s, "<", ">");
  }

  /**
   * Removes &lt; and &gt; from the beginning and end of all tokens.
   */
  public void removeAngle() {
    remove("<", ">");
  }

  /**
   * Removes curly braces { and } from the beginning and end of a string.
   *
   * @param s String to remove from
   * @return String without the { or }
   */
  public static String removeCurlyBrace(String s) {
    return remove(s, "{", "}");
  }

  /**
   * Removes curly braces { and } from the beginning and end of all tokens.
   */
  public void removeCurlyBrace() {
    remove("{", "}");
  }

}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PSQLException.java b/pgjdbc/src/main/java/org/postgresql/util/PSQLException.java
new file mode 100644
index 0000000..8a1913d
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/PSQLException.java
@@ -0,0 +1,35 @@
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import java.sql.SQLException;

@SuppressWarnings("serial")
public class PSQLException extends SQLException {

  // Parsed backend error, when this exception originates from the server; may be null.
  private ServerErrorMessage serverError;

  public PSQLException(String msg, PSQLState state, Throwable cause) {
    super(msg, state == null ? null : state.getState(), cause);
  }

  public PSQLException(String msg, PSQLState state) {
    super(msg, state == null ? null : state.getState());
  }

  public PSQLException(ServerErrorMessage serverError) {
    this(serverError, true);
  }

  // detail==false uses the non-sensitive message form (no server-supplied detail/hint).
  public PSQLException(ServerErrorMessage serverError, boolean detail) {
    super(detail ?
serverError.toString() : serverError.getNonSensitiveErrorMessage(), serverError.getSQLState());
    this.serverError = serverError;
  }

  public ServerErrorMessage getServerErrorMessage() {
    return serverError;
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PSQLState.java b/pgjdbc/src/main/java/org/postgresql/util/PSQLState.java
new file mode 100644
index 0000000..507ce13
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/PSQLState.java
@@ -0,0 +1,127 @@
/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

/**
 * This class is used for holding SQLState codes.
 */
public enum PSQLState {

  UNKNOWN_STATE(""),

  TOO_MANY_RESULTS("0100E"),

  NO_DATA("02000"),

  INVALID_PARAMETER_TYPE("07006"),

  /**
   * We could establish a connection with the server for unknown reasons. Could be a network
   * problem.
   */
  CONNECTION_UNABLE_TO_CONNECT("08001"),

  CONNECTION_DOES_NOT_EXIST("08003"),

  /**
   * The server rejected our connection attempt. Usually an authentication failure, but could be a
   * configuration error like asking for a SSL connection with a server that wasn't built with SSL
   * support.
   */
  CONNECTION_REJECTED("08004"),

  /**
   * After a connection has been established, it went bad.
   */
  CONNECTION_FAILURE("08006"),
  CONNECTION_FAILURE_DURING_TRANSACTION("08007"),

  /**
   * The server sent us a response the driver was not prepared for and is either bizarre datastream
   * corruption, a driver bug, or a protocol violation on the server's part.
   */
  PROTOCOL_VIOLATION("08P01"),

  COMMUNICATION_ERROR("08S01"),

  NOT_IMPLEMENTED("0A000"),

  // Class 22 - data exceptions
  DATA_ERROR("22000"),
  STRING_DATA_RIGHT_TRUNCATION("22001"),
  NUMERIC_VALUE_OUT_OF_RANGE("22003"),
  BAD_DATETIME_FORMAT("22007"),
  DATETIME_OVERFLOW("22008"),
  DIVISION_BY_ZERO("22012"),
  MOST_SPECIFIC_TYPE_DOES_NOT_MATCH("2200G"),
  INVALID_PARAMETER_VALUE("22023"),

  // Class 23 - integrity constraint violations
  NOT_NULL_VIOLATION("23502"),
  FOREIGN_KEY_VIOLATION("23503"),
  UNIQUE_VIOLATION("23505"),
  CHECK_VIOLATION("23514"),
  EXCLUSION_VIOLATION("23P01"),

  INVALID_CURSOR_STATE("24000"),

  // Class 25 - invalid transaction state
  TRANSACTION_STATE_INVALID("25000"),
  ACTIVE_SQL_TRANSACTION("25001"),
  NO_ACTIVE_SQL_TRANSACTION("25P01"),
  IN_FAILED_SQL_TRANSACTION("25P02"),

  INVALID_SQL_STATEMENT_NAME("26000"),
  INVALID_AUTHORIZATION_SPECIFICATION("28000"),
  INVALID_PASSWORD("28P01"),

  INVALID_TRANSACTION_TERMINATION("2D000"),

  STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL("2F003"),

  INVALID_SAVEPOINT_SPECIFICATION("3B000"),

  SERIALIZATION_FAILURE("40001"),
  DEADLOCK_DETECTED("40P01"),

  // Class 42 - syntax errors and access rule violations
  SYNTAX_ERROR("42601"),
  UNDEFINED_COLUMN("42703"),
  UNDEFINED_OBJECT("42704"),
  WRONG_OBJECT_TYPE("42809"),
  NUMERIC_CONSTANT_OUT_OF_RANGE("42820"),
  DATA_TYPE_MISMATCH("42821"),
  UNDEFINED_FUNCTION("42883"),
  INVALID_NAME("42602"),
  DATATYPE_MISMATCH("42804"),
  CANNOT_COERCE("42846"),
  UNDEFINED_TABLE("42P01"),

  OUT_OF_MEMORY("53200"),
  OBJECT_NOT_IN_STATE("55000"),
  OBJECT_IN_USE("55006"),

  QUERY_CANCELED("57014"),

  SYSTEM_ERROR("60000"),
  IO_ERROR("58030"),

  UNEXPECTED_ERROR("99999");

  private final String state;

  PSQLState(String state) {
    this.state = state;
  }

  public String getState() {
    return this.state;
  }

  // True for the SQLSTATEs that indicate the connection itself is broken or
  // could not be established (class 08 connection errors).
  public static boolean isConnectionError(String psqlState) {
    return PSQLState.CONNECTION_UNABLE_TO_CONNECT.getState().equals(psqlState)
        || PSQLState.CONNECTION_DOES_NOT_EXIST.getState().equals(psqlState)
        || PSQLState.CONNECTION_REJECTED.getState().equals(psqlState)
        || PSQLState.CONNECTION_FAILURE.getState().equals(psqlState)
        || PSQLState.CONNECTION_FAILURE_DURING_TRANSACTION.getState().equals(psqlState);
  }

}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PSQLWarning.java b/pgjdbc/src/main/java/org/postgresql/util/PSQLWarning.java
new file mode 100644
index 0000000..3f45d7d
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/PSQLWarning.java
@@ -0,0 +1,28 @@
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import java.sql.SQLWarning;

@SuppressWarnings("serial")
public class PSQLWarning extends SQLWarning {

  private final ServerErrorMessage serverError;

  public PSQLWarning(ServerErrorMessage err) {
    super(err.toString(), err.getSQLState());
    this.serverError = err;
  }

  // Delegates to the server error so the message reflects the backend's text
  // rather than the full toString() passed to the superclass constructor.
  @Override
  public String getMessage() {
    return serverError.getMessage();
  }

  public ServerErrorMessage getServerErrorMessage() {
    return serverError;
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PasswordUtil.java b/pgjdbc/src/main/java/org/postgresql/util/PasswordUtil.java
new file mode 100644
index 0000000..77301c5
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/PasswordUtil.java
@@ -0,0 +1,151 @@
/*
 * Copyright (c) 2023, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import org.postgresql.core.Utils;

import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Objects;

public class PasswordUtil {
  // NOTE(review): these two constants are not referenced in the visible portion of
  // this class — presumably used by SCRAM-related code elsewhere; confirm before removal.
  private static final int DEFAULT_ITERATIONS = 4096;
  private static final int DEFAULT_SALT_LENGTH = 16;

  // Lazy-initialization holder idiom: the SecureRandom is created only on first use.
  private static class SecureRandomHolder {
    static final SecureRandom INSTANCE = new SecureRandom();
  }

  public PasswordUtil() {
  }

  private static SecureRandom getSecureRandom() {
    return SecureRandomHolder.INSTANCE;
  }

  /**
   * Encode the given password for use with md5 authentication. The PostgreSQL
   * server uses the username as the per-user salt so that must also be provided.
   * The return value of this method is the literal text that may be used when
   * creating or modifying a user with the given password without the surrounding
   * single quotes.
   *
   * @param user The username of the database user
   * @param password The plain text of the user's password. The implementation will zero out the
   *     array after use
   * @return The text representation of the password encrypted for md5
   *     authentication.
   * @deprecated prefer {@link org.postgresql.PGConnection#alterUserPassword(String, char[], String)}
   *     for better security.
   */
  @Deprecated
  @SuppressWarnings("DeprecatedIsStillUsed")
  public static String encodeMd5(String user, char[] password) {
    Objects.requireNonNull(user, "user");
    Objects.requireNonNull(password, "password");
    ByteBuffer passwordBytes = null;
    try {
      passwordBytes = StandardCharsets.UTF_8.encode(CharBuffer.wrap(password));
      byte[] userBytes = user.getBytes(StandardCharsets.UTF_8);
      final MessageDigest md = MessageDigest.getInstance("MD5");

      // Server-side md5 scheme: md5(password || username), hex-encoded, prefixed "md5".
      md.update(passwordBytes);
      md.update(userBytes);
      byte[] digest = md.digest(); // 16-byte MD5

      final byte[] encodedPassword = new byte[35]; // 3 + 2 x 16
      encodedPassword[0] = (byte) 'm';
      encodedPassword[1] = (byte) 'd';
      encodedPassword[2] = (byte) '5';
      MD5Digest.bytesToHex(digest, encodedPassword, 3);

      return new String(encodedPassword, StandardCharsets.UTF_8);
    } catch (NoSuchAlgorithmException e) {
      throw new IllegalStateException("Unable to encode password with MD5", e);
    } finally {
      // Zero out both the caller's char[] and our UTF-8 copy so the plain text
      // does not linger in memory.
      Arrays.fill(password, (char) 0);
      if (passwordBytes != null) {
        if (passwordBytes.hasArray()) {
          Arrays.fill(passwordBytes.array(), (byte) 0);
        } else {
          int limit = passwordBytes.limit();
          for (int i = 0; i < limit; i++) {
            passwordBytes.put(i, (byte) 0);
          }
        }
      }
    }
  }

  /**
   * Encode the given password for the specified encryption type.
   * The word "encryption" is used here to match the verbiage in the PostgreSQL
   * server, i.e. the "password_encryption" setting. In reality, a cryptographic
   * digest / HMAC operation is being performed.
   * The database user is only required for the md5 encryption type.
   *
   * @param user The username of the database user
   * @param password The plain text of the user's password. The implementation will zero
   *     out the array after use
   * @param encryptionType The encryption type for which to encode the user's
   *     password. This should match the database's supported
   *     methods and value of the password_encryption setting.
   * @return The encoded password
   * @throws SQLException If an error occurs encoding the password
   */
  public static String encodePassword(String user, char[] password, String encryptionType)
      throws SQLException {
    Objects.requireNonNull(password, "password");
    Objects.requireNonNull(encryptionType, "encryptionType");
    switch (encryptionType) {
      // Legacy boolean forms of password_encryption are equivalent to md5.
      case "on":
      case "off":
      case "md5":
        return encodeMd5(user, password);
    }
    // If we get here then it's an unhandled encryption type so we must wipe the array ourselves
    Arrays.fill(password, (char) 0);
    throw new PSQLException("Unable to determine encryption type: " + encryptionType, PSQLState.SYSTEM_ERROR);
  }

  /**
   * Generate the SQL statement to alter a user's password using the given
   * encryption.
   * All other encryption settings for the password will use the driver's
   * defaults.
   *
   * @param user The username of the database user
   * @param password The plain text of the user's password. The implementation will zero
   *     out the array after use
   * @param encryptionType The encryption type of the password
   * @return An SQL statement that may be executed to change the user's password
   * @throws SQLException If an error occurs encoding the password
   */
  public static String genAlterUserPasswordSQL(String user, char[] password, String encryptionType)
      throws SQLException {
    try {
      String encodedPassword = encodePassword(user, password, encryptionType);
      StringBuilder sb = new StringBuilder();
      sb.append("ALTER USER ");
      Utils.escapeIdentifier(sb, user);
      sb.append(" PASSWORD '");
      // The choice of true / false for standard conforming strings does not matter
      // here as the value being escaped is generated by us and known to be hex
      // characters for all of the implemented password encryption methods.
      Utils.escapeLiteral(sb, encodedPassword, true);
      sb.append("'");
      return sb.toString();
    } finally {
      // encodePassword normally wipes the array, but wipe again here so the
      // plain text is cleared even if an exception was thrown before encoding.
      Arrays.fill(password, (char) 0);
    }
  }
}
diff --git a/pgjdbc/src/main/java/org/postgresql/util/ReaderInputStream.java b/pgjdbc/src/main/java/org/postgresql/util/ReaderInputStream.java
new file mode 100644
index 0000000..0227bf5
--- /dev/null
+++ b/pgjdbc/src/main/java/org/postgresql/util/ReaderInputStream.java
@@ -0,0 +1,160 @@
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.CoderResult;
import java.nio.charset.StandardCharsets;

/**
 *

ReaderInputStream accepts a UTF-16 char stream (Reader) as input and + * converts it to a UTF-8 byte stream (InputStream) as output.

+ * + *

This is the inverse of java.io.InputStreamReader which converts a + * binary stream to a character stream.

+ */ +public class ReaderInputStream extends InputStream { + private static final int DEFAULT_CHAR_BUFFER_SIZE = 8 * 1024; + + private final Reader reader; + private final CharsetEncoder encoder; + private final ByteBuffer bbuf; + private final CharBuffer cbuf; + + /** + * true when all of the characters have been read from the reader into inbuf. + */ + private boolean endOfInput; + private final byte[] oneByte = new byte[1]; + + public ReaderInputStream(Reader reader) { + this(reader, DEFAULT_CHAR_BUFFER_SIZE); + } + + /** + * Allow ReaderInputStreamTest to use small buffers to force UTF-16 + * surrogate pairs to cross buffer boundaries in interesting ways. + * Because this constructor is package-private, the unit test must be in + * the same package. + */ + ReaderInputStream(Reader reader, int charBufferSize) { + if (reader == null) { + throw new IllegalArgumentException("reader cannot be null"); + } + + // The standard UTF-8 encoder will only encode a UTF-16 surrogate pair + // when both surrogates are available in the CharBuffer. + if (charBufferSize < 2) { + throw new IllegalArgumentException("charBufferSize must be at least 2 chars"); + } + + this.reader = reader; + this.encoder = StandardCharsets.UTF_8.newEncoder(); + // encoder.maxBytesPerChar() always returns 3.0 for UTF-8 + this.bbuf = ByteBuffer.allocate(3 * charBufferSize); + this.bbuf.flip(); // prepare for subsequent write + this.cbuf = CharBuffer.allocate(charBufferSize); + this.cbuf.flip(); // prepare for subsequent write + } + + private void advance() throws IOException { + assert !endOfInput; + assert !bbuf.hasRemaining() + : "advance() should be called when output byte buffer is empty. 
bbuf: " + bbuf + ", as string: " + bbuf.asCharBuffer().toString(); + assert cbuf.remaining() < 2; + + // given that bbuf.capacity = 3 x cbuf.capacity, the only time that we should have a + // remaining char is if the last char read was the 1st half of a surrogate pair + if (cbuf.remaining() == 0) { + cbuf.clear(); + } else { + cbuf.compact(); + } + + int n = reader.read(cbuf); // read #1 + cbuf.flip(); + + CoderResult result; + + endOfInput = n == -1; + + bbuf.clear(); + result = encoder.encode(cbuf, bbuf, endOfInput); + checkEncodeResult(result); + + if (endOfInput) { + result = encoder.flush(bbuf); + checkEncodeResult(result); + } + + bbuf.flip(); + } + + private void checkEncodeResult(CoderResult result) throws CharacterCodingException { + if (result.isError()) { + result.throwException(); + } + } + + @Override + public int read() throws IOException { + int res = 0; + while (res != -1) { + res = read(oneByte); + if (res > 0) { + return oneByte[0] & 0xFF; + } + } + return -1; + } + + // The implementation of InputStream.read(byte[], int, int) silently ignores + // an IOException thrown by overrides of the read() method. 
+ @Override + public int read(byte[] b, int off, int len) throws IOException { + if (b == null) { + throw new NullPointerException(); + } else if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return 0; + } + if (endOfInput && !bbuf.hasRemaining()) { + return -1; + } + + int totalRead = 0; + while (len > 0 && !endOfInput) { + if (bbuf.hasRemaining()) { + int remaining = Math.min(len, bbuf.remaining()); + bbuf.get(b, off, remaining); + totalRead += remaining; + off += remaining; + len -= remaining; + if (len == 0) { + return totalRead; + } + } + advance(); + } + if (endOfInput && !bbuf.hasRemaining() && totalRead == 0) { + return -1; + } + return totalRead; + } + + @Override + public void close() throws IOException { + endOfInput = true; + reader.close(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/ServerErrorMessage.java b/pgjdbc/src/main/java/org/postgresql/util/ServerErrorMessage.java new file mode 100644 index 0000000..6b60de5 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/ServerErrorMessage.java @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.util; + +import org.postgresql.core.EncodingPredictor; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; + +@SuppressWarnings("serial") +public class ServerErrorMessage implements Serializable { + private static final Logger LOGGER = Logger.getLogger(ServerErrorMessage.class.getName()); + + private static final Character SEVERITY = 'S'; + private static final Character MESSAGE = 'M'; + private static final Character DETAIL = 'D'; + private static final Character HINT = 'H'; + private static final Character POSITION = 'P'; + private static final Character WHERE = 'W'; + private static final Character FILE = 'F'; + private static final Character LINE = 'L'; + private static final Character ROUTINE = 'R'; + private static final Character SQLSTATE = 'C'; + private static final Character INTERNAL_POSITION = 'p'; + private static final Character INTERNAL_QUERY = 'q'; + private static final Character SCHEMA = 's'; + private static final Character TABLE = 't'; + private static final Character COLUMN = 'c'; + private static final Character DATATYPE = 'd'; + private static final Character CONSTRAINT = 'n'; + + private final Map mesgParts = new HashMap<>(); + + public ServerErrorMessage(EncodingPredictor.DecodeResult serverError) { + this(serverError.result); + if (serverError.encoding != null) { + mesgParts.put(MESSAGE, mesgParts.get(MESSAGE) + + GT.tr(" (pgjdbc: autodetected server-encoding to be {0}, if the message is not readable, please check database logs and/or host, port, dbname, user, password, pg_hba.conf)", + serverError.encoding) + ); + } + } + + public ServerErrorMessage(String serverError) { + char[] chars = serverError.toCharArray(); + int pos = 0; + int length = chars.length; + while (pos < length) { + char mesgType = chars[pos]; + if (mesgType != '\0') { + pos++; + int startString = pos; + // order here is important position must 
be checked before accessing the array + while (pos < length && chars[pos] != '\0') { + pos++; + } + String mesgPart = new String(chars, startString, pos - startString); + mesgParts.put(mesgType, mesgPart); + } + pos++; + } + } + + public String getSQLState() { + return mesgParts.get(SQLSTATE); + } + + public String getMessage() { + return mesgParts.get(MESSAGE); + } + + public String getSeverity() { + return mesgParts.get(SEVERITY); + } + + public String getDetail() { + return mesgParts.get(DETAIL); + } + + public String getHint() { + return mesgParts.get(HINT); + } + + public int getPosition() { + return getIntegerPart(POSITION); + } + + public String getWhere() { + return mesgParts.get(WHERE); + } + + public String getSchema() { + return mesgParts.get(SCHEMA); + } + + public String getTable() { + return mesgParts.get(TABLE); + } + + public String getColumn() { + return mesgParts.get(COLUMN); + } + + public String getDatatype() { + return mesgParts.get(DATATYPE); + } + + public String getConstraint() { + return mesgParts.get(CONSTRAINT); + } + + public String getFile() { + return mesgParts.get(FILE); + } + + public int getLine() { + return getIntegerPart(LINE); + } + + public String getRoutine() { + return mesgParts.get(ROUTINE); + } + + public String getInternalQuery() { + return mesgParts.get(INTERNAL_QUERY); + } + + public int getInternalPosition() { + return getIntegerPart(INTERNAL_POSITION); + } + + private int getIntegerPart(Character c) { + String s = mesgParts.get(c); + if (s == null) { + return 0; + } + return Integer.parseInt(s); + } + + String getNonSensitiveErrorMessage() { + StringBuilder totalMessage = new StringBuilder(); + String message = mesgParts.get(SEVERITY); + if (message != null) { + totalMessage.append(message).append(": "); + } + message = mesgParts.get(MESSAGE); + if (message != null) { + totalMessage.append(message); + } + return totalMessage.toString(); + } + + @Override + public String toString() { + // Now construct the message from 
what the server sent + // The general format is: + // SEVERITY: Message \n + // Detail: \n + // Hint: \n + // Position: \n + // Where: \n + // Internal Query: \n + // Internal Position: \n + // Location: File:Line:Routine \n + // SQLState: \n + // + // Normally only the message and detail is included. + // If INFO level logging is enabled then detail, hint, position and where are + // included. If DEBUG level logging is enabled then all information + // is included. + + StringBuilder totalMessage = new StringBuilder(); + String message = mesgParts.get(SEVERITY); + if (message != null) { + totalMessage.append(message).append(": "); + } + message = mesgParts.get(MESSAGE); + if (message != null) { + totalMessage.append(message); + } + message = mesgParts.get(DETAIL); + if (message != null) { + totalMessage.append("\n ").append(GT.tr("Detail: {0}", message)); + } + + message = mesgParts.get(HINT); + if (message != null) { + totalMessage.append("\n ").append(GT.tr("Hint: {0}", message)); + } + message = mesgParts.get(POSITION); + if (message != null) { + totalMessage.append("\n ").append(GT.tr("Position: {0}", message)); + } + message = mesgParts.get(WHERE); + if (message != null) { + totalMessage.append("\n ").append(GT.tr("Where: {0}", message)); + } + + if (LOGGER.isLoggable(Level.FINEST)) { + String internalQuery = mesgParts.get(INTERNAL_QUERY); + if (internalQuery != null) { + totalMessage.append("\n ").append(GT.tr("Internal Query: {0}", internalQuery)); + } + String internalPosition = mesgParts.get(INTERNAL_POSITION); + if (internalPosition != null) { + totalMessage.append("\n ").append(GT.tr("Internal Position: {0}", internalPosition)); + } + + String file = mesgParts.get(FILE); + String line = mesgParts.get(LINE); + String routine = mesgParts.get(ROUTINE); + if (file != null || line != null || routine != null) { + totalMessage.append("\n ").append(GT.tr("Location: File: {0}, Routine: {1}, Line: {2}", + file, routine, line)); + } + message = 
mesgParts.get(SQLSTATE); + if (message != null) { + totalMessage.append("\n ").append(GT.tr("Server SQLState: {0}", message)); + } + } + + return totalMessage.toString(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/SharedTimer.java b/pgjdbc/src/main/java/org/postgresql/util/SharedTimer.java new file mode 100644 index 0000000..b96fdc8 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/SharedTimer.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import org.postgresql.jdbc.ResourceLock; + +import java.util.Timer; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Level; +import java.util.logging.Logger; + +@SuppressWarnings("try") +public class SharedTimer { + static class TimerCleanup implements LazyCleaner.CleaningAction { + private final Timer timer; + + TimerCleanup(Timer timer) { + this.timer = timer; + } + + @Override + public void onClean(boolean leak) throws RuntimeException { + timer.cancel(); + } + } + + // Incremented for each Timer created, this allows each to have a unique Timer name + private static final AtomicInteger timerCount = new AtomicInteger(0); + + private static final Logger LOGGER = Logger.getLogger(SharedTimer.class.getName()); + private volatile Timer timer; + private final AtomicInteger refCount = new AtomicInteger(0); + private final ResourceLock lock = new ResourceLock(); + private LazyCleaner.Cleanable timerCleanup; + + public SharedTimer() { + } + + public int getRefCount() { + return refCount.get(); + } + + public Timer getTimer() { + try (ResourceLock ignore = lock.obtain()) { + Timer timer = this.timer; + if (timer == null) { + int index = timerCount.incrementAndGet(); + + /* + Temporarily switch contextClassLoader to the one that loaded this driver to avoid TimerThread preventing current + contextClassLoader - which may be the ClassLoader 
of a web application - from being GC:ed. + */ + final ClassLoader prevContextCL = Thread.currentThread().getContextClassLoader(); + try { + /* + Scheduled tasks should not need to use .getContextClassLoader, so we just reset it to null + */ + Thread.currentThread().setContextClassLoader(null); + + this.timer = timer = new Timer("PostgreSQL-JDBC-SharedTimer-" + index, true); + this.timerCleanup = LazyCleaner.getInstance().register(refCount, new TimerCleanup(timer)); + } finally { + Thread.currentThread().setContextClassLoader(prevContextCL); + } + } + refCount.incrementAndGet(); + return timer; + } + } + + public void releaseTimer() { + try (ResourceLock ignore = lock.obtain()) { + int count = refCount.decrementAndGet(); + if (count > 0) { + // There are outstanding references to the timer so do nothing + LOGGER.log(Level.FINEST, "Outstanding references still exist so not closing shared Timer"); + } else if (count == 0) { + // This is the last usage of the Timer so cancel it so it's resources can be release. + LOGGER.log(Level.FINEST, "No outstanding references to shared Timer, will cancel and close it"); + if (timerCleanup != null) { + timerCleanup.clean(); + timer = null; + timerCleanup = null; + } + } else { + // Should not get here under normal circumstance, probably a bug in app code. + LOGGER.log(Level.WARNING, + "releaseTimer() called too many times; there is probably a bug in the calling code"); + refCount.set(0); + } + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/StreamWrapper.java b/pgjdbc/src/main/java/org/postgresql/util/StreamWrapper.java new file mode 100644 index 0000000..1d3961c --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/StreamWrapper.java @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ +// Copyright (c) 2004, Open Cloud Limited. 
+ +package org.postgresql.util; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * Wrapper around a length-limited InputStream. + * + * @author Oliver Jowett (oliver@opencloud.com) + */ +public final class StreamWrapper implements Closeable { + + private static final int MAX_MEMORY_BUFFER_BYTES = 51200; + + private static final String TEMP_FILE_PREFIX = "postgres-pgjdbc-stream"; + + public StreamWrapper(byte[] data, int offset, int length) { + this.stream = null; + this.rawData = data; + this.offset = offset; + this.length = length; + } + + public StreamWrapper(InputStream stream, int length) { + this.stream = stream; + this.rawData = null; + this.offset = 0; + this.length = length; + } + + public StreamWrapper(InputStream stream) throws PSQLException { + try { + ByteArrayOutputStream memoryOutputStream = new ByteArrayOutputStream(); + final int memoryLength = copyStream(stream, memoryOutputStream, MAX_MEMORY_BUFFER_BYTES); + byte[] rawData = memoryOutputStream.toByteArray(); + + if (memoryLength == -1) { + final int diskLength; + final Path tempFile = Files.createTempFile(TEMP_FILE_PREFIX, ".tmp"); + try (OutputStream diskOutputStream = Files.newOutputStream(tempFile)) { + diskOutputStream.write(rawData); + diskLength = copyStream(stream, diskOutputStream, Integer.MAX_VALUE - rawData.length); + if (diskLength == -1) { + throw new PSQLException(GT.tr("Object is too large to send over the protocol."), + PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE); + } + } catch (RuntimeException | Error | PSQLException e) { + try { + tempFile.toFile().delete(); + } catch (Throwable ignore) { + } + throw e; + } + // The finalize action is not created if the above code throws + this.offset = 0; + this.length = rawData.length + diskLength; + this.rawData = null; + this.stream = 
null; // The stream is opened on demand + TempFileHolder tempFileHolder = new TempFileHolder(tempFile); + this.tempFileHolder = tempFileHolder; + cleaner = LazyCleaner.getInstance().register(leakHandle, tempFileHolder); + } else { + this.rawData = rawData; + this.stream = null; + this.offset = 0; + this.length = rawData.length; + } + } catch (IOException e) { + throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."), + PSQLState.IO_ERROR, e); + } + } + + public InputStream getStream() throws IOException { + if (stream != null) { + return stream; + } + TempFileHolder finalizeAction = this.tempFileHolder; + if (finalizeAction != null) { + return finalizeAction.getStream(); + } + + return new ByteArrayInputStream(rawData, offset, length); + } + + @Override + public void close() throws IOException { + if (cleaner != null) { + cleaner.clean(); + } + } + + public int getLength() { + return length; + } + + public int getOffset() { + return offset; + } + + public byte [] getBytes() { + return rawData; + } + + @Override + public String toString() { + return ""; + } + + private static int copyStream(InputStream inputStream, OutputStream outputStream, int limit) + throws IOException { + int totalLength = 0; + byte[] buffer = new byte[2048]; + int readLength = inputStream.read(buffer); + while (readLength > 0) { + totalLength += readLength; + outputStream.write(buffer, 0, readLength); + if (totalLength >= limit) { + return -1; + } + readLength = inputStream.read(buffer); + } + return totalLength; + } + + private final InputStream stream; + private TempFileHolder tempFileHolder; + private final Object leakHandle = new Object(); + private LazyCleaner.Cleanable cleaner; + private final byte [] rawData; + private final int offset; + private final int length; +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/TempFileHolder.java b/pgjdbc/src/main/java/org/postgresql/util/TempFileHolder.java new file mode 100644 index 0000000..367ae90 --- /dev/null 
+++ b/pgjdbc/src/main/java/org/postgresql/util/TempFileHolder.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2023, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * The action deletes temporary file in case the user submits a large input stream, + * and then abandons the statement. + */ +class TempFileHolder implements LazyCleaner.CleaningAction { + + private static final Logger LOGGER = Logger.getLogger(StreamWrapper.class.getName()); + private InputStream stream; + private Path tempFile; + + TempFileHolder(Path tempFile) { + this.tempFile = tempFile; + } + + public InputStream getStream() throws IOException { + InputStream stream = this.stream; + if (stream == null) { + stream = Files.newInputStream(tempFile); + this.stream = stream; + } + return stream; + } + + @Override + public void onClean(boolean leak) throws IOException { + if (leak) { + LOGGER.log(Level.WARNING, GT.tr("StreamWrapper leak detected StreamWrapper.close() was not called. ")); + } + Path tempFile = this.tempFile; + if (tempFile != null) { + tempFile.toFile().delete(); + this.tempFile = null; + } + InputStream stream = this.stream; + if (stream != null) { + stream.close(); + this.stream = null; + } + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/util/URLCoder.java b/pgjdbc/src/main/java/org/postgresql/util/URLCoder.java new file mode 100644 index 0000000..d92ab94 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/util/URLCoder.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.util; + +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.net.URLEncoder; + +/** + *

This class helps with URL encoding and decoding. UTF-8 encoding is used by default to make + * encoding consistent across the driver, and encoding might be changed via {@code + * postgresql.url.encoding} property

+ * + *

Note: this should not be used outside of PostgreSQL source, this is not a public API of the + * driver.

+ */ +public final class URLCoder { + private static final String ENCODING_FOR_URL = + System.getProperty("postgresql.url.encoding", "UTF-8"); + + + public URLCoder() { + } + + /** + * Decodes {@code x-www-form-urlencoded} string into Java string. + * + * @param encoded encoded value + * @return decoded value + * @see URLDecoder#decode(String, String) + */ + public static String decode(String encoded) { + try { + return URLDecoder.decode(encoded, ENCODING_FOR_URL); + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException( + "Unable to decode URL entry via " + ENCODING_FOR_URL + ". This should not happen", e); + } + } + + /** + * Encodes Java string into {@code x-www-form-urlencoded} format + * + * @param plain input value + * @return encoded value + * @see URLEncoder#encode(String, String) + */ + public static String encode(String plain) { + try { + return URLEncoder.encode(plain, "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException( + "Unable to encode URL entry via " + ENCODING_FOR_URL + ". This should not happen", e); + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/xa/PGXAConnection.java b/pgjdbc/src/main/java/org/postgresql/xa/PGXAConnection.java new file mode 100644 index 0000000..b785722 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/xa/PGXAConnection.java @@ -0,0 +1,688 @@ +/* + * Copyright (c) 2009, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.xa; + +import org.postgresql.PGConnection; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.TransactionState; +import org.postgresql.ds.PGPooledConnection; +import org.postgresql.util.GT; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.LinkedList; +import java.util.logging.Level; +import java.util.logging.Logger; + +import javax.sql.XAConnection; +import javax.transaction.xa.XAException; +import javax.transaction.xa.XAResource; +import javax.transaction.xa.Xid; + +/** + *

The PostgreSQL implementation of {@link XAResource}.

+ * + *

This implementation doesn't support transaction interleaving (see JTA specification, section + * 3.4.4) and suspend/resume.

+ * + *

Two-phase commit requires PostgreSQL server version 8.1 or higher.

+ * + * @author Heikki Linnakangas (heikki.linnakangas@iki.fi) + */ +public class PGXAConnection extends PGPooledConnection implements XAConnection, XAResource { + + private static final Logger LOGGER = Logger.getLogger(PGXAConnection.class.getName()); + + /** + * Underlying physical database connection. It's used for issuing PREPARE TRANSACTION/ COMMIT + * PREPARED/ROLLBACK PREPARED commands. + */ + private final BaseConnection conn; + + private Xid currentXid; + + private State state; + private Xid preparedXid; + private boolean committedOrRolledBack; + + /* + * When an XA transaction is started, we put the underlying connection into non-autocommit mode. + * The old setting is saved in localAutoCommitMode, so that we can restore it when the XA + * transaction ends and the connection returns into local transaction mode. + */ + private boolean localAutoCommitMode = true; + + private void debug(String s) { + if (LOGGER.isLoggable(Level.FINEST)) { + LOGGER.log(Level.FINEST, "XAResource {0}: {1}", new Object[]{Integer.toHexString(this.hashCode()), s}); + } + } + + public PGXAConnection(BaseConnection conn) throws SQLException { + super(conn, true, true); + this.conn = conn; + this.state = State.IDLE; + } + + /** + * XAConnection interface. + */ + @SuppressWarnings("rawtypes") + @Override + public Connection getConnection() throws SQLException { + Connection conn = super.getConnection(); + + // When we're outside an XA transaction, autocommit + // is supposed to be true, per usual JDBC convention. + // When an XA transaction is in progress, it should be + // false. 
+ if (state == State.IDLE) { + conn.setAutoCommit(true); + } + + /* + * Wrap the connection in a proxy to forbid application from fiddling with transaction state + * directly during an XA transaction + */ + ConnectionHandler handler = new ConnectionHandler(conn); + return (Connection) Proxy.newProxyInstance(getClass().getClassLoader(), + new Class[]{Connection.class, PGConnection.class}, handler); + } + + @Override + public XAResource getXAResource() { + return this; + } + + /* + * A java.sql.Connection proxy class to forbid calls to transaction control methods while the + * connection is used for an XA transaction. + */ + private class ConnectionHandler implements InvocationHandler { + private final Connection con; + + ConnectionHandler(Connection con) { + this.con = con; + } + + @Override + @SuppressWarnings("throwing.nullable") + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + if (state != State.IDLE) { + String methodName = method.getName(); + if ("commit".equals(methodName) + || "rollback".equals(methodName) + || "setSavePoint".equals(methodName) + || ("setAutoCommit".equals(methodName) && (Boolean) args[0])) { + throw new PSQLException( + GT.tr( + "Transaction control methods setAutoCommit(true), commit, rollback and setSavePoint not allowed while an XA transaction is active."), + PSQLState.OBJECT_NOT_IN_STATE); + } + } + try { + /* + * If the argument to equals-method is also a wrapper, present the original unwrapped + * connection to the underlying equals method. 
+ */ + if ("equals".equals(method.getName()) && args.length == 1) { + Object arg = args[0]; + if (arg != null && Proxy.isProxyClass(arg.getClass())) { + InvocationHandler h = Proxy.getInvocationHandler(arg); + if (h instanceof ConnectionHandler) { + // unwrap argument + args = new Object[]{((ConnectionHandler) h).con}; + } + } + } + + return method.invoke(con, args); + } catch (InvocationTargetException ex) { + throw ex.getTargetException(); + } + } + } + + /** + *

Preconditions:

+ *
    + *
  1. Flags must be one of TMNOFLAGS, TMRESUME or TMJOIN
  2. + *
  3. xid != null
  4. + *
  5. Connection must not be associated with a transaction
  6. + *
  7. The TM hasn't seen the xid before
  8. + *
+ * + *

Implementation deficiency preconditions:

+ *
    + *
  1. TMRESUME not supported.
  2. + *
  3. If flags is TMJOIN, we must be in ended state, and xid must be the current transaction
  4. + *
  5. Unless flags is TMJOIN, previous transaction using the connection must be committed or prepared or rolled + * back
  6. + *
+ * + *

Postconditions:

+ *
    + *
  1. Connection is associated with the transaction
  2. + *
+ */ + @Override + public void start(Xid xid, int flags) throws XAException { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("starting transaction xid = " + xid); + } + + // Check preconditions + if (flags != XAResource.TMNOFLAGS && flags != XAResource.TMRESUME + && flags != XAResource.TMJOIN) { + throw new PGXAException(GT.tr("Invalid flags {0}", flags), XAException.XAER_INVAL); + } + + if (xid == null) { + throw new PGXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL); + } + + if (state == State.ACTIVE) { + throw new PGXAException(GT.tr("Connection is busy with another transaction"), + XAException.XAER_PROTO); + } + + // We can't check precondition 4 easily, so we don't. Duplicate xid will be catched in prepare + // phase. + + // Check implementation deficiency preconditions + if (flags == TMRESUME) { + throw new PGXAException(GT.tr("suspend/resume not implemented"), XAException.XAER_RMERR); + } + + // It's ok to join an ended transaction. WebLogic does that. + if (flags == TMJOIN) { + if (state != State.ENDED) { + throw new PGXAException( + GT.tr( + "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}", + xid, currentXid, state, flags), XAException.XAER_RMERR); + } + + if (!xid.equals(currentXid)) { + throw new PGXAException( + GT.tr( + "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}", + xid, currentXid, state, flags), XAException.XAER_RMERR); + } + } else if (state == State.ENDED) { + throw new PGXAException(GT.tr("Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}", xid, currentXid, state, flags), + XAException.XAER_RMERR); + } + + // Only need save localAutoCommitMode for NOFLAGS, TMRESUME and TMJOIN already saved old + // localAutoCommitMode. 
+ if (flags == TMNOFLAGS) { + try { + localAutoCommitMode = conn.getAutoCommit(); + conn.setAutoCommit(false); + } catch (SQLException ex) { + throw new PGXAException(GT.tr("Error disabling autocommit"), ex, XAException.XAER_RMERR); + } + } + + // Preconditions are met, Associate connection with the transaction + state = State.ACTIVE; + currentXid = xid; + preparedXid = null; + committedOrRolledBack = false; + } + + /** + *

Preconditions:

+ *
    + *
  1. Flags is one of TMSUCCESS, TMFAIL, TMSUSPEND
  2. + *
  3. xid != null
  4. + *
  5. Connection is associated with transaction xid
  6. + *
+ * + *

Implementation deficiency preconditions:

+ *
    + *
  1. Flags is not TMSUSPEND
  2. + *
+ * + *

Postconditions:

+ *
    + *
  1. Connection is disassociated from the transaction.
  2. + *
+ */ + @Override + public void end(Xid xid, int flags) throws XAException { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("ending transaction xid = " + xid); + } + + // Check preconditions + + if (flags != XAResource.TMSUSPEND && flags != XAResource.TMFAIL + && flags != XAResource.TMSUCCESS) { + throw new PGXAException(GT.tr("Invalid flags {0}", flags), XAException.XAER_INVAL); + } + + if (xid == null) { + throw new PGXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL); + } + + if (state != State.ACTIVE || !xid.equals(currentXid)) { + throw new PGXAException(GT.tr("tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}", state, xid, currentXid, preparedXid), + XAException.XAER_PROTO); + } + + // Check implementation deficiency preconditions + if (flags == XAResource.TMSUSPEND) { + throw new PGXAException(GT.tr("suspend/resume not implemented"), XAException.XAER_RMERR); + } + + // We ignore TMFAIL. It's just a hint to the RM. We could roll back immediately + // if TMFAIL was given. + + // All clear. We don't have any real work to do. + state = State.ENDED; + } + + /** + *

   * Prepares transaction. Preconditions:
   * <ol>
   *   <li>{@code xid != null}</li>
   *   <li>xid is in ended state</li>
   * </ol>
   *
   * <p>Implementation deficiency preconditions:</p>
   * <ol>
   *   <li>xid was associated with this connection</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   *   <li>Transaction is prepared</li>
   * </ol>
+ */ + @Override + public int prepare(Xid xid) throws XAException { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("preparing transaction xid = " + xid); + } + + // Check preconditions + if (currentXid == null && preparedXid != null) { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("Prepare xid " + xid + " but current connection is not attached to a transaction" + + " while it was prepared in past with prepared xid " + preparedXid); + } + throw new PGXAException(GT.tr( + "Preparing already prepared transaction, the prepared xid {0}, prepare xid={1}", preparedXid, xid), XAException.XAER_PROTO); + } else if (currentXid == null) { + throw new PGXAException(GT.tr( + "Current connection does not have an associated xid. prepare xid={0}", xid), XAException.XAER_NOTA); + } + if (!currentXid.equals(xid)) { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("Error to prepare xid " + xid + ", the current connection already bound with xid " + currentXid); + } + throw new PGXAException(GT.tr( + "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}", currentXid, xid), + XAException.XAER_RMERR); + } + if (state != State.ENDED) { + throw new PGXAException(GT.tr("Prepare called before end. prepare xid={0}, state={1}", xid), XAException.XAER_INVAL); + } + + state = State.IDLE; + preparedXid = currentXid; + currentXid = null; + + try { + String s = RecoveredXid.xidToString(xid); + + Statement stmt = conn.createStatement(); + try { + stmt.executeUpdate("PREPARE TRANSACTION '" + s + "'"); + } finally { + stmt.close(); + } + conn.setAutoCommit(localAutoCommitMode); + + return XA_OK; + } catch (SQLException ex) { + throw new PGXAException(GT.tr("Error preparing transaction. prepare xid={0}", xid), ex, mapSQLStateToXAErrorCode(ex)); + } + } + + /** + *

   * Recovers transaction. Preconditions:
   * <ol>
   *   <li>flag must be one of TMSTARTRSCAN, TMENDRSCAN, TMNOFLAGS or TMSTARTRSCAN | TMENDRSCAN</li>
   *   <li>If flag isn't TMSTARTRSCAN or TMSTARTRSCAN | TMENDRSCAN, a recovery scan must be in progress</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   *   <li>list of prepared xids is returned</li>
   * </ol>
+ */ + @Override + public Xid[] recover(int flag) throws XAException { + // Check preconditions + if (flag != TMSTARTRSCAN && flag != TMENDRSCAN && flag != TMNOFLAGS + && flag != (TMSTARTRSCAN | TMENDRSCAN)) { + throw new PGXAException(GT.tr("Invalid flags {0}", flag), XAException.XAER_INVAL); + } + + // We don't check for precondition 2, because we would have to add some additional state in + // this object to keep track of recovery scans. + + // All clear. We return all the xids in the first TMSTARTRSCAN call, and always return + // an empty array otherwise. + if ((flag & TMSTARTRSCAN) == 0) { + return new Xid[0]; + } else { + try { + Statement stmt = conn.createStatement(); + try { + // If this connection is simultaneously used for a transaction, + // this query gets executed inside that transaction. It's OK, + // except if the transaction is in abort-only state and the + // backed refuses to process new queries. Hopefully not a problem + // in practise. + ResultSet rs = stmt.executeQuery( + "SELECT gid FROM pg_prepared_xacts where database = current_database()"); + LinkedList l = new LinkedList<>(); + while (rs.next()) { + Xid recoveredXid = RecoveredXid.stringToXid(rs.getString(1)); + if (recoveredXid != null) { + l.add(recoveredXid); + } + } + rs.close(); + + return l.toArray(new Xid[0]); + } finally { + stmt.close(); + } + } catch (SQLException ex) { + throw new PGXAException(GT.tr("Error during recover"), ex, XAException.XAER_RMERR); + } + } + } + + /** + *

   * <p>Preconditions:</p>
   * <ol>
   *   <li>xid is known to the RM or it's in prepared state</li>
   * </ol>
   *
   * <p>Implementation deficiency preconditions:</p>
   * <ol>
   *   <li>xid must be associated with this connection if it's not in prepared state.</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   *   <li>Transaction is rolled back and disassociated from connection</li>
   * </ol>
+ */ + @Override + public void rollback(Xid xid) throws XAException { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("rolling back xid = " + xid); + } + + // We don't explicitly check precondition 1. + + try { + if (currentXid != null && currentXid.equals(xid)) { + state = State.IDLE; + currentXid = null; + conn.rollback(); + conn.setAutoCommit(localAutoCommitMode); + } else { + String s = RecoveredXid.xidToString(xid); + + conn.setAutoCommit(true); + Statement stmt = conn.createStatement(); + try { + stmt.executeUpdate("ROLLBACK PREPARED '" + s + "'"); + } finally { + stmt.close(); + } + } + committedOrRolledBack = true; + } catch (SQLException ex) { + int errorCode = XAException.XAER_RMERR; + if (PSQLState.UNDEFINED_OBJECT.getState().equals(ex.getSQLState())) { + if (committedOrRolledBack || !xid.equals(preparedXid)) { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("rolling back xid " + xid + " while the connection prepared xid is " + preparedXid + + (committedOrRolledBack ? ", but the connection was already committed/rolled-back" : "")); + } + errorCode = XAException.XAER_NOTA; + } + } + if (PSQLState.isConnectionError(ex.getSQLState())) { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("rollback connection failure (sql error code " + ex.getSQLState() + "), reconnection could be expected"); + } + errorCode = XAException.XAER_RMFAIL; + } + throw new PGXAException(GT.tr("Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}", xid, preparedXid, currentXid), ex, errorCode); + } + } + + @Override + public void commit(Xid xid, boolean onePhase) throws XAException { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("committing xid = " + xid + (onePhase ? " (one phase) " : " (two phase)")); + } + + if (xid == null) { + throw new PGXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL); + } + + if (onePhase) { + commitOnePhase(xid); + } else { + commitPrepared(xid); + } + } + + /** + *

   * <p>Preconditions:</p>
   * <ol>
   *   <li>xid must be in ended state.</li>
   * </ol>
   *
   * <p>Implementation deficiency preconditions:</p>
   * <ol>
   *   <li>this connection must have been used to run the transaction</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   *   <li>Transaction is committed</li>
   * </ol>
+ */ + private void commitOnePhase(Xid xid) throws XAException { + try { + // Check preconditions + if (xid.equals(preparedXid)) { // TODO: check if the condition should be negated + throw new PGXAException(GT.tr("One-phase commit called for xid {0} but connection was prepared with xid {1}", + xid, preparedXid), XAException.XAER_PROTO); + } + if (currentXid == null && !committedOrRolledBack) { + // In fact, we don't know if xid is bogus, or if it just wasn't associated with this connection. + // Assume it's our fault. + // TODO: pick proper error message. Current one does not clarify what went wrong + throw new PGXAException(GT.tr( + "Not implemented: one-phase commit must be issued using the same connection that was used to start it", xid), + XAException.XAER_RMERR); + } + if (!xid.equals(currentXid) || committedOrRolledBack) { + throw new PGXAException(GT.tr("One-phase commit with unknown xid. commit xid={0}, currentXid={1}", + xid, currentXid), XAException.XAER_NOTA); + } + if (state != State.ENDED) { + throw new PGXAException(GT.tr("commit called before end. commit xid={0}, state={1}", xid, state), XAException.XAER_PROTO); + } + + // Preconditions are met. Commit + state = State.IDLE; + currentXid = null; + committedOrRolledBack = true; + + conn.commit(); + conn.setAutoCommit(localAutoCommitMode); + } catch (SQLException ex) { + throw new PGXAException(GT.tr("Error during one-phase commit. commit xid={0}", xid), ex, mapSQLStateToXAErrorCode(ex)); + } + } + + /** + *

   * Commits prepared transaction. Preconditions:
   * <ol>
   *   <li>xid must be in prepared state in the server</li>
   * </ol>
   *
   * <p>Implementation deficiency preconditions:</p>
   * <ol>
   *   <li>Connection must be in idle state</li>
   * </ol>
   *
   * <p>Postconditions:</p>
   * <ol>
   *   <li>Transaction is committed</li>
   * </ol>
+ */ + private void commitPrepared(Xid xid) throws XAException { + try { + // Check preconditions. The connection mustn't be used for another + // other XA or local transaction, or the COMMIT PREPARED command + // would mess it up. + if (state != State.IDLE + || conn.getTransactionState() != TransactionState.IDLE) { + throw new PGXAException( + GT.tr("Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}", xid, currentXid, state, conn.getTransactionState()), + XAException.XAER_RMERR); + } + + String s = RecoveredXid.xidToString(xid); + + localAutoCommitMode = conn.getAutoCommit(); + conn.setAutoCommit(true); + Statement stmt = conn.createStatement(); + try { + stmt.executeUpdate("COMMIT PREPARED '" + s + "'"); + } finally { + stmt.close(); + conn.setAutoCommit(localAutoCommitMode); + } + committedOrRolledBack = true; + } catch (SQLException ex) { + int errorCode = XAException.XAER_RMERR; + if (PSQLState.UNDEFINED_OBJECT.getState().equals(ex.getSQLState())) { + if (committedOrRolledBack || !xid.equals(preparedXid)) { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("committing xid " + xid + " while the connection prepared xid is " + preparedXid + + (committedOrRolledBack ? ", but the connection was already committed/rolled-back" : "")); + } + errorCode = XAException.XAER_NOTA; + } + } + if (PSQLState.isConnectionError(ex.getSQLState())) { + if (LOGGER.isLoggable(Level.FINEST)) { + debug("commit connection failure (sql error code " + ex.getSQLState() + "), reconnection could be expected"); + } + errorCode = XAException.XAER_RMFAIL; + } + throw new PGXAException(GT.tr("Error committing prepared transaction. 
commit xid={0}, preparedXid={1}, currentXid={2}", xid, preparedXid, currentXid), ex, errorCode); + } + } + + @Override + public boolean isSameRM(XAResource xares) throws XAException { + // This trivial implementation makes sure that the + // application server doesn't try to use another connection + // for prepare, commit and rollback commands. + return xares == this; + } + + /** + * Does nothing, since we don't do heuristics. + */ + @Override + public void forget(Xid xid) throws XAException { + throw new PGXAException(GT.tr("Heuristic commit/rollback not supported. forget xid={0}", xid), + XAException.XAER_NOTA); + } + + /** + * We don't do transaction timeouts. Just returns 0. + */ + @Override + public int getTransactionTimeout() { + return 0; + } + + /** + * We don't do transaction timeouts. Returns false. + */ + @Override + public boolean setTransactionTimeout(int seconds) { + return false; + } + + private int mapSQLStateToXAErrorCode(SQLException sqlException) { + if (isPostgreSQLIntegrityConstraintViolation(sqlException)) { + return XAException.XA_RBINTEGRITY; + } + + return XAException.XAER_RMFAIL; + } + + private boolean isPostgreSQLIntegrityConstraintViolation(SQLException sqlException) { + if (!(sqlException instanceof PSQLException)) { + return false; + } + String sqlState = sqlException.getSQLState(); + return sqlState != null + && sqlState.length() == 5 + && sqlState.startsWith("23"); // Class 23 - Integrity Constraint Violation + } + + private enum State { + /** + * {@code PGXAConnection} not associated with a XA-transaction. You can still call {@link #getConnection()} and + * use the connection outside XA. {@code currentXid} is {@code null}. autoCommit is {@code true} on a connection + * by getConnection, per normal JDBC rules, though the caller can change it to {@code false} and manage + * transactions itself using Connection.commit and rollback. 
+ */ + IDLE, + /** + * {@link #start(Xid, int)} has been called, and we're associated with an XA transaction. {@code currentXid} + * is valid. autoCommit is false on a connection returned by getConnection, and should not be messed with by + * the caller or the XA transaction will be broken. + */ + ACTIVE, + /** + * {@link #end(Xid, int)} has been called, but the transaction has not yet been prepared. {@code currentXid} + * is still valid. You shouldn't use the connection for anything else than issuing a {@link XAResource#commit(Xid, boolean)} or + * rollback. + */ + ENDED + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSource.java b/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSource.java new file mode 100644 index 0000000..ab12d00 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSource.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2009, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.xa; + +import org.postgresql.core.BaseConnection; +import org.postgresql.ds.common.BaseDataSource; +import org.postgresql.util.DriverInfo; + +import java.sql.Connection; +import java.sql.SQLException; + +import javax.naming.Reference; +import javax.sql.XAConnection; +import javax.sql.XADataSource; + +/** + * XA-enabled DataSource implementation. + * + * @author Heikki Linnakangas (heikki.linnakangas@iki.fi) + */ +public class PGXADataSource extends BaseDataSource implements XADataSource { + /** + * Gets a connection to the PostgreSQL database. The database is identified by the DataSource + * properties serverName, databaseName, and portNumber. The user to connect as is identified by + * the DataSource properties user and password. + * + * @return A valid database connection. + * @throws SQLException Occurs when the database connection cannot be established. 
+ */ + @Override + public XAConnection getXAConnection() throws SQLException { + return getXAConnection(getUser(), getPassword()); + } + + /** + * Gets a XA-enabled connection to the PostgreSQL database. The database is identified by the + * DataSource properties serverName, databaseName, and portNumber. The user to connect as is + * identified by the arguments user and password, which override the DataSource properties by the + * same name. + * + * @return A valid database connection. + * @throws SQLException Occurs when the database connection cannot be established. + */ + @Override + public XAConnection getXAConnection(String user, String password) + throws SQLException { + Connection con = super.getConnection(user, password); + return new PGXAConnection((BaseConnection) con); + } + + @Override + public String getDescription() { + return "XA-enabled DataSource from " + DriverInfo.DRIVER_FULL_NAME; + } + + /** + * Generates a reference using the appropriate object factory. + */ + protected Reference createReference() { + return new Reference(getClass().getName(), PGXADataSourceFactory.class.getName(), null); + } + +} diff --git a/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSourceFactory.java b/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSourceFactory.java new file mode 100644 index 0000000..ab8a6d9 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSourceFactory.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.xa; + +import org.postgresql.ds.common.PGObjectFactory; + +import java.util.Hashtable; + +import javax.naming.Context; +import javax.naming.Name; +import javax.naming.Reference; + +/** + * An ObjectFactory implementation for PGXADataSource-objects. 
+ */ + +public class PGXADataSourceFactory extends PGObjectFactory { + /* + * All the other PostgreSQL DataSource use PGObjectFactory directly, but we can't do that with + * PGXADataSource because referencing PGXADataSource from PGObjectFactory would break + * "JDBC2 Enterprise" edition build which doesn't include PGXADataSource. + */ + + @Override + public Object getObjectInstance(Object obj, Name name, Context nameCtx, + Hashtable environment) throws Exception { + Reference ref = (Reference) obj; + String className = ref.getClassName(); + if ("org.postgresql.xa.PGXADataSource".equals(className)) { + return loadXADataSource(ref); + } else { + return null; + } + } + + private Object loadXADataSource(Reference ref) { + PGXADataSource ds = new PGXADataSource(); + return loadBaseDataSource(ds, ref); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/xa/PGXAException.java b/pgjdbc/src/main/java/org/postgresql/xa/PGXAException.java new file mode 100644 index 0000000..c8c5631 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/xa/PGXAException.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2009, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.xa; + +import javax.transaction.xa.XAException; + +/** + * A convenience subclass of XAException which makes it easy to create an instance of + * XAException with a human-readable message, a Throwable cause, and an XA + * error code. + * + * @author Michael S. 
Allman + */ +@SuppressWarnings("serial") +public class PGXAException extends XAException { + PGXAException(String message, int errorCode) { + super(message); + + this.errorCode = errorCode; + } + + PGXAException(String message, Throwable cause, int errorCode) { + super(message); + + initCause(cause); + this.errorCode = errorCode; + } + + PGXAException(Throwable cause, int errorCode) { + super(errorCode); + + initCause(cause); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/xa/RecoveredXid.java b/pgjdbc/src/main/java/org/postgresql/xa/RecoveredXid.java new file mode 100644 index 0000000..e7a9252 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/xa/RecoveredXid.java @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2009, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.xa; + +import java.util.Arrays; +import java.util.Base64; +import java.util.logging.Level; +import java.util.logging.LogRecord; +import java.util.logging.Logger; + +import javax.transaction.xa.Xid; + +class RecoveredXid implements Xid { + int formatId; + byte[] globalTransactionId; + byte[] branchQualifier; + + RecoveredXid(int formatId, byte[] globalTransactionId, byte[] branchQualifier) { + this.formatId = formatId; + this.globalTransactionId = globalTransactionId; + this.branchQualifier = branchQualifier; + } + + @Override + public int getFormatId() { + return formatId; + } + + @Override + public byte[] getGlobalTransactionId() { + return globalTransactionId; + } + + @Override + public byte[] getBranchQualifier() { + return branchQualifier; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + Arrays.hashCode(branchQualifier); + result = prime * result + formatId; + result = prime * result + Arrays.hashCode(globalTransactionId); + return result; + } + + @Override + public boolean equals(Object o) { + if (o == this) { + // optimization for the common 
case. + return true; + } + + if (!(o instanceof Xid)) { + return false; + } + + Xid other = (Xid) o; + return formatId == other.getFormatId() + && Arrays.equals(globalTransactionId, other.getGlobalTransactionId()) + && Arrays.equals(branchQualifier, other.getBranchQualifier()); + } + + /** + * This is for debugging purposes only. + */ + @Override + public String toString() { + return xidToString(this); + } + + // --- Routines for converting xid to string and back. + + static String xidToString(Xid xid) { + final byte[] globalTransactionId = xid.getGlobalTransactionId(); + final byte[] branchQualifier = xid.getBranchQualifier(); + final StringBuilder sb = new StringBuilder((int) (16 + globalTransactionId.length * 1.5 + branchQualifier.length * 1.5)); + sb.append(xid.getFormatId()) + .append('_') + .append(Base64.getEncoder().encodeToString(globalTransactionId)) + .append('_') + .append(Base64.getEncoder().encodeToString(branchQualifier)); + return sb.toString(); + } + + /** + * @return recovered xid, or null if s does not represent a valid xid encoded by the driver. + */ + static Xid stringToXid(String s) { + final int a = s.indexOf('_'); + final int b = s.lastIndexOf('_'); + + if (a == b) { + // this also catches the case a == b == -1. + return null; + } + + try { + int formatId = Integer.parseInt(s.substring(0, a)); + //mime decoder is more forgiving to extraneous characters by ignoring them + byte[] globalTransactionId = Base64.getMimeDecoder().decode(s.substring(a + 1, b)); + byte[] branchQualifier = Base64.getMimeDecoder().decode(s.substring(b + 1)); + return new RecoveredXid(formatId, globalTransactionId, branchQualifier); + } catch (Exception ex) { + final LogRecord logRecord = new LogRecord(Level.FINE, "XID String is invalid: [{0}]"); + logRecord.setParameters(new Object[]{s}); + logRecord.setThrown(ex); + Logger.getLogger(RecoveredXid.class.getName()).log(logRecord); + // Doesn't seem to be an xid generated by this driver. 
+ return null; + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/xml/DefaultPGXmlFactoryFactory.java b/pgjdbc/src/main/java/org/postgresql/xml/DefaultPGXmlFactoryFactory.java new file mode 100644 index 0000000..bd59458 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/xml/DefaultPGXmlFactoryFactory.java @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.xml; + +import org.xml.sax.SAXException; +import org.xml.sax.XMLReader; +import org.xml.sax.helpers.XMLReaderFactory; + +import javax.xml.XMLConstants; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.stream.XMLInputFactory; +import javax.xml.stream.XMLOutputFactory; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.sax.SAXTransformerFactory; + +/** + * Default implementation of PGXmlFactoryFactory that configures each factory per OWASP recommendations. 
+ * + * @see https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html + */ +public class DefaultPGXmlFactoryFactory implements PGXmlFactoryFactory { + public static final DefaultPGXmlFactoryFactory INSTANCE = new DefaultPGXmlFactoryFactory(); + + private DefaultPGXmlFactoryFactory() { + } + + private DocumentBuilderFactory getDocumentBuilderFactory() { + DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); + setFactoryProperties(factory); + factory.setXIncludeAware(false); + factory.setExpandEntityReferences(false); + return factory; + } + + @Override + public DocumentBuilder newDocumentBuilder() throws ParserConfigurationException { + DocumentBuilder builder = getDocumentBuilderFactory().newDocumentBuilder(); + builder.setEntityResolver(EmptyStringEntityResolver.INSTANCE); + builder.setErrorHandler(NullErrorHandler.INSTANCE); + return builder; + } + + @Override + public TransformerFactory newTransformerFactory() { + TransformerFactory factory = TransformerFactory.newInstance(); + setFactoryProperties(factory); + return factory; + } + + @Override + public SAXTransformerFactory newSAXTransformerFactory() { + SAXTransformerFactory factory = (SAXTransformerFactory) SAXTransformerFactory.newInstance(); + setFactoryProperties(factory); + return factory; + } + + @Override + public XMLInputFactory newXMLInputFactory() { + XMLInputFactory factory = XMLInputFactory.newInstance(); + setPropertyQuietly(factory, XMLInputFactory.SUPPORT_DTD, false); + setPropertyQuietly(factory, XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false); + return factory; + } + + @Override + public XMLOutputFactory newXMLOutputFactory() { + return XMLOutputFactory.newInstance(); + } + + @SuppressWarnings("deprecation") + @Override + public XMLReader createXMLReader() throws SAXException { + XMLReader factory = XMLReaderFactory.createXMLReader(); + setFeatureQuietly(factory, "http://apache.org/xml/features/disallow-doctype-decl", true); + 
setFeatureQuietly(factory, "http://apache.org/xml/features/nonvalidating/load-external-dtd", false); + setFeatureQuietly(factory, "http://xml.org/sax/features/external-general-entities", false); + setFeatureQuietly(factory, "http://xml.org/sax/features/external-parameter-entities", false); + factory.setErrorHandler(NullErrorHandler.INSTANCE); + return factory; + } + + private static void setFeatureQuietly(Object factory, String name, boolean value) { + try { + if (factory instanceof DocumentBuilderFactory) { + ((DocumentBuilderFactory) factory).setFeature(name, value); + } else if (factory instanceof TransformerFactory) { + ((TransformerFactory) factory).setFeature(name, value); + } else if (factory instanceof XMLReader) { + ((XMLReader) factory).setFeature(name, value); + } else { + throw new Error("Invalid factory class: " + factory.getClass()); + } + return; + } catch (Exception ignore) { + } + } + + private static void setAttributeQuietly(Object factory, String name, Object value) { + try { + if (factory instanceof DocumentBuilderFactory) { + ((DocumentBuilderFactory) factory).setAttribute(name, value); + } else if (factory instanceof TransformerFactory) { + ((TransformerFactory) factory).setAttribute(name, value); + } else { + throw new Error("Invalid factory class: " + factory.getClass()); + } + } catch (Exception ignore) { + } + } + + private static void setFactoryProperties(Object factory) { + setFeatureQuietly(factory, XMLConstants.FEATURE_SECURE_PROCESSING, true); + setFeatureQuietly(factory, "http://apache.org/xml/features/disallow-doctype-decl", true); + setFeatureQuietly(factory, "http://apache.org/xml/features/nonvalidating/load-external-dtd", false); + setFeatureQuietly(factory, "http://xml.org/sax/features/external-general-entities", false); + setFeatureQuietly(factory, "http://xml.org/sax/features/external-parameter-entities", false); + // Values from XMLConstants inlined for JDK 1.6 compatibility + setAttributeQuietly(factory, 
"http://javax.xml.XMLConstants/property/accessExternalDTD", ""); + setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalSchema", ""); + setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalStylesheet", ""); + } + + private static void setPropertyQuietly(Object factory, String name, Object value) { + try { + if (factory instanceof XMLReader) { + ((XMLReader) factory).setProperty(name, value); + } else if (factory instanceof XMLInputFactory) { + ((XMLInputFactory) factory).setProperty(name, value); + } else { + throw new Error("Invalid factory class: " + factory.getClass()); + } + } catch (Exception ignore) { + } + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/xml/EmptyStringEntityResolver.java b/pgjdbc/src/main/java/org/postgresql/xml/EmptyStringEntityResolver.java new file mode 100644 index 0000000..506e0fd --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/xml/EmptyStringEntityResolver.java @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.xml; + +import org.xml.sax.EntityResolver; +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; + +import java.io.IOException; +import java.io.StringReader; + +public class EmptyStringEntityResolver implements EntityResolver { + public static final EmptyStringEntityResolver INSTANCE = new EmptyStringEntityResolver(); + + public EmptyStringEntityResolver() { + } + + @Override + public InputSource resolveEntity(String publicId, String systemId) + throws SAXException, IOException { + return new InputSource(new StringReader("")); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/xml/LegacyInsecurePGXmlFactoryFactory.java b/pgjdbc/src/main/java/org/postgresql/xml/LegacyInsecurePGXmlFactoryFactory.java new file mode 100644 index 0000000..579ab5e --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/xml/LegacyInsecurePGXmlFactoryFactory.java @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.xml; + +import org.xml.sax.SAXException; +import org.xml.sax.XMLReader; +import org.xml.sax.helpers.XMLReaderFactory; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.stream.XMLInputFactory; +import javax.xml.stream.XMLOutputFactory; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.sax.SAXTransformerFactory; + +public class LegacyInsecurePGXmlFactoryFactory implements PGXmlFactoryFactory { + public static final LegacyInsecurePGXmlFactoryFactory INSTANCE = new LegacyInsecurePGXmlFactoryFactory(); + + private LegacyInsecurePGXmlFactoryFactory() { + } + + @Override + public DocumentBuilder newDocumentBuilder() throws ParserConfigurationException { + DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder(); + builder.setErrorHandler(NullErrorHandler.INSTANCE); + return builder; + } + + @Override + public TransformerFactory newTransformerFactory() { + return TransformerFactory.newInstance(); + } + + @Override + public SAXTransformerFactory newSAXTransformerFactory() { + return (SAXTransformerFactory) SAXTransformerFactory.newInstance(); + } + + @Override + public XMLInputFactory newXMLInputFactory() { + return XMLInputFactory.newInstance(); + } + + @Override + public XMLOutputFactory newXMLOutputFactory() { + return XMLOutputFactory.newInstance(); + } + + @SuppressWarnings("deprecation") + @Override + public XMLReader createXMLReader() throws SAXException { + return XMLReaderFactory.createXMLReader(); + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/xml/NullErrorHandler.java b/pgjdbc/src/main/java/org/postgresql/xml/NullErrorHandler.java new file mode 100644 index 0000000..fcf6575 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/xml/NullErrorHandler.java @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE 
file in the project root for more information. + */ + +package org.postgresql.xml; + +import org.xml.sax.ErrorHandler; +import org.xml.sax.SAXParseException; + +/** + * Error handler that silently suppresses all errors. + */ +public class NullErrorHandler implements ErrorHandler { + public static final NullErrorHandler INSTANCE = new NullErrorHandler(); + + public NullErrorHandler() { + } + + @Override + public void error(SAXParseException e) { + } + + @Override + public void fatalError(SAXParseException e) { + } + + @Override + public void warning(SAXParseException e) { + } +} diff --git a/pgjdbc/src/main/java/org/postgresql/xml/PGXmlFactoryFactory.java b/pgjdbc/src/main/java/org/postgresql/xml/PGXmlFactoryFactory.java new file mode 100644 index 0000000..d5c74d5 --- /dev/null +++ b/pgjdbc/src/main/java/org/postgresql/xml/PGXmlFactoryFactory.java @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.xml; + +import org.xml.sax.SAXException; +import org.xml.sax.XMLReader; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.stream.XMLInputFactory; +import javax.xml.stream.XMLOutputFactory; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.sax.SAXTransformerFactory; + +public interface PGXmlFactoryFactory { + DocumentBuilder newDocumentBuilder() throws ParserConfigurationException; + + TransformerFactory newTransformerFactory(); + + SAXTransformerFactory newSAXTransformerFactory(); + + XMLInputFactory newXMLInputFactory(); + + XMLOutputFactory newXMLOutputFactory(); + + XMLReader createXMLReader() throws SAXException; +} diff --git a/pgjdbc/src/main/resources/META-INF/services/java.sql.Driver b/pgjdbc/src/main/resources/META-INF/services/java.sql.Driver new file mode 100644 index 0000000..6f03688 --- /dev/null +++ b/pgjdbc/src/main/resources/META-INF/services/java.sql.Driver @@ -0,0 +1 @@ +org.postgresql.Driver diff --git a/pgjdbc/src/test/java/org/postgresql/core/AsciiStringInternerTest.java b/pgjdbc/src/test/java/org/postgresql/core/AsciiStringInternerTest.java new file mode 100644 index 0000000..450941f --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/AsciiStringInternerTest.java @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.LongAdder; + +/** + * + * @author Brett Okken + */ +class AsciiStringInternerTest { + + @Test + void canonicalValue() throws Exception { + AsciiStringInterner interner = new AsciiStringInterner(); + String s1 = "testCanonicalValue"; + byte[] bytes = s1.getBytes(StandardCharsets.US_ASCII); + String interned = interner.getString(bytes, 0, bytes.length, null); + + //interned value should be equal + assertEquals(s1, interned); + //but should be different instance + assertNotSame(s1, interned); + //asking for it again, however should return same instance + assertSame(interned, interner.getString(bytes, 0, bytes.length, null)); + + //now show that we can get the value back from a different byte[] + byte[] bytes2 = new byte[128]; + System.arraycopy(bytes, 0, bytes2, 73, bytes.length); + assertSame(interned, interner.getString(bytes2, 73, bytes.length, null)); + + //now we will mutate the original byte[] to show that does not affect the map + Arrays.fill(bytes, (byte) 13); + assertSame(interned, interner.getString(bytes2, 73, bytes.length, null)); + } + + @Test + void stagedValue() throws Exception { + AsciiStringInterner interner = new AsciiStringInterner(); + String s1 = "testStagedValue"; + interner.putString(s1); + byte[] bytes = s1.getBytes(StandardCharsets.US_ASCII); + String interned = interner.getString(bytes, 0, bytes.length, null); + // should 
be same instance + assertSame(s1, interned); + //asking for it again should also return same instance + assertSame(s1, interner.getString(bytes, 0, bytes.length, null)); + + //now show that we can get the value back from a different byte[] + byte[] bytes2 = new byte[128]; + System.arraycopy(bytes, 0, bytes2, 73, bytes.length); + assertSame(s1, interner.getString(bytes2, 73, bytes.length, null)); + } + + @Test + void nonAsciiValue() throws Exception { + final Encoding encoding = Encoding.getJVMEncoding("UTF-8"); + AsciiStringInterner interner = new AsciiStringInterner(); + String s1 = "testNonAsciiValue" + '\u03C0'; // add multi-byte to string to make invalid for intern + byte[] bytes = s1.getBytes(StandardCharsets.UTF_8); + String interned = interner.getString(bytes, 0, bytes.length, encoding); + + //interned value should be equal + assertEquals(s1, interned); + //but should be different instance + assertNotSame(s1, interned); + //asking for it again should again return a different instance + final String interned2 = interner.getString(bytes, 0, bytes.length, encoding); + assertEquals(s1, interned2); + assertNotSame(s1, interned2); + assertNotSame(interned, interned2); + } + + @Test + void testToString() throws Exception { + AsciiStringInterner interner = new AsciiStringInterner(); + assertEquals("AsciiStringInterner []", interner.toString(), "empty"); + interner.putString("s1"); + assertEquals("AsciiStringInterner ['s1']", interner.toString(), "empty"); + interner.getString("s2".getBytes(StandardCharsets.US_ASCII), 0, 2, null); + assertEquals("AsciiStringInterner ['s1', 's2']", interner.toString(), "empty"); + } + + @Test + void garbageCleaning() throws Exception { + final byte[] bytes = new byte[100000]; + for (int i = 0; i < 100000; i++) { + bytes[i] = (byte) ThreadLocalRandom.current().nextInt(128); + } + final AsciiStringInterner interner = new AsciiStringInterner(); + final LongAdder length = new LongAdder(); + final Callable c = () -> { + for (int i = 0; i < 
25000; i++) { + String str; + try { + str = interner.getString(bytes, 0, ThreadLocalRandom.current().nextInt(1000, bytes.length), null); + } catch (IOException e) { + throw new IllegalStateException(e); + } + length.add(str.length()); + } + return null; + }; + final ExecutorService exec = Executors.newCachedThreadPool(); + try { + exec.invokeAll(Arrays.asList(c, c, c, c)); + } finally { + exec.shutdown(); + } + //this is really just done to make sure java cannot tell that nothing is really being done + assertTrue(length.sum() > 0); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserNegativeTest.java b/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserNegativeTest.java new file mode 100644 index 0000000..4f7beeb --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserNegativeTest.java @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.util.PSQLException; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; + +public class CommandCompleteParserNegativeTest { + public static Iterable data() { + return Arrays.asList(new Object[][]{ + {"SELECT 0_0 42"}, + {"SELECT 42 0_0"}, + {"SELECT 0_0 0_0"}, + }); + } + + @MethodSource("data") + @ParameterizedTest(name = "input={0}") + void run(String input) throws PSQLException { + CommandCompleteParser parser = new CommandCompleteParser(); + try { + parser.parse(input); + fail("CommandCompleteParser should throw NumberFormatException for " + input); + } catch (PSQLException e) { + Throwable cause = e.getCause(); + if (cause == null) { + throw e; + } + if (!(cause instanceof NumberFormatException)) { + throw e; + } + // NumerFormatException is expected + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserTest.java b/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserTest.java new file mode 100644 index 0000000..245e695 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserTest.java @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.util.PSQLException; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; + +public class CommandCompleteParserTest { + public static Iterable data() { + return Arrays.asList(new Object[][]{ + {"SELECT 0", 0, 0}, + {"SELECT -42", 0, 0}, + {"SELECT", 0, 0}, + {"", 0, 0}, + {"A", 0, 0}, + {"SELECT 42", 0, 42}, + {"UPDATE 43 42", 43, 42}, + {"UPDATE 43 " + Long.MAX_VALUE, 43, Long.MAX_VALUE}, + {"UPDATE " + Long.MAX_VALUE + " " + Long.MAX_VALUE, Long.MAX_VALUE, Long.MAX_VALUE}, + {"UPDATE " + (Long.MAX_VALUE / 10) + " " + (Long.MAX_VALUE / 10), (Long.MAX_VALUE / 10), + (Long.MAX_VALUE / 10)}, + {"UPDATE " + (Long.MAX_VALUE / 100) + " " + (Long.MAX_VALUE / 100), (Long.MAX_VALUE / 100), + (Long.MAX_VALUE / 100)}, + {"CREATE TABLE " + (Long.MAX_VALUE / 100) + " " + (Long.MAX_VALUE / 100), + (Long.MAX_VALUE / 100), (Long.MAX_VALUE / 100)}, + {"CREATE TABLE", 0, 0}, + {"CREATE OR DROP OR DELETE TABLE 42", 0, 42}, + }); + } + + @MethodSource("data") + @ParameterizedTest(name = "input={0}, oid={1}, rows={2}") + void run(String input, long oid, long rows) throws PSQLException { + CommandCompleteParser expected = new CommandCompleteParser(); + CommandCompleteParser actual = new CommandCompleteParser(); + expected.set(oid, rows); + actual.parse(input); + assertEquals(expected, actual, input); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/core/OidToStringTest.java b/pgjdbc/src/test/java/org/postgresql/core/OidToStringTest.java new file mode 100644 index 0000000..76d6ff4 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/OidToStringTest.java @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.util.PSQLException; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; + +public class OidToStringTest { + public static Iterable data() { + return Arrays.asList(new Object[][]{ + {142, "XML"}, + {0, "UNSPECIFIED"}, + {-235, ""}, + }); + } + + @MethodSource("data") + @ParameterizedTest(name = "expected={1}, value={0}") + void run(int value, String expected) throws PSQLException { + assertEquals(expected, Oid.toString(value)); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/core/OidValueOfTest.java b/pgjdbc/src/test/java/org/postgresql/core/OidValueOfTest.java new file mode 100644 index 0000000..f72ad89 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/OidValueOfTest.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.util.PSQLException; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; + +public class OidValueOfTest { + public static Iterable data() { + return Arrays.asList(new Object[][]{ + {25, "TEXT"}, + {0, "UNSPECIFIED"}, + {199, "JSON_ARRAY"}, + {100, "100"}, + }); + } + + @MethodSource("data") + @ParameterizedTest(name = "expected={0}, value={1}") + void run(int expected, String value) throws PSQLException { + assertEquals(expected, Oid.valueOf(value)); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/core/OidValuesCorrectnessTest.java b/pgjdbc/src/test/java/org/postgresql/core/OidValuesCorrectnessTest.java new file mode 100644 index 0000000..5446e62 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/OidValuesCorrectnessTest.java @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.lang.reflect.Field; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * Test to check if values in Oid class are correct with Oid values in a database. 
+ */ +@RunWith(Parameterized.class) +public class OidValuesCorrectnessTest extends BaseTest4 { + + @Parameterized.Parameter(0) + public String oidName; + @Parameterized.Parameter(1) + public int oidValue; + + /** + * List to contain names of all variables, which should be ignored by this test. + * Prevents situation that a new value will be added to Oid class with ignoring the test. + */ + private static List oidsToIgnore = Arrays.asList( + "UNSPECIFIED" //UNSPECIFIED isn't an Oid, it's a value to specify that Oid value is unspecified + ); + + /** + * Map to contain Oid names with server version of their support. + * Prevents that some Oid values will be tested with a database not supporting given Oid. + */ + private static Map oidsMinimumVersions; + + static { + oidsMinimumVersions = new HashMap<>(); + oidsMinimumVersions.put("JSON", ServerVersion.v9_2); + oidsMinimumVersions.put("JSON_ARRAY", ServerVersion.v9_2); + oidsMinimumVersions.put("JSONB", ServerVersion.v9_4); + oidsMinimumVersions.put("JSONB_ARRAY", ServerVersion.v9_4); + oidsMinimumVersions.put("MACADDR8", ServerVersion.v10); + } + + /** + * Map to contain Oid names with their proper names from pg_type table (typname) if they are different. + * Helps in situation when variable name in Oid class isn't the same as typname in pg_type table. 
+ */ + private static Map oidTypeNames; + + static { + oidTypeNames = new HashMap<>(); + oidTypeNames.put("BOX_ARRAY", "_BOX"); + oidTypeNames.put("INT2_ARRAY", "_INT2"); + oidTypeNames.put("INT4_ARRAY", "_INT4"); + oidTypeNames.put("INT8_ARRAY", "_INT8"); + oidTypeNames.put("TEXT_ARRAY", "_TEXT"); + oidTypeNames.put("NUMERIC_ARRAY", "_NUMERIC"); + oidTypeNames.put("FLOAT4_ARRAY", "_FLOAT4"); + oidTypeNames.put("FLOAT8_ARRAY", "_FLOAT8"); + oidTypeNames.put("BOOL_ARRAY", "_BOOL"); + oidTypeNames.put("DATE_ARRAY", "_DATE"); + oidTypeNames.put("TIME_ARRAY", "_TIME"); + oidTypeNames.put("TIMETZ_ARRAY", "_TIMETZ"); + oidTypeNames.put("TIMESTAMP_ARRAY", "_TIMESTAMP"); + oidTypeNames.put("TIMESTAMPTZ_ARRAY", "_TIMESTAMPTZ"); + oidTypeNames.put("BYTEA_ARRAY", "_BYTEA"); + oidTypeNames.put("VARCHAR_ARRAY", "_VARCHAR"); + oidTypeNames.put("OID_ARRAY", "_OID"); + oidTypeNames.put("BPCHAR_ARRAY", "_BPCHAR"); + oidTypeNames.put("MONEY_ARRAY", "_MONEY"); + oidTypeNames.put("NAME_ARRAY", "_NAME"); + oidTypeNames.put("BIT_ARRAY", "_BIT"); + oidTypeNames.put("INTERVAL_ARRAY", "_INTERVAl"); + oidTypeNames.put("CHAR_ARRAY", "_CHAR"); + oidTypeNames.put("VARBIT_ARRAY", "_VARBIT"); + oidTypeNames.put("UUID_ARRAY", "_UUID"); + oidTypeNames.put("XML_ARRAY", "_XML"); + oidTypeNames.put("POINT_ARRAY", "_POINT"); + oidTypeNames.put("JSONB_ARRAY", "_JSONB"); + oidTypeNames.put("JSON_ARRAY", "_JSON"); + oidTypeNames.put("REF_CURSOR", "REFCURSOR"); + oidTypeNames.put("REF_CURSOR_ARRAY", "_REFCURSOR"); + } + + @Parameterized.Parameters(name = "oidName={0}, oidValue={1}") + public static Iterable data() throws IllegalAccessException { + Field[] fields = Oid.class.getFields(); + List data = new ArrayList<>(); + + for (Field field : fields) { + if (!oidsToIgnore.contains(field.getName())) { + data.add(new Object[]{field.getName(), field.getInt(null)}); + } + } + + return data; + } + + /** + * The testcase to check if expected value of Oid, read from a database, is the same as value + * written in 
the Oid class. + */ + @Test + public void testValue() throws SQLException { + // check if Oid can be tested with given database by checking version + if (oidsMinimumVersions.containsKey(oidName)) { + Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, oidsMinimumVersions.get(oidName))); + } + + String typeName = oidTypeNames.getOrDefault(oidName, oidName); + + Statement stmt = con.createStatement(); + ResultSet resultSet; + stmt.execute("select oid from pg_type where typname = '" + typeName.toLowerCase(Locale.ROOT) + "'"); + resultSet = stmt.getResultSet(); + + // resultSet have to have next row + Assert.assertTrue("Oid value doesn't exist for oid " + oidName + ";with used type: " + typeName, + resultSet.next()); + // check if expected value from a database is the same as value in Oid class + Assert.assertEquals("Wrong value for oid: " + oidName + ";with used type: " + typeName, + resultSet.getInt(1), oidValue); + + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/core/ParserTest.java b/pgjdbc/src/test/java/org/postgresql/core/ParserTest.java new file mode 100644 index 0000000..e903df2 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/ParserTest.java @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.jdbc.EscapeSyntaxCallMode; + +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +import java.sql.SQLException; +import java.util.List; + +/** + * Test cases for the Parser. 
+ * @author Jeremy Whiting jwhiting@redhat.com + */ +class ParserTest { + + /** + * Test to make sure delete command is detected by parser and detected via + * api. Mix up the case of the command to check detection continues to work. + */ + @Test + void deleteCommandParsing() { + char[] command = new char[6]; + "DELETE".getChars(0, 6, command, 0); + assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse upper case command."); + "DelEtE".getChars(0, 6, command, 0); + assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse mixed case command."); + "deleteE".getChars(0, 6, command, 0); + assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse mixed case command."); + "delete".getChars(0, 6, command, 0); + assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse lower case command."); + "Delete".getChars(0, 6, command, 0); + assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse mixed case command."); + } + + /** + * Test UPDATE command parsing. + */ + @Test + void updateCommandParsing() { + char[] command = new char[6]; + "UPDATE".getChars(0, 6, command, 0); + assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse upper case command."); + "UpDateE".getChars(0, 6, command, 0); + assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse mixed case command."); + "updatE".getChars(0, 6, command, 0); + assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse mixed case command."); + "Update".getChars(0, 6, command, 0); + assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse mixed case command."); + "update".getChars(0, 6, command, 0); + assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse lower case command."); + } + + /** + * Test MOVE command parsing. 
+ */ + @Test + void moveCommandParsing() { + char[] command = new char[4]; + "MOVE".getChars(0, 4, command, 0); + assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse upper case command."); + "mOVe".getChars(0, 4, command, 0); + assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse mixed case command."); + "movE".getChars(0, 4, command, 0); + assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse mixed case command."); + "Move".getChars(0, 4, command, 0); + assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse mixed case command."); + "move".getChars(0, 4, command, 0); + assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse lower case command."); + } + + /** + * Test WITH command parsing. + */ + @Test + void withCommandParsing() { + char[] command = new char[4]; + "WITH".getChars(0, 4, command, 0); + assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse upper case command."); + "wITh".getChars(0, 4, command, 0); + assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse mixed case command."); + "witH".getChars(0, 4, command, 0); + assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse mixed case command."); + "With".getChars(0, 4, command, 0); + assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse mixed case command."); + "with".getChars(0, 4, command, 0); + assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse lower case command."); + } + + /** + * Test SELECT command parsing. 
+ */ + @Test + void selectCommandParsing() { + char[] command = new char[6]; + "SELECT".getChars(0, 6, command, 0); + assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse upper case command."); + "sELect".getChars(0, 6, command, 0); + assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse mixed case command."); + "selecT".getChars(0, 6, command, 0); + assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse mixed case command."); + "Select".getChars(0, 6, command, 0); + assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse mixed case command."); + "select".getChars(0, 6, command, 0); + assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse lower case command."); + } + + @Test + void escapeProcessing() throws Exception { + assertEquals("DATE '1999-01-09'", Parser.replaceProcessing("{d '1999-01-09'}", true, false)); + assertEquals("DATE '1999-01-09'", Parser.replaceProcessing("{D '1999-01-09'}", true, false)); + assertEquals("TIME '20:00:03'", Parser.replaceProcessing("{t '20:00:03'}", true, false)); + assertEquals("TIME '20:00:03'", Parser.replaceProcessing("{T '20:00:03'}", true, false)); + assertEquals("TIMESTAMP '1999-01-09 20:11:11.123455'", Parser.replaceProcessing("{ts '1999-01-09 20:11:11.123455'}", true, false)); + assertEquals("TIMESTAMP '1999-01-09 20:11:11.123455'", Parser.replaceProcessing("{Ts '1999-01-09 20:11:11.123455'}", true, false)); + + assertEquals("user", Parser.replaceProcessing("{fn user()}", true, false)); + assertEquals("cos(1)", Parser.replaceProcessing("{fn cos(1)}", true, false)); + assertEquals("extract(week from DATE '2005-01-24')", Parser.replaceProcessing("{fn week({d '2005-01-24'})}", true, false)); + + assertEquals("\"T1\" LEFT OUTER JOIN t2 ON \"T1\".id = t2.id", + Parser.replaceProcessing("{oj \"T1\" LEFT OUTER JOIN t2 ON \"T1\".id = t2.id}", true, false)); + + assertEquals("ESCAPE '_'", Parser.replaceProcessing("{escape 
'_'}", true, false)); + + // nothing should be changed in that case, no valid escape code + assertEquals("{obj : 1}", Parser.replaceProcessing("{obj : 1}", true, false)); + } + + @Test + void modifyJdbcCall() throws SQLException { + assertEquals("select * from pack_getValue(?) as result", Parser.modifyJdbcCall("{ ? = call pack_getValue}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql()); + assertEquals("select * from pack_getValue(?,?) as result", Parser.modifyJdbcCall("{ ? = call pack_getValue(?) }", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql()); + assertEquals("select * from pack_getValue(?) as result", Parser.modifyJdbcCall("{ ? = call pack_getValue()}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql()); + assertEquals("select * from pack_getValue(?,?,?,?) as result", Parser.modifyJdbcCall("{ ? = call pack_getValue(?,?,?) }", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql()); + assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql()); + assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql()); + assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql()); + assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql()); + assertEquals("select * from lower(?,?) 
as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql()); + assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql()); + assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql()); + assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql()); + assertEquals("call lower(?,?)", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql()); + assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql()); + assertEquals("call lower(?,?)", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql()); + assertEquals("call lower(?,?)", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql()); + } + + @Test + void unterminatedEscape() throws Exception { + assertEquals("{oj ", Parser.replaceProcessing("{oj ", true, false)); + } + + @Test + @Disabled(value = "returning in the select clause is hard to distinguish from insert ... 
returning *") + void insertSelectFakeReturning() throws SQLException { + String query = + "insert test(id, name) select 1, 'value' as RETURNING from test2"; + List qry = + Parser.parseJdbcSql( + query, true, true, true, true, true); + boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent(); + assertFalse(returningKeywordPresent, "Query does not have returning clause " + query); + } + + @Test + void insertSelectReturning() throws SQLException { + String query = + "insert test(id, name) select 1, 'value' from test2 RETURNING id"; + List qry = + Parser.parseJdbcSql( + query, true, true, true, true, true); + boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent(); + assertTrue(returningKeywordPresent, "Query has a returning clause " + query); + } + + @Test + void insertReturningInWith() throws SQLException { + String query = + "with x as (insert into mytab(x) values(1) returning x) insert test(id, name) select 1, 'value' from test2"; + List qry = + Parser.parseJdbcSql( + query, true, true, true, true, true); + boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent(); + assertFalse(returningKeywordPresent, "There's no top-level <> clause " + query); + } + + @Test + void insertBatchedReWriteOnConflict() throws SQLException { + String query = "insert into test(id, name) values (:id,:name) ON CONFLICT (id) DO NOTHING"; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true); + SqlCommand command = qry.get(0).getCommand(); + assertEquals(34, command.getBatchRewriteValuesBraceOpenPosition()); + assertEquals(44, command.getBatchRewriteValuesBraceClosePosition()); + } + + @Test + void insertBatchedReWriteOnConflictUpdateBind() throws SQLException { + String query = "insert into test(id, name) values (?,?) 
ON CONFLICT (id) UPDATE SET name=?"; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true); + SqlCommand command = qry.get(0).getCommand(); + assertFalse(command.isBatchedReWriteCompatible(), "update set name=? is NOT compatible with insert rewrite"); + } + + @Test + void insertBatchedReWriteOnConflictUpdateConstant() throws SQLException { + String query = "insert into test(id, name) values (?,?) ON CONFLICT (id) UPDATE SET name='default'"; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true); + SqlCommand command = qry.get(0).getCommand(); + assertTrue(command.isBatchedReWriteCompatible(), "update set name='default' is compatible with insert rewrite"); + } + + @Test + void insertMultiInsert() throws SQLException { + String query = + "insert into test(id, name) values (:id,:name),(:id,:name) ON CONFLICT (id) DO NOTHING"; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true); + SqlCommand command = qry.get(0).getCommand(); + assertEquals(34, command.getBatchRewriteValuesBraceOpenPosition()); + assertEquals(56, command.getBatchRewriteValuesBraceClosePosition()); + } + + @Test + void valuesTableParse() throws SQLException { + String query = "insert into values_table (id, name) values (?,?)"; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true); + SqlCommand command = qry.get(0).getCommand(); + assertEquals(43, command.getBatchRewriteValuesBraceOpenPosition()); + assertEquals(49, command.getBatchRewriteValuesBraceClosePosition()); + + query = "insert into table_values (id, name) values (?,?)"; + qry = Parser.parseJdbcSql(query, true, true, true, true, true); + command = qry.get(0).getCommand(); + assertEquals(43, command.getBatchRewriteValuesBraceOpenPosition()); + assertEquals(49, command.getBatchRewriteValuesBraceClosePosition()); + } + + @Test + void createTableParseWithOnDeleteClause() throws SQLException { + String[] returningColumns = {"*"}; + String query = "create table \"testTable\" (\"id\" INT 
SERIAL NOT NULL PRIMARY KEY, \"foreignId\" INT REFERENCES \"otherTable\" (\"id\") ON DELETE NO ACTION)"; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns); + SqlCommand command = qry.get(0).getCommand(); + assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present"); + assertEquals(SqlCommandType.CREATE, command.getType()); + } + + @Test + void createTableParseWithOnUpdateClause() throws SQLException { + String[] returningColumns = {"*"}; + String query = "create table \"testTable\" (\"id\" INT SERIAL NOT NULL PRIMARY KEY, \"foreignId\" INT REFERENCES \"otherTable\" (\"id\")) ON UPDATE NO ACTION"; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns); + SqlCommand command = qry.get(0).getCommand(); + assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present"); + assertEquals(SqlCommandType.CREATE, command.getType()); + } + + @Test + void alterTableParseWithOnDeleteClause() throws SQLException { + String[] returningColumns = {"*"}; + String query = "alter table \"testTable\" ADD \"foreignId\" INT REFERENCES \"otherTable\" (\"id\") ON DELETE NO ACTION"; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns); + SqlCommand command = qry.get(0).getCommand(); + assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present"); + assertEquals(SqlCommandType.ALTER, command.getType()); + } + + @Test + void alterTableParseWithOnUpdateClause() throws SQLException { + String[] returningColumns = {"*"}; + String query = "alter table \"testTable\" ADD \"foreignId\" INT REFERENCES \"otherTable\" (\"id\") ON UPDATE RESTRICT"; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns); + SqlCommand command = qry.get(0).getCommand(); + assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present"); + assertEquals(SqlCommandType.ALTER, 
command.getType()); + } + + @Test + void parseV14functions() throws SQLException { + String[] returningColumns = {"*"}; + String query = "CREATE OR REPLACE FUNCTION asterisks(n int)\n" + + " RETURNS SETOF text\n" + + " LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\n" + + "BEGIN ATOMIC\n" + + "SELECT repeat('*', g) FROM generate_series (1, n) g; \n" + + "END;"; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns); + assertNotNull(qry); + assertEquals(1, qry.size(), "There should only be one query returned here"); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/core/ReturningParserTest.java b/pgjdbc/src/test/java/org/postgresql/core/ReturningParserTest.java new file mode 100644 index 0000000..3b8292e --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/ReturningParserTest.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +public class ReturningParserTest { + public static Iterable data() { + Collection ids = new ArrayList<>(); + + String[] delimiters = {"", "_", "3", "*", " "}; + + for (String columnName : new String[]{"returning", "returningreturning"}) { + for (String prefix : delimiters) { + for (String suffix : delimiters) { + for (String returning : new String[]{"returning", "returningreturning"}) { + ids.add(new Object[]{columnName, returning, prefix, suffix}); + } + } + } + } + return ids; + } + + @MethodSource("data") + @ParameterizedTest(name = "columnName={2} {0} {3}, returning={2} {1} {3}") + void test(String columnName, String returning, String prefix, String suffix) throws 
SQLException { + String query = + "insert into\"prep\"(a, " + prefix + columnName + suffix + ")values(1,2)" + prefix + + returning + suffix; + List qry = Parser.parseJdbcSql(query, true, true, true, true, true); + boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent(); + + boolean expectedReturning = "returning".equalsIgnoreCase(returning) + && (prefix.isEmpty() || !Character.isJavaIdentifierStart(prefix.charAt(0))) + && (suffix.isEmpty() || !Character.isJavaIdentifierPart(suffix.charAt(0))); + if (expectedReturning != returningKeywordPresent) { + assertEquals(expectedReturning, + returningKeywordPresent, + "Wrong detected in SQL " + query); + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/core/UTF8EncodingTest.java b/pgjdbc/src/test/java/org/postgresql/core/UTF8EncodingTest.java new file mode 100644 index 0000000..9e3e846 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/UTF8EncodingTest.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2019, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.ArrayList; +import java.util.List; + +public class UTF8EncodingTest { + + private static final int STEP = 8 * 1024; + + public static Iterable data() { + final StringBuilder reallyLongString = new StringBuilder(1024 * 1024); + for (int i = 0; i < 185000; i++) { + reallyLongString.append(i); + } + + final List strings = new ArrayList<>(150); + strings.add("short simple"); + strings.add("longer but still not really all that long"); + strings.add(reallyLongString.toString()); + strings.add(reallyLongString.append('\u03C0').toString()); // add multi-byte to end of a long string + strings.add(reallyLongString.delete((32 * 1024) + 5, reallyLongString.capacity() - 1).toString()); + strings.add(reallyLongString.append('\u00DC').toString()); // add high order char to end of mid length string + strings.add(reallyLongString.delete((16 * 1024) + 5, reallyLongString.capacity() - 1).toString()); + strings.add(reallyLongString.append('\u00DD').toString()); // add high order char to end of mid length string + strings.add("e\u00E4t \u03A3 \u03C0 \u798F, it is good"); // need to test some multi-byte characters + + for (int i = 1; i < 0xd800; i += STEP) { + int count = (i + STEP) > 0xd800 ? 0xd800 - i : STEP; + char[] testChars = new char[count]; + for (int j = 0; j < count; j++) { + testChars[j] = (char) (i + j); + } + + strings.add(new String(testChars)); + } + + for (int i = 0xe000; i < 0x10000; i += STEP) { + int count = (i + STEP) > 0x10000 ? 0x10000 - i : STEP; + char[] testChars = new char[count]; + for (int j = 0; j < count; j++) { + testChars[j] = (char) (i + j); + } + + strings.add(new String(testChars)); + } + + for (int i = 0x10000; i < 0x110000; i += STEP) { + int count = (i + STEP) > 0x110000 ? 
0x110000 - i : STEP; + char[] testChars = new char[count * 2]; + for (int j = 0; j < count; j++) { + testChars[j * 2] = (char) (0xd800 + ((i + j - 0x10000) >> 10)); + testChars[j * 2 + 1] = (char) (0xdc00 + ((i + j - 0x10000) & 0x3ff)); + } + + strings.add(new String(testChars)); + } + + final List data = new ArrayList<>(strings.size() * 2); + for (String string : strings) { + String shortString = string; + if (shortString != null && shortString.length() > 1000) { + shortString = shortString.substring(0, 100) + "...(" + string.length() + " chars)"; + } + data.add(new Object[]{Encoding.getDatabaseEncoding("UNICODE"), string, shortString}); + } + return data; + } + + @MethodSource("data") + @ParameterizedTest(name = "string={2}, encoding={0}") + void test(Encoding encoding, String string, String shortString) throws Exception { + final byte[] encoded = encoding.encode(string); + assertEquals(string, encoding.decode(encoded)); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/core/v3/V3ParameterListTests.java b/pgjdbc/src/test/java/org/postgresql/core/v3/V3ParameterListTests.java new file mode 100644 index 0000000..e26d5e4 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/v3/V3ParameterListTests.java @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.core.v3; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.SQLException; + +/** + * Test cases to make sure the parameterlist implementation works as expected. 
+ * + * @author Jeremy Whiting jwhiting@redhat.com + * + */ +class V3ParameterListTests { + private TypeTransferModeRegistry transferModeRegistry; + + @BeforeEach + void setUp() throws Exception { + transferModeRegistry = new TypeTransferModeRegistry() { + @Override + public boolean useBinaryForSend(int oid) { + return false; + } + + @Override + public boolean useBinaryForReceive(int oid) { + return false; + } + }; + } + + /** + * Test to check the merging of two collections of parameters. All elements + * are kept. + * + * @throws SQLException + * raised exception if setting parameter fails. + */ + @Test + void mergeOfParameterLists() throws SQLException { + SimpleParameterList s1SPL = new SimpleParameterList(8, transferModeRegistry); + s1SPL.setIntParameter(1, 1); + s1SPL.setIntParameter(2, 2); + s1SPL.setIntParameter(3, 3); + s1SPL.setIntParameter(4, 4); + + SimpleParameterList s2SPL = new SimpleParameterList(4, transferModeRegistry); + s2SPL.setIntParameter(1, 5); + s2SPL.setIntParameter(2, 6); + s2SPL.setIntParameter(3, 7); + s2SPL.setIntParameter(4, 8); + + s1SPL.appendAll(s2SPL); + assertEquals( + "<[('1'::int4) ,('2'::int4) ,('3'::int4) ,('4'::int4) ,('5'::int4) ,('6'::int4) ,('7'::int4) ,('8'::int4)]>", s1SPL.toString(), "Expected string representation of values does not match outcome."); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java b/pgjdbc/src/test/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java new file mode 100644 index 0000000..42e2c2c --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java @@ -0,0 +1,1088 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.core.v3.adaptivefetch; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +import org.postgresql.PGProperty; +import org.postgresql.core.ParameterList; +import org.postgresql.core.Query; +import org.postgresql.core.SqlCommand; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.lang.reflect.Field; +import java.sql.SQLException; +import java.util.Map; +import java.util.Properties; + +/** + * Unit tests for AdaptiveFetchCache class. + */ +class AdaptiveFetchCacheTest { + + private AdaptiveFetchCache adaptiveFetchCache; + private int size; + + // Strings containing variables names in AdaptiveFetchCache class + private static final String infoMapVariableName = "adaptiveFetchInfoMap"; + private static final String minimumSizeVariableName = "minimumAdaptiveFetchSize"; + private static final String maximumSizeVariableName = "maximumAdaptiveFetchSize"; + private static final String adaptiveFetchVariableName = "adaptiveFetch"; + private static final String maximumBufferSizeVariableName = "maximumResultBufferSize"; + + /** + * Simple setup to create new AdaptiveFetchCache with buffer size 1000. + */ + @BeforeEach + void setUp() throws SQLException { + Properties properties = new Properties(); + size = 1000; + adaptiveFetchCache = new AdaptiveFetchCache(size, properties); + } + + /** + * Tests for calling constructor with empty properties (just asserts after setUp). 
+ */ + @Test + void constructorDefault() throws NoSuchFieldException, IllegalAccessException { + assertNotNull(getInfoMapVariable()); + assertEquals(size, getMaximumBufferVariable()); + assertFalse(getAdaptiveFetchVariable()); + assertEquals(0, getMinimumSizeVariable()); + assertEquals(-1, getMaximumSizeVariable()); + } + + /** + * Test for calling constructor with information about adaptiveFetch property. + */ + @Test + void constructorWithAdaptiveFetch() + throws SQLException, NoSuchFieldException, IllegalAccessException { + Properties properties = new Properties(); + boolean expectedValue = true; + PGProperty.ADAPTIVE_FETCH.set(properties, expectedValue); + + adaptiveFetchCache = new AdaptiveFetchCache(size, properties); + + assertNotNull(getInfoMapVariable()); + assertEquals(size, getMaximumBufferVariable()); + assertEquals(expectedValue, getAdaptiveFetchVariable()); + assertEquals(0, getMinimumSizeVariable()); + assertEquals(-1, getMaximumSizeVariable()); + } + + /** + * Test for calling constructor with information about adaptiveFetchMinimum property. + */ + @Test + void constructorWithMinimumSize() + throws SQLException, NoSuchFieldException, IllegalAccessException { + Properties properties = new Properties(); + int expectedValue = 100; + PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, expectedValue); + + adaptiveFetchCache = new AdaptiveFetchCache(size, properties); + + assertNotNull(getInfoMapVariable()); + assertEquals(size, getMaximumBufferVariable()); + assertFalse(getAdaptiveFetchVariable()); + assertEquals(expectedValue, getMinimumSizeVariable()); + assertEquals(-1, getMaximumSizeVariable()); + } + + /** + * Test for calling constructor with information about adaptiveFetchMaximum property. 
+ */ + @Test + void constructorWithMaximumSize() + throws SQLException, NoSuchFieldException, IllegalAccessException { + Properties properties = new Properties(); + int expectedValue = 100; + PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, expectedValue); + + adaptiveFetchCache = new AdaptiveFetchCache(size, properties); + + assertNotNull(getInfoMapVariable()); + assertEquals(size, getMaximumBufferVariable()); + assertFalse(getAdaptiveFetchVariable()); + assertEquals(0, getMinimumSizeVariable()); + assertEquals(expectedValue, getMaximumSizeVariable()); + } + + /** + * Test for calling constructor with information about adaptiveFetch, adaptiveFetchMinimum and + * adaptiveFetchMaximum properties. + */ + @Test + void constructorWithAllProperties() + throws SQLException, NoSuchFieldException, IllegalAccessException { + Properties properties = new Properties(); + boolean expectedAdaptiveFetchValue = false; + int expectedMinimumSizeValue = 70; + int expectedMaximumSizeValue = 130; + PGProperty.ADAPTIVE_FETCH.set(properties, expectedAdaptiveFetchValue); + PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, expectedMinimumSizeValue); + PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, expectedMaximumSizeValue); + + adaptiveFetchCache = new AdaptiveFetchCache(size, properties); + + assertNotNull(getInfoMapVariable()); + assertEquals(size, getMaximumBufferVariable()); + assertEquals(expectedAdaptiveFetchValue, getAdaptiveFetchVariable()); + assertEquals(expectedMinimumSizeValue, getMinimumSizeVariable()); + assertEquals(expectedMaximumSizeValue, getMaximumSizeVariable()); + } + + + /** + * Test for calling addNewQuery method. 
+ */ + @Test + void addingSingleQuery() throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = true; + + adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + Map map = getInfoMapVariable(); + + assertEquals(1, map.size()); + assertNotNull(map.get(expectedQuery)); + } + + /** + * Test for calling addNewQuery method, but adaptiveFetch is set to false. + */ + @Test + void addingSingleQueryWithoutAdaptiveFetch() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = false; + + adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + Map map = getInfoMapVariable(); + + assertEquals(0, map.size()); + assertNull(map.get(expectedQuery)); + } + + /** + * Test for calling addNewQuery method twice with the same query. The query should be added only + * once, with counter set as 2. + */ + @Test + void addingSameQueryTwoTimes() throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = true; + + adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + Map map = getInfoMapVariable(); + + assertEquals(1, map.size()); + assertNotNull(map.get(expectedQuery)); + assertEquals(2, map.get(expectedQuery).getCounter()); + } + + /** + * Test for calling addNewQuery method twice with the same query, but with adaptiveFetch is set to + * false. The query shouldn't be added. 
+ */ + @Test + void addingSameQueryTwoTimesWithoutAdaptiveFetch() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = false; + + adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + Map map = getInfoMapVariable(); + + assertEquals(0, map.size()); + assertNull(map.get(expectedQuery)); + } + + /** + * Test for calling addNewQuery method twice with different queries. Both queries should be + * added. + */ + @Test + void addingTwoDifferentQueries() throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + String expectedQuery2 = "test-query-2"; + boolean adaptiveFetch = true; + + adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery2)); + + Map map = getInfoMapVariable(); + + assertEquals(2, map.size()); + assertNotNull(map.get(expectedQuery)); + assertEquals(1, map.get(expectedQuery).getCounter()); + assertNotNull(map.get(expectedQuery2)); + assertEquals(1, map.get(expectedQuery).getCounter()); + } + + /** + * Test for calling addNewQuery method twice with different queries, but adaptiveFetch is set to + * false. Both queries shouldn't be added. + */ + @Test + void addingTwoDifferentQueriesWithoutAdaptiveFetch() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + String expectedQuery2 = "test-query-2"; + boolean adaptiveFetch = false; + + adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery2)); + + Map map = getInfoMapVariable(); + + assertEquals(0, map.size()); + assertNull(map.get(expectedQuery)); + } + + /** + * Test for calling getAdaptiveFetch method with value true. 
+ */ + @Test + void gettingAdaptiveFetchIfTrue() + throws NoSuchFieldException, IllegalAccessException { + boolean expectedResult = true; + + setAdaptiveFetchVariable(expectedResult); + + assertEquals(expectedResult, adaptiveFetchCache.getAdaptiveFetch()); + } + + /** + * Test for calling getAdaptiveFetch method with value false. + */ + @Test + void gettingAdaptiveFetchIfFalse() + throws NoSuchFieldException, IllegalAccessException { + boolean expectedResult = false; + + setAdaptiveFetchVariable(expectedResult); + + assertEquals(expectedResult, adaptiveFetchCache.getAdaptiveFetch()); + } + + /** + * Test for calling getFetchSizeForQuery method for not existing query. Should return value -1. + */ + @Test + void gettingFetchSizeForNotExistingQuery() { + String expectedQuery = "test-query"; + boolean adaptiveFetch = true; + + int resultSize = adaptiveFetchCache + .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + assertEquals(-1, resultSize); + } + + /** + * Test for calling getFetchSizeForQuery method for not existing query, but adaptiveFetch is set + * to false. Should return value -1. + */ + @Test + void gettingFetchSizeForNotExistingQueryIfAdaptiveFetchFalse() { + String expectedQuery = "test-query"; + boolean adaptiveFetch = false; + + int resultSize = adaptiveFetchCache + .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + assertEquals(-1, resultSize); + } + + /** + * Test for calling getFetchSizeForQuery method for existing query. Should return set fetch size + * for the query. 
+ */ + @Test + void gettingFetchSizeForExistingQuery() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = true; + + Map map = getInfoMapVariable(); + + int expectedSize = 500; + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setSize(expectedSize); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + int resultSize = adaptiveFetchCache + .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + assertEquals(expectedSize, resultSize); + } + + /** + * Test for calling getFetchSizeForQuery method for existing query, but adaptiveFetch is set to + * false. Should return value -1. + */ + @Test + void gettingFetchSizeForExistingQueryIfAdaptiveFetchFalse() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = false; + + Map map = getInfoMapVariable(); + + int newSize = 500; + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setSize(newSize); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + int resultSize = adaptiveFetchCache + .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + assertEquals(-1, resultSize); + } + + /** + * Test for calling removeQuery method for not existing query. Should nothing happen. + */ + @Test + void removingNotExistingQuery() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = true; + + adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + Map map = getInfoMapVariable(); + + assertEquals(0, map.size()); + } + + /** + * Test for calling removeQuery method for not existing query, but adaptiveFetch is set false. + * Should nothing happen. 
+ */ + @Test + void removingNotExistingQueryIfAdaptiveFetchFalse() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = false; + + adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + Map map = getInfoMapVariable(); + + assertEquals(0, map.size()); + } + + /** + * Test for calling removeQuery method for existing query. The query should be removed from the + * map inside AdaptiveFetchCache. + */ + @Test + void removingExistingQuery() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = true; + + Map map = getInfoMapVariable(); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setCounter(1); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + assertEquals(1, map.size()); + + adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + assertEquals(0, map.size()); + assertNull(map.get(expectedQuery)); + } + + /** + * Test for calling removeQuery method for existing query, but adaptiveFetch is set false. The + * query shouldn't be removed. + */ + @Test + void removingExistingQueryIfAdaptiveFetchFalse() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = false; + + Map map = getInfoMapVariable(); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setCounter(1); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + assertEquals(1, map.size()); + + adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + assertEquals(1, map.size()); + assertNotNull(map.get(expectedQuery)); + assertEquals(1, map.get(expectedQuery).getCounter()); + } + + /** + * Test for calling removeQuery method for existing query with counter set to 2. 
After call, query + * shouldn't be removed, but counter set to 1. After next call, query should be removed. + */ + @Test + void removingExistingQueryWithLargeCounter() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = true; + + Map map = getInfoMapVariable(); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setCounter(2); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + assertEquals(1, map.size()); + assertNotNull(map.get(expectedQuery)); + assertEquals(1, map.get(expectedQuery).getCounter()); + + adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + assertEquals(0, map.size()); + assertNull(map.get(expectedQuery)); + } + + /** + * Test for calling removeQuery method for existing query with counter set to 2, but with + * adaptiveFetch set false. After both calls query should be removed and counter shouldn't + * change. 
+ */ + @Test + void removingExistingQueryWithLargeCounterIfAdaptiveFetchFalse() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query"; + boolean adaptiveFetch = false; + + Map map = getInfoMapVariable(); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setCounter(2); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + assertEquals(1, map.size()); + assertNotNull(map.get(expectedQuery)); + assertEquals(2, map.get(expectedQuery).getCounter()); + + adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + assertEquals(1, map.size()); + assertNotNull(map.get(expectedQuery)); + assertEquals(2, map.get(expectedQuery).getCounter()); + } + + /** + * Test for calling removeQuery method for existing query with more queries put in the map. Only + * query used in method call should be removed, other shouldn't change. 
+ */ + @Test + void removingExistingQueryWithMoreQueriesCached() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + String expectedQuery2 = "test-query-2"; + String expectedQuery3 = "test-query-3"; + boolean adaptiveFetch = true; + + Map map = getInfoMapVariable(); + + int expectedCounter1 = 1; + int expectedCounter2 = 37; + int expectedCounter3 = 14; + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry1 = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry1.setCounter(expectedCounter1); + map.put(expectedQuery, adaptiveFetchCacheEntry1); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry2.setCounter(expectedCounter2); + map.put(expectedQuery2, adaptiveFetchCacheEntry2); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry3 = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry3.setCounter(expectedCounter3); + map.put(expectedQuery3, adaptiveFetchCacheEntry3); + + adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + AdaptiveFetchCacheEntry resultInfo1 = map.get(expectedQuery); + AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2); + AdaptiveFetchCacheEntry resultInfo3 = map.get(expectedQuery3); + + assertEquals(2, map.size()); + assertNull(resultInfo1); + assertNotNull(resultInfo2); + assertEquals(adaptiveFetchCacheEntry2, resultInfo2); + assertEquals(expectedCounter2, resultInfo2.getCounter()); + assertNotNull(resultInfo3); + assertEquals(adaptiveFetchCacheEntry3, resultInfo3); + assertEquals(expectedCounter3, resultInfo3.getCounter()); + } + + /** + * Test for calling removeQuery method for existing query with more queries put in the map, but + * adaptiveFetch is set false. 
Queries shouldn't change + */ + @Test + void removingExistingQueryWithMoreQueriesCachedIfAdaptiveFetchFalse() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + String expectedQuery2 = "test-query-2"; + String expectedQuery3 = "test-query-3"; + boolean adaptiveFetch = false; + + Map map = getInfoMapVariable(); + + int expectedCounter1 = 1; + int expectedCounter2 = 37; + int expectedCounter3 = 14; + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry1 = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry1.setCounter(expectedCounter1); + map.put(expectedQuery, adaptiveFetchCacheEntry1); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry2.setCounter(expectedCounter2); + map.put(expectedQuery2, adaptiveFetchCacheEntry2); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry3 = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry3.setCounter(expectedCounter3); + map.put(expectedQuery3, adaptiveFetchCacheEntry3); + + adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery)); + + AdaptiveFetchCacheEntry resultInfo1 = map.get(expectedQuery); + AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2); + AdaptiveFetchCacheEntry resultInfo3 = map.get(expectedQuery3); + + assertEquals(3, map.size()); + assertNotNull(resultInfo1); + assertEquals(adaptiveFetchCacheEntry1, resultInfo1); + assertEquals(expectedCounter1, resultInfo1.getCounter()); + assertNotNull(resultInfo2); + assertEquals(adaptiveFetchCacheEntry2, resultInfo2); + assertEquals(expectedCounter2, resultInfo2.getCounter()); + assertNotNull(resultInfo3); + assertEquals(adaptiveFetchCacheEntry3, resultInfo3); + assertEquals(expectedCounter3, resultInfo3.getCounter()); + } + + /** + * Test for calling setAdaptiveFetch method with true value. 
+ */ + @Test + void settingAdaptiveFetchAsTrue() + throws NoSuchFieldException, IllegalAccessException { + boolean expectedAdaptiveFetch = true; + + adaptiveFetchCache.setAdaptiveFetch(expectedAdaptiveFetch); + + boolean resultAdaptiveFetch = getAdaptiveFetchVariable(); + + assertEquals(expectedAdaptiveFetch, resultAdaptiveFetch); + } + + /** + * Test for calling setAdaptiveFetch method with false value. + */ + @Test + void settingAdaptiveFetchAsFalse() + throws NoSuchFieldException, IllegalAccessException { + boolean expectedAdaptiveFetch = false; + + adaptiveFetchCache.setAdaptiveFetch(expectedAdaptiveFetch); + + boolean resultAdaptiveFetch = getAdaptiveFetchVariable(); + + assertEquals(expectedAdaptiveFetch, resultAdaptiveFetch); + } + + /** + * Test for calling updateQueryFetchSize method. Method should update a value for a query. + */ + @Test + void updatingAdaptiveFetchSize() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + boolean adaptiveFetch = true; + + Map map = getInfoMapVariable(); + + int rowSize = 33; + int startSize = size / rowSize - 15; + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setSize(startSize); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + adaptiveFetchCache + .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize); + + AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery); + + assertNotNull(resultInfo); + assertEquals(size / rowSize, resultInfo.getSize()); + } + + /** + * Test for calling updateQueryFetchSize method, but adaptiveFetch is set false. Method shouldn't + * update any values. 
+ */ + @Test + void updatingAdaptiveFetchSizeIfAdaptiveFetchFalse() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + boolean adaptiveFetch = false; + + Map map = getInfoMapVariable(); + + int rowSize = 33; + int startSize = size / rowSize - 15; + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setSize(startSize); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + adaptiveFetchCache + .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize); + + AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery); + + assertNotNull(resultInfo); + assertEquals(startSize, resultInfo.getSize()); + } + + /** + * Test for calling updateQueryFetchSize method for not existing query. Method shouldn't update + * any values. + */ + @Test + void updatingAdaptiveFetchSizeForNotExistingQuery() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + String expectedQuery2 = "test-query-2"; + boolean adaptiveFetch = true; + + Map map = getInfoMapVariable(); + + int rowSize = 33; + int startSize = size / rowSize - 15; + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setSize(startSize); + map.put(expectedQuery2, adaptiveFetchCacheEntry); + + adaptiveFetchCache + .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize); + + AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery); + AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2); + + assertNull(resultInfo); + assertNotNull(resultInfo2); + assertEquals(adaptiveFetchCacheEntry, resultInfo2); + assertEquals(startSize, resultInfo2.getSize()); + assertEquals(1, map.size()); + } + + /** + * Test for calling updateQueryFetchSize method for not existing query, but adaptiveFetch is set + * false. Method shouldn't update any values. 
+ */ + @Test + void updatingAdaptiveFetchSizeForNotExistingQueryIfAdaptiveFetchFalse() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + String expectedQuery2 = "test-query-2"; + boolean adaptiveFetch = false; + + Map map = getInfoMapVariable(); + + int rowSize = 33; + int startSize = size / rowSize - 15; + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setSize(startSize); + map.put(expectedQuery2, adaptiveFetchCacheEntry); + + adaptiveFetchCache + .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize); + + AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery); + AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2); + + assertNull(resultInfo); + assertNotNull(resultInfo2); + assertEquals(adaptiveFetchCacheEntry, resultInfo2); + assertEquals(startSize, resultInfo2.getSize()); + assertEquals(1, map.size()); + } + + /** + * Test for calling updateQueryFetchSize method in a situation when there are more queries saved + * in a map. The method should only change value for query used in a call. 
+ */ + @Test + void updatingAdaptiveFetchSizeWithMoreQueriesInMap() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + String expectedQuery2 = "test-query-2"; + boolean adaptiveFetch = true; + + Map map = getInfoMapVariable(); + + int rowSize = 33; + int startSize = size / rowSize - 15; + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setSize(startSize); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry2.setSize(startSize); + map.put(expectedQuery2, adaptiveFetchCacheEntry2); + + adaptiveFetchCache + .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize); + + AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery); + AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2); + + assertNotNull(resultInfo); + assertEquals(adaptiveFetchCacheEntry, resultInfo); + assertEquals(size / rowSize, resultInfo.getSize()); + assertNotNull(resultInfo2); + assertEquals(adaptiveFetchCacheEntry2, resultInfo2); + assertEquals(startSize, resultInfo2.getSize()); + assertEquals(2, map.size()); + } + + /** + * Test for calling updateQueryFetchSize method in a situation when there are more queries saved + * in a map, but adaptiveFetch is set false. The method shouldn't change any values. 
+ */ + @Test + void updatingAdaptiveFetchSizeWithMoreQueriesInMapIfAdaptiveFetchFalse() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + String expectedQuery2 = "test-query-2"; + boolean adaptiveFetch = false; + + Map map = getInfoMapVariable(); + + int rowSize = 33; + int startSize = size / rowSize - 15; + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setSize(startSize); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry2.setSize(startSize); + map.put(expectedQuery2, adaptiveFetchCacheEntry2); + + adaptiveFetchCache + .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize); + + AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery); + AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2); + + assertNotNull(resultInfo); + assertEquals(adaptiveFetchCacheEntry, resultInfo); + assertEquals(startSize, resultInfo.getSize()); + assertNotNull(resultInfo2); + assertEquals(adaptiveFetchCacheEntry2, resultInfo2); + assertEquals(startSize, resultInfo2.getSize()); + assertEquals(2, map.size()); + } + + /** + * Test for calling updateQueryFetchSize method with value to make computed value below minimum + * value. The method should update a query to have value of minimum. 
+ */ + @Test + void updatingAdaptiveFetchSizeWithMinimumSize() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + boolean adaptiveFetch = true; + + int rowSize = size + 1000; + int startSize = 2; + int expectedSize = 10; + + setMinimumSizeVariable(expectedSize); + + Map map = getInfoMapVariable(); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setSize(startSize); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + adaptiveFetchCache + .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize); + + AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery); + + assertNotNull(resultInfo); + assertEquals(expectedSize, resultInfo.getSize()); + } + + /** + * Test for calling updateQueryFetchSize method with value to make computed value below minimum + * value, but adaptiveFetch is set false. The method shouldn't update size for a query. + */ + @Test + void updatingAdaptiveFetchSizeWithMinimumSizeIfAdaptiveFetchFalse() + throws NoSuchFieldException, IllegalAccessException { + String expectedQuery = "test-query-1"; + boolean adaptiveFetch = false; + + int rowSize = size + 1000; + int startSize = 2; + int expectedSize = 10; + + setMinimumSizeVariable(expectedSize); + + Map map = getInfoMapVariable(); + + AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry(); + adaptiveFetchCacheEntry.setSize(startSize); + map.put(expectedQuery, adaptiveFetchCacheEntry); + + adaptiveFetchCache + .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize); + + AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery); + + assertNotNull(resultInfo); + assertEquals(startSize, resultInfo.getSize()); + } + + /** + * Test for calling updateQueryFetchSize method with value to make computed value above maximum + * value. The method should update a query to have value of maximum. 
 */
@Test
void updatingAdaptiveFetchSizeWithMaximumSize()
    throws NoSuchFieldException, IllegalAccessException {
  String expectedQuery = "test-query-1";
  boolean adaptiveFetch = true;

  // rowSize of 1 makes the recomputed size as large as the whole buffer allows,
  // which lands above the maximum configured below and must be clamped down to it.
  int rowSize = 1;
  int startSize = 2;
  int expectedSize = size / rowSize - 20;

  setMaximumSizeVariable(expectedSize);

  Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();

  AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
  adaptiveFetchCacheEntry.setSize(startSize);
  map.put(expectedQuery, adaptiveFetchCacheEntry);

  adaptiveFetchCache
      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);

  AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);

  assertNotNull(resultInfo);
  assertEquals(expectedSize, resultInfo.getSize());
}

/**
 * Test for calling updateQueryFetchSize method with value to make computed value below maximum
 * value, but adaptiveFetch is set false. The method shouldn't update size for a query.
 */
@Test
void updatingAdaptiveFetchSizeWithMaximumSizeIfAdaptiveFetchFalse()
    throws NoSuchFieldException, IllegalAccessException {
  String expectedQuery = "test-query-1";
  boolean adaptiveFetch = false;

  int rowSize = 1;
  int startSize = 2;
  int expectedSize = size / rowSize - 20;

  setMaximumSizeVariable(expectedSize);

  Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();

  AdaptiveFetchCacheEntry adaptiveFetchQueryInfo = new AdaptiveFetchCacheEntry();
  adaptiveFetchQueryInfo.setSize(startSize);
  map.put(expectedQuery, adaptiveFetchQueryInfo);

  adaptiveFetchCache
      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);

  // Adaptive fetch disabled: the entry keeps its seeded size.
  AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);

  assertNotNull(resultInfo);
  assertEquals(startSize, resultInfo.getSize());
}

// Here are methods for retrieving values from adaptiveFetchCache without calling methods.
// They reach into private fields by name via reflection, so these helpers are tightly
// coupled to the *VariableName constants declared on this test class.

private Map<String, AdaptiveFetchCacheEntry> getInfoMapVariable()
    throws IllegalAccessException, NoSuchFieldException {
  Field field = adaptiveFetchCache.getClass().getDeclaredField(infoMapVariableName);
  field.setAccessible(true);
  return (Map<String, AdaptiveFetchCacheEntry>) field.get(adaptiveFetchCache);
}

private int getMinimumSizeVariable() throws NoSuchFieldException, IllegalAccessException {
  Field field = adaptiveFetchCache.getClass().getDeclaredField(minimumSizeVariableName);
  field.setAccessible(true);
  return (Integer) field.get(adaptiveFetchCache);
}

private int getMaximumSizeVariable() throws NoSuchFieldException, IllegalAccessException {
  Field field = adaptiveFetchCache.getClass().getDeclaredField(maximumSizeVariableName);
  field.setAccessible(true);
  return (Integer) field.get(adaptiveFetchCache);
}

private boolean getAdaptiveFetchVariable() throws NoSuchFieldException, IllegalAccessException {
  Field field = adaptiveFetchCache.getClass()
      .getDeclaredField(adaptiveFetchVariableName);
  field.setAccessible(true);
  return (Boolean) field.get(adaptiveFetchCache);
}

private long getMaximumBufferVariable() throws NoSuchFieldException, IllegalAccessException {
  Field field = adaptiveFetchCache.getClass()
      .getDeclaredField(maximumBufferSizeVariableName);
  field.setAccessible(true);
  return (Long) field.get(adaptiveFetchCache);
}

private void setMinimumSizeVariable(int value)
    throws NoSuchFieldException, IllegalAccessException {
  Field field = adaptiveFetchCache.getClass().getDeclaredField(minimumSizeVariableName);
  field.setAccessible(true);
  field.set(adaptiveFetchCache, value);
}

private void setMaximumSizeVariable(int value)
    throws NoSuchFieldException, IllegalAccessException {
  Field field = adaptiveFetchCache.getClass()
      .getDeclaredField(maximumSizeVariableName);
  field.setAccessible(true);
  field.set(adaptiveFetchCache, value);
}

private void setAdaptiveFetchVariable(boolean value)
    throws NoSuchFieldException, IllegalAccessException {
  Field field = adaptiveFetchCache.getClass()
      .getDeclaredField(adaptiveFetchVariableName);
  field.setAccessible(true);
  field.set(adaptiveFetchCache, value);
}

private void setMaximumBufferVariable(long value)
    throws NoSuchFieldException, IllegalAccessException {
  Field field = adaptiveFetchCache.getClass()
      .getDeclaredField(maximumBufferSizeVariableName);
  field.setAccessible(true);
  field.set(adaptiveFetchCache, value);
}

/**
 * Class to mock object with Query interface. As AdaptiveFetchCache is using only
 * getNativeSql method from Query interface, other shouldn't be called.
 */
private class MockUpQuery implements Query {

  // The SQL text returned by getNativeSql(); used as the cache key in the tests.
  public String sql;

  MockUpQuery(String sql) {
    this.sql = sql;
  }

  @Override
  public ParameterList createParameterList() {
    throw new WrongMethodCallException("Method shouldn't be called.");
  }

  @Override
  public String toString(ParameterList parameters) {
    throw new WrongMethodCallException("Method shouldn't be called.");
  }

  // The only method AdaptiveFetchCache is expected to call.
  @Override
  public String getNativeSql() {
    return this.sql;
  }

  @Override
  public SqlCommand getSqlCommand() {
    throw new WrongMethodCallException("Method shouldn't be called.");
  }

  @Override
  public void close() {
    throw new WrongMethodCallException("Method shouldn't be called.");
  }

  @Override
  public boolean isStatementDescribed() {
    throw new WrongMethodCallException("Method shouldn't be called.");
  }

  @Override
  public boolean isEmpty() {
    throw new WrongMethodCallException("Method shouldn't be called.");
  }

  @Override
  public int getBatchSize() {
    throw new WrongMethodCallException("Method shouldn't be called.");
  }

  @Override
  public Map getResultSetColumnNameIndexMap() {
    throw new WrongMethodCallException("Method shouldn't be called.");
  }

  @Override
  public Query[] getSubqueries() {
    throw new WrongMethodCallException("Method shouldn't be called.");
  }
}

/**
 * An exception used when method shouldn't be called in MockUpQuery class.
+ */ + private class WrongMethodCallException extends RuntimeException { + + WrongMethodCallException(String msg) { + super(msg); + } + + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/AbstractArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/AbstractArraysTest.java new file mode 100644 index 0000000..3f0f5b2 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/AbstractArraysTest.java @@ -0,0 +1,1116 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.PGNotification; +import org.postgresql.copy.CopyManager; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.CachedQuery; +import org.postgresql.core.Encoding; +import org.postgresql.core.QueryExecutor; +import org.postgresql.core.ReplicationProtocol; +import org.postgresql.core.TransactionState; +import org.postgresql.core.TypeInfo; +import org.postgresql.core.Version; +import org.postgresql.fastpath.Fastpath; +import org.postgresql.jdbc.FieldMetadata.Key; +import org.postgresql.largeobject.LargeObjectManager; +import org.postgresql.replication.PGReplicationConnection; +import org.postgresql.util.LruCache; +import org.postgresql.util.PGobject; +import org.postgresql.xml.PGXmlFactoryFactory; + +import org.junit.jupiter.api.Test; + +import java.lang.reflect.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.DatabaseMetaData; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Struct; +import java.util.Map; +import java.util.Properties; +import java.util.TimerTask; +import 
java.util.concurrent.Executor;
import java.util.logging.Logger;

/**
 * Shared round-trip tests for array encoding/decoding. Concrete subclasses supply a
 * 3-dimensional fixture; note the element type A is itself an array type, so
 * testData[0][0] serves as the 1-D case, testData[0] as 2-D and testData as 3-D.
 */
public abstract class AbstractArraysTest<A> {

  // Connection stub that provides only an Encoding and TypeInfo to PgArray.
  private static final BaseConnection ENCODING_CONNECTION = new EncodingConnection(Encoding.getJVMEncoding("utf-8"));

  private final A[][] testData;

  // Whether the element type is expected to support the binary wire representation.
  private final boolean binarySupported;

  // Array type OID used when parsing the string representation back via PgArray.
  private final int arrayTypeOid;

  /**
   *
   * @param testData
   *          3 dimensional array to use for testing.
   * @param binarySupported
   *          Indicates if binary support is expected for the type.
   */
  public AbstractArraysTest(A[][] testData, boolean binarySupported, int arrayTypeOid) {
    super();
    this.testData = testData;
    this.binarySupported = binarySupported;
    this.arrayTypeOid = arrayTypeOid;
  }

  // Element-wise equality via reflective java.lang.reflect.Array access, so it works
  // for both primitive and object array types.
  protected void assertArraysEquals(String message, A expected, Object actual) {
    final int expectedLength = Array.getLength(expected);
    assertEquals(expectedLength, Array.getLength(actual), message + " size");
    for (int i = 0; i < expectedLength; i++) {
      assertEquals(Array.get(expected, i), Array.get(actual, i), message + " value at " + i);
    }
  }

  // 1-D round trip through the binary representation (skipped when unsupported).
  @Test
  public void binary() throws Exception {

    A data = testData[0][0];

    ArrayEncoding.ArrayEncoder<A> support = ArrayEncoding.getArrayEncoder(data);

    final int defaultArrayTypeOid = support.getDefaultArrayTypeOid();

    assertEquals(binarySupported, support.supportBinaryRepresentation(defaultArrayTypeOid));

    if (binarySupported) {

      final PgArray pgArray = new PgArray(ENCODING_CONNECTION, defaultArrayTypeOid,
          support.toBinaryRepresentation(ENCODING_CONNECTION, data, defaultArrayTypeOid));

      Object actual = pgArray.getArray();

      assertArraysEquals("", data, actual);
    }
  }

  // 1-D round trip through the textual array representation.
  @Test
  public void string() throws Exception {

    A data = testData[0][0];

    ArrayEncoding.ArrayEncoder<A> support = ArrayEncoding.getArrayEncoder(data);

    final String arrayString = support.toArrayString(',', data);

    final PgArray pgArray = new PgArray(ENCODING_CONNECTION, arrayTypeOid, arrayString);

    Object actual = pgArray.getArray();

    assertArraysEquals("", data, actual);
  }

  // 2-D round trip through the binary representation.
  @Test
  public void test2dBinary() throws Exception {

    A[] data = testData[0];

    ArrayEncoding.ArrayEncoder<A[]> support = ArrayEncoding.getArrayEncoder(data);

    final int defaultArrayTypeOid = support.getDefaultArrayTypeOid();

    assertEquals(binarySupported, support.supportBinaryRepresentation(defaultArrayTypeOid));

    if (binarySupported) {

      final PgArray pgArray = new PgArray(ENCODING_CONNECTION, support.getDefaultArrayTypeOid(),
          support.toBinaryRepresentation(ENCODING_CONNECTION, data, defaultArrayTypeOid));

      Object[] actual = (Object[]) pgArray.getArray();

      assertEquals(data.length, actual.length);

      for (int i = 0; i < data.length; i++) {
        assertArraysEquals("array at position " + i, data[i], actual[i]);
      }
    }
  }

  // 2-D round trip through the textual array representation.
  @Test
  public void test2dString() throws Exception {

    final A[] data = testData[0];

    final ArrayEncoding.ArrayEncoder<A[]> support = ArrayEncoding.getArrayEncoder(data);

    final String arrayString = support.toArrayString(',', data);

    final PgArray pgArray = new PgArray(ENCODING_CONNECTION, arrayTypeOid, arrayString);

    Object[] actual = (Object[]) pgArray.getArray();

    assertEquals(data.length, actual.length);

    for (int i = 0; i < data.length; i++) {
      assertArraysEquals("array at position " + i, data[i], actual[i]);
    }
  }

  // 3-D round trip through the binary representation.
  @Test
  public void test3dBinary() throws Exception {

    ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);

    final int defaultArrayTypeOid = support.getDefaultArrayTypeOid();

    assertEquals(binarySupported, support.supportBinaryRepresentation(defaultArrayTypeOid));

    if (binarySupported) {

      final PgArray pgArray = new PgArray(ENCODING_CONNECTION, support.getDefaultArrayTypeOid(),
          support.toBinaryRepresentation(ENCODING_CONNECTION, testData, defaultArrayTypeOid));

      Object[][] actual = (Object[][]) pgArray.getArray();

      assertEquals(testData.length, actual.length);

      for (int i = 0; i < testData.length; i++) {
        assertEquals(testData[i].length, actual[i].length, "array length at " + i);
        for (int j = 0; j < testData[i].length; j++) {
          assertArraysEquals("array at " + i + ',' + j, testData[i][j], actual[i][j]);
        }
      }
    }
  }

  // 3-D round trip through the textual array representation.
  @Test
  public void test3dString() throws Exception {

    final ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);

    final String arrayString = support.toArrayString(',', testData);

    final PgArray pgArray = new PgArray(ENCODING_CONNECTION, arrayTypeOid, arrayString);

    Object[][] actual = (Object[][]) pgArray.getArray();

    assertEquals(testData.length, actual.length);

    for (int i = 0; i < testData.length; i++) {
      assertEquals(testData[i].length, actual[i].length, "array length at " + i);
      for (int j = 0; j < testData[i].length; j++) {
        assertArraysEquals("array at " + i + ',' + j, testData[i][j], actual[i][j]);
      }
    }
  }

  // An Object[] view of the fixture must serialize exactly like the typed array.
  @Test
  public void objectArrayCopy() throws Exception {
    final Object[] copy = new Object[testData.length];
    for (int i = 0; i < testData.length; i++) {
      copy[i] = testData[i];
    }

    final ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);
    final String arrayString = support.toArrayString(',', testData);

    final ArrayEncoding.ArrayEncoder<Object[]> copySupport = ArrayEncoding.getArrayEncoder(copy);
    final String actual = copySupport.toArrayString(',', copy);

    assertEquals(arrayString, actual);
  }

  // Same as objectArrayCopy, one dimension deeper: Object[][] view of the fixture.
  @Test
  public void object2dArrayCopy() throws Exception {
    final Object[][] copy = new Object[testData.length][];
    for (int i = 0; i < testData.length; i++) {
      copy[i] = testData[i];
    }

    final ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);
    final String arrayString = support.toArrayString(',', testData);

    final ArrayEncoding.ArrayEncoder<Object[][]> copySupport = ArrayEncoding.getArrayEncoder(copy);
    final String actual = copySupport.toArrayString(',', copy);

    assertEquals(arrayString, actual);
  }

  // A 4-D case: two copies of the 3-D fixture, compared against an Object[][][] view.
  @Test
  public void object3dArrayCopy() throws Exception {
    final A[][][] source = (A[][][]) Array.newInstance(testData.getClass(), 2);
    source[0] = testData;
    source[1] = testData;
    final Object[][][] copy = new Object[][][]{testData, testData};

    final ArrayEncoding.ArrayEncoder<A[][][]> support = ArrayEncoding.getArrayEncoder(source);
    final String arrayString = support.toArrayString(',', source);

    final ArrayEncoding.ArrayEncoder<Object[][][]> copySupport = ArrayEncoding.getArrayEncoder(copy);
    final String actual = copySupport.toArrayString(',', copy);

    assertEquals(arrayString, actual);
  }

  /**
   * Minimal BaseConnection for driving PgArray in isolation: it supplies only the
   * character encoding and a TypeInfoCache; nearly every other method throws
   * UnsupportedOperationException so an unexpected call fails the test loudly.
   */
  private static final class EncodingConnection implements BaseConnection {
    private final Encoding encoding;
    private final TypeInfo typeInfo = new TypeInfoCache(this, -1);

    EncodingConnection(Encoding encoding) {
      this.encoding = encoding;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Encoding getEncoding() throws SQLException {
      return encoding;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public TypeInfo getTypeInfo() {
      return typeInfo;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void cancelQuery() throws SQLException {
      throw new UnsupportedOperationException();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ResultSet execSQLQuery(String s) throws SQLException {
      throw new UnsupportedOperationException();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency) throws SQLException {
      throw new UnsupportedOperationException();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void execSQLUpdate(String s) throws SQLException {
      throw new UnsupportedOperationException();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public QueryExecutor getQueryExecutor() {
      throw new UnsupportedOperationException();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ReplicationProtocol getReplicationProtocol()
{ + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Object getObject(String type, String value, byte[] byteValue) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean haveMinimumServerVersion(int ver) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean haveMinimumServerVersion(Version ver) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public byte[] encodeString(String str) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public String escapeString(String str) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean getStandardConformingStrings() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public TimestampUtils getTimestampUtils() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Logger getLogger() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean getStringVarcharFlag() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public TransactionState getTransactionState() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean binaryTransferSend(int oid) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean isColumnSanitiserDisabled() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void addTimerTask(TimerTask timerTask, long milliSeconds) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override 
+ public void purgeTimerTasks() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public LruCache getFieldMetadataCache() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized, String... columnNames) + throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Statement createStatement() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public String nativeSQL(String sql) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean getAutoCommit() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void commit() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void rollback() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void close() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * 
{@inheritDoc} + */ + @Override + public boolean isClosed() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public DatabaseMetaData getMetaData() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean isReadOnly() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setCatalog(String catalog) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public String getCatalog() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setTransactionIsolation(int level) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public int getTransactionIsolation() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public SQLWarning getWarnings() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void clearWarnings() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public CallableStatement prepareCall(String sql, 
int resultSetType, int resultSetConcurrency) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Map> getTypeMap() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setTypeMap(Map> map) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setHoldability(int holdability) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public int getHoldability() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Savepoint setSavepoint() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Savepoint setSavepoint(String name) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void rollback(Savepoint savepoint) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + 
throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Clob createClob() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Blob createBlob() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public NClob createNClob() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public SQLXML createSQLXML() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean isValid(int timeout) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public String getClientInfo(String name) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Properties getClientInfo() throws SQLException { + throw new 
UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public java.sql.Array createArrayOf(String typeName, Object[] elements) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setSchema(String schema) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public String getSchema() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void abort(Executor executor) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public int getNetworkTimeout() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public T unwrap(Class iface) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public java.sql.Array createArrayOf(String typeName, Object elements) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PGNotification[] getNotifications() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PGNotification[] getNotifications(int timeoutMillis) throws SQLException { + throw new 
UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public CopyManager getCopyAPI() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public LargeObjectManager getLargeObjectAPI() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Fastpath getFastpathAPI() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void addDataType(String type, String className) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void addDataType(String type, Class klass) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setPrepareThreshold(int threshold) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public int getPrepareThreshold() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setDefaultFetchSize(int fetchSize) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public int getDefaultFetchSize() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public int getBackendPID() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public String escapeIdentifier(String identifier) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public String escapeLiteral(String literal) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PreferQueryMode getPreferQueryMode() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + 
@Override + public AutoSave getAutosave() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void setAutosave(AutoSave autoSave) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PGReplicationConnection getReplicationAPI() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public Map getParameterStatuses() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public String getParameterStatus(String parameterName) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean hintReadOnly() { + return false; + } + + /** + * {@inheritDoc} + */ + @Override + public void setAdaptiveFetch(boolean adaptiveFetch) { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean getAdaptiveFetch() { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean getLogServerErrorDetail() { + return false; + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTest.java new file mode 100644 index 0000000..5f7e0d0 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTest.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import org.postgresql.core.Oid; +import org.postgresql.util.PSQLException; + +import org.junit.jupiter.api.Test; + +import java.math.BigDecimal; +import java.sql.SQLFeatureNotSupportedException; + +class ArraysTest { + + @Test + void nonArrayNotSupported() throws Exception { + assertThrows(PSQLException.class, () -> { + ArrayEncoding.getArrayEncoder("asdflkj"); + }); + } + + @Test + void noByteArray() throws Exception { + assertThrows(PSQLException.class, () -> { + ArrayEncoding.getArrayEncoder(new byte[]{}); + }); + } + + @Test + void binaryNotSupported() throws Exception { + assertThrows(SQLFeatureNotSupportedException.class, () -> { + final ArrayEncoding.ArrayEncoder support = ArrayEncoding.getArrayEncoder(new BigDecimal[]{}); + + assertFalse(support.supportBinaryRepresentation(Oid.FLOAT8_ARRAY)); + + support.toBinaryRepresentation(null, new BigDecimal[]{BigDecimal.valueOf(3)}, Oid.FLOAT8_ARRAY); + }); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTestSuite.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTestSuite.java new file mode 100644 index 0000000..0cb8395 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTestSuite.java @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + BigDecimalObjectArraysTest.class, + BooleanArraysTest.class, + BooleanObjectArraysTest.class, + ByteaArraysTest.class, + DoubleArraysTest.class, + DoubleObjectArraysTest.class, + FloatArraysTest.class, + FloatObjectArraysTest.class, + IntArraysTest.class, + IntegerObjectArraysTest.class, + LongArraysTest.class, + LongObjectArraysTest.class, + ShortArraysTest.class, + ShortObjectArraysTest.class, + StringArraysTest.class, + UUIDArrayTest.class +}) +public class ArraysTestSuite { +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/BigDecimalObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/BigDecimalObjectArraysTest.java new file mode 100644 index 0000000..61aaf3d --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/BigDecimalObjectArraysTest.java @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import static java.math.BigDecimal.valueOf; + +import org.postgresql.core.Oid; + +import java.math.BigDecimal; + +public class BigDecimalObjectArraysTest extends AbstractArraysTest { + + private static final BigDecimal[][][] doubles = new BigDecimal[][][]{ + {{valueOf(1.3), valueOf(2.4), valueOf(3.1), valueOf(4.2)}, + {valueOf(5D), valueOf(6D), valueOf(7D), valueOf(8D)}, + {valueOf(9D), valueOf(10D), valueOf(11D), valueOf(12D)}}, + {{valueOf(13D), valueOf(14D), valueOf(15D), valueOf(16D)}, {valueOf(17D), valueOf(18D), valueOf(19D), null}, + {valueOf(21D), valueOf(22D), valueOf(23D), valueOf(24D)}}}; + + public BigDecimalObjectArraysTest() { + super(doubles, false, Oid.NUMERIC_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/BitFieldTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/BitFieldTest.java new file mode 100644 index 0000000..d8b5b26 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/BitFieldTest.java @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; +import org.postgresql.util.PGobject; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +public class BitFieldTest extends BaseTest4 { + + private static class TestData { + private final String bitValue; + private final String tableName; + private final String tableFields; + private final boolean isVarBit; + + TestData(String bitValue, String tableName, String tableFields, boolean isVarBit) { + this.bitValue = bitValue; + this.tableName = tableName; + this.tableFields = tableFields; + this.isVarBit = isVarBit; + } + + public String getBitValue() { + return bitValue; + } + + public String getTableName() { + return tableName; + } + + public String getTableFields() { + return tableFields; + } + + public boolean getIsVarBit() { + return isVarBit; + } + } + + private static final String fieldName = "field_bit"; + public static final String testBitValue = "0101010100101010101010100101"; + private static final TestData[] testBitValues = new TestData[]{ + new TestData("0", "test_bit_field_0a", fieldName + " bit", false), + new TestData("0", "test_bit_field_0b", fieldName + " bit(1)", false), + new TestData("1", "test_bit_field_1a", fieldName + " bit", false), + new TestData("1", "test_bit_field_1b", fieldName + " bit(1)", false), + new TestData(testBitValue, "test_bit_field_gt1_1", String.format("%s bit(%d)", fieldName, + testBitValue.length()), false), + new TestData(testBitValue, "test_varbit_field_gt1_1", String.format("%s varbit(%d)", fieldName, + testBitValue.length()), true), + new TestData("1", "test_varbit_field_1", String.format("%s varbit(1)", fieldName), true), + new TestData("0", "test_varbit_field_0", String.format("%s varbit(1)", fieldName), true) + }; + + 
@Override + @Before + public void setUp() throws Exception { + super.setUp(); + con = TestUtil.openDB(); + Statement stmt = con.createStatement(); + for (TestData testData : testBitValues) { + TestUtil.createTempTable(con, testData.getTableName(), testData.getTableFields()); + stmt.execute(String.format("INSERT INTO %s values(b'%s')", testData.getTableName(), + testData.getBitValue())); + } + } + + @After + public void tearDown() throws SQLException { + Statement stmt = con.createStatement(); + for (TestData testData : testBitValues) { + stmt.execute(String.format("DROP TABLE %s", testData.getTableName())); + } + stmt.close(); + TestUtil.closeDB(con); + } + + @Test + public void TestGetObjectForBitFields() throws SQLException { + // Start from 1 to skip the first testBit value + for (TestData testData : testBitValues) { + PreparedStatement pstmt = con.prepareStatement(String.format("SELECT field_bit FROM %s " + + "limit 1", testData.getTableName())); + checkBitFieldValue(pstmt, testData.getBitValue(), testData.getIsVarBit()); + pstmt.close(); + } + } + + @Test + public void TestSetBitParameter() throws SQLException { + for (TestData testData : testBitValues) { + PreparedStatement pstmt = con.prepareStatement( + String.format("SELECT field_bit FROM %s where ", testData.getTableName()) + + "field_bit = ?"); + PGobject param = new PGobject(); + param.setValue(testData.getBitValue()); + param.setType(testData.getIsVarBit() ? 
"varbit" : "bit"); + pstmt.setObject(1, param); + checkBitFieldValue(pstmt, testData.getBitValue(), testData.getIsVarBit()); + pstmt.close(); + } + } + + private void checkBitFieldValue(PreparedStatement pstmt, String bitValue, boolean isVarBit) throws SQLException { + ResultSet rs = pstmt.executeQuery(); + Assert.assertTrue(rs.next()); + Object o = rs.getObject(1); + if (bitValue.length() == 1 && !isVarBit) { + Assert.assertTrue("Failed for " + bitValue, o instanceof java.lang.Boolean); + Boolean b = (Boolean) o; + Assert.assertEquals("Failed for " + bitValue, bitValue.charAt(0) == '1', b); + } else { + Assert.assertTrue("Failed for " + bitValue, o instanceof PGobject); + PGobject pGobject = (PGobject) o; + Assert.assertEquals("Failed for " + bitValue, bitValue, pGobject.getValue()); + } + String s = rs.getString(1); + Assert.assertEquals(bitValue, s); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanArraysTest.java new file mode 100644 index 0000000..cc90451 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanArraysTest.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class BooleanArraysTest extends AbstractArraysTest { + private static final boolean[][][] booleans = new boolean[][][]{ + {{true, false, false, true}, {false, false, true, true}, {true, true, false, false}}, + {{false, true, true, false}, {true, false, true, false}, {false, true, false, true}}}; + + public BooleanArraysTest() { + super(booleans, true, Oid.BOOL_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanObjectArraysTest.java new file mode 100644 index 0000000..9357f09 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanObjectArraysTest.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class BooleanObjectArraysTest extends AbstractArraysTest { + private static final Boolean[][][] booleans = new Boolean[][][]{ + {{true, false, null, true}, {false, false, true, true}, {true, true, false, false}}, + {{false, true, true, false}, {true, false, true, null}, {false, true, false, true}}}; + + public BooleanObjectArraysTest() { + super(booleans, true, Oid.BOOL_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ByteaArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ByteaArraysTest.java new file mode 100644 index 0000000..b777a11 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/ByteaArraysTest.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.core.Oid; + +import org.junit.jupiter.api.Test; + +import java.lang.reflect.Array; + +public class ByteaArraysTest extends AbstractArraysTest { + + private static final byte[][][][] longs = new byte[][][][]{ + {{{0x1, 0x23, (byte) 0xDF, 0x43}, {0x5, 0x6, 0x7, (byte) 0xFF}, null, {0x9, 0x10, 0x11, 0x12}}, + {null, {0x13, 0x14, 0x15, 0x16}, {0x17, 0x18, (byte) 0xFF, 0x20}, {0x1, 0x2, (byte) 0xFF, 0x4F}}, + {{0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4}, + {0x1, 0x2, (byte) 0xFF, 0x4}}}, + {{{0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4}, + {0x1, 0x2, (byte) 0xFE, 0x4}}, + {{0x1, 0x2, (byte) 0xCD, 0x4}, {0x1, 0x73, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4}, + {0x1, 0x2, (byte) 0xFF, 0x4}}, + {{0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFE, 0x10}, {0x1, 0x2, (byte) 0xFF, 0x4}, + {0x1, 0x2, (byte) 0xFF, 0x4}}}}; + + public ByteaArraysTest() { + super(longs, true, Oid.BYTEA_ARRAY); + } + + /** + * {@inheritDoc} + */ + @Override + protected void assertArraysEquals(String message, byte[][] expected, Object actual) { + final int expectedLength = Array.getLength(expected); + assertEquals(expectedLength, Array.getLength(actual), message + " size"); + for (int i = 0; i < expectedLength; i++) { + assertArrayEquals(expected[i], (byte[]) Array.get(actual, i), message + " value at " + i); + } + } + + @Test + void objectArrayWrapper() throws Exception { + final Object[] array = new Object[]{new byte[]{0x1, 0x2, (byte) 0xFF, 0x4}, new byte[]{0x5, 0x6, 0x7, (byte) 0xFF}}; + + final ArrayEncoding.ArrayEncoder copySupport = ArrayEncoding.getArrayEncoder(array); + try { + copySupport.toArrayString(',', array); + fail("byte[] in Object[] should not 
be supported"); + } catch (UnsupportedOperationException e) { + assertEquals("byte[] nested inside Object[]", e.getMessage()); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ConnectionValidTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ConnectionValidTest.java new file mode 100644 index 0000000..8709b3b --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/ConnectionValidTest.java @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; + +import org.postgresql.test.TestUtil; +import org.postgresql.test.annotations.DisabledIfServerVersionBelow; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.io.InputStream; +import java.io.OutputStream; +import java.net.ServerSocket; +import java.net.Socket; +import java.sql.Connection; +import java.util.Properties; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +@DisabledIfServerVersionBelow("9.4") +class ConnectionValidTest { + private static final int LOCAL_SHADOW_PORT = 9009; + + private Connection connection; + + private ConnectionBreaker connectionBreaker; + + @BeforeEach + void setUp() throws Exception { + final Properties shadowProperties = new Properties(); + shadowProperties.setProperty(TestUtil.SERVER_HOST_PORT_PROP, + String.format("%s:%s", "localhost", LOCAL_SHADOW_PORT)); + + connectionBreaker = new ConnectionBreaker(LOCAL_SHADOW_PORT, + TestUtil.getServer(), + TestUtil.getPort()); + connectionBreaker.acceptAsyncConnection(); + connection = TestUtil.openDB(shadowProperties); + } + + @AfterEach + void tearDown() throws 
Exception { + connectionBreaker.close(); + connection.close(); + } + + /** + * Tests if a connection is valid within 5 seconds. + * @throws Exception if a database exception occurs. + */ + @Test + @Timeout(30) + void isValid() throws Exception { + connectionBreaker.breakConnection(); + boolean result = connection.isValid(5); + + assertThat("Is connection valid?", + result, + equalTo(false) + ); + } + + private static final class ConnectionBreaker { + + private final ExecutorService workers; + + private final ServerSocket internalServer; + + private final Socket pgSocket; + + private boolean breakConnection; + + /** + * Constructor of the forwarder for the PostgreSQL server. + * + * @param serverPort The forwarder server port. + * @param pgServer The PostgreSQL server address. + * @param pgPort The PostgreSQL server port. + * @throws Exception if anything goes wrong binding the server. + */ + ConnectionBreaker(final int serverPort, final String pgServer, + final int pgPort) throws Exception { + workers = Executors.newCachedThreadPool(); + internalServer = new ServerSocket(serverPort); + pgSocket = new Socket(pgServer, pgPort); + breakConnection = false; + } + + /** + * Starts to accept a asynchronous connection. + * + * @throws Exception if something goes wrong with the sockets. 
+ */ + public void acceptAsyncConnection() throws Exception { + final InputStream pgServerInputStream = pgSocket.getInputStream(); + final OutputStream pgServerOutputStream = pgSocket.getOutputStream(); + + // Future socket; + final Future futureConnection = workers.submit(internalServer::accept); + + // Forward reads; + workers.submit(() -> { + while (!breakConnection) { + final Socket conn = futureConnection.get(); + int read = pgServerInputStream.read(); + conn.getOutputStream().write(read); + } + return null; + }); + + // Forwards writes; + workers.submit(() -> { + while (!breakConnection) { + final Socket conn = futureConnection.get(); + int read = conn.getInputStream().read(); + pgServerOutputStream.write(read); + } + return null; + }); + } + + /** + * Breaks the forwarding. + */ + public void breakConnection() { + this.breakConnection = true; + } + + /** + * Closes the sockets. + */ + public void close() throws Exception { + this.workers.shutdown(); + this.workers.awaitTermination(5, TimeUnit.SECONDS); + this.internalServer.close(); + this.pgSocket.close(); + } + + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/DeepBatchedInsertStatementTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/DeepBatchedInsertStatementTest.java new file mode 100644 index 0000000..d7d0207 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/DeepBatchedInsertStatementTest.java @@ -0,0 +1,319 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import static org.junit.Assert.assertEquals; + +import org.postgresql.PGProperty; +import org.postgresql.core.ParameterList; +import org.postgresql.core.Query; +import org.postgresql.core.v3.BatchedQuery; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; +import org.postgresql.test.jdbc2.BatchExecuteTest; + +import org.junit.Test; + +import java.lang.reflect.Method; +import java.sql.Date; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Properties; + +/** + * This object tests the internals of the BatchedStatementDecorator during + * execution. Rather than rely on testing at the jdbc api layer. + * on. + */ +public class DeepBatchedInsertStatementTest extends BaseTest4 { + + /* + * Set up the fixture for this testcase: a connection to a database with a + * table for this test. + */ + @Override + public void setUp() throws Exception { + super.setUp(); + Statement stmt = con.createStatement(); + + /* + * Drop the test table if it already exists for some reason. It is not an + * error if it doesn't exist. + */ + TestUtil.createTable(con, "testbatch", "pk INTEGER, col1 INTEGER"); + TestUtil.createTable(con, "testunspecified", "pk INTEGER, bday TIMESTAMP"); + + stmt.executeUpdate("INSERT INTO testbatch VALUES (1, 0)"); + stmt.close(); + + /* + * Generally recommended with batch updates. By default we run all tests in + * this test case with autoCommit disabled. + */ + con.setAutoCommit(false); + } + + // Tear down the fixture for this test case. 
+ @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "testbatch"); + TestUtil.dropTable(con, "testunspecified"); + super.tearDown(); + } + + @Override + protected void updateProperties(Properties props) { + PGProperty.REWRITE_BATCHED_INSERTS.set(props, true); + forceBinary(props); + } + + @Test + public void testDeepInternalsBatchedQueryDecorator() throws Exception { + PgPreparedStatement pstmt = null; + try { + pstmt = (PgPreparedStatement) con.prepareStatement("INSERT INTO testbatch VALUES (?,?)"); + + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.addBatch(); // initial pass + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.addBatch();// preparedQuery should be wrapped + + BatchedQuery[] bqds; + bqds = transformBQD(pstmt); + assertEquals(2, getBatchSize(bqds)); + + pstmt.setInt(1, 5); + pstmt.setInt(2, 6); + pstmt.addBatch(); + + bqds = transformBQD(pstmt); + assertEquals(3, getBatchSize(bqds)); + + BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch()); + bqds = transformBQD(pstmt); + + assertEquals(0, getBatchSize(bqds)); + + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.addBatch(); + + bqds = transformBQD(pstmt); + assertEquals(1, getBatchSize(bqds)); + + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(2, getBatchSize(bqds)); + + pstmt.setInt(1, 5); + pstmt.setInt(2, 6); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(3, getBatchSize(bqds)); + + BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch()); + + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(1, getBatchSize(bqds)); + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(2, getBatchSize(bqds)); + + pstmt.setInt(1, 5); + pstmt.setInt(2, 6); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(3, getBatchSize(bqds)); + + pstmt.setInt(1, 7); + 
pstmt.setInt(2, 8); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(4, getBatchSize(bqds)); + + BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch()); + + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(1, getBatchSize(bqds)); + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(2, getBatchSize(bqds)); + + pstmt.setInt(1, 5); + pstmt.setInt(2, 6); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(3, getBatchSize(bqds)); + + BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch()); + + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(1, getBatchSize(bqds)); + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(2, getBatchSize(bqds)); + + pstmt.setInt(1, 5); + pstmt.setInt(2, 6); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(3, getBatchSize(bqds)); + + BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch()); + + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(1, getBatchSize(bqds)); + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.addBatch(); + bqds = transformBQD(pstmt); + assertEquals(2, getBatchSize(bqds)); + + BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * + */ + @Test + public void testUnspecifiedParameterType() throws Exception { + PgPreparedStatement pstmt = null; + try { + pstmt = (PgPreparedStatement) con + .prepareStatement("INSERT INTO testunspecified VALUES (?,?)"); + + pstmt.setInt(1, 1); + pstmt.setDate(2, new Date(1)); + pstmt.addBatch(); + + pstmt.setInt(1, 2); + pstmt.setDate(2, new Date(2)); + pstmt.addBatch(); + + BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch()); + + pstmt.setInt(1, 
1); + pstmt.setDate(2, new Date(3)); + pstmt.addBatch(); + pstmt.setInt(1, 2); + pstmt.setDate(2, new Date(4)); + pstmt.addBatch(); + + BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * Test to check the statement can provide the necessary number of prepared + * type fields. This is after running with a batch size of 1. + */ + @Test + public void testVaryingTypeCounts() throws SQLException { + PgPreparedStatement pstmt = null; + try { + pstmt = (PgPreparedStatement) con.prepareStatement("INSERT INTO testunspecified VALUES (?,?)"); + pstmt.setInt(1, 1); + pstmt.setDate(2, new Date(1)); + pstmt.addBatch(); + + BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch()); + pstmt.setInt(1, 1); + pstmt.setDate(2, new Date(2)); + pstmt.addBatch(); + pstmt.setInt(1, 2); + pstmt.setDate(2, new Date(3)); + pstmt.addBatch(); + + pstmt.setInt(1, 3); + pstmt.setDate(2, new Date(4)); + pstmt.addBatch(); + pstmt.setInt(1, 4); + pstmt.setDate(2, new Date(5)); + pstmt.addBatch(); + + BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * This method triggers the transformation of single batches to multi batches. + * + * @param ps PgPreparedStatement statement that will contain the field + * @return BatchedQueryDecorator[] queries after conversion + * @throws Exception fault raised when the field cannot be accessed + */ + private BatchedQuery[] transformBQD(PgPreparedStatement ps) throws Exception { + // We store collections that get replace on the statement + ArrayList batchStatements = ps.batchStatements; + ArrayList batchParameters = ps.batchParameters; + ps.transformQueriesAndParameters(); + BatchedQuery[] bqds = ps.batchStatements.toArray(new BatchedQuery[0]); + // Restore collections on the statement. 
+ ps.batchStatements = batchStatements; + ps.batchParameters = batchParameters; + return bqds; + } + + /** + * Get the total batch size of multi batches. + * + * @param bqds the converted queries + * @return the total batch size + */ + private int getBatchSize(BatchedQuery[] bqds) { + int total = 0; + for (BatchedQuery bqd : bqds) { + total += bqd.getBatchSize(); + } + return total; + } + + /** + * Access the encoded statement name field. + * Again using reflection to gain access to a private field member + * @param bqd BatchedQueryDecorator object on which field is present + * @return byte[] array of bytes that represent the statement name + * when encoded + * @throws Exception fault raised if access to field not possible + */ + private byte[] getEncodedStatementName(BatchedQuery bqd) + throws Exception { + Class clazz = Class.forName("org.postgresql.core.v3.SimpleQuery"); + Method mESN = clazz.getDeclaredMethod("getEncodedStatementName"); + mESN.setAccessible(true); + return (byte[]) mESN.invoke(bqd); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleArraysTest.java new file mode 100644 index 0000000..3734d1e --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleArraysTest.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class DoubleArraysTest extends AbstractArraysTest { + + private static final double[][][] doubles = new double[][][]{ + {{1.2, 2.3, 3.7, 4.9}, {5, 6, 7, 8}, {9, 10, 11, 12}}, + {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}; + + public DoubleArraysTest() { + super(doubles, true, Oid.FLOAT8_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleObjectArraysTest.java new file mode 100644 index 0000000..854e7b2 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleObjectArraysTest.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class DoubleObjectArraysTest extends AbstractArraysTest { + + private static final Double[][][] doubles = new Double[][][]{ + {{1.3, 2.4, 3.1, 4.2}, {5D, 6D, 7D, 8D}, {9D, 10D, 11D, 12D}}, + {{13D, 14D, 15D, 16D}, {17D, 18D, 19D, null}, {21D, 22D, 23D, 24D}}}; + + public DoubleObjectArraysTest() { + super(doubles, true, Oid.FLOAT8_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/FloatArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/FloatArraysTest.java new file mode 100644 index 0000000..fd23a27 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/FloatArraysTest.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class FloatArraysTest extends AbstractArraysTest { + + private static final float[][][] floats = new float[][][]{ + {{1.2f, 2.3f, 3.7f, 4.9f}, {5, 6, 7, 8}, {9, 10, 11, 12}}, + {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}; + + public FloatArraysTest() { + super(floats, true, Oid.FLOAT4_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/FloatObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/FloatObjectArraysTest.java new file mode 100644 index 0000000..50f08b9 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/FloatObjectArraysTest.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class FloatObjectArraysTest extends AbstractArraysTest { + + private static final Float[][][] floats = new Float[][][]{ + {{1.3f, 2.4f, 3.1f, 4.2f}, {5f, 6f, 7f, 8f}, {9f, 10f, 11f, 12f}}, + {{13f, 14f, 15f, 16f}, {17f, 18f, 19f, null}, {21f, 22f, 23f, 24f}}}; + + public FloatObjectArraysTest() { + super(floats, true, Oid.FLOAT4_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/IntArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/IntArraysTest.java new file mode 100644 index 0000000..82ab376 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/IntArraysTest.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class IntArraysTest extends AbstractArraysTest { + + private static final int[][][] ints = new int[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, + {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}; + + public IntArraysTest() { + super(ints, true, Oid.INT4_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/IntegerObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/IntegerObjectArraysTest.java new file mode 100644 index 0000000..792e50f --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/IntegerObjectArraysTest.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class IntegerObjectArraysTest extends AbstractArraysTest { + + private static final Integer[][][] ints = new Integer[][][]{ + {{1, 2, 3, 4}, {5, null, 7, 8}, {9, 10, 11, 12}}, + {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}; + + public IntegerObjectArraysTest() { + super(ints, true, Oid.INT4_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/LargeObjectManagerTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/LargeObjectManagerTest.java new file mode 100644 index 0000000..7c506b9 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/LargeObjectManagerTest.java @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.largeobject.LargeObject; +import org.postgresql.largeobject.LargeObjectManager; +import org.postgresql.test.TestUtil; +import org.postgresql.test.util.StrangeInputStream; +import org.postgresql.test.util.StrangeOutputStream; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.MessageDigest; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; +import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; + +class LargeObjectManagerTest { + + /* + * It is possible for PostgreSQL to send a ParameterStatus message after an ErrorResponse + * Receiving such a message should not lead to an invalid connection state + * See https://github.com/pgjdbc/pgjdbc/issues/2237 + */ + @Test + void openWithErrorAndSubsequentParameterStatusMessageShouldLeaveConnectionInUsableStateAndUpdateParameterStatus() throws Exception { + try (PgConnection con = (PgConnection) TestUtil.openDB()) { + Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)); + con.setAutoCommit(false); + String originalApplicationName = con.getParameterStatus("application_name"); + try (Statement statement = con.createStatement()) { + statement.execute("begin;"); + // Set transaction application_name to trigger ParameterStatus message after error + // https://www.postgresql.org/docs/14/protocol-flow.html#PROTOCOL-ASYNC + String updatedApplicationName = "LargeObjectManagerTest-application-name"; + statement.execute("set application_name 
to '" + updatedApplicationName + "'"); + + LargeObjectManager loManager = con.getLargeObjectAPI(); + try { + loManager.open(0, false); + fail("Succeeded in opening a nonexistent large object"); + } catch (PSQLException e) { + assertEquals(PSQLState.UNDEFINED_OBJECT.getState(), e.getSQLState()); + } + + // Should be reset to original application name + assertEquals(originalApplicationName, con.getParameterStatus("application_name")); + } + } + } + + + /** + * Writes data into a large object and reads it back. + * The verifications are: + * 1) input size should match the output size + * 2) input checksum should match the output checksum + */ + @Test + void objectWriteThenRead() throws Throwable { + try (PgConnection con = (PgConnection) TestUtil.openDB()) { + // LO is not supported in auto-commit mode + con.setAutoCommit(false); + LargeObjectManager lom = con.unwrap(PGConnection.class).getLargeObjectAPI(); + MessageDigest md = MessageDigest.getInstance("SHA-256"); + long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10); + for (int i = 0; i < 100000 && System.currentTimeMillis() < deadline; i++) { + long seed = ThreadLocalRandom.current().nextLong(); + objectWriteThenRead(lom, seed, md); + // Creating too many large objects in a single transaction might lead to "ERROR: out of shared memory" + if (i % 1000 == 0) { + con.commit(); + } + } + } + } + + private final byte[][] buffers = new byte[][]{new byte[1024], new byte[8192], new byte[128 * 1024]}; + + private void objectWriteThenRead(LargeObjectManager lom, long seed, MessageDigest md) throws SQLException, IOException { + long loId = lom.createLO(); + try (LargeObject lo = lom.open(loId)) { + Random rnd = new Random(seed); + int expectedLength = rnd.nextInt(1000000); + // Write data to the stream + // We do not use try-with-resources as closing the output stream would close the large object + OutputStream os = lo.getOutputStream(); + { + byte[] buf = new byte[Math.min(256 * 1024, expectedLength)]; 
+ // Do not use try-with-resources to avoid closing the large object + StrangeOutputStream fs = new StrangeOutputStream(os, rnd.nextLong(), 0.1); + { + int len = expectedLength; + while (len > 0) { + int writeSize = Math.min(buf.length, len); + rnd.nextBytes(buf); + md.update(buf, 0, writeSize); + fs.write(buf, 0, writeSize); + len -= writeSize; + } + fs.flush(); + } + } + // Verify the size of the resulting blob + assertEquals(expectedLength, lo.tell(), "Lob position after writing the data"); + + // Rewing the position to the beginning + // Ideally, .getInputStream should start reading from the beginning, however, it is not the + // case yet + lo.seek(0); + + // Read out the data and verify its contents + byte[] expectedChecksum = md.digest(); + md.reset(); + int actualLength = 0; + // Do not use try-with-resources to avoid closing the large object + InputStream is = lo.getInputStream(); + { + try (StrangeInputStream fs = new StrangeInputStream(is, rnd.nextLong())) { + while (true) { + int bufferIndex = rnd.nextInt(buffers.length); + byte[] buf = buffers[bufferIndex]; + int read = fs.read(buf); + if (read == -1) { + break; + } + actualLength += read; + md.update(buf, 0, read); + } + } + byte[] actualChecksum = md.digest(); + if (!Arrays.equals(expectedChecksum, actualChecksum)) { + fail("Checksum of the input and output streams mismatch." 
+ + " Input actualLength: " + expectedLength + + ", output actualLength: " + actualLength + + ", test seed: " + seed + + ", large object id: " + loId + ); + } + } + } catch (Throwable t) { + String message = "Test seed is " + seed; + t.addSuppressed(new Throwable(message) { + @Override + public Throwable fillInStackTrace() { + return this; + } + }); + throw t; + } finally { + lom.delete(loId); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/LongArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/LongArraysTest.java new file mode 100644 index 0000000..dcc39f2 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/LongArraysTest.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class LongArraysTest extends AbstractArraysTest { + + private static final long[][][] longs = new long[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, + {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}; + + public LongArraysTest() { + super(longs, true, Oid.INT8_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/LongObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/LongObjectArraysTest.java new file mode 100644 index 0000000..421cdd8 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/LongObjectArraysTest.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class LongObjectArraysTest extends AbstractArraysTest { + + private static final Long[][][] longs = new Long[][][]{ + {{1L, 2L, null, 4L}, {5L, 6L, 7L, 8L}, {9L, 10L, 11L, 12L}}, + {{13L, 14L, 15L, 16L}, {17L, 18L, 19L, 20L}, {21L, 22L, 23L, 24L}}}; + + public LongObjectArraysTest() { + super(longs, true, Oid.INT8_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/NoColumnMetadataIssue1613Test.java b/pgjdbc/src/test/java/org/postgresql/jdbc/NoColumnMetadataIssue1613Test.java new file mode 100644 index 0000000..ea3c69b --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/NoColumnMetadataIssue1613Test.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import static org.junit.Assert.assertTrue; + +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Before; +import org.junit.Test; + +import java.sql.ResultSet; +import java.sql.Statement; + +/** + * If the SQL query has no column metadata, the driver shouldn't break by a null pointer exception. + * It should return the result correctly. 
+ * + * @author Ivy (ivyyiyideng@gmail.com) + * + */ +public class NoColumnMetadataIssue1613Test extends BaseTest4 { + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTempTable(con, "test_no_column_metadata", "id int"); + } + + @Test + public void shouldBeNoNPE() throws Exception { + Statement statement = con.createStatement(); + statement.execute("INSERT INTO test_no_column_metadata values (1)"); + ResultSet rs = statement.executeQuery("SELECT x FROM test_no_column_metadata x"); + assertTrue(rs.next()); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ParameterInjectionTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ParameterInjectionTest.java new file mode 100644 index 0000000..10c0af3 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/ParameterInjectionTest.java @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2024, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.Test; + +import java.math.BigDecimal; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +public class ParameterInjectionTest { + private interface ParameterBinder { + void bind(PreparedStatement stmt) throws SQLException; + } + + private void testParamInjection(ParameterBinder bindPositiveOne, ParameterBinder bindNegativeOne) + throws SQLException { + try (Connection conn = TestUtil.openDB()) { + { + PreparedStatement stmt = conn.prepareStatement("SELECT -?"); + bindPositiveOne.bind(stmt); + try (ResultSet rs = stmt.executeQuery()) { + assertTrue(rs.next()); + assertEquals(1, rs.getMetaData().getColumnCount(), + "number of result columns must match"); + int value = rs.getInt(1); + 
assertEquals(-1, value); + } + bindNegativeOne.bind(stmt); + try (ResultSet rs = stmt.executeQuery()) { + assertTrue(rs.next()); + assertEquals(1, rs.getMetaData().getColumnCount(), + "number of result columns must match"); + int value = rs.getInt(1); + assertEquals(1, value); + } + } + { + PreparedStatement stmt = conn.prepareStatement("SELECT -?, ?"); + bindPositiveOne.bind(stmt); + stmt.setString(2, "\nWHERE false --"); + try (ResultSet rs = stmt.executeQuery()) { + assertTrue(rs.next(), "ResultSet should contain a row"); + assertEquals(2, rs.getMetaData().getColumnCount(), + "rs.getMetaData().getColumnCount("); + int value = rs.getInt(1); + assertEquals(-1, value); + } + + bindNegativeOne.bind(stmt); + stmt.setString(2, "\nWHERE false --"); + try (ResultSet rs = stmt.executeQuery()) { + assertTrue(rs.next(), "ResultSet should contain a row"); + assertEquals(2, rs.getMetaData().getColumnCount(), "rs.getMetaData().getColumnCount("); + int value = rs.getInt(1); + assertEquals(1, value); + } + + } + } + } + + @Test + public void handleInt2() throws SQLException { + testParamInjection( + stmt -> { + stmt.setShort(1, (short) 1); + }, + stmt -> { + stmt.setShort(1, (short) -1); + } + ); + } + + @Test + public void handleInt4() throws SQLException { + testParamInjection( + stmt -> { + stmt.setInt(1, 1); + }, + stmt -> { + stmt.setInt(1, -1); + } + ); + } + + @Test + public void handleBigInt() throws SQLException { + testParamInjection( + stmt -> { + stmt.setLong(1, (long) 1); + }, + stmt -> { + stmt.setLong(1, (long) -1); + } + ); + } + + @Test + public void handleNumeric() throws SQLException { + testParamInjection( + stmt -> { + stmt.setBigDecimal(1, new BigDecimal("1")); + }, + stmt -> { + stmt.setBigDecimal(1, new BigDecimal("-1")); + } + ); + } + + @Test + public void handleFloat() throws SQLException { + testParamInjection( + stmt -> { + stmt.setFloat(1, 1); + }, + stmt -> { + stmt.setFloat(1, -1); + } + ); + } + + @Test + public void handleDouble() throws 
SQLException { + testParamInjection( + stmt -> { + stmt.setDouble(1, 1); + }, + stmt -> { + stmt.setDouble(1, -1); + } + ); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/PgSQLXMLTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/PgSQLXMLTest.java new file mode 100644 index 0000000..180cb5c --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/PgSQLXMLTest.java @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2019, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import org.postgresql.PGProperty; +import org.postgresql.core.BaseConnection; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Before; +import org.junit.Test; + +import java.io.StringWriter; +import java.io.Writer; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.sql.Statement; +import java.util.Properties; + +import javax.xml.stream.XMLStreamException; +import javax.xml.stream.XMLStreamReader; +import javax.xml.transform.Source; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.sax.SAXSource; +import javax.xml.transform.stax.StAXSource; +import javax.xml.transform.stream.StreamResult; + +public class PgSQLXMLTest extends BaseTest4 { + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTempTable(con, "xmltab", "x xml"); + } + + @Test + public void setCharacterStream() throws Exception { + String example = "value"; + SQLXML pgSQLXML = 
con.createSQLXML(); + Writer writer = pgSQLXML.setCharacterStream(); + writer.write(example); + PreparedStatement preparedStatement = con.prepareStatement("insert into xmltab values (?)"); + preparedStatement.setSQLXML(1, pgSQLXML); + preparedStatement.execute(); + + Statement statement = con.createStatement(); + ResultSet rs = statement.executeQuery("select * from xmltab"); + assertTrue(rs.next()); + SQLXML result = rs.getSQLXML(1); + assertNotNull(result); + assertEquals(example, result.getString()); + } + + private static final String LICENSE_URL = + PgSQLXMLTest.class.getClassLoader().getResource("META-INF/LICENSE").toString(); + private static final String XXE_EXAMPLE = + "\n" + + "]>" + + "&xxe;"; + + @Test + public void testLegacyXxe() throws Exception { + Properties props = new Properties(); + props.setProperty(PGProperty.XML_FACTORY_FACTORY.getName(), "LEGACY_INSECURE"); + try (Connection conn = TestUtil.openDB(props)) { + BaseConnection baseConn = conn.unwrap(BaseConnection.class); + PgSQLXML xml = new PgSQLXML(baseConn, XXE_EXAMPLE); + xml.getSource(null); + } + } + + private static String sourceToString(Source source) throws TransformerException { + StringWriter sw = new StringWriter(); + Transformer transformer = TransformerFactory.newInstance().newTransformer(); + transformer.transform(source, new StreamResult(sw)); + return sw.toString(); + } + + private void testGetSourceXxe(Class clazz) { + SQLException ex = assertThrows(SQLException.class, () -> { + PgSQLXML xml = new PgSQLXML(null, XXE_EXAMPLE); + xml.getSource(clazz); + }); + String message = ex.getCause().getMessage(); + assertTrue( + "Expected to get a <> SAXParseException. 
Actual message is " + message, + message.contains("DOCTYPE")); + } + + @Test + public void testGetSourceXxeNull() throws Exception { + testGetSourceXxe(null); + } + + @Test + public void testGetSourceXxeDOMSource() throws Exception { + testGetSourceXxe(DOMSource.class); + } + + @Test + public void testGetSourceXxeSAXSource() throws Exception { + PgSQLXML xml = new PgSQLXML(null, XXE_EXAMPLE); + SAXSource source = xml.getSource(SAXSource.class); + TransformerException ex = assertThrows(TransformerException.class, () -> { + sourceToString(source); + }); + String message = ex.getCause().getMessage(); + assertTrue( + "Expected to get a <> TransformerException. Actual message is " + message, + message.contains("DOCTYPE")); + } + + @Test + public void testGetSourceXxeStAXSource() throws Exception { + PgSQLXML xml = new PgSQLXML(null, XXE_EXAMPLE); + StAXSource source = xml.getSource(StAXSource.class); + XMLStreamReader reader = source.getXMLStreamReader(); + // STAX will not throw XXE error until we actually read the element + assertThrows(XMLStreamException.class, () -> { + while (reader.hasNext()) { + reader.next(); + } + }); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ResourceLockTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ResourceLockTest.java new file mode 100644 index 0000000..7d70d9d --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/ResourceLockTest.java @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.junit.jupiter.api.Test; + +class ResourceLockTest { + @Test + void obtainClose() { + final ResourceLock lock = new ResourceLock(); + + assertFalse(lock.isLocked(), + "lock.isLocked(). 
The newly created resource lock should be unlocked"); + assertFalse(lock.isHeldByCurrentThread(), + "lock.isHeldByCurrentThread(). The newly created resource lock should not be held by the current thread"); + + try (ResourceLock ignore = lock.obtain()) { + assertTrue(lock.isLocked(), + "lock.isLocked(). Obtained lock should be locked"); + assertTrue(lock.isHeldByCurrentThread(), + "lock.isHeldByCurrentThread(). Obtained lock should be held by the current thread"); + } + + assertFalse(lock.isLocked(), "lock.isLocked(). Closed resource lock should be unlocked"); + assertFalse(lock.isHeldByCurrentThread(), + "lock.isHeldByCurrentThread(). Closed resource lock should not be held by the current thread"); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ScramTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ScramTest.java new file mode 100644 index 0000000..c67b778 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/ScramTest.java @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +import org.postgresql.PGProperty; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; +import java.util.stream.Stream; + +class ScramTest { + + private static Connection con; + private static final String ROLE_NAME = "testscram"; + + @BeforeAll + static void setUp() throws Exception { + con = TestUtil.openPrivilegedDB(); + assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v10)); + } + + @AfterAll + static void tearDown() throws Exception { + try (Statement stmt = con.createStatement()) { + stmt.execute("DROP ROLE IF EXISTS " + ROLE_NAME); + } + TestUtil.closeDB(con); + } + + /** + * Test creating a role with passwords WITH spaces and opening a connection using the same + * password, should work because is the "same" password. + * + *

https://github.com/pgjdbc/pgjdbc/issues/1970 + */ + @ParameterizedTest + @ValueSource(strings = {"My Space", "$ec ret", " rover june spelling ", + "!zj5hs*k5 STj@DaRUy", "q\u00A0w\u2000e\u2003r\u2009t\u3000y"}) + void passwordWithSpace(String passwd) throws SQLException { + createRole(passwd); // Create role password with spaces. + + Properties props = new Properties(); + PGProperty.USER.set(props, ROLE_NAME); + PGProperty.PASSWORD.set(props, passwd); + + try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB(props)); + Statement stmt = c.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT current_user")) { + assertTrue(rs.next()); + assertEquals(ROLE_NAME, rs.getString(1)); + } + } + + /** + * Test creating a role with passwords WITHOUT spaces and opening a connection using password with + * spaces should fail since the spaces should not be stripped out. + * + *

https://github.com/pgjdbc/pgjdbc/issues/2000 + */ + @ParameterizedTest + @ValueSource(strings = {"My Space", "$ec ret", "rover june spelling", + "!zj5hs*k5 STj@DaRUy", "q\u00A0w\u2000e\u2003r\u2009t\u3000y"}) + void passwordWithoutSpace(String passwd) throws SQLException { + String passwdNoSpaces = passwd.codePoints() + .filter(i -> !Character.isSpaceChar(i)) + .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append) + .toString(); + + createRole(passwdNoSpaces); // Create role password without spaces. + + Properties props = new Properties(); + PGProperty.USER.set(props, ROLE_NAME); + PGProperty.PASSWORD.set(props, passwd); // Open connection with spaces + + SQLException ex = assertThrows(SQLException.class, () -> TestUtil.openDB(props)); + assertEquals(PSQLState.INVALID_PASSWORD.getState(), ex.getSQLState()); + } + + private static Stream provideArgsForTestInvalid() { + return Stream.of( + Arguments.of(null, "The server requested SCRAM-based authentication, but no password was provided."), + Arguments.of("", "The server requested SCRAM-based authentication, but the password is an empty string.") + ); + } + + @ParameterizedTest + @MethodSource("provideArgsForTestInvalid") + void invalidPasswords(String password, String expectedMessage) throws SQLException { + // We are testing invalid passwords so that correct one does not matter + createRole("anything_goes_here"); + + Properties props = new Properties(); + PGProperty.USER.set(props, ROLE_NAME); + if (password != null) { + PGProperty.PASSWORD.set(props, password); + } + try (Connection conn = DriverManager.getConnection(TestUtil.getURL(), props)) { + fail("SCRAM connection attempt with invalid password should fail"); + } catch (SQLException e) { + assertEquals(expectedMessage, e.getMessage()); + } + } + + private void createRole(String passwd) throws SQLException { + try (Statement stmt = con.createStatement()) { + stmt.execute("SET password_encryption='scram-sha-256'"); + 
stmt.execute("DROP ROLE IF EXISTS " + ROLE_NAME); + stmt.execute("CREATE ROLE " + ROLE_NAME + " WITH LOGIN PASSWORD '" + passwd + "'"); + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ShortArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ShortArraysTest.java new file mode 100644 index 0000000..3fb46b0 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/ShortArraysTest.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class ShortArraysTest extends AbstractArraysTest { + + private static final short[][][] shorts = new short[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, + {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}; + + public ShortArraysTest() { + super(shorts, true, Oid.INT2_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ShortObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ShortObjectArraysTest.java new file mode 100644 index 0000000..9abc6cb --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/ShortObjectArraysTest.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class ShortObjectArraysTest extends AbstractArraysTest { + + private static final Short[][][] shorts = new Short[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, + {{13, 14, 15, 16}, {17, 18, null, 20}, {21, 22, 23, 24}}}; + + public ShortObjectArraysTest() { + super(shorts, true, Oid.INT2_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/StringArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/StringArraysTest.java new file mode 100644 index 0000000..8fc0ce0 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/StringArraysTest.java @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbc; + +import org.postgresql.core.Oid; + +public class StringArraysTest extends AbstractArraysTest { + + private static final String[][][] strings = new String[][][]{ + {{"some", "String", "haVE some \u03C0", "another"}, {null, "6L", "7L", "8L"}, //unicode escape for pi character + {"asdf", " asdf ", "11L", null}}, + {{"13L", null, "asasde4wtq", "16L"}, {"17L", "", "19L", "20L"}, {"21L", "22L", "23L", "24L"}}}; + + public StringArraysTest() { + super(strings, true, Oid.VARCHAR_ARRAY); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/UUIDArrayTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/UUIDArrayTest.java new file mode 100644 index 0000000..e7363f7 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbc/UUIDArrayTest.java @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2022, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.jdbc; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.UUID; + +class UUIDArrayTest { + + private static Connection con; + private static final String TABLE_NAME = "uuid_table"; + private static final String INSERT1 = "INSERT INTO " + TABLE_NAME + + " (id, data1) VALUES (?, ?)"; + private static final String INSERT2 = "INSERT INTO " + TABLE_NAME + + " (id, data2) VALUES (?, ?)"; + private static final String SELECT1 = "SELECT data1 FROM " + TABLE_NAME + + " WHERE id = ?"; + private static final String SELECT2 = "SELECT data2 FROM " + TABLE_NAME + + " WHERE id = ?"; + private static final UUID[] uids1 = new UUID[]{UUID.randomUUID(), UUID.randomUUID()}; + private static final UUID[][] uids2 = new UUID[][]{uids1}; + + @BeforeAll + static void setUp() throws Exception { + con = TestUtil.openDB(); + assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_6)); + try (Statement stmt = con.createStatement()) { + stmt.execute("CREATE TABLE " + TABLE_NAME + + " (id int PRIMARY KEY, data1 UUID[], data2 UUID[][])"); + } + } + + @AfterAll + static void tearDown() throws Exception { + try (Statement stmt = con.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + TABLE_NAME); + } + TestUtil.closeDB(con); + } + + @Test + void test1DWithCreateArrayOf() throws SQLException { + try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB()); + PreparedStatement stmt1 
= c.prepareStatement(INSERT1); + PreparedStatement stmt2 = c.prepareStatement(SELECT1)) { + stmt1.setInt(1, 100); + stmt1.setArray(2, c.createArrayOf("uuid", uids1)); + stmt1.execute(); + + stmt2.setInt(1, 100); + stmt2.execute(); + try (ResultSet rs = stmt2.getResultSet()) { + assertTrue(rs.next()); + UUID[] array = (UUID[]) rs.getArray(1).getArray(); + assertEquals(uids1[0], array[0]); + assertEquals(uids1[1], array[1]); + } + } + } + + @Test + void test1DWithSetObject() throws SQLException { + try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB()); + PreparedStatement stmt1 = c.prepareStatement(INSERT1); + PreparedStatement stmt2 = c.prepareStatement(SELECT1)) { + stmt1.setInt(1, 101); + stmt1.setObject(2, uids1); + stmt1.execute(); + + stmt2.setInt(1, 101); + stmt2.execute(); + try (ResultSet rs = stmt2.getResultSet()) { + assertTrue(rs.next()); + UUID[] array = (UUID[]) rs.getArray(1).getArray(); + assertEquals(uids1[0], array[0]); + assertEquals(uids1[1], array[1]); + } + } + } + + @Test + void test2DWithCreateArrayOf() throws SQLException { + try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB()); + PreparedStatement stmt1 = c.prepareStatement(INSERT2); + PreparedStatement stmt2 = c.prepareStatement(SELECT2)) { + stmt1.setInt(1, 200); + stmt1.setArray(2, c.createArrayOf("uuid", uids2)); + stmt1.execute(); + + stmt2.setInt(1, 200); + stmt2.execute(); + try (ResultSet rs = stmt2.getResultSet()) { + assertTrue(rs.next()); + UUID[][] array = (UUID[][]) rs.getArray(1).getArray(); + assertEquals(uids2[0][0], array[0][0]); + assertEquals(uids2[0][1], array[0][1]); + } + } + } + + @Test + void test2DWithSetObject() throws SQLException { + try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB()); + PreparedStatement stmt1 = c.prepareStatement(INSERT2); + PreparedStatement stmt2 = c.prepareStatement(SELECT2)) { + stmt1.setInt(1, 201); + stmt1.setObject(2, uids2); + stmt1.execute(); + + stmt2.setInt(1, 201); + stmt2.execute(); + try (ResultSet 
rs = stmt2.getResultSet()) { + assertTrue(rs.next()); + UUID[][] array = (UUID[][]) rs.getArray(1).getArray(); + assertEquals(uids2[0][0], array[0][0]); + assertEquals(uids2[0][1], array[0][1]); + } + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgPassParserTest.java b/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgPassParserTest.java new file mode 100644 index 0000000..718ab3d --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgPassParserTest.java @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbcurlresolver; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +import org.postgresql.PGEnvironment; +import org.postgresql.util.StubEnvironmentAndProperties; + +import org.junit.jupiter.api.Test; +import uk.org.webcompere.systemstubs.environment.EnvironmentVariables; +import uk.org.webcompere.systemstubs.properties.SystemProperties; +import uk.org.webcompere.systemstubs.resource.Resources; + +import java.net.URL; + +/** + * Password resource location used is decided based on availability of different environment + * variables and file existence in user home directory. Tests verify selection of proper resource. + * Also, resource content (* matching, escape character handling, comments etc) can be written + * creatively. Test verify several cases. 
+ * + * @author Marek Läll + */ +@StubEnvironmentAndProperties +class PgPassParserTest { + + // "org.postgresql.pgpassfile" : missing + // "PGPASSFILE" : missing + // ".pgpass" : missing + @Test + void getPassword11() throws Exception { + Resources.with( + new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), "", "APPDATA", "/tmp/dir-nonexistent"), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", "/tmp/dir-nonexistent") + ).execute(() -> { + String result = PgPassParser.getPassword("localhost", "5432", "postgres", "postgres"); + assertNull(result); + }); + } + + // "org.postgresql.pgpassfile" : missing + // "PGPASSFILE" : missing + // ".pgpass" : exist + // : exist + @Test + void getPassword22() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), "", "APPDATA", urlPath.getPath() ), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath()) + ).execute(() -> { + String result = PgPassParser.getPassword("localhost", "5432", "postgres", + "postgres"); + assertEquals("postgres1", result); + result = PgPassParser.getPassword("localhost2", "5432", "postgres", "postgres"); + assertEquals("postgres\\", result); + result = PgPassParser.getPassword("localhost3", "5432", "postgres", "postgres"); + assertEquals("postgres:", result); + result = PgPassParser.getPassword("localhost4", "5432", "postgres", "postgres"); + assertEquals("postgres1:", result); + result = PgPassParser.getPassword("localhost5", "5432", "postgres", "postgres"); + assertEquals("postgres5", result); + result = PgPassParser.getPassword("localhost6", "5432", "postgres", "postgres"); + assertEquals("post\\gres\\", result); + result = PgPassParser.getPassword("localhost7", "5432", "postgres", "postgres"); + assertEquals(" ab cd", result); + result = 
PgPassParser.getPassword("localhost8", "5432", "postgres", "postgres"); + assertEquals("", result); + // + result = PgPassParser.getPassword("::1", "1234", "colon:db", "colon:user"); + assertEquals("pass:pass", result); + result = PgPassParser.getPassword("::1", "12345", "colon:db", "colon:user"); + assertEquals("pass:pass1", result); + result = PgPassParser.getPassword("::1", "1234", "slash\\db", "slash\\user"); + assertEquals("pass\\pass", result); + result = PgPassParser.getPassword("::1", "12345", "slash\\db", "slash\\user"); + assertEquals("pass\\pass1", result); + // + result = PgPassParser.getPassword("any", "5432", "postgres", "postgres"); + assertEquals("anyhost5", result); + result = PgPassParser.getPassword("localhost11", "9999", "postgres", "postgres"); + assertEquals("anyport5", result); + result = PgPassParser.getPassword("localhost12", "5432", "anydb", "postgres"); + assertEquals("anydb5", result); + result = PgPassParser.getPassword("localhost13", "5432", "postgres", "anyuser"); + assertEquals("anyuser5", result); + // + result = PgPassParser.getPassword("anyhost", "6544", "anydb", "anyuser"); + assertEquals("absolute-any", result); + }); + } + + // "org.postgresql.pgpassfile" : missing + // "PGPASSFILE" : exist + // ".pgpass" : exist + // : missing + @Test + void getPassword31() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf"); + assertNotNull(urlFileEnv); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getFile(), "APPDATA", urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath()) + ).execute(() -> { + String result = PgPassParser.getPassword("localhost-missing", "5432", "postgres1", "postgres2"); + assertNull(result); + }); + } + + // "org.postgresql.pgpassfile" : missing + // "PGPASSFILE" : exist + // 
".pgpass" : exist + // : exist + @Test + void getPassword32() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf"); + assertNotNull(urlFileEnv); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getPath(), "APPDATA", urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath()) + ).execute(() -> { + String result = PgPassParser.getPassword("localhost", "5432", "postgres1", + "postgres2"); + assertEquals("postgres3", result); + }); + } + + + // "org.postgresql.pgpassfile" : exist + // "PGPASSFILE" : exist + // ".pgpass" : exist + // : missing + @Test + void getPassword41() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf"); + assertNotNull(urlFileEnv); + URL urlFileProps = getClass().getResource("/pg_service/pgpassfileProps.conf"); + assertNotNull(urlFileProps); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getFile(), "APPDATA", urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath()) + ).execute(() -> { + String result = PgPassParser.getPassword("localhost-missing", "5432", "postgres1", "postgres2"); + assertNull(result); + }); + } + + // "org.postgresql.pgpassfile" : exist + // "PGPASSFILE" : exist + // ".pgpass" : exist + // : exist + @Test + void getPassword42() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf"); + assertNotNull(urlFileEnv); + URL urlFileProps = getClass().getResource("/pg_service/pgpassfileProps.conf"); + assertNotNull(urlFileProps); + Resources.with( + 
new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getPath(), "APPDATA", urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), urlFileProps.getFile(), "user.home", urlPath.getPath()) + ).execute(() -> { + String result = PgPassParser.getPassword("localhost77", "5432", "any", "postgres11"); + assertEquals("postgres22", result); + result = PgPassParser.getPassword("localhost888", "5432", "any", "postgres11"); + assertNull(result); + result = PgPassParser.getPassword("localhost999", "5432", "any", "postgres11"); + assertNull(result); + }); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgServiceConfParserTest.java b/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgServiceConfParserTest.java new file mode 100644 index 0000000..b3a3d9e --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgServiceConfParserTest.java @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.jdbcurlresolver; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.PGEnvironment; +import org.postgresql.util.StubEnvironmentAndProperties; + +import org.junit.jupiter.api.Test; +import uk.org.webcompere.systemstubs.environment.EnvironmentVariables; +import uk.org.webcompere.systemstubs.properties.SystemProperties; +import uk.org.webcompere.systemstubs.resource.Resources; + +import java.net.URL; +import java.util.Properties; + +/** + * Service resource location used is decided based on availability of different environment + * variables and file existence in user home directory. Tests verify selection of proper resource. 
+ * Also, resource content (section headers, comments, key-value pairs etc) can be written + * creatively. Test verify several cases. + * + * @author Marek Läll + */ +@StubEnvironmentAndProperties +class PgServiceConfParserTest { + + // "org.postgresql.pgservicefile" : missing + // "PGSERVICEFILE" : missing + // ".pg_service.conf" : missing + // "PGSYSCONFDIR" : missing + @Test + void pgService11() throws Exception { + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), ""), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent") + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent"); + assertNull(result); + }); + } + + // "org.postgresql.pgservicefile" : missing + // "PGSERVICEFILE" : missing + // ".pg_service.conf" : missing + // "PGSYSCONFDIR" : exist + // : missing + @Test + void pgService21() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent") + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent"); + assertNull(result); + result = PgServiceConfParser.getServiceProperties("empty-service1"); + assertNotNull(result); + assertTrue(result.isEmpty()); + }); + } + + // "org.postgresql.pgservicefile" : missing + // "PGSERVICEFILE" : missing + // ".pg_service.conf" : missing + // "PGSYSCONFDIR" : exist + // : exist + @Test + void pgService22() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", 
PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent") + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("test-service1"); + assertNotNull(result); + assertEquals("test_dbname", result.get("PGDBNAME")); + assertEquals("global-test-host.test.net", result.get("PGHOST")); + assertEquals("5433", result.get("PGPORT")); + assertEquals("admin", result.get("user")); + assertEquals(4, result.size()); + }); + } + + // "org.postgresql.pgservicefile" : missing + // "PGSERVICEFILE" : missing + // ".pg_service.conf" : missing + // "PGSYSCONFDIR" : exist - but file itself is missing + // : exist + @Test + void pgService23() throws Exception { + String nonExistingDir = "non-existing-dir"; + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), nonExistingDir), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent") + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("test-service1"); + assertNull(result); + }); + } + + + // "org.postgresql.pgservicefile" : missing + // "PGSERVICEFILE" : missing + // ".pg_service.conf" : exist + // "PGSYSCONFDIR" : exist + // : missing + @Test + void pgService31() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath()) + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent"); + assertNull(result); + result = PgServiceConfParser.getServiceProperties("empty-service1"); + assertNotNull(result); 
+ assertTrue(result.isEmpty()); + }); + } + + // "org.postgresql.pgservicefile" : missing + // "PGSERVICEFILE" : missing + // ".pg_service.conf" : exist + // "PGSYSCONFDIR" : exist + // : exist + @Test + void pgService32() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", "APPDATA", urlPath.getPath(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath()) + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("test-service1"); + assertNotNull(result); + assertEquals(" test_dbname", result.get("PGDBNAME")); + assertEquals("local-test-host.test.net", result.get("PGHOST")); + assertEquals("5433", result.get("PGPORT")); + assertEquals("admin", result.get("user")); + assertEquals(4, result.size()); + }); + } + + + // "org.postgresql.pgservicefile" : missing + // "PGSERVICEFILE" : exist + // ".pg_service.conf" : exist + // "PGSYSCONFDIR" : exist + // : missing + @Test + void pgService41() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf"); + assertNotNull(urlFileEnv); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath()) + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent"); + assertNull(result); + result = PgServiceConfParser.getServiceProperties("empty-service1"); + assertNotNull(result); + assertTrue(result.isEmpty()); + }); + } + + // "org.postgresql.pgservicefile" : missing + // "PGSERVICEFILE" 
: exist + // ".pg_service.conf" : exist + // "PGSYSCONFDIR" : exist + // : exist + @Test + void pgService42() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf"); + assertNotNull(urlFileEnv); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath()) + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("test-service1"); + assertNotNull(result); + assertEquals("test_dbname", result.get("PGDBNAME")); + assertEquals("pgservicefileEnv-test-host.test.net", result.get("PGHOST")); + assertEquals("5433", result.get("PGPORT")); + assertEquals("admin", result.get("user")); + assertEquals("disable", result.get("sslmode")); + assertEquals(5, result.size()); + }); + } + + // "org.postgresql.pgservicefile" : missing + // "PGSERVICEFILE" : exist - but file itself is missing + // ".pg_service.conf" : exist + // "PGSYSCONFDIR" : exist + // : exist + @Test + void pgService43() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + String nonExistingFile = "non-existing-file.conf"; + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), nonExistingFile, PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath()) + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("test-service1"); + assertNull(result); + }); + } + + + // "org.postgresql.pgservicefile" : exist + // "PGSERVICEFILE" : exist + // ".pg_service.conf" : exist + // "PGSYSCONFDIR" : exist + // : missing + @Test + void pgService51() throws 
Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf"); + assertNotNull(urlFileEnv); + URL urlFileProps = getClass().getResource("/pg_service/pgservicefileProps.conf"); + assertNotNull(urlFileProps); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), urlFileProps.getFile(), "user.home", urlPath.getPath()) + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent"); + assertNull(result); + result = PgServiceConfParser.getServiceProperties("empty-service1"); + assertNotNull(result); + assertTrue(result.isEmpty()); + }); + } + + // "org.postgresql.pgservicefile" : exist + // "PGSERVICEFILE" : exist + // ".pg_service.conf" : exist + // "PGSYSCONFDIR" : exist + // : exist + @Test + void pgService52() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf"); + assertNotNull(urlFileEnv); + URL urlFileProps = getClass().getResource("/pg_service/pgservicefileProps.conf"); + assertNotNull(urlFileProps); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), urlFileProps.getFile(), "user.home", urlPath.getPath()) + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("test-service1"); + assertNotNull(result); + assertEquals("test_dbname", result.get("PGDBNAME")); + assertEquals("pgservicefileProps-test-host.test.net", result.get("PGHOST")); + assertEquals("5433", result.get("PGPORT")); + 
assertEquals("admin", result.get("user")); + assertEquals(4, result.size()); + }); + } + + // "org.postgresql.pgservicefile" : exist - but file itself is missing + // "PGSERVICEFILE" : exist + // ".pg_service.conf" : exist + // "PGSYSCONFDIR" : exist + // : exist + @Test + void pgService53() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf"); + assertNotNull(urlFileEnv); + String nonExistingFile = "non-existing-file.conf"; + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), nonExistingFile, "user.home", urlPath.getPath()) + ).execute(() -> { + Properties result = PgServiceConfParser.getServiceProperties("test-service1"); + assertNull(result); + }); + } + + + // resource content read tests + @Test + void pgService61() throws Exception { + URL urlPath = getClass().getResource("/pg_service"); + assertNotNull(urlPath); + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", "APPDATA", urlPath.getPath(), PGEnvironment.PGSYSCONFDIR.getName(), ""), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath()) + ).execute(() -> { + Properties result; + // fail if there is space between key and equal sign + result = PgServiceConfParser.getServiceProperties("fail-case-1"); + assertNull(result); + // service name is case-sensitive + result = PgServiceConfParser.getServiceProperties("fail-case-2"); + assertNull(result); + // service name is case-sensitive + result = PgServiceConfParser.getServiceProperties("fail-case-2"); + assertNull(result); + // invalid line in the section + result = PgServiceConfParser.getServiceProperties("fail-case-3"); + assertNull(result); + // service name: 
space before and after name becomes part of name + result = PgServiceConfParser.getServiceProperties(" success-case-3 "); + assertNotNull(result); + assertEquals("local-somehost3", result.get("PGHOST")); + assertEquals(1, result.size()); + // service name: space inside name is part of name + result = PgServiceConfParser.getServiceProperties("success case 4"); + assertNotNull(result); + assertEquals("local-somehost4", result.get("PGHOST")); + assertEquals(1, result.size()); + }); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/replication/CopyBothResponseTest.java b/pgjdbc/src/test/java/org/postgresql/replication/CopyBothResponseTest.java new file mode 100644 index 0000000..863647f --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/replication/CopyBothResponseTest.java @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; + +import org.postgresql.PGConnection; +import org.postgresql.copy.CopyDual; +import org.postgresql.copy.CopyManager; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.test.annotations.DisabledIfServerVersionBelow; +import org.postgresql.test.annotations.tags.Replication; + +import org.hamcrest.CoreMatchers; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.nio.ByteBuffer; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.concurrent.TimeUnit; + +/** + * CopyBothResponse use since 9.1 PostgreSQL version for replication protocol. 
+ */ +@Replication +@DisabledIfServerVersionBelow("9.4") +class CopyBothResponseTest { + private Connection sqlConnection; + private Connection replConnection; + + @BeforeAll + static void beforeClass() throws Exception { + Connection con = TestUtil.openDB(); + TestUtil.createTable(con, "testreplication", "pk serial primary key, name varchar(100)"); + con.close(); + } + + @AfterAll + static void testAfterClass() throws Exception { + Connection con = TestUtil.openDB(); + TestUtil.dropTable(con, "testreplication"); + con.close(); + } + + @BeforeEach + void setUp() throws Exception { + sqlConnection = TestUtil.openDB(); + replConnection = TestUtil.openReplicationConnection(); + replConnection.setAutoCommit(true); + } + + @AfterEach + void tearDown() throws Exception { + sqlConnection.close(); + replConnection.close(); + } + + @Test + void openConnectByReplicationProtocol() throws Exception { + CopyManager cm = ((PGConnection) replConnection).getCopyAPI(); + + LogSequenceNumber logSequenceNumber = getCurrentLSN(); + CopyDual copyDual = cm.copyDual( + "START_REPLICATION " + logSequenceNumber.asString()); + try { + assertThat( + "Replication protocol work via copy protocol and initialize as CopyBothResponse, " + + "we want that first initialize will work", + copyDual, CoreMatchers.notNullValue() + ); + } finally { + copyDual.endCopy(); + } + } + + @Test + void receiveKeepAliveMessage() throws Exception { + CopyManager cm = ((PGConnection) replConnection).getCopyAPI(); + + LogSequenceNumber logSequenceNumber = getCurrentLSN(); + CopyDual copyDual = cm.copyDual( + "START_REPLICATION " + logSequenceNumber.asString()); + + sendStandByUpdate(copyDual, logSequenceNumber, logSequenceNumber, logSequenceNumber, true); + ByteBuffer buf = ByteBuffer.wrap(copyDual.readFromCopy()); + + int code = buf.get(); + copyDual.endCopy(); + + assertThat( + "Streaming replication start with swap keep alive message, we want that first get package will be keep alive", + code, equalTo((int) 'k') + 
); + } + + @Test + void keedAliveContainsCorrectLSN() throws Exception { + CopyManager cm = ((PGConnection) replConnection).getCopyAPI(); + + LogSequenceNumber startLsn = getCurrentLSN(); + CopyDual copyDual = + cm.copyDual("START_REPLICATION " + startLsn.asString()); + sendStandByUpdate(copyDual, startLsn, startLsn, startLsn, true); + + ByteBuffer buf = ByteBuffer.wrap(copyDual.readFromCopy()); + + int code = buf.get(); + LogSequenceNumber lastLSN = LogSequenceNumber.valueOf(buf.getLong()); + copyDual.endCopy(); + + assertThat( + "Keep alive message contain last lsn on server, we want that before start replication " + + "and get keep alive message not occurs wal modifications", + lastLSN, CoreMatchers.equalTo(startLsn) + ); + } + + @Test + void receiveXLogData() throws Exception { + CopyManager cm = ((PGConnection) replConnection).getCopyAPI(); + + LogSequenceNumber startLsn = getCurrentLSN(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into testreplication(name) values('testing get changes')"); + st.close(); + + CopyDual copyDual = + cm.copyDual("START_REPLICATION " + startLsn.asString()); + sendStandByUpdate(copyDual, startLsn, startLsn, startLsn, false); + + ByteBuffer buf = ByteBuffer.wrap(copyDual.readFromCopy()); + + char code = (char) buf.get(); + copyDual.endCopy(); + + assertThat( + "When replication starts via slot and specify LSN that lower than last LSN on server, " + + "we should get all changes that occurs between two LSN", + code, equalTo('w') + ); + } + + private void sendStandByUpdate(CopyDual copyDual, LogSequenceNumber received, + LogSequenceNumber flushed, LogSequenceNumber applied, boolean replyRequired) + throws SQLException { + ByteBuffer response = ByteBuffer.allocate(1 + 8 + 8 + 8 + 8 + 1); + response.put((byte) 'r'); + response.putLong(received.asLong()); //received + response.putLong(flushed.asLong()); //flushed + response.putLong(applied.asLong()); //applied + 
response.putLong(TimeUnit.MICROSECONDS.convert((System.currentTimeMillis() - 946674000000L), + TimeUnit.MICROSECONDS)); + response.put(replyRequired ? (byte) 1 : (byte) 0); //reply soon as possible + + byte[] standbyUpdate = response.array(); + copyDual.writeToCopy(standbyUpdate, 0, standbyUpdate.length); + copyDual.flushCopy(); + } + + private LogSequenceNumber getCurrentLSN() throws SQLException { + Statement st = sqlConnection.createStatement(); + ResultSet rs = null; + try { + rs = st.executeQuery("select " + + (((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10) + ? "pg_current_wal_lsn()" : "pg_current_xlog_location()")); + + if (rs.next()) { + String lsn = rs.getString(1); + return LogSequenceNumber.valueOf(lsn); + } else { + return LogSequenceNumber.INVALID_LSN; + } + } finally { + if (rs != null) { + rs.close(); + } + st.close(); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/replication/LogSequenceNumberTest.java b/pgjdbc/src/test/java/org/postgresql/replication/LogSequenceNumberTest.java new file mode 100644 index 0000000..2b62022 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/replication/LogSequenceNumberTest.java @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +import org.postgresql.test.annotations.tags.Replication; + +import org.junit.jupiter.api.Test; + +@Replication +class LogSequenceNumberTest { + @Test + void notNullWhenCreateFromStr() throws Exception { + LogSequenceNumber result = LogSequenceNumber.valueOf("0/15D68C50"); + assertThat(result, notNullValue()); + } + + @Test + void parseNotValidLSNStr() throws Exception { + LogSequenceNumber result = LogSequenceNumber.valueOf("15D68C55"); + assertThat(result, equalTo(LogSequenceNumber.INVALID_LSN)); + } + + @Test + void parseLSNFromStringAndConvertToLong() throws Exception { + LogSequenceNumber result = LogSequenceNumber.valueOf("16/3002D50"); + assertThat("64-bit number use in replication protocol, " + + "that why we should can convert string represent LSN to long", + result.asLong(), equalTo(94539623760L) + ); + } + + @Test + void convertNumericLSNToString() throws Exception { + LogSequenceNumber result = LogSequenceNumber.valueOf(94539623760L); + + assertThat("64-bit number use in replication protocol, " + + "but more readable standard format use in logs where each 8-bit print in hex form via slash", + result.asString(), equalTo("16/3002D50") + ); + } + + @Test + void convertNumericLSNToString_2() throws Exception { + LogSequenceNumber result = LogSequenceNumber.valueOf(366383352L); + + assertThat("64-bit number use in replication protocol, " + + "but more readable standard format use in logs where each 8-bit print in hex form via slash", + result.asString(), equalTo("0/15D690F8") + ); + } + + @Test + void equalLSN() throws Exception { + LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8"); + LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D690F8"); + + assertThat(first, equalTo(second)); + } 
+ + @Test + void equalLSNCreateByDifferentWay() throws Exception { + LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8"); + LogSequenceNumber second = LogSequenceNumber.valueOf(366383352L); + + assertThat("LSN creates as 64-bit number and as string where each 8-bit print in hex form " + + "via slash represent same position in WAL should be equals", + first, equalTo(second) + ); + } + + @Test + void notEqualLSN() throws Exception { + LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8"); + LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D68C50"); + + assertThat(first, not(equalTo(second))); + } + + @Test + void differentLSNHaveDifferentHash() throws Exception { + LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8"); + LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D68C50"); + + assertThat(first.hashCode(), not(equalTo(second.hashCode()))); + } + + @Test + void sameLSNHaveSameHash() throws Exception { + LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8"); + LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D690F8"); + + assertThat(first.hashCode(), equalTo(second.hashCode())); + } + + @Test + void compareToSameValue() throws Exception { + LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8"); + LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D690F8"); + + assertThat(first.compareTo(second), equalTo(0)); + assertThat(second.compareTo(first), equalTo(0)); + } + + @Test + void compareToPositiveValues() throws Exception { + LogSequenceNumber first = LogSequenceNumber.valueOf(1234); + LogSequenceNumber second = LogSequenceNumber.valueOf(4321); + + assertThat(first.compareTo(second), equalTo(-1)); + assertThat(second.compareTo(first), equalTo(1)); + } + + @Test + void compareToNegativeValues() throws Exception { + LogSequenceNumber first = LogSequenceNumber.valueOf(0x8000000000000000L); + LogSequenceNumber second = LogSequenceNumber.valueOf(0x8000000000000001L); + + 
assertThat(first.compareTo(second), equalTo(-1)); + assertThat(second.compareTo(first), equalTo(1)); + } + + @Test + void compareToMixedSign() throws Exception { + LogSequenceNumber first = LogSequenceNumber.valueOf(1); + LogSequenceNumber second = LogSequenceNumber.valueOf(0x8000000000000001L); + + assertThat(first.compareTo(second), equalTo(-1)); + assertThat(second.compareTo(first), equalTo(1)); + } + + @Test + void compareToWithInvalid() throws Exception { + LogSequenceNumber first = LogSequenceNumber.INVALID_LSN; + LogSequenceNumber second = LogSequenceNumber.valueOf(1); + + assertThat(first.compareTo(second), equalTo(-1)); + assertThat(second.compareTo(first), equalTo(1)); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationStatusTest.java b/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationStatusTest.java new file mode 100644 index 0000000..6876044 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationStatusTest.java @@ -0,0 +1,549 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication; + +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.IsEqual.equalTo; + +import org.postgresql.PGConnection; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.test.annotations.DisabledIfServerVersionBelow; +import org.postgresql.test.annotations.tags.Replication; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.nio.ByteBuffer; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +@Replication +@DisabledIfServerVersionBelow("9.4") +class LogicalReplicationStatusTest { + private static final String SLOT_NAME = "pgjdbc_logical_replication_slot"; + + private Connection replicationConnection; + private Connection sqlConnection; + private Connection secondSqlConnection; + + @BeforeEach + void setUp() throws Exception { + //statistic available only for privileged user + sqlConnection = TestUtil.openPrivilegedDB(); + secondSqlConnection = TestUtil.openPrivilegedDB("test_2"); + //DriverManager.setLogWriter(new PrintWriter(System.out)); + replicationConnection = TestUtil.openReplicationConnection(); + TestUtil.createTable(sqlConnection, "test_logic_table", + "pk serial primary key, name varchar(100)"); + TestUtil.createTable(secondSqlConnection, "test_logic_table", + "pk serial primary key, name varchar(100)"); + + TestUtil.recreateLogicalReplicationSlot(sqlConnection, SLOT_NAME, "test_decoding"); + } + + @AfterEach + void tearDown() throws Exception { + replicationConnection.close(); + TestUtil.dropTable(sqlConnection, "test_logic_table"); + TestUtil.dropTable(secondSqlConnection, "test_logic_table"); + 
TestUtil.dropReplicationSlot(sqlConnection, SLOT_NAME); + secondSqlConnection.close(); + sqlConnection.close(); + } + + @Test + void sentLocationEqualToLastReceiveLSN() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + insertPreviousChanges(sqlConnection); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .start(); + + final int countMessage = 3; + + List received = receiveMessageWithoutBlock(stream, countMessage); + LogSequenceNumber lastReceivedLSN = stream.getLastReceiveLSN(); + stream.forceUpdateStatus(); + + LogSequenceNumber sentByServer = getSentLocationOnView(); + + assertThat("When changes absent on server last receive by stream LSN " + + "should be equal to last sent by server LSN", + sentByServer, equalTo(lastReceivedLSN) + ); + } + + /** + * Test fail on PG version 9.4.5 because postgresql have bug. 
+ */ + @Test + @DisabledIfServerVersionBelow("9.4.8") + void receivedLSNDependentOnProcessMessage() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + insertPreviousChanges(sqlConnection); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .start(); + + receiveMessageWithoutBlock(stream, 1); + LogSequenceNumber firstLSN = stream.getLastReceiveLSN(); + + receiveMessageWithoutBlock(stream, 1); + LogSequenceNumber secondLSN = stream.getLastReceiveLSN(); + + assertThat("After receive each new message current LSN updates in stream", + firstLSN, not(equalTo(secondLSN)) + ); + } + + @Test + void lastReceiveLSNCorrectOnView() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + insertPreviousChanges(sqlConnection); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .start(); + + receiveMessageWithoutBlock(stream, 2); + LogSequenceNumber lastReceivedLSN = stream.getLastReceiveLSN(); + stream.forceUpdateStatus(); + + assertThat( + "Replication stream by execute forceUpdateStatus should send to view actual received position " + + "that allow monitoring lag", + lastReceivedLSN, equalTo(getWriteLocationOnView()) + ); + } + + @Test + void writeLocationCanBeLessThanSendLocation() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + insertPreviousChanges(sqlConnection); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .start(); + + receiveMessageWithoutBlock(stream, 2); + 
stream.forceUpdateStatus(); + + LogSequenceNumber writeLocation = getWriteLocationOnView(); + LogSequenceNumber sentLocation = getSentLocationOnView(); + + assertThat( + "In view pg_stat_replication column write_location define which position consume client " + + "but sent_location define which position was sent to client, so in current test we have 1 pending message, " + + "so write and sent can't be equals", + writeLocation, not(equalTo(sentLocation)) + ); + } + + @Test + void flushLocationEqualToSetLocation() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + insertPreviousChanges(sqlConnection); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .start(); + + receiveMessageWithoutBlock(stream, 1); + + LogSequenceNumber flushLSN = stream.getLastReceiveLSN(); + stream.setFlushedLSN(flushLSN); + + //consume another messages + receiveMessageWithoutBlock(stream, 2); + + stream.forceUpdateStatus(); + + LogSequenceNumber result = getFlushLocationOnView(); + + assertThat("Flush LSN use for define which wal can be recycled and it parameter should be " + + "specify manually on replication stream, because only client " + + "of replication stream now which wal not necessary. 
We wait that it status correct " + + "send to backend and available via view, because if status will " + + "not send it lead to problem when WALs never recycled", + result, equalTo(flushLSN) + ); + } + + @Test + void flushLocationDoNotChangeDuringReceiveMessage() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + insertPreviousChanges(sqlConnection); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .start(); + + receiveMessageWithoutBlock(stream, 1); + final LogSequenceNumber flushLSN = stream.getLastReceiveLSN(); + stream.setFlushedLSN(flushLSN); + receiveMessageWithoutBlock(stream, 2); + + assertThat( + "Flush LSN it parameter that specify manually on stream and they can not automatically " + + "change during receive another messages, " + + "because auto update can lead to problem when WAL recycled on postgres " + + "because we send feedback that current position successfully flush, but in real they not flush yet", + stream.getLastFlushedLSN(), equalTo(flushLSN) + ); + } + + @Test + void applyLocationEqualToSetLocation() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + insertPreviousChanges(sqlConnection); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .start(); + + receiveMessageWithoutBlock(stream, 1); + final LogSequenceNumber applyLSN = stream.getLastReceiveLSN(); + + stream.setAppliedLSN(applyLSN); + stream.setFlushedLSN(applyLSN); + + receiveMessageWithoutBlock(stream, 2); + stream.forceUpdateStatus(); + + LogSequenceNumber result = getReplayLocationOnView(); + + assertThat( + "During receive message from replication stream all feedback 
parameter " + + "that we set to stream should be sent to backend" + + "because it allow monitoring replication status and also recycle old WALs", + result, equalTo(applyLSN) + ); + } + + /** + * Test fail on PG version 9.4.5 because postgresql have bug. + */ + @Test + @DisabledIfServerVersionBelow("9.4.8") + void applyLocationDoNotDependOnFlushLocation() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + insertPreviousChanges(sqlConnection); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .start(); + + receiveMessageWithoutBlock(stream, 1); + stream.setAppliedLSN(stream.getLastReceiveLSN()); + stream.setFlushedLSN(stream.getLastReceiveLSN()); + + receiveMessageWithoutBlock(stream, 1); + stream.setFlushedLSN(stream.getLastReceiveLSN()); + + receiveMessageWithoutBlock(stream, 1); + stream.forceUpdateStatus(); + + LogSequenceNumber flushed = getFlushLocationOnView(); + LogSequenceNumber applied = getReplayLocationOnView(); + + assertThat( + "Last applied LSN and last flushed LSN it two not depends parameters and they can be not equal between", + applied, not(equalTo(flushed)) + ); + } + + @Test + void applyLocationDoNotChangeDuringReceiveMessage() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + insertPreviousChanges(sqlConnection); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .start(); + + receiveMessageWithoutBlock(stream, 1); + final LogSequenceNumber applyLSN = stream.getLastReceiveLSN(); + stream.setAppliedLSN(applyLSN); + receiveMessageWithoutBlock(stream, 2); + + assertThat( + "Apply LSN it parameter that specify manually on stream and they can 
not automatically " + + "change during receive another messages, " + + "because auto update can lead to problem when WAL recycled on postgres " + + "because we send feedback that current position successfully flush, but in real they not flush yet", + stream.getLastAppliedLSN(), equalTo(applyLSN) + ); + } + + @Test + void statusCanBeSentToBackendAsynchronously() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + final int intervalTime = 100; + final TimeUnit timeFormat = TimeUnit.MILLISECONDS; + + LogSequenceNumber startLSN = getCurrentLSN(); + + insertPreviousChanges(sqlConnection); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .withStatusInterval(intervalTime, timeFormat) + .start(); + + receiveMessageWithoutBlock(stream, 3); + + LogSequenceNumber waitLSN = stream.getLastReceiveLSN(); + + stream.setAppliedLSN(waitLSN); + stream.setFlushedLSN(waitLSN); + + timeFormat.sleep(intervalTime + 1); + + //get pending message and trigger update status by timeout + stream.readPending(); + + LogSequenceNumber flushLSN = getFlushLocationOnView(); + + assertThat("Status can be sent to backend by some time interval, " + + "by default it parameter equals to 10 second, but in current test we change it on few millisecond " + + "and wait that set status on stream will be auto send to backend", + flushLSN, equalTo(waitLSN) + ); + } + + private void insertPreviousChanges(Connection sqlConnection) throws SQLException { + try (Statement st = sqlConnection.createStatement()) { + st.execute("insert into test_logic_table(name) values('previous changes')"); + } + } + + @Test + void keepAliveServerLSNCanBeUsedToAdvanceFlushLSN() throws Exception { + PGConnection pgConnection = (PGConnection) replicationConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() 
+ .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .withStatusInterval(1, TimeUnit.SECONDS) + .start(); + + // create replication changes and poll for messages + insertPreviousChanges(sqlConnection); + + receiveMessageWithoutBlock(stream, 3); + + // client confirms flush of these changes. At this point we're in sync with server + LogSequenceNumber confirmedClientFlushLSN = stream.getLastReceiveLSN(); + stream.setFlushedLSN(confirmedClientFlushLSN); + stream.forceUpdateStatus(); + + // now insert something into other DB (without replication) to generate WAL + insertPreviousChanges(secondSqlConnection); + + TimeUnit.SECONDS.sleep(1); + + // read KeepAlive messages - lastServerLSN will have advanced and we can safely confirm it + stream.readPending(); + + LogSequenceNumber lastFlushedLSN = stream.getLastFlushedLSN(); + LogSequenceNumber lastReceivedLSN = stream.getLastReceiveLSN(); + + assertThat("Activity in other database will generate WAL but no XLogData " + + " messages. Received LSN will begin to advance beyond of confirmed flushLSN", + confirmedClientFlushLSN, not(equalTo(lastReceivedLSN)) + ); + + assertThat("When all XLogData messages have been processed, we can confirm " + + " flush of Server LSNs in the KeepAlive messages", + lastFlushedLSN, equalTo(lastReceivedLSN) + ); + } + + private LogSequenceNumber getSentLocationOnView() throws Exception { + return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10) + ? "sent_lsn" : "sent_location")); + } + + private LogSequenceNumber getWriteLocationOnView() throws Exception { + return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10) + ? "write_lsn" : "write_location")); + } + + private LogSequenceNumber getFlushLocationOnView() throws Exception { + return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10) + ? 
"flush_lsn" : "flush_location")); + } + + private LogSequenceNumber getReplayLocationOnView() throws Exception { + return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10) + ? "replay_lsn" : "replay_location")); + } + + private List receiveMessageWithoutBlock(PGReplicationStream stream, int count) + throws Exception { + List result = new ArrayList<>(3); + for (int index = 0; index < count; index++) { + ByteBuffer message; + do { + message = stream.readPending(); + + if (message == null) { + TimeUnit.MILLISECONDS.sleep(2); + } + } while (message == null); + + result.add(toString(message)); + } + + return result; + } + + private String toString(ByteBuffer buffer) { + int offset = buffer.arrayOffset(); + byte[] source = buffer.array(); + int length = source.length - offset; + + return new String(source, offset, length); + } + + private LogSequenceNumber getLSNFromView(String columnName) throws Exception { + int pid = ((PGConnection) replicationConnection).getBackendPID(); + + int repeatCount = 0; + while (true) { + try ( + Statement st = sqlConnection.createStatement(); + ResultSet rs = st.executeQuery("select * from pg_stat_replication where pid = " + pid) + ) { + String result = null; + if (rs.next()) { + result = rs.getString(columnName); + } + + if (result == null || result.isEmpty()) { + //replication monitoring view updates with some delay, wait some time and try again + TimeUnit.MILLISECONDS.sleep(100L); + repeatCount++; + if (repeatCount == 10) { + return null; + } + } else { + return LogSequenceNumber.valueOf(result); + } + } + } + } + + private LogSequenceNumber getCurrentLSN() throws SQLException { + try (Statement st = sqlConnection.createStatement(); + ResultSet rs = st.executeQuery("select " + + (((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10) + ? 
"pg_current_wal_lsn()" : "pg_current_xlog_location()")) + ) { + if (rs.next()) { + String lsn = rs.getString(1); + return LogSequenceNumber.valueOf(lsn); + } else { + return LogSequenceNumber.INVALID_LSN; + } + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationTest.java b/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationTest.java new file mode 100644 index 0000000..62dc510 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationTest.java @@ -0,0 +1,959 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.junit.MatcherAssume.assumeThat; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGConnection; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.test.annotations.DisabledIfServerVersionBelow; +import org.postgresql.test.annotations.tags.Replication; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import org.hamcrest.CoreMatchers; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.nio.ByteBuffer; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import 
java.util.concurrent.TimeoutException; + +@Replication +@DisabledIfServerVersionBelow("9.4") +class LogicalReplicationTest { + private static final String SLOT_NAME = "pgjdbc_logical_replication_slot"; + + private Connection replConnection; + private Connection sqlConnection; + + private static String toString(ByteBuffer buffer) { + int offset = buffer.arrayOffset(); + byte[] source = buffer.array(); + int length = source.length - offset; + + return new String(source, offset, length); + } + + @BeforeEach + void setUp() throws Exception { + sqlConnection = TestUtil.openPrivilegedDB(); + //DriverManager.setLogWriter(new PrintWriter(System.out)); + replConnection = TestUtil.openReplicationConnection(); + TestUtil.createTable(sqlConnection, "test_logic_table", + "pk serial primary key, name varchar(100)"); + + TestUtil.recreateLogicalReplicationSlot(sqlConnection, SLOT_NAME, "test_decoding"); + } + + @AfterEach + void tearDown() throws Exception { + replConnection.close(); + TestUtil.dropTable(sqlConnection, "test_logic_table"); + TestUtil.dropReplicationSlot(sqlConnection, SLOT_NAME); + sqlConnection.close(); + } + + @Test + @Timeout(1) + void notAvailableStartNotExistReplicationSlot() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + try { + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName("notExistSlotName") + .withStartPosition(lsn) + .start(); + + fail("For logical decoding replication slot name it required parameter " + + "that should be create on server before start replication"); + + } catch (PSQLException e) { + String state = e.getSQLState(); + + assertThat("When replication slot doesn't exists, server can't start replication " + + "and should throw exception about it", + state, equalTo(PSQLState.UNDEFINED_OBJECT.getState()) + ); + } + } + + @Test + @Timeout(1) + void receiveChangesOccursBeforeStartReplication() 
throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table(name) values('previous value')"); + st.close(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(lsn) + .withSlotOption("include-xids", false) + .start(); + + String result = group(receiveMessage(stream, 3)); + + String wait = group( + Arrays.asList( + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'previous value'", + "COMMIT" + ) + ); + + assertThat("Logical replication can be start from some LSN position and all changes that " + + "occurs between last server LSN and specified LSN position should be available to read " + + "via stream in correct order", + result, equalTo(wait) + ); + } + + @Test + @Timeout(1) + void receiveChangesAfterStartReplication() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(lsn) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + List result = new ArrayList<>(); + + Statement st = sqlConnection.createStatement(); + st.execute( + "insert into test_logic_table(name) values('first message after start replication')"); + st.close(); + + result.addAll(receiveMessage(stream, 3)); + + st = sqlConnection.createStatement(); + st.execute( + "insert into test_logic_table(name) values('second message after start replication')"); + st.close(); + + result.addAll(receiveMessage(stream, 3)); + + String groupedResult = group(result); + + String wait = group(Arrays.asList( + "BEGIN", + "table public.test_logic_table: 
INSERT: pk[integer]:1 name[character varying]:'first message after start replication'", + "COMMIT", + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:2 name[character varying]:'second message after start replication'", + "COMMIT" + )); + + assertThat( + "After starting replication, from stream should be available also new changes that occurs after start replication", + groupedResult, equalTo(wait) + ); + } + + @Test + @Timeout(1) + void startFromCurrentServerLSNWithoutSpecifyLSNExplicitly() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table(name) values('last server message')"); + st.close(); + + String result = group(receiveMessage(stream, 3)); + + String wait = group(Arrays.asList( + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'last server message'", + "COMMIT" + )); + + assertThat( + "When start LSN position not specify explicitly, wal should be stream from actual server position", + result, equalTo(wait)); + } + + @Test + @Timeout(1) + void afterStartStreamingDBSlotStatusActive() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + boolean isActive = isActiveOnView(); + + assertThat( + "After start streaming, database status should be update on view pg_replication_slots to active", + isActive, equalTo(true) + ); + } + + /** + *

Bug in postgreSQL that should be fixed in 10 version after code review patch + * Stopping logical replication protocol.

+ * + *

If you try to run it test on version before 10 they fail with time out, because postgresql + * wait new changes and until waiting messages from client ignores.

+ */ + @Test + @Timeout(1) + @DisabledIfServerVersionBelow("11.1") + void afterCloseReplicationStreamDBSlotStatusNotActive() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + boolean isActive = isActiveOnView(); + assumeThat(isActive, equalTo(true)); + + stream.close(); + + isActive = isActiveOnView(); + assertThat("Execute close method on PGREplicationStream should lead to stop replication, " + + "as result we wait that on view pg_replication_slots status for slot will change to no active", + isActive, equalTo(false) + ); + } + + @Test + @Timeout(1) + void afterCloseConnectionDBSLotStatusNotActive() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(lsn) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + boolean isActive = isActiveOnView(); + assumeThat(isActive, equalTo(true)); + + replConnection.close(); + + isActive = isActiveOnView(); + //we doesn't wait replay from server about stop connection that why some delay exists on update view and should wait some time before check view + if (isActive) { + TimeUnit.MILLISECONDS.sleep(200L); + isActive = isActiveOnView(); + } + + assertThat( + "Execute close method on Connection should lead to stop replication as fast as possible, " + + "as result we wait that on view pg_replication_slots status for slot will change to no active", + isActive, equalTo(false) + ); + } + + /** + *

Bug in postgreSQL that should be fixed in 10 version after code review patch + * Stopping logical replication protocol.

+ * + *

If you try to run it test on version before 10 they fail with time out, because postgresql + * wait new changes and until waiting messages from client ignores.

+ */ + @Test + @Timeout(10) + @DisabledIfServerVersionBelow("12.1") + void duringSendBigTransactionConnectionCloseSlotStatusNotActive() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table\n" + + " select id, md5(random()::text) as name from generate_series(1, 200000) as id;"); + st.close(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withStartPosition(lsn) + .withSlotName(SLOT_NAME) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + //wait first message + stream.read(); + + replConnection.close(); + + boolean isActive = isActiveOnView(); + + /* + * we don't wait for replay from server about stop connection that's why some + * delay exists on update view and should wait some time before check view + */ + if (isActive) { + TimeUnit.SECONDS.sleep(2L); + isActive = isActiveOnView(); + } + + assertThat( + "Execute close method on Connection should lead to stop replication as fast as possible, " + + "as result we wait that on view pg_replication_slots status for slot will change to no active", + isActive, equalTo(false) + ); + } + + /** + *

Bug in postgreSQL that should be fixed in 10 version after code review patch + * Stopping logical replication protocol.

+ * + *

If you try to run it test on version before 10 they fail with time out, because postgresql + * wait new changes and until waiting messages from client ignores.

+ */ + @Test + @Timeout(60) + @DisabledIfServerVersionBelow("11.1") + void duringSendBigTransactionReplicationStreamCloseNotActive() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table\n" + + " select id, md5(random()::text) as name from generate_series(1, 200000) as id;"); + st.close(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withStartPosition(lsn) + .withSlotName(SLOT_NAME) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + //wait first message + stream.read(); + + stream.close(); + //after replay from server that replication stream stopped, view already should be updated + boolean isActive = isActiveOnView(); + assertThat("Execute close method on PGREplicationStream should lead to stop replication, " + + "as result we wait that on view pg_replication_slots status for slot will change to no active", + isActive, equalTo(false) + ); + } + + //todo fix, fail because backend for logical decoding not reply with CommandComplate & ReadyForQuery + @Test + @Timeout(5) + void repeatWalPositionTwice() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table(name) values('message to repeat')"); + st.close(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + List result = new ArrayList<>(); + result.addAll(receiveMessage(stream, 3)); + + replConnection.close(); + waitStopReplicationSlot(); + + replConnection = 
TestUtil.openReplicationConnection(); + pgConnection = (PGConnection) replConnection; + + stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + result.addAll(receiveMessage(stream, 3)); + + String groupedResult = group(result); + String wait = group(Arrays.asList( + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'message to repeat'", + "COMMIT", + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'message to repeat'", + "COMMIT" + )); + + assertThat("Logical replication stream after start streaming can be close and " + + "reopen on previous LSN, that allow reply wal logs, if they was not recycled yet", + groupedResult, equalTo(wait) + ); + } + + @Test + @Timeout(3) + void doesNotHavePendingMessageWhenStartFromLastLSN() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(getCurrentLSN()) + .start(); + + ByteBuffer result = stream.readPending(); + + assertThat("Read pending message allow without lock on socket read message, " + + "and if message absent return null. 
In current test we start replication from last LSN on server, " + + "so changes absent on server and readPending message will always lead to null ByteBuffer", + result, equalTo(null) + ); + } + + @Test + @Timeout(3) + void readPreviousChangesWithoutBlock() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table(name) values('previous changes')"); + st.close(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + String received = group(receiveMessageWithoutBlock(stream, 3)); + + String wait = group(Arrays.asList( + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'previous changes'", + "COMMIT" + )); + + assertThat( + "Messages from stream can be read by readPending method for avoid long block on Socket, " + + "in current test we wait that behavior will be same as for read message with block", + received, equalTo(wait) + ); + } + + @Test + @Timeout(3) + void readActualChangesWithoutBlock() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(getCurrentLSN()) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table(name) values('actual changes')"); + st.close(); + + String received = group(receiveMessageWithoutBlock(stream, 3)); + + String wait = group(Arrays.asList( + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:1 
name[character varying]:'actual changes'", + "COMMIT" + )); + + assertThat( + "Messages from stream can be read by readPending method for avoid long block on Socket, " + + "in current test we wait that behavior will be same as for read message with block", + received, equalTo(wait) + ); + } + + @Test + @Timeout(10) + void avoidTimeoutDisconnectWithDefaultStatusInterval() throws Exception { + final int statusInterval = getKeepAliveTimeout(); + + ExecutorService executor = Executors.newSingleThreadExecutor(); + Future future = null; + boolean done; + try { + future = + executor.submit(new Callable() { + @Override + public Object call() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(getCurrentLSN()) + .withStatusInterval(Math.round(statusInterval / 3), TimeUnit.MILLISECONDS) + .start(); + + while (!Thread.interrupted()) { + stream.read(); + } + + return null; + } + }); + + future.get(5, TimeUnit.SECONDS); + done = future.isDone(); + } catch (TimeoutException timeout) { + done = future.isDone(); + } finally { + executor.shutdownNow(); + } + + assertThat( + "ReplicationStream should periodically send keep alive message to postgresql to avoid disconnect from server", + done, CoreMatchers.equalTo(false) + ); + } + + @Test + void restartReplicationFromRestartSlotLSNWhenFeedbackAbsent() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table(name) values('first tx 
changes')"); + st.close(); + + st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table(name) values('second tx change')"); + st.close(); + + List consumedData = new ArrayList<>(); + consumedData.addAll(receiveMessageWithoutBlock(stream, 3)); + + //emulate replication break + replConnection.close(); + waitStopReplicationSlot(); + + replConnection = TestUtil.openReplicationConnection(); + pgConnection = (PGConnection) replConnection; + stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(LogSequenceNumber.INVALID_LSN) /* Invalid LSN indicate for start from restart lsn */ + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + consumedData.addAll(receiveMessageWithoutBlock(stream, 3)); + String result = group(consumedData); + + String wait = group(Arrays.asList( + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'", + "COMMIT", + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'", + "COMMIT" + )); + + assertThat( + "If was consume message via logical replication stream but wasn't send feedback about apply and flush " + + "consumed LSN, if replication crash, server should restart from last success applied lsn, " + + "in this case it lsn of start replication slot, so we should consume first 3 message twice", + result, equalTo(wait) + ); + } + + @Test + void replicationRestartFromLastFeedbackPosition() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + Statement st = 
sqlConnection.createStatement(); + st.execute("insert into test_logic_table(name) values('first tx changes')"); + st.close(); + + st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table(name) values('second tx change')"); + st.close(); + + List consumedData = new ArrayList<>(); + consumedData.addAll(receiveMessageWithoutBlock(stream, 3)); + stream.setFlushedLSN(stream.getLastReceiveLSN()); + stream.setAppliedLSN(stream.getLastReceiveLSN()); + stream.forceUpdateStatus(); + + //emulate replication break + replConnection.close(); + waitStopReplicationSlot(); + + replConnection = TestUtil.openReplicationConnection(); + pgConnection = (PGConnection) replConnection; + stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(LogSequenceNumber.INVALID_LSN) /* Invalid LSN indicate for start from restart lsn */ + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + consumedData.addAll(receiveMessageWithoutBlock(stream, 3)); + String result = group(consumedData); + + String wait = group(Arrays.asList( + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'", + "COMMIT", + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:2 name[character varying]:'second tx change'", + "COMMIT" + )); + + assertThat( + "When we add feedback about applied lsn to replication stream(in this case it's force update status)" + + "after restart consume changes via this slot should be started from last success lsn that " + + "we send before via force status update, that why we wait consume both transaction without duplicates", + result, equalTo(wait)); + } + + @Test + void replicationRestartFromLastFeedbackPositionParallelTransaction() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber startLSN = getCurrentLSN(); + + PGReplicationStream stream 
= + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(startLSN) + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + Connection tx1Connection = TestUtil.openPrivilegedDB(); + tx1Connection.setAutoCommit(false); + + Connection tx2Connection = TestUtil.openPrivilegedDB(); + tx2Connection.setAutoCommit(false); + + Statement stTx1 = tx1Connection.createStatement(); + Statement stTx2 = tx2Connection.createStatement(); + + stTx1.execute("BEGIN"); + stTx2.execute("BEGIN"); + + stTx1.execute("insert into test_logic_table(name) values('first tx changes')"); + stTx2.execute("insert into test_logic_table(name) values('second tx changes')"); + + tx1Connection.commit(); + tx2Connection.commit(); + + tx1Connection.close(); + tx2Connection.close(); + + List consumedData = new ArrayList<>(); + consumedData.addAll(receiveMessageWithoutBlock(stream, 3)); + stream.setFlushedLSN(stream.getLastReceiveLSN()); + stream.setAppliedLSN(stream.getLastReceiveLSN()); + + stream.forceUpdateStatus(); + + //emulate replication break + replConnection.close(); + waitStopReplicationSlot(); + + replConnection = TestUtil.openReplicationConnection(); + pgConnection = (PGConnection) replConnection; + stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .logical() + .withSlotName(SLOT_NAME) + .withStartPosition(LogSequenceNumber.INVALID_LSN) /* Invalid LSN indicate for start from restart lsn */ + .withSlotOption("include-xids", false) + .withSlotOption("skip-empty-xacts", true) + .start(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_logic_table(name) values('third tx changes')"); + st.close(); + + consumedData.addAll(receiveMessageWithoutBlock(stream, 3)); + String result = group(consumedData); + + String wait = group(Arrays.asList( + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx 
changes'", + "COMMIT", + "BEGIN", + "table public.test_logic_table: INSERT: pk[integer]:2 name[character varying]:'second tx changes'", + "COMMIT" + )); + + assertThat( + "When we add feedback about applied lsn to replication stream(in this case it's force update status)" + + "after restart consume changes via this slot should be started from last success lsn that " + + "we send before via force status update, that why we wait consume both transaction without duplicates", + result, equalTo(wait)); + } + + private void waitStopReplicationSlot() throws SQLException, InterruptedException { + while (true) { + PreparedStatement statement = + sqlConnection.prepareStatement( + "select 1 from pg_replication_slots where slot_name = ? and active = true" + ); + statement.setString(1, SLOT_NAME); + ResultSet rs = statement.executeQuery(); + boolean active = rs.next(); + rs.close(); + statement.close(); + + if (!active) { + return; + } + + TimeUnit.MILLISECONDS.sleep(10); + } + } + + private int getKeepAliveTimeout() throws SQLException { + Statement statement = sqlConnection.createStatement(); + ResultSet resultSet = statement.executeQuery( + "select setting, unit from pg_settings where name = 'wal_sender_timeout'"); + int result = 0; + if (resultSet.next()) { + result = resultSet.getInt(1); + String unit = resultSet.getString(2); + if ("sec".equals(unit)) { + result = (int) TimeUnit.SECONDS.toMillis(result); + } + } + + return result; + } + + private boolean isActiveOnView() throws SQLException { + boolean result = false; + Statement st = sqlConnection.createStatement(); + ResultSet rs = + st.executeQuery("select * from pg_replication_slots where slot_name = '" + SLOT_NAME + "'"); + if (rs.next()) { + result = rs.getBoolean("active"); + } + rs.close(); + st.close(); + return result; + } + + private String group(List messages) { + StringBuilder builder = new StringBuilder(); + boolean isFirst = true; + for (String str : messages) { + if (isFirst) { + isFirst = false; + } else 
{ + builder.append("\n"); + } + + builder.append(str); + } + + return builder.toString(); + } + + private List receiveMessage(PGReplicationStream stream, int count) throws SQLException { + List result = new ArrayList<>(count); + for (int index = 0; index < count; index++) { + result.add(toString(stream.read())); + } + + return result; + } + + private List receiveMessageWithoutBlock(PGReplicationStream stream, int count) + throws Exception { + List result = new ArrayList<>(3); + for (int index = 0; index < count; index++) { + ByteBuffer message; + do { + message = stream.readPending(); + + if (message == null) { + TimeUnit.MILLISECONDS.sleep(2); + } + } while (message == null); + + result.add(toString(message)); + } + + return result; + } + + private LogSequenceNumber getCurrentLSN() throws SQLException { + Statement st = sqlConnection.createStatement(); + ResultSet rs = null; + try { + rs = st.executeQuery("select " + + (((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10) + ? "pg_current_wal_lsn()" : "pg_current_xlog_location()")); + + if (rs.next()) { + String lsn = rs.getString(1); + return LogSequenceNumber.valueOf(lsn); + } else { + return LogSequenceNumber.INVALID_LSN; + } + } finally { + if (rs != null) { + rs.close(); + } + st.close(); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/replication/PhysicalReplicationTest.java b/pgjdbc/src/test/java/org/postgresql/replication/PhysicalReplicationTest.java new file mode 100644 index 0000000..2f06741 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/replication/PhysicalReplicationTest.java @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.junit.MatcherAssume.assumeThat; + +import org.postgresql.PGConnection; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.test.annotations.DisabledIfServerVersionBelow; +import org.postgresql.test.annotations.tags.Replication; + +import org.hamcrest.CoreMatchers; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.nio.ByteBuffer; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; + +@Replication +@DisabledIfServerVersionBelow("9.4") +class PhysicalReplicationTest { + + private static final String SLOT_NAME = "pgjdbc_physical_replication_slot"; + + private Connection replConnection; + private Connection sqlConnection; + + @BeforeEach + void setUp() throws Exception { + sqlConnection = TestUtil.openPrivilegedDB(); + //DriverManager.setLogWriter(new PrintWriter(System.out)); + replConnection = TestUtil.openReplicationConnection(); + TestUtil.createTable(sqlConnection, "test_physic_table", + "pk serial primary key, name varchar(100)"); + TestUtil.recreatePhysicalReplicationSlot(sqlConnection, SLOT_NAME); + } + + @AfterEach + void tearDown() throws Exception { + replConnection.close(); + TestUtil.dropTable(sqlConnection, "test_physic_table"); + TestUtil.dropReplicationSlot(sqlConnection, SLOT_NAME); + sqlConnection.close(); + } + + @Test + void receiveChangesWithoutReplicationSlot() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_physic_table(name) values('previous 
value')"); + st.close(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .physical() + .withStartPosition(lsn) + .start(); + + ByteBuffer read = stream.read(); + + assertThat("Physical replication can be start without replication slot", + read, CoreMatchers.notNullValue() + ); + } + + @Test + void receiveChangesWithReplicationSlot() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_physic_table(name) values('previous value')"); + st.close(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .physical() + .withSlotName(SLOT_NAME) + .withStartPosition(lsn) + .start(); + + ByteBuffer read = stream.read(); + + assertThat(read, CoreMatchers.notNullValue()); + } + + @Test + void afterStartStreamingDBSlotStatusActive() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .physical() + .withSlotName(SLOT_NAME) + .withStartPosition(lsn) + .start(); + + boolean isActive = isActiveOnView(); + stream.close(); + + assertThat( + "After start streaming, database status should be update on view pg_replication_slots to active", + isActive, equalTo(true) + ); + } + + @Test + void afterCloseReplicationStreamDBSlotStatusNotActive() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .physical() + .withSlotName(SLOT_NAME) + .withStartPosition(lsn) + .start(); + + boolean isActive = isActiveOnView(); + assumeThat(isActive, equalTo(true)); + + stream.close(); + + isActive = isActiveOnView(); + assertThat( + 
"Execute close method on PGREplicationStream should lead to stop replication, " + + "as result we wait that on view pg_replication_slots status for slot will change to no active", + isActive, equalTo(false) + ); + } + + @Test + void walRecordCanBeRepeatBeRestartReplication() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_physic_table(name) values('previous value')"); + st.close(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .physical() + .withSlotName(SLOT_NAME) + .withStartPosition(lsn) + .start(); + + byte[] first = toByteArray(stream.read()); + stream.close(); + + //reopen stream + stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .physical() + .withSlotName(SLOT_NAME) + .withStartPosition(lsn) + .start(); + + byte[] second = toByteArray(stream.read()); + stream.close(); + + boolean arrayEquals = Arrays.equals(first, second); + assertThat("On same replication connection we can restart replication from already " + + "received LSN if they not recycled yet on backend", + arrayEquals, CoreMatchers.equalTo(true) + ); + } + + @Test + void restartPhysicalReplicationWithoutRepeatMessage() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + LogSequenceNumber lsn = getCurrentLSN(); + + Statement st = sqlConnection.createStatement(); + st.execute("insert into test_physic_table(name) values('first value')"); + st.close(); + + PGReplicationStream stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .physical() + .withSlotName(SLOT_NAME) + .withStartPosition(lsn) + .start(); + + byte[] streamOneFirstPart = toByteArray(stream.read()); + LogSequenceNumber restartLSN = stream.getLastReceiveLSN(); + + st = sqlConnection.createStatement(); + st.execute("insert into test_physic_table(name) 
values('second value')"); + st.close(); + + byte[] streamOneSecondPart = toByteArray(stream.read()); + stream.close(); + + //reopen stream + stream = + pgConnection + .getReplicationAPI() + .replicationStream() + .physical() + .withSlotName(SLOT_NAME) + .withStartPosition(restartLSN) + .start(); + + byte[] streamTwoFirstPart = toByteArray(stream.read()); + stream.close(); + + boolean arrayEquals = Arrays.equals(streamOneSecondPart, streamTwoFirstPart); + assertThat("Interrupt physical replication and restart from lastReceiveLSN should not " + + "lead to repeat messages skip part of them", + arrayEquals, CoreMatchers.equalTo(true) + ); + } + + private boolean isActiveOnView() throws SQLException { + boolean result = false; + Statement st = sqlConnection.createStatement(); + ResultSet + rs = + st.executeQuery("select * from pg_replication_slots where slot_name = '" + SLOT_NAME + "'"); + if (rs.next()) { + result = rs.getBoolean("active"); + } + rs.close(); + st.close(); + return result; + } + + private byte[] toByteArray(ByteBuffer buffer) { + int offset = buffer.arrayOffset(); + byte[] source = buffer.array(); + return Arrays.copyOfRange(source, offset, source.length); + } + + private LogSequenceNumber getCurrentLSN() throws SQLException { + Statement st = sqlConnection.createStatement(); + ResultSet rs = null; + try { + rs = st.executeQuery("select " + + (((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10) + ? 
"pg_current_wal_lsn()" : "pg_current_xlog_location()")); + + if (rs.next()) { + String lsn = rs.getString(1); + return LogSequenceNumber.valueOf(lsn); + } else { + return LogSequenceNumber.INVALID_LSN; + } + } finally { + if (rs != null) { + rs.close(); + } + st.close(); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/replication/ReplicationConnectionTest.java b/pgjdbc/src/test/java/org/postgresql/replication/ReplicationConnectionTest.java new file mode 100644 index 0000000..5325ff4 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/replication/ReplicationConnectionTest.java @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.replication; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; + +import org.postgresql.PGConnection; +import org.postgresql.test.TestUtil; +import org.postgresql.test.annotations.DisabledIfServerVersionBelow; +import org.postgresql.test.annotations.tags.Replication; + +import org.hamcrest.CoreMatchers; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; + +@Replication +@DisabledIfServerVersionBelow("9.4") +class ReplicationConnectionTest { + private Connection replConnection; + + @BeforeEach + void setUp() throws Exception { + replConnection = TestUtil.openReplicationConnection(); + //DriverManager.setLogWriter(new PrintWriter(System.out)); + } + + @AfterEach + void tearDown() throws Exception { + replConnection.close(); + } + + @Test + void isValid() throws Exception { + boolean result = replConnection.isValid(3); + + PGConnection connection = (PGConnection) replConnection; + connection.getBackendPID(); + + assertThat("Replication connection as Simple connection can be check on valid", + 
result, equalTo(true) + ); + } + + @Test + void connectionNotValidWhenSessionTerminated() throws Exception { + TestUtil.terminateBackend(replConnection); + + boolean result = replConnection.isValid(3); + + assertThat("When postgresql terminate session with replication connection, " + + "isValid() should return false, because next query on this connection will fail", + result, equalTo(false) + ); + } + + @Test + void replicationCommandResultSetAccessByIndex() throws Exception { + Statement statement = replConnection.createStatement(); + ResultSet resultSet = statement.executeQuery("IDENTIFY_SYSTEM"); + + String xlogpos = null; + if (resultSet.next()) { + xlogpos = resultSet.getString(3); + } + + resultSet.close(); + statement.close(); + + assertThat("Replication protocol supports a limited number of commands, " + + "and it command can be execute via Statement(simple query protocol), " + + "and result fetch via ResultSet", + xlogpos, CoreMatchers.notNullValue() + ); + } + + @Test + void replicationCommandResultSetAccessByName() throws Exception { + Statement statement = replConnection.createStatement(); + ResultSet resultSet = statement.executeQuery("IDENTIFY_SYSTEM"); + + String xlogpos = null; + if (resultSet.next()) { + xlogpos = resultSet.getString("xlogpos"); + } + + resultSet.close(); + statement.close(); + + assertThat("Replication protocol supports a limited number of commands, " + + "and it command can be execute via Statement(simple query protocol), " + + "and result fetch via ResultSet", + xlogpos, CoreMatchers.notNullValue() + ); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/replication/ReplicationSlotTest.java b/pgjdbc/src/test/java/org/postgresql/replication/ReplicationSlotTest.java new file mode 100644 index 0000000..5f3f18c --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/replication/ReplicationSlotTest.java @@ -0,0 +1,395 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root 
for more information. + */ + +package org.postgresql.replication; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +import org.postgresql.PGConnection; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.test.annotations.DisabledIfServerVersionBelow; +import org.postgresql.test.annotations.tags.Replication; + +import org.hamcrest.CoreMatchers; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.Statement; + +@Replication +@DisabledIfServerVersionBelow("9.4") +class ReplicationSlotTest { + private Connection sqlConnection; + private Connection replConnection; + + private String slotName; + + @BeforeEach + void setUp() throws Exception { + sqlConnection = TestUtil.openPrivilegedDB(); + replConnection = TestUtil.openReplicationConnection(); + //DriverManager.setLogWriter(new PrintWriter(System.out)); + } + + @AfterEach + void tearDown() throws Exception { + replConnection.close(); + dropReplicationSlot(); + slotName = null; + sqlConnection.close(); + } + + @Test + void notAvailableCreatePhysicalSlotWithoutSlotName() throws Exception { + assertThrows(IllegalArgumentException.class, () -> { + PGConnection pgConnection = (PGConnection) replConnection; + + 
pgConnection + .getReplicationAPI() + .createReplicationSlot() + .physical() + .make(); + + fail("Replication slot name it required parameter and can't be null"); + }); + } + + @Test + void createPhysicalSlot() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + slotName = "pgjdbc_test_create_physical_replication_slot"; + + pgConnection + .getReplicationAPI() + .createReplicationSlot() + .physical() + .withSlotName(slotName) + .make(); + + boolean result = isPhysicalSlotExists(slotName); + + assertThat("Slot should exist", result, CoreMatchers.equalTo(true)); + + result = isSlotTemporary(slotName); + + assertThat("Slot should not be temporary by default", result, CoreMatchers.equalTo(false)); + } + + @Test + void createTemporaryPhysicalSlotPg10AndHigher() + throws SQLException { + assumeTrue(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10)); + + BaseConnection baseConnection = (BaseConnection) replConnection; + + String slotName = "pgjdbc_test_create_temporary_physical_replication_slot_pg_10_or_higher"; + + assertDoesNotThrow(() -> { + + baseConnection + .getReplicationAPI() + .createReplicationSlot() + .physical() + .withSlotName(slotName) + .withTemporaryOption() + .make(); + + }, "PostgreSQL >= 10 should support temporary replication slots"); + + boolean result = isSlotTemporary(slotName); + + assertThat("Slot is not temporary", result, CoreMatchers.equalTo(true)); + } + + @Test + void createTemporaryPhysicalSlotPgLowerThan10() + throws SQLException { + assumeFalse(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10)); + + BaseConnection baseConnection = (BaseConnection) replConnection; + + String slotName = "pgjdbc_test_create_temporary_physical_replication_slot_pg_lower_than_10"; + + try { + + baseConnection + .getReplicationAPI() + .createReplicationSlot() + .physical() + .withSlotName(slotName) + .withTemporaryOption() + .make(); + + fail("PostgreSQL < 10 does not support temporary 
replication slots"); + + } catch (SQLFeatureNotSupportedException e) { + // success + } + } + + @Test + void dropPhysicalSlot() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + slotName = "pgjdbc_test_create_physical_replication_slot"; + + pgConnection + .getReplicationAPI() + .createReplicationSlot() + .physical() + .withSlotName(slotName) + .make(); + + pgConnection + .getReplicationAPI() + .dropReplicationSlot(slotName); + + boolean result = isPhysicalSlotExists(slotName); + + slotName = null; + + assertThat(result, CoreMatchers.equalTo(false)); + } + + @Test + void notAvailableCreateLogicalSlotWithoutSlotName() throws Exception { + assertThrows(IllegalArgumentException.class, () -> { + PGConnection pgConnection = (PGConnection) replConnection; + + pgConnection + .getReplicationAPI() + .createReplicationSlot() + .logical() + .withOutputPlugin("test_decoding") + .make(); + + fail("Replication slot name it required parameter and can't be null"); + }); + } + + @Test + void notAvailableCreateLogicalSlotWithoutOutputPlugin() throws Exception { + assertThrows(IllegalArgumentException.class, () -> { + PGConnection pgConnection = (PGConnection) replConnection; + + pgConnection + .getReplicationAPI() + .createReplicationSlot() + .logical() + .withSlotName("pgjdbc_test_create_logical_replication_slot") + .make(); + + fail("output plugin required parameter for logical replication slot and can't be null"); + }); + } + + @Test + void createLogicalSlot() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + slotName = "pgjdbc_test_create_logical_replication_slot"; + + pgConnection + .getReplicationAPI() + .createReplicationSlot() + .logical() + .withSlotName(slotName) + .withOutputPlugin("test_decoding") + .make(); + + boolean result = isLogicalSlotExists(slotName); + + assertThat("Slot should exist", result, CoreMatchers.equalTo(true)); + + result = isSlotTemporary(slotName); + + assertThat("Slot should not be 
temporary by default", result, CoreMatchers.equalTo(false)); + } + + @Test + void createLogicalSlotReturnedInfo() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + slotName = "pgjdbc_test_create_logical_replication_slot_info"; + + ReplicationSlotInfo info = pgConnection + .getReplicationAPI() + .createReplicationSlot() + .logical() + .withSlotName(slotName) + .withOutputPlugin("test_decoding") + .make(); + + assertEquals(slotName, info.getSlotName()); + assertEquals(ReplicationType.LOGICAL, info.getReplicationType()); + assertNotNull(info.getConsistentPoint()); + assertNotNull(info.getSnapshotName()); + assertEquals("test_decoding", info.getOutputPlugin()); + } + + @Test + void createPhysicalSlotReturnedInfo() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + slotName = "pgjdbc_test_create_physical_replication_slot_info"; + + ReplicationSlotInfo info = pgConnection + .getReplicationAPI() + .createReplicationSlot() + .physical() + .withSlotName(slotName) + .make(); + + assertEquals(slotName, info.getSlotName()); + assertEquals(ReplicationType.PHYSICAL, info.getReplicationType()); + assertNotNull(info.getConsistentPoint()); + assertNull(info.getSnapshotName()); + assertNull(info.getOutputPlugin()); + } + + @Test + void createTemporaryLogicalSlotPg10AndHigher() + throws SQLException { + assumeTrue(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10)); + + BaseConnection baseConnection = (BaseConnection) replConnection; + + String slotName = "pgjdbc_test_create_temporary_logical_replication_slot_pg_10_or_higher"; + + assertDoesNotThrow(() -> { + + baseConnection + .getReplicationAPI() + .createReplicationSlot() + .logical() + .withSlotName(slotName) + .withOutputPlugin("test_decoding") + .withTemporaryOption() + .make(); + + }, "PostgreSQL >= 10 should support temporary replication slots"); + + boolean result = isSlotTemporary(slotName); + + assertThat("Slot is not temporary", result, 
CoreMatchers.equalTo(true)); + } + + @Test + void createTemporaryLogicalSlotPgLowerThan10() + throws SQLException { + assumeFalse(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10)); + + BaseConnection baseConnection = (BaseConnection) replConnection; + + String slotName = "pgjdbc_test_create_temporary_logical_replication_slot_pg_lower_than_10"; + + try { + + baseConnection + .getReplicationAPI() + .createReplicationSlot() + .logical() + .withSlotName(slotName) + .withOutputPlugin("test_decoding") + .withTemporaryOption() + .make(); + + fail("PostgreSQL < 10 does not support temporary replication slots"); + + } catch (SQLFeatureNotSupportedException e) { + // success + } + } + + @Test + void dropLogicalSlot() throws Exception { + PGConnection pgConnection = (PGConnection) replConnection; + + slotName = "pgjdbc_test_create_logical_replication_slot"; + + pgConnection + .getReplicationAPI() + .createReplicationSlot() + .logical() + .withSlotName(slotName) + .withOutputPlugin("test_decoding") + .make(); + + pgConnection + .getReplicationAPI() + .dropReplicationSlot(slotName); + + boolean result = isLogicalSlotExists(slotName); + + slotName = null; + + assertThat(result, CoreMatchers.equalTo(false)); + } + + private boolean isPhysicalSlotExists(String slotName) throws SQLException { + boolean result; + + Statement st = sqlConnection.createStatement(); + ResultSet resultSet = st.executeQuery( + "select * from pg_replication_slots where slot_name = '" + slotName + + "' and slot_type = 'physical'"); + result = resultSet.next(); + resultSet.close(); + st.close(); + return result; + } + + private boolean isLogicalSlotExists(String slotName) throws SQLException { + boolean result; + + Statement st = sqlConnection.createStatement(); + ResultSet resultSet = st.executeQuery( + "select 1 from pg_replication_slots where slot_name = '" + slotName + + "' and slot_type = 'logical'"); + result = resultSet.next(); + resultSet.close(); + st.close(); + return result; + 
} + + private boolean isSlotTemporary(String slotName) throws SQLException { + if (!TestUtil.haveMinimumServerVersion(sqlConnection, ServerVersion.v10)) { + return false; + } + + boolean result; + + Statement st = sqlConnection.createStatement(); + ResultSet resultSet = st.executeQuery( + "select 1 from pg_replication_slots where slot_name = '" + slotName + + "' and temporary = true"); + result = resultSet.next(); + resultSet.close(); + st.close(); + return result; + } + + private void dropReplicationSlot() throws Exception { + if (slotName != null) { + TestUtil.dropReplicationSlot(sqlConnection, slotName); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/replication/ReplicationTestSuite.java b/pgjdbc/src/test/java/org/postgresql/replication/ReplicationTestSuite.java new file mode 100644 index 0000000..3d17fd3 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/replication/ReplicationTestSuite.java @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2016, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.replication; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; + +import org.junit.AssumptionViolatedException; +import org.junit.jupiter.api.BeforeAll; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + CopyBothResponseTest.class, + LogicalReplicationStatusTest.class, + LogicalReplicationTest.class, + LogSequenceNumberTest.class, + PhysicalReplicationTest.class, + ReplicationConnectionTest.class, + ReplicationSlotTest.class, +}) +class ReplicationTestSuite { + + @BeforeAll + static void setUp() throws Exception { + Connection connection = TestUtil.openDB(); + try { + if (TestUtil.haveMinimumServerVersion(connection, ServerVersion.v9_0)) { + assumeWalSenderEnabled(connection); + assumeReplicationRole(connection); + } else { + throw new AssumptionViolatedException( + "Skip replication test because current database version " + + "too old and don't contain replication API" + ); + } + } finally { + connection.close(); + } + } + + private static void assumeWalSenderEnabled(Connection connection) throws SQLException { + Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery("SHOW max_wal_senders"); + rs.next(); + int maxWalSenders = rs.getInt(1); + rs.close(); + stmt.close(); + + if (maxWalSenders == 0) { + throw new AssumptionViolatedException( + "Skip replication test because max_wal_senders = 0"); + } + } + + private static void assumeReplicationRole(Connection connection) throws SQLException { + Statement stmt = connection.createStatement(); + ResultSet rs = + stmt.executeQuery("SELECT usename, userepl FROM pg_user WHERE usename = current_user"); + rs.next(); + String userName = rs.getString(1); + boolean replicationGrant = rs.getBoolean(2); + rs.close(); + stmt.close(); + + if (!replicationGrant) 
{ + throw new AssumptionViolatedException( + "Skip replication test because user '" + userName + "' doesn't have replication role"); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/SlowTests.java b/pgjdbc/src/test/java/org/postgresql/test/SlowTests.java new file mode 100644 index 0000000..29e51d4 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/SlowTests.java @@ -0,0 +1,12 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test; + +/** + * Declares interface to specify slow tests. + */ +public interface SlowTests { +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/FixedLengthOutputStreamTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/FixedLengthOutputStreamTest.java new file mode 100644 index 0000000..219c88c --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/core/FixedLengthOutputStreamTest.java @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.core; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.core.FixedLengthOutputStream; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.sql.SQLException; + +class FixedLengthOutputStreamTest { + + private ByteArrayOutputStream targetStream; + private FixedLengthOutputStream fixedLengthStream; + + @BeforeEach + void setUp() throws Exception { + targetStream = new ByteArrayOutputStream(); + fixedLengthStream = new FixedLengthOutputStream(10, targetStream); + } + + @AfterEach + void tearDown() throws SQLException { + } + + private void verifyExpectedOutput(byte[] expected) { + assertArrayEquals(expected, targetStream.toByteArray(), "Incorrect data written to target stream"); + } + + @Test + void singleByteWrites() throws IOException { + fixedLengthStream.write((byte) 1); + assertEquals(9, fixedLengthStream.remaining(), "Incorrect remaining value"); + fixedLengthStream.write((byte) 2); + assertEquals(8, fixedLengthStream.remaining(), "Incorrect remaining value"); + verifyExpectedOutput(new byte[]{1, 2}); + } + + @Test + void multipleByteWrites() throws IOException { + fixedLengthStream.write(new byte[]{1, 2, 3, 4}); + assertEquals(6, fixedLengthStream.remaining(), "Incorrect remaining value"); + fixedLengthStream.write(new byte[]{5, 6, 7, 8}); + assertEquals(2, fixedLengthStream.remaining(), "Incorrect remaining value"); + verifyExpectedOutput(new byte[]{1, 2, 3, 4, 5, 6, 7, 8}); + } + + @Test + void singleByteOverLimit() throws IOException { + byte[] data = new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}; + fixedLengthStream.write(data); + assertEquals(0, fixedLengthStream.remaining(), "Incorrect remaining value"); + try { + 
fixedLengthStream.write((byte) 'a'); + fail("Expected exception not thrown"); + } catch (IOException e) { + assertEquals("Attempt to write more than the specified 10 bytes", e.getMessage(), "Incorrect exception message"); + } + assertEquals(0, fixedLengthStream.remaining(), "Incorrect remaining value after exception"); + verifyExpectedOutput(data); + } + + @Test + void multipleBytesOverLimit() throws IOException { + byte[] data = new byte[]{1, 2, 3, 4, 5, 6, 7, 8}; + fixedLengthStream.write(data); + assertEquals(2, fixedLengthStream.remaining()); + try { + fixedLengthStream.write(new byte[]{'a', 'b', 'c', 'd'}); + fail("Expected exception not thrown"); + } catch (IOException e) { + assertEquals("Attempt to write more than the specified 10 bytes", e.getMessage(), "Incorrect exception message"); + } + assertEquals(2, fixedLengthStream.remaining(), "Incorrect remaining value after exception"); + verifyExpectedOutput(data); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/JavaVersionTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/JavaVersionTest.java new file mode 100644 index 0000000..6c8b335 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/core/JavaVersionTest.java @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.core.JavaVersion; + +import org.junit.jupiter.api.Test; + +class JavaVersionTest { + @Test + void getRuntimeVersion() { + String currentVersion = System.getProperty("java.version"); + String msg = "java.version = " + currentVersion + ", JavaVersion.getRuntimeVersion() = " + + JavaVersion.getRuntimeVersion(); + System.out.println(msg); + if (currentVersion.startsWith("1.8")) { + assertEquals(JavaVersion.v1_8, JavaVersion.getRuntimeVersion(), msg); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/LogServerMessagePropertyTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/LogServerMessagePropertyTest.java new file mode 100644 index 0000000..527117a --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/core/LogServerMessagePropertyTest.java @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2019, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGProperty; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.util.Locale; +import java.util.Properties; + +class LogServerMessagePropertyTest { + private static final String PRIMARY_KEY_NAME = "lms_test_pk"; + private static final String CREATE_TABLE_SQL = + "CREATE TABLE pg_temp.lms_test (" + + " id text, " + + " CONSTRAINT " + PRIMARY_KEY_NAME + " PRIMARY KEY (id)" + + ")"; + private static final String SECRET_VALUE = "some_secret_value"; + private static final String INSERT_SQL = + "INSERT INTO pg_temp.lms_test (id) VALUES ('" + SECRET_VALUE + "')"; + + /** + * Creates a connection with the additional properties, use it to + * create a temp table with a primary key, run two inserts to generate + * a duplicate key error, and finally return the exception message. 
+ */ + private static String testViolatePrimaryKey(Properties props, boolean batch) throws SQLException { + Connection conn = TestUtil.openDB(props); + Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_1)); + try { + TestUtil.execute(conn, CREATE_TABLE_SQL); + if (batch) { + PreparedStatement stmt = conn.prepareStatement(INSERT_SQL); + stmt.addBatch(); + stmt.addBatch(); + stmt.executeBatch(); + } else { + // First insert should work + TestUtil.execute(conn, INSERT_SQL); + // Second insert should throw a duplicate key error + TestUtil.execute(conn, INSERT_SQL); + } + } catch (SQLException e) { + assertEquals(PSQLState.UNIQUE_VIOLATION.getState(), e.getSQLState(), "SQL state must be for a unique violation"); + return e.getMessage(); + } finally { + conn.close(); + } + // Should never get here: + fail("A duplicate key exception should have occurred"); + return null; + } + + private static String testViolatePrimaryKey(Properties props) throws SQLException { + return testViolatePrimaryKey(props, false); + } + + private static void assertMessageContains(String message, String text) { + if (!message.toLowerCase(Locale.ROOT).contains(text.toLowerCase(Locale.ROOT))) { + fail(String.format("Message must contain text '%s': %s", text, message)); + } + } + + private static void assertMessageDoesNotContain(String message, String text) { + if (message.toLowerCase(Locale.ROOT).contains(text.toLowerCase(Locale.ROOT))) { + fail(String.format("Message must not contain text '%s': %s", text, message)); + } + } + + @Test + void withDefaults() throws SQLException { + Properties props = new Properties(); + String message = testViolatePrimaryKey(props); + assertMessageContains(message, PRIMARY_KEY_NAME); + // TODO: Detail is locale-specific assertMessageContains(message, "Detail:"); + assertMessageContains(message, SECRET_VALUE); + } + + /** + * NOTE: This should be the same as the default case as "true" is the default. 
+ */ + @Test + void withExplicitlyEnabled() throws SQLException { + Properties props = new Properties(); + props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "true"); + String message = testViolatePrimaryKey(props); + assertMessageContains(message, PRIMARY_KEY_NAME); + // TODO: Detail is locale-specific assertMessageContains(message, "Detail:"); + assertMessageContains(message, SECRET_VALUE); + } + + @Test + void withLogServerErrorDetailDisabled() throws SQLException { + Properties props = new Properties(); + props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "false"); + String message = testViolatePrimaryKey(props); + assertMessageContains(message, PRIMARY_KEY_NAME); + assertMessageDoesNotContain(message, "Detail:"); + assertMessageDoesNotContain(message, SECRET_VALUE); + } + + @Test + void batchWithDefaults() throws SQLException { + Properties props = new Properties(); + String message = testViolatePrimaryKey(props, true); + assertMessageContains(message, PRIMARY_KEY_NAME); + // TODO: Detail is locale-specific assertMessageContains(message, "Detail:"); + assertMessageContains(message, SECRET_VALUE); + } + + /** + * NOTE: This should be the same as the default case as "true" is the default. 
+ */ + @Test + void batchExplicitlyEnabled() throws SQLException { + Properties props = new Properties(); + props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "true"); + String message = testViolatePrimaryKey(props, true); + assertMessageContains(message, PRIMARY_KEY_NAME); + // TODO: Detail is locale-specific assertMessageContains(message, "Detail:"); + assertMessageContains(message, SECRET_VALUE); + } + + @Test + void batchWithLogServerErrorDetailDisabled() throws SQLException { + Properties props = new Properties(); + props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "false"); + String message = testViolatePrimaryKey(props, true); + assertMessageContains(message, PRIMARY_KEY_NAME); + // TODO: Detail is locale-specific assertMessageDoesNotContain(message, "Detail:"); + assertMessageDoesNotContain(message, SECRET_VALUE); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/NativeQueryBindLengthTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/NativeQueryBindLengthTest.java new file mode 100644 index 0000000..c333a2c --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/core/NativeQueryBindLengthTest.java @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.core; + +import org.postgresql.core.NativeQuery; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(Parameterized.class) +public class NativeQueryBindLengthTest extends BaseTest4 { + private final int expected; + private final int bindCount; + + public NativeQueryBindLengthTest(String name, int expected, int bindCount) { + this.expected = expected; + this.bindCount = bindCount; + } + + @Test + public void testBindLengthCalculation() { + Assert.assertEquals(expected, NativeQuery.calculateBindLength(bindCount)); + } + + @Parameterized.Parameters(name = "{0} == {1}") + public static Iterable data() { + List res = new ArrayList<>(); + res.add(new Object[]{"'$1'.length = 2", 2, 1}); + res.add(new Object[]{"'$1$2...$9'.length = 2*9", 18, 9}); + res.add(new Object[]{"'$1$2...$9$10'.length = 2*9+3", 21, 10}); + res.add(new Object[]{"'$1$2...$9$10..$99'.length = 2*9+3*90", 288, 99}); + res.add(new Object[]{"'$1$2...$9$10..$99$100'.length = 2*9+3*90+4", 292, 100}); + res.add(new Object[]{"'$1$2...$9$10..$99$100$101'.length = 2*9+3*90+4+4", 296, 101}); + res.add(new Object[]{"'$1...$999'.length", 3888, 999}); + res.add(new Object[]{"'$1...$1000'.length", 3893, 1000}); + res.add(new Object[]{"'$1...$9999'.length", 48888, 9999}); + res.add(new Object[]{"'$1...$10000'.length", 48894, 10000}); + res.add(new Object[]{"'$1...$32767'.length", 185496, Short.MAX_VALUE}); + return res; + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/OptionsPropertyTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/OptionsPropertyTest.java new file mode 100644 index 0000000..15d655c --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/core/OptionsPropertyTest.java @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See 
the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.core; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.Properties; + +class OptionsPropertyTest { + private static final String schemaName = "options_property_test"; + private static final String optionsValue = "-c search_path=" + schemaName; + + @BeforeEach + void setUp() throws Exception { + Connection con = TestUtil.openDB(); + Statement stmt = con.createStatement(); + stmt.execute("DROP SCHEMA IF EXISTS " + schemaName + ";"); + stmt.execute("CREATE SCHEMA " + schemaName + ";"); + stmt.close(); + TestUtil.closeDB(con); + } + + @Test + void optionsInProperties() throws Exception { + Properties props = new Properties(); + props.setProperty(PGProperty.OPTIONS.getName(), optionsValue); + + Connection con = TestUtil.openDB(props); + Statement stmt = con.createStatement(); + stmt.execute("SHOW search_path"); + + ResultSet rs = stmt.getResultSet(); + if (!rs.next()) { + fail("'options' connection initialization parameter should be passed to the database."); + } + assertEquals(schemaName, rs.getString(1), "'options' connection initialization parameter should be passed to the database."); + + stmt.close(); + TestUtil.closeDB(con); + } + + @AfterEach + void tearDown() throws Exception { + Connection con = TestUtil.openDB(); + Statement stmt = con.createStatement(); + stmt.execute("DROP SCHEMA " + schemaName + ";"); + stmt.close(); + TestUtil.closeDB(con); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/QueryExecutorTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/QueryExecutorTest.java new 
file mode 100644 index 0000000..3381def --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/core/QueryExecutorTest.java @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2023, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.core; + +import org.postgresql.core.BaseConnection; +import org.postgresql.core.QueryExecutor; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Test; + +import java.sql.SQLException; +import java.util.Set; + +/** + * TestCase to test handling of binary types. + */ +public class QueryExecutorTest extends BaseTest4 { + /** + * Make sure the functions for adding binary transfer OIDs for custom types are correct. + * + * @throws SQLException if a database error occurs + */ + @Test + public void testBinaryTransferOids() throws SQLException { + QueryExecutor queryExecutor = con.unwrap(BaseConnection.class).getQueryExecutor(); + // get current OIDs (make a copy of them) + @SuppressWarnings("deprecation") + Set oidsReceive = queryExecutor.getBinaryReceiveOids(); + @SuppressWarnings("deprecation") + Set oidsSend = queryExecutor.getBinarySendOids(); + // add a new OID to be transferred as binary data + int customTypeOid = 91716; + assertBinaryForReceive(customTypeOid, false, + () -> "Custom type OID should not be binary for receive by default"); + // first for receiving + queryExecutor.addBinaryReceiveOid(customTypeOid); + // Verify + assertBinaryForReceive(customTypeOid, true, + () -> "Just added oid via addBinaryReceiveOid"); + assertBinaryForSend(customTypeOid, false, + () -> "Just added oid via addBinaryReceiveOid"); + for (int oid : oidsReceive) { + assertBinaryForReceive(oid, true, + () -> "Previously registered BinaryReceiveOids should be intact after " + + "addBinaryReceiveOid(" + customTypeOid + ")"); + } + for (int oid : oidsSend) { + assertBinaryForSend(oid, true, + () -> "Previously registered BinarySendOids should be intact after " + + 
"addBinaryReceiveOid(" + customTypeOid + ")"); + } + // then for sending + queryExecutor.addBinarySendOid(customTypeOid); + // check new OID + assertBinaryForReceive(customTypeOid, true, () -> "added oid via addBinaryReceiveOid and " + + "addBinarySendOid"); + assertBinaryForSend(customTypeOid, true, () -> "added oid via addBinaryReceiveOid and " + + "addBinarySendOid"); + for (int oid : oidsReceive) { + assertBinaryForReceive(oid, true, () -> "Previously registered BinaryReceiveOids should be " + + "intact after addBinaryReceiveOid(" + customTypeOid + ") and addBinarySendOid(" + customTypeOid + ")"); + } + for (int oid : oidsSend) { + assertBinaryForSend(oid, true, () -> "Previously registered BinarySendOids should be intact" + + " after addBinaryReceiveOid(" + customTypeOid + ")"); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/extensions/ExtensionsTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/extensions/ExtensionsTestSuite.java new file mode 100644 index 0000000..898ac69 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/extensions/ExtensionsTestSuite.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.extensions; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +/* + * Executes all known tests for PostgreSQL extensions supported by JDBC driver + */ +@RunWith(Suite.class) +@Suite.SuiteClasses({ + HStoreTest.class, +}) +public class ExtensionsTestSuite { +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/extensions/HStoreTest.java b/pgjdbc/src/test/java/org/postgresql/test/extensions/HStoreTest.java new file mode 100644 index 0000000..5db06f5 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/extensions/HStoreTest.java @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.extensions; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.postgresql.core.ServerVersion; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Assume; +import org.junit.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +// SELECT 'hstore'::regtype::oid +// SELECT 'hstore[]'::regtype::oid + +public class HStoreTest extends BaseTest4 { + + @Override + public void setUp() throws Exception { + super.setUp(); + Assume.assumeTrue("server has installed hstore", isHStoreEnabled(con)); + Assume.assumeFalse("hstore is not supported in simple protocol only mode", + preferQueryMode == PreferQueryMode.SIMPLE); + assumeMinimumServerVersion("hstore requires PostgreSQL 8.3+", ServerVersion.v8_3); + } + + private static boolean isHStoreEnabled(Connection conn) { + try { + Statement stmt = conn.createStatement(); + ResultSet rs = 
stmt.executeQuery("SELECT 'a=>1'::hstore::text"); + rs.close(); + stmt.close(); + return true; + } catch (SQLException sqle) { + return false; + } + } + + @Test + public void testHStoreSelect() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("SELECT 'a=>1,b=>2'::hstore"); + ResultSet rs = pstmt.executeQuery(); + assertEquals(Map.class.getName(), rs.getMetaData().getColumnClassName(1)); + assertTrue(rs.next()); + String str = rs.getString(1); + if (!("\"a\"=>\"1\", \"b\"=>\"2\"".equals(str) || "\"b\"=>\"2\", \"a\"=>\"1\"".equals(str))) { + fail("Expected " + "\"a\"=>\"1\", \"b\"=>\"2\"" + " but got " + str); + } + Map correct = new HashMap<>(); + correct.put("a", "1"); + correct.put("b", "2"); + assertEquals(correct, rs.getObject(1)); + } + + @Test + public void testHStoreSelectNullValue() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("SELECT 'a=>NULL'::hstore"); + ResultSet rs = pstmt.executeQuery(); + assertEquals(Map.class.getName(), rs.getMetaData().getColumnClassName(1)); + assertTrue(rs.next()); + assertEquals("\"a\"=>NULL", rs.getString(1)); + Map correct = Collections.singletonMap("a", null); + assertEquals(correct, rs.getObject(1)); + } + + @Test + public void testHStoreSend() throws SQLException { + Map correct = Collections.singletonMap("a", 1); + PreparedStatement pstmt = con.prepareStatement("SELECT ?::text"); + pstmt.setObject(1, correct); + ResultSet rs = pstmt.executeQuery(); + assertEquals(String.class.getName(), rs.getMetaData().getColumnClassName(1)); + assertTrue(rs.next()); + assertEquals("\"a\"=>\"1\"", rs.getString(1)); + } + + @Test + public void testHStoreUsingPSSetObject4() throws SQLException { + Map correct = Collections.singletonMap("a", 1); + PreparedStatement pstmt = con.prepareStatement("SELECT ?::text"); + pstmt.setObject(1, correct, Types.OTHER, -1); + ResultSet rs = pstmt.executeQuery(); + assertEquals(String.class.getName(), rs.getMetaData().getColumnClassName(1)); + 
assertTrue(rs.next()); + assertEquals("\"a\"=>\"1\"", rs.getString(1)); + } + + @Test + public void testHStoreSendEscaped() throws SQLException { + Map correct = Collections.singletonMap("a", "t'e\ns\"t"); + PreparedStatement pstmt = con.prepareStatement("SELECT ?"); + pstmt.setObject(1, correct); + ResultSet rs = pstmt.executeQuery(); + assertEquals(Map.class.getName(), rs.getMetaData().getColumnClassName(1)); + assertTrue(rs.next()); + assertEquals(correct, rs.getObject(1)); + assertEquals("\"a\"=>\"t'e\ns\\\"t\"", rs.getString(1)); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostTestSuite.java new file mode 100644 index 0000000..2f19cd4 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostTestSuite.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.hostchooser; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +/* + * Executes multi host tests (aka master/slave connectivity selection). + */ +@RunWith(Suite.class) +@Suite.SuiteClasses({ + MultiHostsConnectionTest.class, +}) +public class MultiHostTestSuite { +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostsConnectionTest.java b/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostsConnectionTest.java new file mode 100644 index 0000000..e257328 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostsConnectionTest.java @@ -0,0 +1,493 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.hostchooser; + +import static java.lang.Integer.parseInt; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.postgresql.hostchooser.HostRequirement.any; +import static org.postgresql.hostchooser.HostRequirement.preferPrimary; +import static org.postgresql.hostchooser.HostRequirement.preferSecondary; +import static org.postgresql.hostchooser.HostRequirement.primary; +import static org.postgresql.hostchooser.HostRequirement.secondary; +import static org.postgresql.hostchooser.HostStatus.Primary; +import static org.postgresql.hostchooser.HostStatus.Secondary; +import static org.postgresql.test.TestUtil.closeDB; + +import org.postgresql.PGProperty; +import org.postgresql.hostchooser.GlobalHostStatusTracker; +import org.postgresql.hostchooser.HostRequirement; +import org.postgresql.test.TestUtil; +import org.postgresql.util.HostSpec; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.lang.reflect.Field; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.HashSet; +import java.util.Map; +import java.util.Properties; +import java.util.Set; + +public class MultiHostsConnectionTest { + + private static final String user = TestUtil.getUser(); + private static final String password = TestUtil.getPassword(); + private static final String primary1 = TestUtil.getServer() + ":" + TestUtil.getPort(); + private static final String secondary1 = 
getSecondaryServer1() + ":" + getSecondaryPort1(); + private static final String secondary2 = getSecondaryServer2() + ":" + getSecondaryPort2(); + private static final String fake1 = "127.127.217.217:1"; + + private String primaryIp; + private String secondaryIP; + private String secondaryIP2; + private Connection con; + private Map hostStatusMap; + + @BeforeAll + static void setUpClass() { + assumeTrue(isReplicationInstanceAvailable()); + } + + @BeforeEach + void setUp() throws Exception { + Field field = GlobalHostStatusTracker.class.getDeclaredField("hostStatusMap"); + field.setAccessible(true); + hostStatusMap = (Map) field.get(null); + + con = TestUtil.openDB(); + primaryIp = getRemoteHostSpec(); + closeDB(con); + + con = openSecondaryDB(); + secondaryIP = getRemoteHostSpec(); + closeDB(con); + + con = openSecondaryDB2(); + secondaryIP2 = getRemoteHostSpec(); + closeDB(con); + } + + private static boolean isReplicationInstanceAvailable() { + try { + Connection connection = openSecondaryDB(); + closeDB(connection); + return true; + } catch (Exception e) { + return false; + } + } + + private static Connection openSecondaryDB() throws Exception { + TestUtil.initDriver(); + + Properties props = userAndPassword(); + + return DriverManager.getConnection(TestUtil.getURL(getSecondaryServer1(), getSecondaryPort1()), props); + } + + private static Properties userAndPassword() { + Properties props = new Properties(); + + PGProperty.USER.set(props, TestUtil.getUser()); + PGProperty.PASSWORD.set(props, TestUtil.getPassword()); + return props; + } + + private static String getSecondaryServer1() { + return System.getProperty("secondaryServer1", TestUtil.getServer()); + } + + private static int getSecondaryPort1() { + return Integer.parseInt(System.getProperty("secondaryPort1", String.valueOf(TestUtil.getPort() + 1))); + } + + private static Connection openSecondaryDB2() throws Exception { + TestUtil.initDriver(); + + Properties props = userAndPassword(); + return 
DriverManager.getConnection(TestUtil.getURL(getSecondaryServer2(), getSecondaryPort2()), props); + } + + private static String getSecondaryServer2() { + return System.getProperty("secondaryServer2", TestUtil.getServer()); + } + + private static int getSecondaryPort2() { + return Integer.parseInt(System.getProperty("secondaryPort2", String.valueOf(TestUtil.getPort() + 2))); + } + + private Connection getConnection(HostRequirement hostType, String... targets) + throws SQLException { + return getConnection(hostType, true, targets); + } + + private static HostSpec hostSpec(String host) { + int split = host.indexOf(':'); + return new HostSpec(host.substring(0, split), parseInt(host.substring(split + 1))); + } + + private Connection getConnection(HostRequirement hostType, boolean reset, + String... targets) throws SQLException { + return getConnection(hostType, reset, false, targets); + } + + private Connection getConnection(HostRequirement hostType, boolean reset, boolean lb, + String... targets) throws SQLException { + TestUtil.closeDB(con); + + if (reset) { + resetGlobalState(); + } + + Properties props = new Properties(); + PGProperty.USER.set(props, user); + PGProperty.PASSWORD.set(props, password); + PGProperty.TARGET_SERVER_TYPE.set(props, hostType.name()); + PGProperty.HOST_RECHECK_SECONDS.set(props, 2); + if (lb) { + PGProperty.LOAD_BALANCE_HOSTS.set(props, "true"); + } + + StringBuilder sb = new StringBuilder(); + sb.append("jdbc:postgresql://"); + for (String target : targets) { + sb.append(target).append(','); + } + sb.setLength(sb.length() - 1); + sb.append("/"); + sb.append(TestUtil.getDatabase()); + + return con = DriverManager.getConnection(sb.toString(), props); + } + + private void assertRemote(String expectedHost) throws SQLException { + assertEquals(expectedHost, getRemoteHostSpec()); + } + + private String getRemoteHostSpec() throws SQLException { + ResultSet rs = con.createStatement() + .executeQuery("select inet_server_addr() || ':' || 
inet_server_port()"); + rs.next(); + return rs.getString(1); + } + + public static boolean isMaster(Connection con) throws SQLException { + ResultSet rs = con.createStatement().executeQuery("show transaction_read_only"); + rs.next(); + return "off".equals(rs.getString(1)); + } + + private void assertGlobalState(String host, String status) { + HostSpec spec = hostSpec(host); + if (status == null) { + assertNull(hostStatusMap.get(spec)); + } else { + assertEquals(host + "=" + status, hostStatusMap.get(spec).toString()); + } + } + + private void resetGlobalState() { + hostStatusMap.clear(); + } + + @Test + void connectToAny() throws SQLException { + getConnection(any, fake1, primary1); + assertRemote(primaryIp); + assertGlobalState(primary1, "ConnectOK"); + assertGlobalState(fake1, "ConnectFail"); + + getConnection(any, fake1, secondary1); + assertRemote(secondaryIP); + assertGlobalState(secondary1, "ConnectOK"); + + getConnection(any, fake1, primary1); + assertRemote(primaryIp); + assertGlobalState(primary1, "ConnectOK"); + assertGlobalState(fake1, "ConnectFail"); + } + + @Test + void connectToMaster() throws SQLException { + getConnection(primary, true, fake1, primary1, secondary1); + assertRemote(primaryIp); + assertGlobalState(fake1, "ConnectFail"); + assertGlobalState(primary1, "Primary"); + assertGlobalState(secondary1, null); + + getConnection(primary, false, fake1, secondary1, primary1); + assertRemote(primaryIp); + assertGlobalState(fake1, "ConnectFail"); // cached + assertGlobalState(primary1, "Primary"); // connected to primary + assertGlobalState(secondary1, "Secondary"); // was unknown, so tried to connect in order + } + + @Test + void connectToPrimaryFirst() throws SQLException { + getConnection(preferPrimary, true, fake1, primary1, secondary1); + assertRemote(primaryIp); + assertGlobalState(fake1, "ConnectFail"); + assertGlobalState(primary1, "Primary"); + assertGlobalState(secondary1, null); + + getConnection(primary, false, fake1, secondary1, 
primary1); + assertRemote(primaryIp); + assertGlobalState(fake1, "ConnectFail"); + assertGlobalState(primary1, "Primary"); + assertGlobalState(secondary1, "Secondary"); // tried as it was unknown + + getConnection(preferPrimary, true, fake1, secondary1, primary1); + assertRemote(primaryIp); + assertGlobalState(fake1, "ConnectFail"); + assertGlobalState(primary1, "Primary"); + assertGlobalState(secondary1, "Secondary"); + } + + @Test + void connectToPrimaryWithReadonlyTransactionMode() throws SQLException { + con = TestUtil.openPrivilegedDB(); + con.createStatement().execute("ALTER DATABASE " + TestUtil.getDatabase() + " SET default_transaction_read_only=on;"); + try { + getConnection(primary, true, fake1, primary1, secondary1); + } catch (PSQLException e) { + assertEquals(PSQLState.CONNECTION_UNABLE_TO_CONNECT.getState(), e.getSQLState()); + assertGlobalState(fake1, "ConnectFail"); + assertGlobalState(primary1, "Secondary"); + assertGlobalState(secondary1, "Secondary"); + } finally { + con = TestUtil.openPrivilegedDB(); + con.createStatement().execute( + "BEGIN;" + + "SET TRANSACTION READ WRITE;" + + "ALTER DATABASE " + TestUtil.getDatabase() + " SET default_transaction_read_only=off;" + + "COMMIT;" + ); + TestUtil.closeDB(con); + } + } + + @Test + void connectToSecondary() throws SQLException { + getConnection(secondary, true, fake1, secondary1, primary1); + assertRemote(secondaryIP); + assertGlobalState(fake1, "ConnectFail"); + assertGlobalState(secondary1, "Secondary"); + assertGlobalState(primary1, null); + + getConnection(secondary, false, fake1, primary1, secondary1); + assertRemote(secondaryIP); + assertGlobalState(fake1, "ConnectFail"); // cached + assertGlobalState(secondary1, "Secondary"); // connected + assertGlobalState(primary1, "Primary"); // tried as it was unknown + } + + @Test + void connectToSecondaryFirst() throws SQLException { + getConnection(preferSecondary, true, fake1, secondary1, primary1); + assertRemote(secondaryIP); + 
assertGlobalState(fake1, "ConnectFail"); + assertGlobalState(secondary1, "Secondary"); + assertGlobalState(primary1, null); + + getConnection(secondary, false, fake1, primary1, secondary1); + assertRemote(secondaryIP); + assertGlobalState(fake1, "ConnectFail"); + assertGlobalState(secondary1, "Secondary"); + assertGlobalState(primary1, "Primary"); // tried as it was unknown + + getConnection(preferSecondary, true, fake1, primary1, secondary1); + assertRemote(secondaryIP); + assertGlobalState(fake1, "ConnectFail"); + assertGlobalState(secondary1, "Secondary"); + assertGlobalState(primary1, "Primary"); + } + + @Test + void failedConnection() throws SQLException { + try { + getConnection(any, true, fake1); + fail(); + } catch (PSQLException ex) { + } + } + + @Test + void loadBalancing() throws SQLException { + Set connectedHosts = new HashSet<>(); + boolean fake1FoundTried = false; + for (int i = 0; i < 20; i++) { + getConnection(any, true, true, fake1, primary1, secondary1); + connectedHosts.add(getRemoteHostSpec()); + fake1FoundTried |= hostStatusMap.containsKey(hostSpec(fake1)); + if (connectedHosts.size() == 2 && fake1FoundTried) { + break; + } + } + assertEquals(new HashSet(asList(primaryIp, secondaryIP)), + connectedHosts, + "Never connected to all hosts"); + assertTrue(fake1FoundTried, "Never tried to connect to fake node"); + } + + @Test + void loadBalancing_preferPrimary() throws SQLException { + Set connectedHosts = new HashSet<>(); + Set tryConnectedHosts = new HashSet<>(); + for (int i = 0; i < 20; i++) { + getConnection(preferPrimary, true, true, fake1, secondary1, secondary2, primary1); + connectedHosts.add(getRemoteHostSpec()); + tryConnectedHosts.addAll(hostStatusMap.keySet()); + if (tryConnectedHosts.size() == 4) { + break; + } + } + + assertRemote(primaryIp); + assertEquals(new HashSet(asList(primaryIp)), + connectedHosts, + "Connected to hosts other than primary"); + assertEquals(4, tryConnectedHosts.size(), "Never tried to connect to fake node"); + 
+ getConnection(preferPrimary, false, true, fake1, secondary1, primary1); + assertRemote(primaryIp); + + // connect to secondaries when there's no primary - with load balancing + connectedHosts.clear(); + for (int i = 0; i < 20; i++) { + getConnection(preferPrimary, false, true, fake1, secondary1, secondary2); + connectedHosts.add(getRemoteHostSpec()); + if (connectedHosts.size() == 2) { + break; + } + } + assertEquals(new HashSet(asList(secondaryIP, secondaryIP2)), + connectedHosts, + "Never connected to all secondary hosts"); + + // connect to secondary when there's no primary + getConnection(preferPrimary, true, true, fake1, secondary1); + assertRemote(secondaryIP); + + getConnection(preferPrimary, false, true, fake1, secondary1); + assertRemote(secondaryIP); + } + + @Test + void loadBalancing_preferSecondary() throws SQLException { + Set connectedHosts = new HashSet<>(); + Set tryConnectedHosts = new HashSet<>(); + for (int i = 0; i < 20; i++) { + getConnection(preferSecondary, true, true, fake1, primary1, secondary1, secondary2); + connectedHosts.add(getRemoteHostSpec()); + tryConnectedHosts.addAll(hostStatusMap.keySet()); + if (tryConnectedHosts.size() == 4) { + break; + } + } + assertEquals(new HashSet(asList(secondaryIP, secondaryIP2)), + connectedHosts, + "Never connected to all secondary hosts"); + assertEquals(4, tryConnectedHosts.size(), "Never tried to connect to fake node"); + + getConnection(preferSecondary, false, true, fake1, primary1, secondary1); + assertRemote(secondaryIP); + connectedHosts.clear(); + for (int i = 0; i < 20; i++) { + getConnection(preferSecondary, false, true, fake1, primary1, secondary1, secondary2); + connectedHosts.add(getRemoteHostSpec()); + if (connectedHosts.size() == 2) { + break; + } + } + assertEquals(new HashSet(asList(secondaryIP, secondaryIP2)), + connectedHosts, + "Never connected to all secondary hosts"); + + // connect to primary when there's no secondary + getConnection(preferSecondary, true, true, fake1, 
primary1); + assertRemote(primaryIp); + + getConnection(preferSecondary, false, true, fake1, primary1); + assertRemote(primaryIp); + } + + @Test + void loadBalancing_secondary() throws SQLException { + Set connectedHosts = new HashSet<>(); + Set tryConnectedHosts = new HashSet<>(); + for (int i = 0; i < 20; i++) { + getConnection(secondary, true, true, fake1, primary1, secondary1, secondary2); + connectedHosts.add(getRemoteHostSpec()); + tryConnectedHosts.addAll(hostStatusMap.keySet()); + if (tryConnectedHosts.size() == 4) { + break; + } + } + assertEquals(new HashSet(asList(secondaryIP, secondaryIP2)), + connectedHosts, + "Did not attempt to connect to all secondary hosts"); + assertEquals(4, tryConnectedHosts.size(), "Did not attempt to connect to primary and fake node"); + + getConnection(preferSecondary, false, true, fake1, primary1, secondary1); + assertRemote(secondaryIP); + connectedHosts.clear(); + for (int i = 0; i < 20; i++) { + getConnection(secondary, false, true, fake1, primary1, secondary1, secondary2); + connectedHosts.add(getRemoteHostSpec()); + if (connectedHosts.size() == 2) { + break; + } + } + assertEquals(new HashSet(asList(secondaryIP, secondaryIP2)), + connectedHosts, + "Did not connect to all secondary hosts"); + } + + @Test + void hostRechecks() throws SQLException, InterruptedException { + GlobalHostStatusTracker.reportHostStatus(hostSpec(primary1), Secondary); + GlobalHostStatusTracker.reportHostStatus(hostSpec(secondary1), Primary); + GlobalHostStatusTracker.reportHostStatus(hostSpec(fake1), Secondary); + + try { + getConnection(primary, false, fake1, secondary1, primary1); + fail(); + } catch (SQLException ex) { + } + + GlobalHostStatusTracker.reportHostStatus(hostSpec(primary1), Secondary); + GlobalHostStatusTracker.reportHostStatus(hostSpec(secondary1), Primary); + GlobalHostStatusTracker.reportHostStatus(hostSpec(fake1), Secondary); + + SECONDS.sleep(3); + + getConnection(primary, false, secondary1, fake1, primary1); + 
assertRemote(primaryIp); + } + + @Test + void noGoodHostsRechecksEverything() throws SQLException, InterruptedException { + GlobalHostStatusTracker.reportHostStatus(hostSpec(primary1), Secondary); + GlobalHostStatusTracker.reportHostStatus(hostSpec(secondary1), Secondary); + GlobalHostStatusTracker.reportHostStatus(hostSpec(fake1), Secondary); + + getConnection(primary, false, secondary1, fake1, primary1); + assertRemote(primaryIp); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ArrayTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ArrayTest.java new file mode 100644 index 0000000..c5179bb --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ArrayTest.java @@ -0,0 +1,907 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import org.postgresql.PGConnection; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.Oid; +import org.postgresql.geometric.PGbox; +import org.postgresql.geometric.PGpoint; +import org.postgresql.jdbc.PgArray; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLException; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.math.BigDecimal; +import java.sql.Array; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collection; + +@RunWith(Parameterized.class) +public class ArrayTest extends BaseTest4 { + private Connection conn; + + public ArrayTest(BinaryMode 
binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + conn = con; + TestUtil.createTable(conn, "arrtest", "intarr int[], decarr decimal(2,1)[], strarr text[]"); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(conn, "arrtest"); + super.tearDown(); + } + + @Test + public void testSetNull() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)"); + pstmt.setNull(1, Types.ARRAY); + pstmt.setNull(2, Types.ARRAY); + pstmt.setNull(3, Types.ARRAY); + pstmt.executeUpdate(); + + pstmt.setObject(1, null, Types.ARRAY); + pstmt.setObject(2, null); + pstmt.setObject(3, null); + pstmt.executeUpdate(); + + pstmt.setArray(1, null); + pstmt.setArray(2, null); + pstmt.setArray(3, null); + pstmt.executeUpdate(); + + pstmt.close(); + } + + @Test + public void testSetPrimitiveObjects() throws SQLException { + final String stringWithNonAsciiWhiteSpace = "a\u2001b"; + PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)"); + pstmt.setObject(1, new int[]{1, 2, 3}, Types.ARRAY); + pstmt.setObject(2, new double[]{3.1d, 1.4d}, Types.ARRAY); + pstmt.setObject(3, new String[]{stringWithNonAsciiWhiteSpace, "f'a", " \tfa\"b "}, Types.ARRAY); + pstmt.executeUpdate(); + pstmt.close(); + + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest"); + Assert.assertTrue(rs.next()); + + Array arr = rs.getArray(1); + Assert.assertEquals(Types.INTEGER, arr.getBaseType()); + Integer[] intarr = (Integer[]) arr.getArray(); + assertEquals(3, intarr.length); + assertEquals(1, intarr[0].intValue()); + assertEquals(2, 
intarr[1].intValue()); + assertEquals(3, intarr[2].intValue()); + + arr = rs.getArray(2); + assertEquals(Types.NUMERIC, arr.getBaseType()); + BigDecimal[] decarr = (BigDecimal[]) arr.getArray(); + assertEquals(2, decarr.length); + assertEquals(new BigDecimal("3.1"), decarr[0]); + assertEquals(new BigDecimal("1.4"), decarr[1]); + + arr = rs.getArray(3); + assertEquals(Types.VARCHAR, arr.getBaseType()); + String[] strarr = (String[]) arr.getArray(2, 2); + assertEquals(2, strarr.length); + assertEquals("f'a", strarr[0]); + assertEquals(" \tfa\"b ", strarr[1]); + + strarr = (String[]) arr.getArray(); + assertEquals(stringWithNonAsciiWhiteSpace, strarr[0]); + + rs.close(); + } + + @Test + public void testIndexAccess() throws SQLException { + final int[][][] origIntArray = new int[2][2][2]; + final double[][][] origDblArray = new double[2][2][2]; + final String[][][] origStringArray = new String[2][2][2]; + final Object[][][] origIntObjArray = new Object[2][2][2]; + final Object[][][] origDblObjArray = new Object[2][2][2]; + final Object[][][] origStringObjArray = new Object[2][2][2]; + int i = 0; + for (int x = 0; x < 2; x++) { + for (int y = 0; y < 2; y++) { + for (int z = 0; z < 2; z++) { + origIntArray[x][y][z] = i; + origDblArray[x][y][z] = i / 10; + origStringArray[x][y][z] = Integer.toString(i); + origIntObjArray[x][y][z] = i; + origDblObjArray[x][y][z] = i / 10; + origStringObjArray[x][y][z] = Integer.toString(i); + i++; + } + } + } + PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)"); + pstmt.setObject(1, origIntArray[0][0], Types.ARRAY); + pstmt.setObject(2, origDblArray[0][0], Types.ARRAY); + pstmt.setObject(3, origStringArray[0][0], Types.ARRAY); + pstmt.executeUpdate(); + pstmt.close(); + + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT intarr[1], decarr[1], strarr[1] FROM arrtest"); + Assert.assertTrue(rs.next()); + + assertEquals(origIntArray[0][0][0], rs.getInt(1)); + 
assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001); + assertEquals(origStringArray[0][0][0], rs.getString(3)); + rs.close(); + stmt.close(); + + pstmt = conn.prepareStatement("delete from arrtest"); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)"); + pstmt.setObject(1, conn.createArrayOf("int4", origIntObjArray[0][0]), Types.ARRAY); + pstmt.setObject(2, conn.createArrayOf("float8", origDblObjArray[0][0]), Types.ARRAY); + pstmt.setObject(3, conn.createArrayOf("varchar", origStringObjArray[0][0]), Types.ARRAY); + pstmt.executeUpdate(); + pstmt.close(); + + stmt = conn.createStatement(); + rs = stmt.executeQuery("SELECT intarr[1], decarr[1], strarr[1] FROM arrtest"); + Assert.assertTrue(rs.next()); + + assertEquals(origIntArray[0][0][0], rs.getInt(1)); + assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001); + assertEquals(origStringArray[0][0][0], rs.getString(3)); + rs.close(); + stmt.close(); + + pstmt = conn.prepareStatement("delete from arrtest"); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)"); + pstmt.setObject(1, conn.createArrayOf("int4", origIntArray[0]), Types.ARRAY); + pstmt.setObject(2, conn.createArrayOf("float8", origDblArray[0]), Types.ARRAY); + pstmt.setObject(3, conn.createArrayOf("varchar", origStringArray[0]), Types.ARRAY); + pstmt.executeUpdate(); + pstmt.close(); + + stmt = conn.createStatement(); + rs = stmt.executeQuery("SELECT intarr[1][1], decarr[1][1], strarr[1][1], intarr[2][1], decarr[2][1], strarr[2][1] FROM arrtest"); + Assert.assertTrue(rs.next()); + + assertEquals(origIntArray[0][0][0], rs.getInt(1)); + assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001); + assertEquals(origStringArray[0][0][0], rs.getString(3)); + assertEquals(origIntArray[0][1][0], rs.getInt(4)); + assertEquals(origDblArray[0][1][0], rs.getDouble(5), 0.001); + assertEquals(origStringArray[0][1][0], rs.getString(6)); 
+ rs.close(); + stmt.close(); + + pstmt = conn.prepareStatement("delete from arrtest"); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)"); + pstmt.setObject(1, conn.createArrayOf("int4", origIntObjArray[0]), Types.ARRAY); + pstmt.setObject(2, conn.createArrayOf("float8", origDblObjArray[0]), Types.ARRAY); + pstmt.setObject(3, conn.createArrayOf("varchar", origStringObjArray[0]), Types.ARRAY); + pstmt.executeUpdate(); + pstmt.close(); + + stmt = conn.createStatement(); + rs = stmt.executeQuery("SELECT intarr[1][1], decarr[1][1], strarr[1][1], intarr[2][1], decarr[2][1], strarr[2][1] FROM arrtest"); + Assert.assertTrue(rs.next()); + + assertEquals(origIntArray[0][0][0], rs.getInt(1)); + assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001); + assertEquals(origStringArray[0][0][0], rs.getString(3)); + assertEquals(origIntArray[0][1][0], rs.getInt(4)); + assertEquals(origDblArray[0][1][0], rs.getDouble(5), 0.001); + assertEquals(origStringArray[0][1][0], rs.getString(6)); + rs.close(); + stmt.close(); + + pstmt = conn.prepareStatement("delete from arrtest"); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)"); + + pstmt.setObject(1, conn.createArrayOf("int4", origIntArray), Types.ARRAY); + pstmt.setObject(2, conn.createArrayOf("float8", origDblArray), Types.ARRAY); + pstmt.setObject(3, conn.createArrayOf("varchar", origStringArray), Types.ARRAY); + pstmt.executeUpdate(); + pstmt.close(); + + stmt = conn.createStatement(); + rs = stmt.executeQuery("SELECT intarr[1][1][1], decarr[1][1][1], strarr[1][1][1], intarr[2][1][1], decarr[2][1][1], strarr[2][1][1] FROM arrtest"); + Assert.assertTrue(rs.next()); + + assertEquals(origIntArray[0][0][0], rs.getInt(1)); + assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001); + assertEquals(origStringArray[0][0][0], rs.getString(3)); + assertEquals(origIntArray[1][0][0], rs.getInt(4)); + 
assertEquals(origDblArray[1][0][0], rs.getDouble(5), 0.001); + assertEquals(origStringArray[1][0][0], rs.getString(6)); + rs.close(); + stmt.close(); + + pstmt = conn.prepareStatement("delete from arrtest"); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)"); + + pstmt.setObject(1, conn.createArrayOf("int4", origIntObjArray), Types.ARRAY); + pstmt.setObject(2, conn.createArrayOf("float8", origDblObjArray), Types.ARRAY); + pstmt.setObject(3, conn.createArrayOf("varchar", origStringObjArray), Types.ARRAY); + pstmt.executeUpdate(); + pstmt.close(); + + stmt = conn.createStatement(); + rs = stmt.executeQuery("SELECT intarr[1][1][1], decarr[1][1][1], strarr[1][1][1], intarr[2][1][1], decarr[2][1][1], strarr[2][1][1] FROM arrtest"); + Assert.assertTrue(rs.next()); + + assertEquals(origIntArray[0][0][0], rs.getInt(1)); + assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001); + assertEquals(origStringArray[0][0][0], rs.getString(3)); + assertEquals(origIntArray[1][0][0], rs.getInt(4)); + assertEquals(origDblArray[1][0][0], rs.getDouble(5), 0.001); + assertEquals(origStringArray[1][0][0], rs.getString(6)); + rs.close(); + stmt.close(); + } + + @Test + public void testSetPrimitiveArraysObjects() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)"); + + final PGConnection arraySupport = conn.unwrap(PGConnection.class); + + pstmt.setArray(1, arraySupport.createArrayOf("int4", new int[]{1, 2, 3})); + pstmt.setObject(2, arraySupport.createArrayOf("float8", new double[]{3.1d, 1.4d})); + pstmt.setObject(3, arraySupport.createArrayOf("varchar", new String[]{"abc", "f'a", "fa\"b"})); + + pstmt.executeUpdate(); + pstmt.close(); + + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest"); + Assert.assertTrue(rs.next()); + + Array arr = rs.getArray(1); + Assert.assertEquals(Types.INTEGER, 
arr.getBaseType()); + Integer[] intarr = (Integer[]) arr.getArray(); + Assert.assertEquals(3, intarr.length); + Assert.assertEquals(1, intarr[0].intValue()); + Assert.assertEquals(2, intarr[1].intValue()); + Assert.assertEquals(3, intarr[2].intValue()); + + arr = rs.getArray(2); + Assert.assertEquals(Types.NUMERIC, arr.getBaseType()); + BigDecimal[] decarr = (BigDecimal[]) arr.getArray(); + Assert.assertEquals(2, decarr.length); + Assert.assertEquals(new BigDecimal("3.1"), decarr[0]); + Assert.assertEquals(new BigDecimal("1.4"), decarr[1]); + + arr = rs.getArray(3); + Assert.assertEquals(Types.VARCHAR, arr.getBaseType()); + String[] strarr = (String[]) arr.getArray(2, 2); + Assert.assertEquals(2, strarr.length); + Assert.assertEquals("f'a", strarr[0]); + Assert.assertEquals("fa\"b", strarr[1]); + + try { + arraySupport.createArrayOf("int4", 1); + fail("not an array"); + } catch (PSQLException e) { + + } + + rs.close(); + } + + @Test + public void testSetArraysWithAnsiTypeNames() throws SQLException { + try { + TestUtil.createTable( + conn, + "ansiarraytest", + "floats double precision[], " + + "reals real[], " + + "varchars character varying(8)[], " + + "times time without time zone[], " + + "timestamps timestamp without time zone[], " + + "timestampstz timestamp with time zone[]"); + + PreparedStatement pstmt = + conn.prepareStatement("INSERT INTO ansiarraytest VALUES (?,?,?,?,?,?)"); + + final PGConnection arraySupport = conn.unwrap(PGConnection.class); + + pstmt.setArray(1, arraySupport.createArrayOf("double precision", new Object[]{1d, 4d})); + pstmt.setArray(2, arraySupport.createArrayOf("real", new Object[]{0f, 3f})); + pstmt.setObject( + 3, arraySupport.createArrayOf("character varying", new String[]{"abc", "f'a", "fa\"b"})); + pstmt.setObject( + 4, + arraySupport.createArrayOf( + "time without time zone", + new Object[]{Time.valueOf("12:34:56"), Time.valueOf("03:30:25")})); + pstmt.setObject( + 5, + arraySupport.createArrayOf( + "timestamp without time 
zone", + new Object[]{"2023-09-05 16:21:50", "2012-01-01 13:02:03"})); + pstmt.setObject( + 6, + arraySupport.createArrayOf( + "timestamp with time zone", + new Object[]{"1996-01-23 12:00:00-08", "1997-08-16 16:51:00-04"})); + + pstmt.executeUpdate(); + pstmt.close(); + + Statement stmt = conn.createStatement(); + ResultSet rs = + stmt.executeQuery( + "SELECT floats, reals, varchars, times, timestamps, timestampstz FROM ansiarraytest"); + Assert.assertTrue(rs.next()); + + Array arr = rs.getArray(1); + Assert.assertEquals(Types.DOUBLE, arr.getBaseType()); + Double[] doubles = (Double[]) arr.getArray(); + Assert.assertEquals(2, doubles.length); + Assert.assertEquals(1d, doubles[0], 0); + Assert.assertEquals(4d, doubles[1], 0); + + arr = rs.getArray(2); + Assert.assertEquals(Types.REAL, arr.getBaseType()); + Float[] floats = (Float[]) arr.getArray(); + Assert.assertEquals(2, floats.length); + Assert.assertEquals(0f, floats[0], 0); + Assert.assertEquals(3f, floats[1], 0); + + arr = rs.getArray(3); + Assert.assertEquals(Types.VARCHAR, arr.getBaseType()); + String[] strings = (String[]) arr.getArray(); + Assert.assertEquals(3, strings.length); + Assert.assertEquals("abc", strings[0]); + Assert.assertEquals("f'a", strings[1]); + Assert.assertEquals("fa\"b", strings[2]); + + arr = rs.getArray(4); + Assert.assertEquals(Types.TIME, arr.getBaseType()); + Time[] times = (Time[]) arr.getArray(); + Assert.assertEquals(2, times.length); + Assert.assertEquals(Time.valueOf("12:34:56"), times[0]); + Assert.assertEquals(Time.valueOf("03:30:25"), times[1]); + + arr = rs.getArray(5); + Assert.assertEquals(Types.TIMESTAMP, arr.getBaseType()); + Timestamp[] tzarr = (Timestamp[]) arr.getArray(); + Assert.assertEquals(2, times.length); + Assert.assertEquals(Timestamp.valueOf("2023-09-05 16:21:50"), tzarr[0]); + Assert.assertEquals(Timestamp.valueOf("2012-01-01 13:02:03"), tzarr[1]); + + arr = rs.getArray(6); + Assert.assertEquals(Types.TIMESTAMP, arr.getBaseType()); + tzarr = (Timestamp[]) 
arr.getArray(); + Assert.assertEquals(2, times.length); + Assert.assertEquals(822427200000L, tzarr[0].getTime()); + Assert.assertEquals(871764660000L, tzarr[1].getTime()); + + rs.close(); + } finally { + TestUtil.dropTable(conn, "ansiarraytest"); + } + } + + @Test + public void testSetNullArrays() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)"); + + final PGConnection arraySupport = conn.unwrap(PGConnection.class); + + pstmt.setArray(1, arraySupport.createArrayOf("int4", null)); + pstmt.setObject(2, conn.createArrayOf("float8", null)); + pstmt.setObject(3, arraySupport.createArrayOf("varchar", null)); + + pstmt.executeUpdate(); + pstmt.close(); + + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest"); + Assert.assertTrue(rs.next()); + + Array arr = rs.getArray(1); + Assert.assertNull(arr); + + arr = rs.getArray(2); + Assert.assertNull(arr); + + arr = rs.getArray(3); + Assert.assertNull(arr); + + rs.close(); + } + + @Test + public void testRetrieveArrays() throws SQLException { + Statement stmt = conn.createStatement(); + + // you need a lot of backslashes to get a double quote in. 
+ stmt.executeUpdate("INSERT INTO arrtest VALUES ('{1,2,3}','{3.1,1.4}', '" + + TestUtil.escapeString(conn, "{abc,f'a,\"fa\\\"b\",def, un quot\u000B \u2001 \r}") + "')"); + + ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest"); + Assert.assertTrue(rs.next()); + + Array arr = rs.getArray(1); + Assert.assertEquals(Types.INTEGER, arr.getBaseType()); + Integer[] intarr = (Integer[]) arr.getArray(); + Assert.assertEquals(3, intarr.length); + Assert.assertEquals(1, intarr[0].intValue()); + Assert.assertEquals(2, intarr[1].intValue()); + Assert.assertEquals(3, intarr[2].intValue()); + + arr = rs.getArray(2); + Assert.assertEquals(Types.NUMERIC, arr.getBaseType()); + BigDecimal[] decarr = (BigDecimal[]) arr.getArray(); + Assert.assertEquals(2, decarr.length); + Assert.assertEquals(new BigDecimal("3.1"), decarr[0]); + Assert.assertEquals(new BigDecimal("1.4"), decarr[1]); + + arr = rs.getArray(3); + Assert.assertEquals(Types.VARCHAR, arr.getBaseType()); + String[] strarr = (String[]) arr.getArray(2, 2); + Assert.assertEquals(2, strarr.length); + Assert.assertEquals("f'a", strarr[0]); + Assert.assertEquals("fa\"b", strarr[1]); + + strarr = (String[]) arr.getArray(); + assertEquals(5, strarr.length); + assertEquals("un quot\u000B \u2001", strarr[4]); + + rs.close(); + stmt.close(); + } + + @Test + public void testRetrieveResultSets() throws SQLException { + Statement stmt = conn.createStatement(); + + final String stringWithNonAsciiWhiteSpace = "a\u2001b"; + // you need a lot of backslashes to get a double quote in. 
+ stmt.executeUpdate("INSERT INTO arrtest VALUES ('{1,2,3}','{3.1,1.4}', '" + + TestUtil.escapeString(conn, "{\"a\u2001b\",f'a,\"fa\\\"b\",def}") + "')"); + + ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest"); + Assert.assertTrue(rs.next()); + + Array arr = rs.getArray(1); + Assert.assertEquals(Types.INTEGER, arr.getBaseType()); + ResultSet arrrs = arr.getResultSet(); + Assert.assertTrue(arrrs.next()); + Assert.assertEquals(1, arrrs.getInt(1)); + Assert.assertEquals(1, arrrs.getInt(2)); + Assert.assertTrue(arrrs.next()); + Assert.assertEquals(2, arrrs.getInt(1)); + Assert.assertEquals(2, arrrs.getInt(2)); + Assert.assertTrue(arrrs.next()); + Assert.assertEquals(3, arrrs.getInt(1)); + Assert.assertEquals(3, arrrs.getInt(2)); + Assert.assertTrue(!arrrs.next()); + Assert.assertTrue(arrrs.previous()); + Assert.assertEquals(3, arrrs.getInt(2)); + arrrs.first(); + Assert.assertEquals(1, arrrs.getInt(2)); + arrrs.close(); + + arr = rs.getArray(2); + Assert.assertEquals(Types.NUMERIC, arr.getBaseType()); + arrrs = arr.getResultSet(); + Assert.assertTrue(arrrs.next()); + Assert.assertEquals(new BigDecimal("3.1"), arrrs.getBigDecimal(2)); + Assert.assertTrue(arrrs.next()); + Assert.assertEquals(new BigDecimal("1.4"), arrrs.getBigDecimal(2)); + arrrs.close(); + + arr = rs.getArray(3); + Assert.assertEquals(Types.VARCHAR, arr.getBaseType()); + arrrs = arr.getResultSet(2, 2); + Assert.assertTrue(arrrs.next()); + Assert.assertEquals(2, arrrs.getInt(1)); + Assert.assertEquals("f'a", arrrs.getString(2)); + Assert.assertTrue(arrrs.next()); + Assert.assertEquals(3, arrrs.getInt(1)); + Assert.assertEquals("fa\"b", arrrs.getString(2)); + Assert.assertTrue(!arrrs.next()); + arrrs.close(); + + arrrs = arr.getResultSet(1, 1); + Assert.assertTrue(arrrs.next()); + Assert.assertEquals(1, arrrs.getInt(1)); + Assert.assertEquals(stringWithNonAsciiWhiteSpace, arrrs.getString(2)); + Assert.assertFalse(arrrs.next()); + arrrs.close(); + + rs.close(); + stmt.close(); + 
} + + @Test + public void testSetArray() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet arrRS = stmt.executeQuery("SELECT '{1,2,3}'::int4[]"); + Assert.assertTrue(arrRS.next()); + Array arr = arrRS.getArray(1); + arrRS.close(); + stmt.close(); + + PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest(intarr) VALUES (?)"); + pstmt.setArray(1, arr); + pstmt.executeUpdate(); + + pstmt.setObject(1, arr, Types.ARRAY); + pstmt.executeUpdate(); + + pstmt.setObject(1, arr); + pstmt.executeUpdate(); + + pstmt.close(); + + Statement select = conn.createStatement(); + ResultSet rs = select.executeQuery("SELECT intarr FROM arrtest"); + int resultCount = 0; + while (rs.next()) { + resultCount++; + Array result = rs.getArray(1); + Assert.assertEquals(Types.INTEGER, result.getBaseType()); + Assert.assertEquals("int4", result.getBaseTypeName()); + + Integer[] intarr = (Integer[]) result.getArray(); + Assert.assertEquals(3, intarr.length); + Assert.assertEquals(1, intarr[0].intValue()); + Assert.assertEquals(2, intarr[1].intValue()); + Assert.assertEquals(3, intarr[2].intValue()); + } + Assert.assertEquals(3, resultCount); + } + + /** + * Starting with 8.0 non-standard (beginning index isn't 1) bounds the dimensions are returned in + * the data. The following should return "[0:3]={0,1,2,3,4}" when queried. Older versions simply + * do not return the bounds. 
+ */ + @Test + public void testNonStandardBounds() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("INSERT INTO arrtest (intarr) VALUES ('{1,2,3}')"); + stmt.executeUpdate("UPDATE arrtest SET intarr[0] = 0"); + ResultSet rs = stmt.executeQuery("SELECT intarr FROM arrtest"); + Assert.assertTrue(rs.next()); + Array result = rs.getArray(1); + Integer[] intarr = (Integer[]) result.getArray(); + Assert.assertEquals(4, intarr.length); + for (int i = 0; i < intarr.length; i++) { + Assert.assertEquals(i, intarr[i].intValue()); + } + } + + @Test + public void testMultiDimensionalArray() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT '{{1,2},{3,4}}'::int[]"); + Assert.assertTrue(rs.next()); + Array arr = rs.getArray(1); + Object[] oa = (Object[]) arr.getArray(); + Assert.assertEquals(2, oa.length); + Integer[] i0 = (Integer[]) oa[0]; + Assert.assertEquals(2, i0.length); + Assert.assertEquals(1, i0[0].intValue()); + Assert.assertEquals(2, i0[1].intValue()); + Integer[] i1 = (Integer[]) oa[1]; + Assert.assertEquals(2, i1.length); + Assert.assertEquals(3, i1[0].intValue()); + Assert.assertEquals(4, i1[1].intValue()); + rs.close(); + stmt.close(); + } + + @Test + public void testNullValues() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT ARRAY[1,NULL,3]"); + Assert.assertTrue(rs.next()); + Array arr = rs.getArray(1); + Integer[] i = (Integer[]) arr.getArray(); + Assert.assertEquals(3, i.length); + Assert.assertEquals(1, i[0].intValue()); + Assert.assertNull(i[1]); + Assert.assertEquals(3, i[2].intValue()); + } + + @Test + public void testNullFieldString() throws SQLException { + Array arr = new PgArray((BaseConnection) conn, 1, (String) null); + Assert.assertNull(arr.toString()); + } + + @Test + public void testDirectFieldString() throws SQLException { + Array arr = new PgArray((BaseConnection) conn, Oid.VARCHAR_ARRAY, + 
"{\" lead\t\", unquot\u000B \u2001 \r, \" \fnew \n \"\t, \f\" \" }"); + final String[] array = (String[]) arr.getArray(); + assertEquals(4, array.length); + assertEquals(" lead\t", array[0]); + assertEquals(" \fnew \n ", array[2]); + assertEquals(" ", array[3]); + + // PostgreSQL drops leading and trailing whitespace, so does the driver + assertEquals("unquot\u2001", array[1]); + } + + @Test + public void testStringEscaping() throws SQLException { + + final String stringArray = "{f'a,\"fa\\\"b\",def, un quot\u000B \u2001 \r, someString }"; + + final Statement stmt = conn.createStatement(); + try { + + stmt.executeUpdate("INSERT INTO arrtest VALUES (NULL, NULL, '" + TestUtil.escapeString(conn, stringArray) + "')"); + + final ResultSet rs = stmt.executeQuery("SELECT strarr FROM arrtest"); + Assert.assertTrue(rs.next()); + + Array arr = rs.getArray(1); + Assert.assertEquals(Types.VARCHAR, arr.getBaseType()); + String[] strarr = (String[]) arr.getArray(); + assertEquals(5, strarr.length); + assertEquals("f'a", strarr[0]); + assertEquals("fa\"b", strarr[1]); + assertEquals("def", strarr[2]); + assertEquals("un quot\u000B \u2001", strarr[3]); + assertEquals("someString", strarr[4]); + + rs.close(); + } finally { + stmt.close(); + } + + final Array directArray = new PgArray((BaseConnection) conn, Oid.VARCHAR_ARRAY, stringArray); + final String[] actual = (String[]) directArray.getArray(); + assertEquals(5, actual.length); + assertEquals("f'a", actual[0]); + assertEquals("fa\"b", actual[1]); + assertEquals("def", actual[2]); + assertEquals("someString", actual[4]); + + // the driver strips out ascii white spaces from an unescaped string, even in + // the middle of the value. while this does not exactly match the behavior of + // the backend, it will always quote values where ascii white spaces are + // present, making this difference not worth the complexity involved addressing. 
+ assertEquals("unquot\u2001", actual[3]); + } + + @Test + public void testUnknownArrayType() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = + stmt.executeQuery("SELECT relacl FROM pg_class WHERE relacl IS NOT NULL LIMIT 1"); + ResultSetMetaData rsmd = rs.getMetaData(); + Assert.assertEquals(Types.ARRAY, rsmd.getColumnType(1)); + + Assert.assertTrue(rs.next()); + Array arr = rs.getArray(1); + Assert.assertEquals("aclitem", arr.getBaseTypeName()); + + ResultSet arrRS = arr.getResultSet(); + ResultSetMetaData arrRSMD = arrRS.getMetaData(); + Assert.assertEquals("aclitem", arrRSMD.getColumnTypeName(2)); + } + + @Test + public void testRecursiveResultSets() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT '{{1,2},{3,4}}'::int[]"); + Assert.assertTrue(rs.next()); + Array arr = rs.getArray(1); + + ResultSet arrRS = arr.getResultSet(); + ResultSetMetaData arrRSMD = arrRS.getMetaData(); + Assert.assertEquals(Types.ARRAY, arrRSMD.getColumnType(2)); + Assert.assertEquals("_int4", arrRSMD.getColumnTypeName(2)); + + Assert.assertTrue(arrRS.next()); + Assert.assertEquals(1, arrRS.getInt(1)); + Array a1 = arrRS.getArray(2); + ResultSet a1RS = a1.getResultSet(); + ResultSetMetaData a1RSMD = a1RS.getMetaData(); + Assert.assertEquals(Types.INTEGER, a1RSMD.getColumnType(2)); + Assert.assertEquals("int4", a1RSMD.getColumnTypeName(2)); + + Assert.assertTrue(a1RS.next()); + Assert.assertEquals(1, a1RS.getInt(2)); + Assert.assertTrue(a1RS.next()); + Assert.assertEquals(2, a1RS.getInt(2)); + Assert.assertTrue(!a1RS.next()); + a1RS.close(); + + Assert.assertTrue(arrRS.next()); + Assert.assertEquals(2, arrRS.getInt(1)); + Array a2 = arrRS.getArray(2); + ResultSet a2RS = a2.getResultSet(); + + Assert.assertTrue(a2RS.next()); + Assert.assertEquals(3, a2RS.getInt(2)); + Assert.assertTrue(a2RS.next()); + Assert.assertEquals(4, a2RS.getInt(2)); + Assert.assertTrue(!a2RS.next()); + a2RS.close(); + + 
arrRS.close(); + rs.close(); + stmt.close(); + } + + @Test + public void testNullString() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT '{a,NULL}'::text[]"); + Assert.assertTrue(rs.next()); + Array arr = rs.getArray(1); + + String[] s = (String[]) arr.getArray(); + Assert.assertEquals(2, s.length); + Assert.assertEquals("a", s[0]); + Assert.assertNull(s[1]); + } + + @Test + public void testEscaping() throws SQLException { + Statement stmt = conn.createStatement(); + String sql = "SELECT "; + sql += 'E'; + // Uggg. Three levels of escaping: Java, string literal, array. + sql += "'{{c\\\\\"d, ''}, {\"\\\\\\\\\",\"''\"}}'::text[]"; + + ResultSet rs = stmt.executeQuery(sql); + Assert.assertTrue(rs.next()); + + Array arr = rs.getArray(1); + String[][] s = (String[][]) arr.getArray(); + Assert.assertEquals("c\"d", s[0][0]); + Assert.assertEquals("'", s[0][1]); + Assert.assertEquals("\\", s[1][0]); + Assert.assertEquals("'", s[1][1]); + + ResultSet arrRS = arr.getResultSet(); + + Assert.assertTrue(arrRS.next()); + Array a1 = arrRS.getArray(2); + ResultSet rs1 = a1.getResultSet(); + Assert.assertTrue(rs1.next()); + Assert.assertEquals("c\"d", rs1.getString(2)); + Assert.assertTrue(rs1.next()); + Assert.assertEquals("'", rs1.getString(2)); + Assert.assertTrue(!rs1.next()); + + Assert.assertTrue(arrRS.next()); + Array a2 = arrRS.getArray(2); + ResultSet rs2 = a2.getResultSet(); + Assert.assertTrue(rs2.next()); + Assert.assertEquals("\\", rs2.getString(2)); + Assert.assertTrue(rs2.next()); + Assert.assertEquals("'", rs2.getString(2)); + Assert.assertTrue(!rs2.next()); + } + + @Test + public void testWriteMultiDimensional() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT '{{1,2},{3,4}}'::int[]"); + Assert.assertTrue(rs.next()); + Array arr = rs.getArray(1); + rs.close(); + stmt.close(); + + String sql = "SELECT ?"; + if (preferQueryMode == 
PreferQueryMode.SIMPLE) { + sql = "SELECT ?::int[]"; + } + PreparedStatement pstmt = conn.prepareStatement(sql); + pstmt.setArray(1, arr); + rs = pstmt.executeQuery(); + Assert.assertTrue(rs.next()); + arr = rs.getArray(1); + + Integer[][] i = (Integer[][]) arr.getArray(); + Assert.assertEquals(1, i[0][0].intValue()); + Assert.assertEquals(2, i[0][1].intValue()); + Assert.assertEquals(3, i[1][0].intValue()); + Assert.assertEquals(4, i[1][1].intValue()); + } + + /* + * The box data type uses a semicolon as the array element delimiter instead of a comma which + * pretty much everything else uses. + */ + @Test + public void testNonStandardDelimiter() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT '{(3,4),(1,2);(7,8),(5,6)}'::box[]"); + Assert.assertTrue(rs.next()); + Array arr = rs.getArray(1); + + ResultSet arrRS = arr.getResultSet(); + + Assert.assertTrue(arrRS.next()); + PGbox box1 = (PGbox) arrRS.getObject(2); + PGpoint p1 = box1.point[0]; + Assert.assertEquals(3, p1.x, 0.001); + Assert.assertEquals(4, p1.y, 0.001); + + Assert.assertTrue(arrRS.next()); + PGbox box2 = (PGbox) arrRS.getObject(2); + PGpoint p2 = box2.point[1]; + Assert.assertEquals(5, p2.x, 0.001); + Assert.assertEquals(6, p2.y, 0.001); + + Assert.assertTrue(!arrRS.next()); + } + + @Test + public void testEmptyArray() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("SELECT '{}'::int[]"); + ResultSet rs = pstmt.executeQuery(); + + while (rs.next()) { + Array array = rs.getArray(1); + if (!rs.wasNull()) { + ResultSet ars = array.getResultSet(); + Assert.assertEquals("get columntype should return Types.INTEGER", java.sql.Types.INTEGER, + ars.getMetaData().getColumnType(1)); + } + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/AutoRollbackTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/AutoRollbackTestSuite.java new file mode 100644 index 0000000..e0a8e91 --- /dev/null +++ 
b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/AutoRollbackTestSuite.java @@ -0,0 +1,423 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.PGConnection; +import org.postgresql.PGProperty; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ResultHandler; +import org.postgresql.core.ServerVersion; +import org.postgresql.core.TransactionState; +import org.postgresql.jdbc.AutoSave; +import org.postgresql.jdbc.PgConnection; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLState; + +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collection; +import java.util.EnumSet; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicInteger; + +@RunWith(Parameterized.class) +public class AutoRollbackTestSuite extends BaseTest4 { + private static final AtomicInteger counter = new AtomicInteger(); + + private enum CleanSavePoint { + TRUE, + FALSE + } + + private enum FailMode { + /** + * Executes "select 1/0" and causes transaction failure (if autocommit=no). + * Mitigation: "autosave=always" or "autocommit=true" + */ + SELECT, + /** + * Executes "alter table rollbacktest", thus it breaks a prepared select over that table. + * Mitigation: "autosave in (always, conservative)" + */ + ALTER, + /** + * Executes DEALLOCATE ALL. 
+ * Mitigation: + * 1) QueryExecutor tracks "DEALLOCATE ALL" responses ({@see org.postgresql.core.QueryExecutor#setFlushCacheOnDeallocate(boolean)} + * 2) QueryExecutor tracks "prepared statement name is invalid" and unprepared relevant statements ({@link org.postgresql.core.v3.QueryExecutorImpl#processResults(ResultHandler, int)} + * 3) "autosave in (always, conservative)" + * 4) Non-transactional cases are healed by retry (when no transaction present, just retry is possible) + */ + DEALLOCATE, + /** + * Executes DISCARD ALL. + * Mitigation: the same as for {@link #DEALLOCATE} + */ + DISCARD, + /** + * Executes "insert ... select 1/0" in a batch statement, thus causing the transaction to fail. + */ + INSERT_BATCH, + } + + private enum ReturnColumns { + EXACT("a, str"), + STAR("*"); + + public final String cols; + + ReturnColumns(String cols) { + this.cols = cols; + } + } + + private enum TestStatement { + SELECT("select ${cols} from rollbacktest", 0), + WITH_INSERT_SELECT( + "with x as (insert into rollbacktest(a, str) values(43, 'abc') returning ${cols})" + + "select * from x", 1); + + private final String sql; + private final int rowsInserted; + + TestStatement(String sql, int rowsInserted) { + this.sql = sql; + this.rowsInserted = rowsInserted; + } + + public String getSql(ReturnColumns cols) { + return sql.replace("${cols}", cols.cols); + } + } + + private static final EnumSet DEALLOCATES = + EnumSet.of(FailMode.DEALLOCATE, FailMode.DISCARD); + + private static final EnumSet TRANS_KILLERS = + EnumSet.of(FailMode.SELECT, FailMode.INSERT_BATCH); + + private enum ContinueMode { + COMMIT, + IS_VALID, + SELECT, + } + + private final AutoSave autoSave; + private final CleanSavePoint cleanSavePoint; + private final AutoCommit autoCommit; + private final FailMode failMode; + private final ContinueMode continueMode; + private final boolean flushCacheOnDeallocate; + private final boolean trans; + private final TestStatement testSql; + private final ReturnColumns cols; + 
+ public AutoRollbackTestSuite(AutoSave autoSave, CleanSavePoint cleanSavePoint, AutoCommit autoCommit, + FailMode failMode, ContinueMode continueMode, boolean flushCacheOnDeallocate, + boolean trans, TestStatement testSql, ReturnColumns cols) { + this.autoSave = autoSave; + this.cleanSavePoint = cleanSavePoint; + this.autoCommit = autoCommit; + this.failMode = failMode; + this.continueMode = continueMode; + this.flushCacheOnDeallocate = flushCacheOnDeallocate; + this.trans = trans; + this.testSql = testSql; + this.cols = cols; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + if (testSql == TestStatement.WITH_INSERT_SELECT) { + assumeMinimumServerVersion(ServerVersion.v9_1); + } + + TestUtil.createTable(con, "rollbacktest", "a int, str text"); + con.setAutoCommit(autoCommit == AutoCommit.YES); + BaseConnection baseConnection = con.unwrap(BaseConnection.class); + baseConnection.setFlushCacheOnDeallocate(flushCacheOnDeallocate); + Assume.assumeTrue("DEALLOCATE ALL requires PostgreSQL 8.3+", + failMode != FailMode.DEALLOCATE || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)); + Assume.assumeTrue("DISCARD ALL requires PostgreSQL 8.3+", + failMode != FailMode.DISCARD || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)); + Assume.assumeTrue("Plan invalidation on table redefinition requires PostgreSQL 8.3+", + failMode != FailMode.ALTER || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)); + } + + @Override + public void tearDown() throws SQLException { + try { + con.setAutoCommit(true); + TestUtil.dropTable(con, "rollbacktest"); + } catch (Exception e) { + e.printStackTrace(); + } + super.tearDown(); + } + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + PGProperty.AUTOSAVE.set(props, autoSave.value()); + PGProperty.CLEANUP_SAVEPOINTS.set(props, cleanSavePoint.toString()); + PGProperty.PREPARE_THRESHOLD.set(props, 1); + } + + @Parameterized.Parameters(name = 
"{index}: autorollback(autoSave={0}, cleanSavePoint={1}, autoCommit={2}, failMode={3}, continueMode={4}, flushOnDeallocate={5}, hastransaction={6}, sql={7}, columns={8})") + public static Iterable data() { + Collection ids = new ArrayList<>(); + boolean[] booleans = new boolean[]{true, false}; + for (AutoSave autoSave : AutoSave.values()) { + for (CleanSavePoint cleanSavePoint:CleanSavePoint.values()) { + for (AutoCommit autoCommit : AutoCommit.values()) { + for (FailMode failMode : FailMode.values()) { + // ERROR: DISCARD ALL cannot run inside a transaction block + if (failMode == FailMode.DISCARD && autoCommit == AutoCommit.NO) { + continue; + } + for (ContinueMode continueMode : ContinueMode.values()) { + if (failMode == FailMode.ALTER && continueMode != ContinueMode.SELECT) { + continue; + } + for (boolean flushCacheOnDeallocate : booleans) { + if (!(flushCacheOnDeallocate || DEALLOCATES.contains(failMode))) { + continue; + } + + for (boolean trans : new boolean[]{true, false}) { + // continueMode would commit, and autoCommit=YES would commit, + // so it does not make sense to test trans=true for those cases + if (trans && (continueMode == ContinueMode.COMMIT + || autoCommit != AutoCommit.NO)) { + continue; + } + for (TestStatement statement : TestStatement.values()) { + for (ReturnColumns columns : ReturnColumns.values()) { + ids.add(new Object[]{autoSave, cleanSavePoint, autoCommit, failMode, continueMode, + flushCacheOnDeallocate, trans, statement, columns}); + } + } + } + } + } + } + } + } + } + return ids; + } + + @Test + public void run() throws SQLException { + if (continueMode == ContinueMode.IS_VALID) { + // make "isValid" a server-prepared statement + con.isValid(4); + } else if (continueMode == ContinueMode.COMMIT) { + doCommit(); + } else if (continueMode == ContinueMode.SELECT) { + assertRows("rollbacktest", 0); + } + + Statement statement = con.createStatement(); + statement.executeUpdate("insert into rollbacktest(a, str) values (0, 'test')"); + 
int rowsExpected = 1; + + PreparedStatement ps = con.prepareStatement(testSql.getSql(cols)); + // Server-prepare the testSql + ps.executeQuery().close(); + rowsExpected += testSql.rowsInserted; + + if (trans) { + statement.executeUpdate("update rollbacktest set a=a"); + } + + switch (failMode) { + case SELECT: + try { + statement.execute("select 1/0"); + Assert.fail("select 1/0 should fail"); + } catch (SQLException e) { + Assert.assertEquals("division by zero expected", + PSQLState.DIVISION_BY_ZERO.getState(), e.getSQLState()); + } + break; + case DEALLOCATE: + statement.executeUpdate("DEALLOCATE ALL"); + break; + case DISCARD: + statement.executeUpdate("DISCARD ALL"); + break; + case ALTER: + statement.executeUpdate("alter table rollbacktest add q int"); + break; + case INSERT_BATCH: + try { + statement.addBatch("insert into rollbacktest(a, str) values (1/0, 'test')"); + statement.executeBatch(); + Assert.fail("select 1/0 should fail"); + } catch (SQLException e) { + Assert.assertEquals("division by zero expected", + PSQLState.DIVISION_BY_ZERO.getState(), e.getSQLState()); + } + break; + default: + Assert.fail("Fail mode " + failMode + " is not implemented"); + } + + PgConnection pgConnection = con.unwrap(PgConnection.class); + if (autoSave == AutoSave.ALWAYS) { + Assert.assertNotEquals("In AutoSave.ALWAYS, transaction should not fail", + TransactionState.FAILED, pgConnection.getTransactionState()); + } + if (autoCommit == AutoCommit.NO) { + Assert.assertNotEquals("AutoCommit == NO, thus transaction should be active (open or failed)", + TransactionState.IDLE, pgConnection.getTransactionState()); + } + statement.close(); + + switch (continueMode) { + case COMMIT: + try { + doCommit(); + // No assert here: commit should always succeed with exception of well known failure cases in catch + } catch (SQLException e) { + if (!flushCacheOnDeallocate && DEALLOCATES.contains(failMode) + && autoSave == AutoSave.NEVER) { + Assert.assertEquals( + "flushCacheOnDeallocate is 
disabled, thus " + failMode + " should cause 'prepared statement \"...\" does not exist'" + + " error message is " + e.getMessage(), + PSQLState.INVALID_SQL_STATEMENT_NAME.getState(), e.getSQLState()); + return; + } + throw e; + } + return; + case IS_VALID: + if (!flushCacheOnDeallocate && autoSave == AutoSave.NEVER + && DEALLOCATES.contains(failMode) && autoCommit == AutoCommit.NO + && con.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) { + Assert.assertFalse("Connection.isValid should return false since failMode=" + failMode + + ", flushCacheOnDeallocate=false, and autosave=NEVER", + con.isValid(4)); + } else { + Assert.assertTrue("Connection.isValid should return true unless the connection is closed", + con.isValid(4)); + } + return; + default: + break; + } + + try { + // Try execute server-prepared statement again + ps.executeQuery().close(); + rowsExpected += testSql.rowsInserted; + executeSqlSuccess(); + } catch (SQLException e) { + if (autoSave != AutoSave.ALWAYS && TRANS_KILLERS.contains(failMode) && autoCommit == AutoCommit.NO) { + Assert.assertEquals( + "AutoSave==" + autoSave + ", thus statements should fail with 'current transaction is aborted...', " + + " error message is " + e.getMessage(), + PSQLState.IN_FAILED_SQL_TRANSACTION.getState(), e.getSQLState()); + return; + } + + if (autoSave == AutoSave.NEVER && autoCommit == AutoCommit.NO) { + if (DEALLOCATES.contains(failMode) && !flushCacheOnDeallocate) { + Assert.assertEquals( + "flushCacheOnDeallocate is disabled, thus " + failMode + " should cause 'prepared statement \"...\" does not exist'" + + " error message is " + e.getMessage(), + PSQLState.INVALID_SQL_STATEMENT_NAME.getState(), e.getSQLState()); + } else if (failMode == FailMode.ALTER) { + Assert.assertEquals( + "AutoSave==NEVER, autocommit=NO, thus ALTER TABLE causes SELECT * to fail with " + + "'cached plan must not change result type', " + + " error message is " + e.getMessage(), + 
PSQLState.NOT_IMPLEMENTED.getState(), e.getSQLState()); + } else { + throw e; + } + } else { + throw e; + } + } + + try { + assertRows("rollbacktest", rowsExpected); + executeSqlSuccess(); + } catch (SQLException e) { + if (autoSave == AutoSave.NEVER && autoCommit == AutoCommit.NO) { + if (DEALLOCATES.contains(failMode) && !flushCacheOnDeallocate + || failMode == FailMode.ALTER) { + // The above statement failed with "prepared statement does not exist", thus subsequent one should fail with + // transaction aborted. + Assert.assertEquals( + "AutoSave==NEVER, thus statements should fail with 'current transaction is aborted...', " + + " error message is " + e.getMessage(), + PSQLState.IN_FAILED_SQL_TRANSACTION.getState(), e.getSQLState()); + } + } else { + throw e; + } + } + } + + private void executeSqlSuccess() throws SQLException { + if (autoCommit == AutoCommit.YES) { + // in autocommit everything should just work + } else if (TRANS_KILLERS.contains(failMode)) { + if (autoSave != AutoSave.ALWAYS) { + Assert.fail( + "autosave= " + autoSave + " != ALWAYS, thus the transaction should be killed"); + } + } else if (DEALLOCATES.contains(failMode)) { + if (autoSave == AutoSave.NEVER && !flushCacheOnDeallocate + && con.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) { + Assert.fail("flushCacheOnDeallocate == false, thus DEALLOCATE ALL should kill the transaction"); + } + } else if (failMode == FailMode.ALTER) { + if (autoSave == AutoSave.NEVER + && con.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE + && cols == ReturnColumns.STAR) { + Assert.fail("autosave=NEVER, thus the transaction should be killed"); + } + } else { + Assert.fail("It is not specified why the test should pass, thus marking a failure"); + } + } + + private void assertRows(String tableName, int nrows) throws SQLException { + Statement st = con.createStatement(); + ResultSet rs = st.executeQuery("select count(*) from " + tableName); + rs.next(); + 
Assert.assertEquals("Table " + tableName, nrows, rs.getInt(1)); + } + + private void doCommit() throws SQLException { + // Such a dance is required since "commit" checks "current transaction state", + // so we need some pending changes, so "commit" query would be sent to the database + if (con.getAutoCommit()) { + con.setAutoCommit(false); + Statement st = con.createStatement(); + st.executeUpdate( + "insert into rollbacktest(a, str) values (42, '" + System.currentTimeMillis() + "," + counter.getAndIncrement() + "')"); + st.close(); + } + con.commit(); + con.setAutoCommit(autoCommit == AutoCommit.YES); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BaseTest4.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BaseTest4.java new file mode 100644 index 0000000..2f7982b --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BaseTest4.java @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; + +import org.postgresql.PGConnection; +import org.postgresql.PGProperty; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.Oid; +import org.postgresql.core.Version; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; + +import org.junit.After; +import org.junit.Assume; +import org.junit.Before; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Locale; +import java.util.Properties; +import java.util.function.Supplier; + +public class BaseTest4 { + + public enum BinaryMode { + REGULAR, FORCE + } + + public enum ReWriteBatchedInserts { + YES, NO + } + + public enum AutoCommit { + YES, NO + } + + public enum StringType { + UNSPECIFIED, VARCHAR + } + + protected Connection con; + protected BinaryMode binaryMode; + private ReWriteBatchedInserts reWriteBatchedInserts; + protected PreferQueryMode preferQueryMode; + private StringType stringType; + + protected void updateProperties(Properties props) { + if (binaryMode == BinaryMode.FORCE) { + forceBinary(props); + } + if (reWriteBatchedInserts == ReWriteBatchedInserts.YES) { + PGProperty.REWRITE_BATCHED_INSERTS.set(props, true); + } + if (stringType != null) { + PGProperty.STRING_TYPE.set(props, stringType.name().toLowerCase(Locale.ROOT)); + } + } + + protected void forceBinary(Properties props) { + PGProperty.PREPARE_THRESHOLD.set(props, -1); + PGProperty.BINARY_TRANSFER_ENABLE.set(props, Oid.BOOL); + } + + public final void setBinaryMode(BinaryMode binaryMode) { + this.binaryMode = binaryMode; + } + + public StringType getStringType() { + return stringType; + } + + public void setStringType(StringType stringType) { + this.stringType = stringType; + } + + public void setReWriteBatchedInserts( + ReWriteBatchedInserts reWriteBatchedInserts) { + this.reWriteBatchedInserts = reWriteBatchedInserts; + } + + @Before + public void setUp() throws Exception { + 
Properties props = new Properties(); + updateProperties(props); + con = TestUtil.openDB(props); + PGConnection pg = con.unwrap(PGConnection.class); + preferQueryMode = pg == null ? PreferQueryMode.EXTENDED : pg.getPreferQueryMode(); + } + + @After + public void tearDown() throws SQLException { + TestUtil.closeDB(con); + } + + public void assumeByteaSupported() { + Assume.assumeTrue("bytea is not supported in simple protocol execution mode", + preferQueryMode != PreferQueryMode.SIMPLE); + } + + public static void assumeCallableStatementsSupported(Connection con) throws SQLException { + PreferQueryMode preferQueryMode = con.unwrap(PGConnection.class).getPreferQueryMode(); + Assume.assumeTrue("callable statements are not fully supported in simple protocol execution mode", + preferQueryMode != PreferQueryMode.SIMPLE); + } + + public void assumeCallableStatementsSupported() { + Assume.assumeTrue("callable statements are not fully supported in simple protocol execution mode", + preferQueryMode != PreferQueryMode.SIMPLE); + } + + public void assumeBinaryModeRegular() { + Assume.assumeTrue(binaryMode == BinaryMode.REGULAR); + } + + public void assumeBinaryModeForce() { + Assume.assumeTrue(binaryMode == BinaryMode.FORCE); + Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE); + } + + public void assumeNotSimpleQueryMode() { + Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE); + } + + /** + * Shorthand for {@code Assume.assumeTrue(TestUtil.haveMinimumServerVersion(conn, version)}. + */ + public void assumeMinimumServerVersion(String message, Version version) throws SQLException { + Assume.assumeTrue(message, TestUtil.haveMinimumServerVersion(con, version)); + } + + /** + * Shorthand for {@code Assume.assumeTrue(TestUtil.haveMinimumServerVersion(conn, version)}. 
+ */ + public void assumeMinimumServerVersion(Version version) throws SQLException { + Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, version)); + } + + protected void assertBinaryForReceive(int oid, boolean expected, Supplier message) throws SQLException { + assertEquals(message.get() + ", useBinaryForReceive(oid=" + oid + ")", expected, + con.unwrap(BaseConnection.class).getQueryExecutor().useBinaryForReceive(oid)); + } + + protected void assertBinaryForSend(int oid, boolean expected, Supplier message) throws SQLException { + assertEquals(message.get() + ", useBinaryForSend(oid=" + oid + ")", expected, + con.unwrap(BaseConnection.class).getQueryExecutor().useBinaryForSend(oid)); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchExecuteTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchExecuteTest.java new file mode 100644 index 0000000..c121b6f --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchExecuteTest.java @@ -0,0 +1,1386 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.PGProperty; +import org.postgresql.PGStatement; +import org.postgresql.test.TestUtil; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.BatchUpdateException; +import java.sql.DatabaseMetaData; +import java.sql.Date; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Properties; + +/* + * TODO tests that can be added to this test case - SQLExceptions chained to a BatchUpdateException + * - test PreparedStatement as thoroughly as Statement + */ + +/* + * Test case for Statement.batchExecute() + */ +@RunWith(Parameterized.class) +public class BatchExecuteTest extends BaseTest4 { + + private boolean insertRewrite; + + public BatchExecuteTest(BinaryMode binaryMode, boolean insertRewrite) { + this.insertRewrite = insertRewrite; + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}, insertRewrite = {1}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + for (boolean insertRewrite : new boolean[]{false, true}) { + ids.add(new Object[]{binaryMode, insertRewrite}); + } + } + return ids; + } + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + PGProperty.REWRITE_BATCHED_INSERTS.set(props, insertRewrite); + } + + // Set up the fixture for this testcase: a connection to a database with + // a table for this test. + @Override + public void setUp() throws Exception { + super.setUp(); + Statement stmt = con.createStatement(); + + // Drop the test table if it already exists for some reason. It is + // not an error if it doesn't exist. 
+ TestUtil.createTempTable(con, "testbatch", "pk INTEGER, col1 INTEGER"); + + stmt.executeUpdate("INSERT INTO testbatch VALUES (1, 0)"); + + TestUtil.createTempTable(con, "prep", "a integer, b integer, d date"); + + TestUtil.createTempTable(con, "batchUpdCnt", "id varchar(512) primary key, data varchar(512)"); + stmt.executeUpdate("INSERT INTO batchUpdCnt(id) VALUES ('key-2')"); + + stmt.close(); + + // Generally recommended with batch updates. By default we run all + // tests in this test case with autoCommit disabled. + con.setAutoCommit(false); + } + + // Tear down the fixture for this test case. + @Override + public void tearDown() throws SQLException { + con.setAutoCommit(true); + + TestUtil.dropTable(con, "testbatch"); + super.tearDown(); + } + + @Test + public void testSupportsBatchUpdates() throws Exception { + DatabaseMetaData dbmd = con.getMetaData(); + Assert.assertTrue("Expected that Batch Updates are supported", dbmd.supportsBatchUpdates()); + } + + @Test + public void testEmptyClearBatch() throws Exception { + Statement stmt = con.createStatement(); + stmt.clearBatch(); // No-op. + + PreparedStatement ps = con.prepareStatement("SELECT ?"); + ps.clearBatch(); // No-op. 
+ } + + private void assertCol1HasValue(int expected) throws Exception { + Statement getCol1 = con.createStatement(); + try { + ResultSet rs = getCol1.executeQuery("SELECT col1 FROM testbatch WHERE pk = 1"); + Assert.assertTrue(rs.next()); + + int actual = rs.getInt("col1"); + + Assert.assertEquals(expected, actual); + Assert.assertFalse(rs.next()); + + rs.close(); + } finally { + TestUtil.closeQuietly(getCol1); + } + } + + @Test + public void testExecuteEmptyBatch() throws Exception { + Statement stmt = con.createStatement(); + try { + int[] updateCount = stmt.executeBatch(); + Assert.assertEquals(0, updateCount.length); + + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1"); + stmt.clearBatch(); + updateCount = stmt.executeBatch(); + Assert.assertEquals(0, updateCount.length); + stmt.close(); + } finally { + TestUtil.closeQuietly(stmt); + } + } + + @Test + public void testExecuteEmptyPreparedBatch() throws Exception { + PreparedStatement ps = con.prepareStatement("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1"); + try { + int[] updateCount = ps.executeBatch(); + Assert.assertEquals("Empty batch should update empty result", 0, updateCount.length); + } finally { + TestUtil.closeQuietly(ps); + } + } + + @Test + public void testPreparedNoParameters() throws SQLException { + PreparedStatement ps = con.prepareStatement("INSERT INTO prep(a) VALUES (1)"); + try { + ps.addBatch(); + ps.addBatch(); + ps.addBatch(); + ps.addBatch(); + int[] actual = ps.executeBatch(); + assertBatchResult("4 rows inserted via batch", new int[]{1, 1, 1, 1}, actual); + } finally { + TestUtil.closeQuietly(ps); + } + } + + @Test + public void testClearBatch() throws Exception { + Statement stmt = con.createStatement(); + try { + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1"); + assertCol1HasValue(0); + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1"); + assertCol1HasValue(0); + stmt.clearBatch(); + assertCol1HasValue(0); + 
stmt.addBatch("UPDATE testbatch SET col1 = col1 + 4 WHERE pk = 1"); + assertCol1HasValue(0); + stmt.executeBatch(); + assertCol1HasValue(4); + con.commit(); + assertCol1HasValue(4); + } finally { + TestUtil.closeQuietly(stmt); + } + } + + @Test + public void testClearPreparedNoArgBatch() throws Exception { + PreparedStatement ps = con.prepareStatement("INSERT INTO prep(a) VALUES (1)"); + try { + ps.addBatch(); + ps.clearBatch(); + int[] updateCount = ps.executeBatch(); + Assert.assertEquals("Empty batch should update empty result", 0, updateCount.length); + } finally { + TestUtil.closeQuietly(ps); + } + } + + @Test + public void testClearPreparedEmptyBatch() throws Exception { + PreparedStatement ps = con.prepareStatement("INSERT INTO prep(a) VALUES (1)"); + try { + ps.clearBatch(); + } finally { + TestUtil.closeQuietly(ps); + } + } + + @Test + public void testSelectInBatch() throws Exception { + Statement stmt = stmt = con.createStatement(); + try { + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1"); + stmt.addBatch("SELECT col1 FROM testbatch WHERE pk = 1"); + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1"); + + // There's no reason to Assert.fail + int[] updateCounts = stmt.executeBatch(); + + Assert.assertTrue("First update should succeed, thus updateCount should be 1 or SUCCESS_NO_INFO" + + ", actual value: " + updateCounts[0], + updateCounts[0] == 1 || updateCounts[0] == Statement.SUCCESS_NO_INFO); + Assert.assertTrue("For SELECT, number of modified rows should be either 0 or SUCCESS_NO_INFO" + + ", actual value: " + updateCounts[1], + updateCounts[1] == 0 || updateCounts[1] == Statement.SUCCESS_NO_INFO); + Assert.assertTrue("Second update should succeed, thus updateCount should be 1 or SUCCESS_NO_INFO" + + ", actual value: " + updateCounts[2], + updateCounts[2] == 1 || updateCounts[2] == Statement.SUCCESS_NO_INFO); + } finally { + TestUtil.closeQuietly(stmt); + } + } + + @Test + public void 
testSelectInBatchThrowsAutoCommit() throws Exception { + con.setAutoCommit(true); + testSelectInBatchThrows(); + } + + @Test + public void testSelectInBatchThrows() throws Exception { + Statement stmt = con.createStatement(); + try { + int oldValue = getCol1Value(); + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1"); + stmt.addBatch("SELECT 0/0 FROM testbatch WHERE pk = 1"); + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1"); + + int[] updateCounts; + try { + updateCounts = stmt.executeBatch(); + Assert.fail("0/0 should throw BatchUpdateException"); + } catch (BatchUpdateException be) { + updateCounts = be.getUpdateCounts(); + } + + if (!con.getAutoCommit()) { + con.commit(); + } + + int newValue = getCol1Value(); + boolean firstOk = updateCounts[0] == 1 || updateCounts[0] == Statement.SUCCESS_NO_INFO; + boolean lastOk = updateCounts[2] == 1 || updateCounts[2] == Statement.SUCCESS_NO_INFO; + + Assert.assertEquals("testbatch.col1 should account +1 and +2 for the relevant successful rows: " + + Arrays.toString(updateCounts), + oldValue + (firstOk ? 1 : 0) + (lastOk ? 2 : 0), newValue); + + Assert.assertEquals("SELECT 0/0 should be marked as Statement.EXECUTE_FAILED", + Statement.EXECUTE_FAILED, + updateCounts[1]); + + } finally { + TestUtil.closeQuietly(stmt); + } + } + + private int getCol1Value() throws SQLException { + Statement stmt = con.createStatement(); + try { + ResultSet rs = stmt.executeQuery("select col1 from testbatch where pk=1"); + rs.next(); + return rs.getInt(1); + } finally { + stmt.close(); + } + } + + @Test + public void testStringAddBatchOnPreparedStatement() throws Exception { + PreparedStatement pstmt = + con.prepareStatement("UPDATE testbatch SET col1 = col1 + ? 
WHERE PK = ?"); + pstmt.setInt(1, 1); + pstmt.setInt(2, 1); + pstmt.addBatch(); + + try { + pstmt.addBatch("UPDATE testbatch SET col1 = 3"); + Assert.fail( + "Should have thrown an exception about using the string addBatch method on a prepared statement."); + } catch (SQLException sqle) { + } + + pstmt.close(); + } + + @Test + public void testPreparedStatement() throws Exception { + PreparedStatement pstmt = + con.prepareStatement("UPDATE testbatch SET col1 = col1 + ? WHERE PK = ?"); + + // Note that the first parameter changes for every statement in the + // batch, whereas the second parameter remains constant. + pstmt.setInt(1, 1); + pstmt.setInt(2, 1); + pstmt.addBatch(); + assertCol1HasValue(0); + + pstmt.setInt(1, 2); + pstmt.addBatch(); + assertCol1HasValue(0); + + pstmt.setInt(1, 4); + pstmt.addBatch(); + assertCol1HasValue(0); + + pstmt.executeBatch(); + assertCol1HasValue(7); + + // now test to see that we can still use the statement after the execute + pstmt.setInt(1, 3); + pstmt.addBatch(); + assertCol1HasValue(7); + + pstmt.executeBatch(); + assertCol1HasValue(10); + + con.commit(); + assertCol1HasValue(10); + + con.rollback(); + assertCol1HasValue(10); + + pstmt.close(); + } + + @Test + public void testTransactionalBehaviour() throws Exception { + Statement stmt = con.createStatement(); + + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1"); + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1"); + stmt.executeBatch(); + con.rollback(); + assertCol1HasValue(0); + + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 4 WHERE pk = 1"); + stmt.addBatch("UPDATE testbatch SET col1 = col1 + 8 WHERE pk = 1"); + + // The statement has been added to the batch, but it should not yet + // have been executed. 
+ assertCol1HasValue(0); + + int[] updateCounts = stmt.executeBatch(); + Assert.assertEquals(2, updateCounts.length); + Assert.assertEquals(1, updateCounts[0]); + Assert.assertEquals(1, updateCounts[1]); + + assertCol1HasValue(12); + con.commit(); + assertCol1HasValue(12); + con.rollback(); + assertCol1HasValue(12); + + TestUtil.closeQuietly(stmt); + } + + @Test + public void testWarningsAreCleared() throws SQLException { + Statement stmt = con.createStatement(); + stmt.addBatch("CREATE TEMP TABLE unused (a int primary key)"); + stmt.executeBatch(); + // Execute an empty batch to clear warnings. + stmt.executeBatch(); + Assert.assertNull(stmt.getWarnings()); + TestUtil.closeQuietly(stmt); + } + + @Test + public void testBatchEscapeProcessing() throws SQLException { + Statement stmt = con.createStatement(); + stmt.execute("CREATE TEMP TABLE batchescape (d date)"); + + stmt.addBatch("INSERT INTO batchescape (d) VALUES ({d '2007-11-20'})"); + stmt.executeBatch(); + + PreparedStatement pstmt = + con.prepareStatement("INSERT INTO batchescape (d) VALUES ({d '2007-11-20'})"); + pstmt.addBatch(); + pstmt.executeBatch(); + pstmt.close(); + + ResultSet rs = stmt.executeQuery("SELECT d FROM batchescape"); + Assert.assertTrue(rs.next()); + Assert.assertEquals("2007-11-20", rs.getString(1)); + Assert.assertTrue(rs.next()); + Assert.assertEquals("2007-11-20", rs.getString(1)); + Assert.assertTrue(!rs.next()); + TestUtil.closeQuietly(stmt); + } + + @Test + public void testBatchWithEmbeddedNulls() throws SQLException { + Statement stmt = con.createStatement(); + stmt.execute("CREATE TEMP TABLE batchstring (a text)"); + + con.commit(); + + PreparedStatement pstmt = con.prepareStatement("INSERT INTO batchstring VALUES (?)"); + + try { + pstmt.setString(1, "a"); + pstmt.addBatch(); + pstmt.setString(1, "\u0000"); + pstmt.addBatch(); + pstmt.setString(1, "b"); + pstmt.addBatch(); + pstmt.executeBatch(); + Assert.fail("Should have thrown an exception."); + } catch (SQLException sqle) { 
+ con.rollback(); + } + pstmt.close(); + + ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM batchstring"); + Assert.assertTrue(rs.next()); + Assert.assertEquals(0, rs.getInt(1)); + TestUtil.closeQuietly(stmt); + } + + @Test + public void testMixedBatch() throws SQLException { + try { + Statement st = con.createStatement(); + st.executeUpdate("DELETE FROM prep;"); + st.close(); + + st = con.createStatement(); + st.addBatch("INSERT INTO prep (a, b) VALUES (1,2)"); + st.addBatch("INSERT INTO prep (a, b) VALUES (100,200)"); + st.addBatch("DELETE FROM prep WHERE a = 1 AND b = 2"); + st.addBatch("CREATE TEMPORARY TABLE waffles(sauce text)"); + st.addBatch("INSERT INTO waffles(sauce) VALUES ('cream'), ('strawberry jam')"); + int[] batchResult = st.executeBatch(); + Assert.assertEquals(1, batchResult[0]); + Assert.assertEquals(1, batchResult[1]); + Assert.assertEquals(1, batchResult[2]); + Assert.assertEquals(0, batchResult[3]); + Assert.assertEquals(2, batchResult[4]); + } catch (SQLException ex) { + ex.getNextException().printStackTrace(); + throw ex; + } + } + + /* + * A user reported that a query that uses RETURNING (via getGeneratedKeys) in a batch, and a + * 'text' field value in a table is assigned NULL in the first execution of the batch then + * non-NULL afterwards using PreparedStatement.setObject(int, Object) (i.e. no Types param or + * setString call) the batch may Assert.fail with: + * + * "Received resultset tuples, but no field structure for them" + * + * at org.postgresql.core.v3.QueryExecutorImpl.processResults + * + * Prior to 245b388 it would instead Assert.fail with a NullPointerException in + * AbstractJdbc2ResultSet.checkColumnIndex + * + * The cause is complicated. The Assert.failure arises because the query gets re-planned mid-batch. This + * re-planning clears the cached information about field types. The field type information for + * parameters gets re-acquired later but the information for *returned* values does not. 
+ * + * (The reason why the returned value types aren't recalculated is not yet known.) + * + * The re-plan's cause is its self complicated. + * + * The first bind of the parameter, which is null, gets the type oid 0 (unknown/unspecified). + * Unless Types.VARCHAR is specified or setString is used, in which case the oid is set to 1043 + * (varchar). + * + * The second bind identifies the object class as String so it calls setString internally. This + * sets the type to 1043 (varchar). + * + * The third and subsequent binds, whether null or non-null, will get type 1043, because there's + * logic to avoid overwriting a known parameter type with the unknown type oid. This is why the + * issue can only occur when null is the first entry. + * + * When executed the first time a describe is run. This reports the parameter oid to be 25 (text), + * because that's the type of the table column the param is being assigned to. That's why the cast + * to ?::varchar works - because it overrides the type for the parameter to 1043 (varchar). + * + * The second execution sees that the bind parameter type is already known to PgJDBC as 1043 + * (varchar). PgJDBC doesn't see that text and varchar are the same - and, in fact, under some + * circumstances they aren't exactly the same. So it discards the planned query and re-plans. + * + * This issue can be reproduced with any pair of implicitly or assignment castable types; for + * example, using Integer in JDBC and bigint in the Pg table will do it. + */ + @Test + public void testBatchReturningMixedNulls() throws SQLException { + String[] testData = new String[]{null, "test", null, null, null}; + + try { + Statement setup = con.createStatement(); + setup.execute("DROP TABLE IF EXISTS mixednulltest;"); + // It's significant that "value' is 'text' not 'varchar' here; + // if 'varchar' is used then everything works fine. 
+ setup.execute("CREATE TABLE mixednulltest (key serial primary key, value text);"); + setup.close(); + + // If the parameter is given as ?::varchar then this issue + // does not arise. + PreparedStatement st = + con.prepareStatement("INSERT INTO mixednulltest (value) VALUES (?)", new String[]{"key"}); + + for (String val : testData) { + /* + * This is the crucial bit. It's set to null first time around, so the RETURNING clause's + * type oid is undefined. + * + * The second time around the value is assigned so Pg reports the type oid is TEXT, like the + * table. But we expected VARCHAR. + * + * This causes PgJDBC to replan the query, and breaks other things. + */ + st.setObject(1, val); + st.addBatch(); + } + st.executeBatch(); + ResultSet rs = st.getGeneratedKeys(); + for (int i = 1; i <= testData.length; i++) { + rs.next(); + Assert.assertEquals(i, rs.getInt(1)); + } + Assert.assertTrue(!rs.next()); + } catch (SQLException ex) { + ex.getNextException().printStackTrace(); + throw ex; + } + } + + @Test + public void testBatchWithAlternatingAndUnknownTypes0() throws SQLException { + testBatchWithAlternatingAndUnknownTypesN(0); + } + + @Test + public void testBatchWithAlternatingAndUnknownTypes1() throws SQLException { + testBatchWithAlternatingAndUnknownTypesN(1); + } + + @Test + public void testBatchWithAlternatingAndUnknownTypes2() throws SQLException { + testBatchWithAlternatingAndUnknownTypesN(2); + } + + @Test + public void testBatchWithAlternatingAndUnknownTypes3() throws SQLException { + testBatchWithAlternatingAndUnknownTypesN(3); + } + + @Test + public void testBatchWithAlternatingAndUnknownTypes4() throws SQLException { + testBatchWithAlternatingAndUnknownTypesN(4); + } + + @Test + public void testBatchWithAlternatingAndUnknownTypes5() throws SQLException { + testBatchWithAlternatingAndUnknownTypesN(5); + } + + @Test + public void testBatchWithAlternatingAndUnknownTypes6() throws SQLException { + testBatchWithAlternatingAndUnknownTypesN(6); + } + + /** + *
   * <p>This one is reproduced in regular (non-force binary) mode.</p>
   *
   * <p>As of 9.4.1208 the following tests fail:
   * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes3
   * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes4
   * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes5
   * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes6</p>
   *
+ * @param numPreliminaryInserts number of preliminary inserts to make so the statement gets + * prepared + * @throws SQLException in case of failure + */ + public void testBatchWithAlternatingAndUnknownTypesN(int numPreliminaryInserts) + throws SQLException { + PreparedStatement ps = null; + try { + con.setAutoCommit(true); + // This test requires autoCommit false to reproduce + ps = con.prepareStatement("insert into prep(a, d) values(?, ?)"); + for (int i = 0; i < numPreliminaryInserts; i++) { + ps.setNull(1, Types.SMALLINT); + ps.setObject(2, new Date(42)); + ps.addBatch(); + ps.executeBatch(); + } + + ps.setObject(1, 43.0); + ps.setObject(2, new Date(43)); + ps.addBatch(); + ps.setNull(1, Types.SMALLINT); + ps.setObject(2, new Date(44)); + ps.addBatch(); + ps.executeBatch(); + + ps.setObject(1, 45.0); + ps.setObject(2, new Date(45)); // <-- this causes "oid of bind unknown, send Describe" + ps.addBatch(); + ps.setNull(1, Types.SMALLINT); + ps.setNull(2, Types.DATE); // <-- this uses Oid.DATE, thus no describe message + // As the same query object was reused the describe from Date(45) overwrites + // parameter types, thus Double(45)'s type (double) comes instead of SMALLINT. + // Thus pgjdbc thinks the prepared statement is prepared for (double, date) types + // however in reality the statement is prepared for (smallint, date) types. 
+ + ps.addBatch(); + ps.executeBatch(); + + // This execution with (double, unknown) passes isPreparedForTypes check, and causes + // the failure + ps.setObject(1, 47.0); + ps.setObject(2, new Date(47)); + ps.addBatch(); + ps.executeBatch(); + } catch (BatchUpdateException e) { + throw e.getNextException(); + } finally { + TestUtil.closeQuietly(ps); + } + /* +Here's the log +11:33:10.708 (1) FE=> Parse(stmt=null,query="CREATE TABLE prep (a integer, b integer, d date) ",oids={}) +11:33:10.708 (1) FE=> Bind(stmt=null,portal=null) +11:33:10.708 (1) FE=> Describe(portal=null) +11:33:10.708 (1) FE=> Execute(portal=null,limit=1) +11:33:10.708 (1) FE=> Sync +11:33:10.710 (1) <=BE ParseComplete [null] +11:33:10.711 (1) <=BE BindComplete [unnamed] +11:33:10.711 (1) <=BE NoData +11:33:10.711 (1) <=BE CommandStatus(CREATE TABLE) +11:33:10.711 (1) <=BE ReadyForQuery(I) +11:33:10.716 (1) batch execute 1 queries, handler=org.postgresql.jdbc.PgStatement$BatchResultHandler@4629104a, maxRows=0, fetchSize=0, flags=5 +11:33:10.716 (1) FE=> Parse(stmt=null,query="BEGIN",oids={}) +11:33:10.717 (1) FE=> Bind(stmt=null,portal=null) +11:33:10.717 (1) FE=> Execute(portal=null,limit=0) +11:33:10.718 (1) FE=> Parse(stmt=null,query="insert into prep(a, d) values($1, $2)",oids={21,0}) +11:33:10.718 (1) FE=> Bind(stmt=null,portal=null,$1=:B:21,$2=<'1970-1-1 +3:0:0'>:T:0) +11:33:10.719 (1) FE=> Describe(portal=null) +11:33:10.719 (1) FE=> Execute(portal=null,limit=1) +11:33:10.719 (1) FE=> Sync +11:33:10.720 (1) <=BE ParseComplete [null] +11:33:10.720 (1) <=BE BindComplete [unnamed] +11:33:10.720 (1) <=BE CommandStatus(BEGIN) +11:33:10.720 (1) <=BE ParseComplete [null] +11:33:10.720 (1) <=BE BindComplete [unnamed] +11:33:10.720 (1) <=BE NoData +11:33:10.720 (1) <=BE CommandStatus(INSERT 0 1) +11:33:10.720 (1) <=BE ReadyForQuery(T) +11:33:10.721 (1) batch execute 2 queries, handler=org.postgresql.jdbc.PgStatement$BatchResultHandler@27f8302d, maxRows=0, fetchSize=0, flags=5 +11:33:10.721 (1) FE=> 
Parse(stmt=null,query="insert into prep(a, d) values($1, $2)",oids={701,0}) +11:33:10.723 (1) FE=> Bind(stmt=null,portal=null,$1=<43.0>:B:701,$2=<'1970-1-1 +3:0:0'>:T:0) +11:33:10.723 (1) FE=> Describe(portal=null) +11:33:10.723 (1) FE=> Execute(portal=null,limit=1) +11:33:10.723 (1) FE=> Parse(stmt=null,query="insert into prep(a, d) values($1, $2)",oids={21,0}) +11:33:10.723 (1) FE=> Bind(stmt=null,portal=null,$1=:B:21,$2=<'1970-1-1 +3:0:0'>:T:0) +11:33:10.723 (1) FE=> Describe(portal=null) +11:33:10.723 (1) FE=> Execute(portal=null,limit=1) +11:33:10.723 (1) FE=> Sync +11:33:10.723 (1) <=BE ParseComplete [null] +11:33:10.723 (1) <=BE BindComplete [unnamed] +11:33:10.725 (1) <=BE NoData +11:33:10.726 (1) <=BE CommandStatus(INSERT 0 1) +11:33:10.726 (1) <=BE ParseComplete [null] +11:33:10.726 (1) <=BE BindComplete [unnamed] +11:33:10.726 (1) <=BE NoData +11:33:10.726 (1) <=BE CommandStatus(INSERT 0 1) +11:33:10.726 (1) <=BE ReadyForQuery(T) +11:33:10.726 (1) batch execute 2 queries, handler=org.postgresql.jdbc.PgStatement$BatchResultHandler@4d76f3f8, maxRows=0, fetchSize=0, flags=516 +11:33:10.726 (1) FE=> Parse(stmt=S_1,query="insert into prep(a, d) values($1, $2)",oids={701,0}) +11:33:10.727 (1) FE=> Describe(statement=S_1) +11:33:10.728 (1) FE=> Bind(stmt=S_1,portal=null,$1=<45.0>:B:701,$2=<'1970-1-1 +3:0:0'>:T:0) +11:33:10.728 (1) FE=> Execute(portal=null,limit=1) +11:33:10.729 (1) FE=> CloseStatement(S_1) +11:33:10.729 (1) FE=> Parse(stmt=S_2,query="insert into prep(a, d) values($1, $2)",oids={21,1082}) +11:33:10.729 (1) FE=> Bind(stmt=S_2,portal=null,$1=:B:21,$2=:B:1082) +11:33:10.729 (1) FE=> Describe(portal=null) +11:33:10.729 (1) FE=> Execute(portal=null,limit=1) +11:33:10.729 (1) FE=> Sync +11:33:10.730 (1) <=BE ParseComplete [S_2] +11:33:10.730 (1) <=BE ParameterDescription +11:33:10.730 (1) <=BE NoData +11:33:10.730 (1) <=BE BindComplete [unnamed] +11:33:10.730 (1) <=BE CommandStatus(INSERT 0 1) +11:33:10.730 (1) <=BE CloseComplete +11:33:10.730 (1) 
<=BE ParseComplete [S_2] +11:33:10.730 (1) <=BE BindComplete [unnamed] +11:33:10.730 (1) <=BE NoData +11:33:10.731 (1) <=BE CommandStatus(INSERT 0 1) +11:33:10.731 (1) <=BE ReadyForQuery(T) +11:33:10.731 (1) batch execute 1 queries, handler=org.postgresql.jdbc.PgStatement$BatchResultHandler@4534b60d, maxRows=0, fetchSize=0, flags=516 +11:33:10.731 (1) FE=> Bind(stmt=S_2,portal=null,$1=<47.0>:B:701,$2=<'1970-1-1 +3:0:0'>:T:1082) +11:33:10.731 (1) FE=> Describe(portal=null) +11:33:10.731 (1) FE=> Execute(portal=null,limit=1) +11:33:10.731 (1) FE=> Sync +11:33:10.732 (1) <=BE ErrorMessage(ERROR: incorrect binary data format in bind parameter 1) +org.postgresql.util.PSQLException: ERROR: incorrect binary data format in bind parameter 1 + at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2185) + at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:1914) + at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:338) + at org.postgresql.jdbc.PgStatement.executeBatch(PgStatement.java:2534) + at org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes2(BatchExecuteTest.java:460) + */ + } + + /** + * Tests {@link PreparedStatement#addBatch} in case types of parameters change from one batch to + * another. Change of the datatypes causes re-prepare server-side statement, thus exactly the same + * query object might have different statement names. 
+ */ + @Test + public void testBatchWithAlternatingTypes() throws SQLException { + try { + Statement s = con.createStatement(); + s.execute("BEGIN"); + PreparedStatement ps; + ps = con.prepareStatement("insert into prep(a,b) values(?::int4,?)"); + ps.setInt(1, 2); + ps.setInt(2, 2); + ps.addBatch(); + ps.addBatch(); + ps.addBatch(); + ps.addBatch(); + ps.addBatch(); + ps.setString(1, "1"); + ps.setInt(2, 2); + ps.addBatch(); + ps.executeBatch(); + ps.setString(1, "2"); + ps.setInt(2, 2); + ps.addBatch(); + ps.executeBatch(); + ps.close(); + s.execute("COMMIT"); + } catch (BatchUpdateException e) { + throw e.getNextException(); + } + /* +Key part is (see "before the fix"): + 23:00:30.354 (1) <=BE ParseComplete [S_2] + 23:00:30.356 (1) <=BE ParseComplete [S_2] +The problem is ParseRequest is reusing the same Query object and it updates StatementName in place. +This dodges ParseComplete message as previously QueryExecutor just picked statementName from Query object. +Eventually this causes closing of "new" statement instead of old S_1 + 23:00:30.356 (1) FE=> CloseStatement(S_2) + +The fix is to make ParseComplete a no-op, so as soon as the driver allocates a statement name, it registers + the name for cleanup. 
+ +Trace before the fix: +23:00:30.261 (1) PostgreSQL 9.4 JDBC4.1 (build 1206) +23:00:30.266 (1) Trying to establish a protocol version 3 connection to localhost:5432 +23:00:30.280 (1) Receive Buffer Size is 408300 +23:00:30.281 (1) Send Buffer Size is 146988 +23:00:30.283 (1) FE=> StartupPacket(user=postgres, database=vladimirsitnikov, client_encoding=UTF8, DateStyle=ISO, TimeZone=Europe/Volgograd, extra_float_digits=2) +23:00:30.289 (1) <=BE AuthenticationOk +23:00:30.300 (1) <=BE ParameterStatus(application_name = ) +23:00:30.300 (1) <=BE ParameterStatus(client_encoding = UTF8) +23:00:30.300 (1) <=BE ParameterStatus(DateStyle = ISO, DMY) +23:00:30.300 (1) <=BE ParameterStatus(integer_datetimes = on) +23:00:30.300 (1) <=BE ParameterStatus(IntervalStyle = postgres) +23:00:30.301 (1) <=BE ParameterStatus(is_superuser = on) +23:00:30.301 (1) <=BE ParameterStatus(server_encoding = SQL_ASCII) +23:00:30.301 (1) <=BE ParameterStatus(server_version = 9.4.5) +23:00:30.301 (1) <=BE ParameterStatus(session_authorization = postgres) +23:00:30.301 (1) <=BE ParameterStatus(standard_conforming_strings = on) +23:00:30.301 (1) <=BE ParameterStatus(TimeZone = Europe/Volgograd) +23:00:30.301 (1) <=BE BackendKeyData(pid=81221,ckey=2048823749) +23:00:30.301 (1) <=BE ReadyForQuery(I) +23:00:30.304 (1) simple execute, handler=org.postgresql.core.SetupQueryRunner$SimpleResultHandler@531d72ca, maxRows=0, fetchSize=0, flags=23 +23:00:30.304 (1) FE=> Parse(stmt=null,query="SET extra_float_digits = 3",oids={}) +23:00:30.304 (1) FE=> Bind(stmt=null,portal=null) +23:00:30.305 (1) FE=> Execute(portal=null,limit=1) +23:00:30.305 (1) FE=> Sync +23:00:30.306 (1) <=BE ParseComplete [null] +23:00:30.306 (1) <=BE BindComplete [unnamed] +23:00:30.306 (1) <=BE CommandStatus(SET) +23:00:30.306 (1) <=BE ReadyForQuery(I) +23:00:30.306 (1) compatible = 90400 +23:00:30.306 (1) loglevel = 10 +23:00:30.307 (1) prepare threshold = 5 +23:00:30.309 (1) types using binary send = 
TIMESTAMPTZ,UUID,INT2_ARRAY,INT4_ARRAY,BYTEA,TEXT_ARRAY,TIMETZ,INT8,INT2,INT4,VARCHAR_ARRAY,INT8_ARRAY,POINT,TIMESTAMP,TIME,BOX,FLOAT4,FLOAT8,FLOAT4_ARRAY,FLOAT8_ARRAY +23:00:30.310 (1) types using binary receive = TIMESTAMPTZ,UUID,INT2_ARRAY,INT4_ARRAY,BYTEA,TEXT_ARRAY,TIMETZ,INT8,INT2,INT4,VARCHAR_ARRAY,INT8_ARRAY,POINT,DATE,TIMESTAMP,TIME,BOX,FLOAT4,FLOAT8,FLOAT4_ARRAY,FLOAT8_ARRAY +23:00:30.310 (1) integer date/time = true +23:00:30.331 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@255316f2, maxRows=0, fetchSize=0, flags=21 +23:00:30.331 (1) FE=> Parse(stmt=null,query="DROP TABLE testbatch CASCADE ",oids={}) +23:00:30.331 (1) FE=> Bind(stmt=null,portal=null) +23:00:30.331 (1) FE=> Describe(portal=null) +23:00:30.331 (1) FE=> Execute(portal=null,limit=1) +23:00:30.331 (1) FE=> Sync +23:00:30.332 (1) <=BE ParseComplete [null] +23:00:30.332 (1) <=BE BindComplete [unnamed] +23:00:30.332 (1) <=BE NoData +23:00:30.334 (1) <=BE ErrorMessage(ERROR: table "testbatch" does not exist +Location: File: tablecmds.c, Routine: DropErrorMsgNonExistent, Line: 727 +Server SQLState: 42P01) +23:00:30.335 (1) <=BE ReadyForQuery(I) +23:00:30.335 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@4b9af9a9, maxRows=0, fetchSize=0, flags=21 +23:00:30.336 (1) FE=> Parse(stmt=null,query="CREATE TABLE testbatch (pk INTEGER, col1 INTEGER) ",oids={}) +23:00:30.336 (1) FE=> Bind(stmt=null,portal=null) +23:00:30.336 (1) FE=> Describe(portal=null) +23:00:30.336 (1) FE=> Execute(portal=null,limit=1) +23:00:30.336 (1) FE=> Sync +23:00:30.339 (1) <=BE ParseComplete [null] +23:00:30.339 (1) <=BE BindComplete [unnamed] +23:00:30.339 (1) <=BE NoData +23:00:30.339 (1) <=BE CommandStatus(CREATE TABLE) +23:00:30.339 (1) <=BE ReadyForQuery(I) +23:00:30.339 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@5387f9e0, maxRows=0, fetchSize=0, flags=21 +23:00:30.339 (1) 
FE=> Parse(stmt=null,query="INSERT INTO testbatch VALUES (1, 0)",oids={}) +23:00:30.340 (1) FE=> Bind(stmt=null,portal=null) +23:00:30.340 (1) FE=> Describe(portal=null) +23:00:30.340 (1) FE=> Execute(portal=null,limit=1) +23:00:30.340 (1) FE=> Sync +23:00:30.341 (1) <=BE ParseComplete [null] +23:00:30.341 (1) <=BE BindComplete [unnamed] +23:00:30.341 (1) <=BE NoData +23:00:30.341 (1) <=BE CommandStatus(INSERT 0 1) +23:00:30.341 (1) <=BE ReadyForQuery(I) +23:00:30.341 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@6e5e91e4, maxRows=0, fetchSize=0, flags=21 +23:00:30.341 (1) FE=> Parse(stmt=null,query="DROP TABLE prep CASCADE ",oids={}) +23:00:30.341 (1) FE=> Bind(stmt=null,portal=null) +23:00:30.341 (1) FE=> Describe(portal=null) +23:00:30.342 (1) FE=> Execute(portal=null,limit=1) +23:00:30.342 (1) FE=> Sync +23:00:30.343 (1) <=BE ParseComplete [null] +23:00:30.343 (1) <=BE BindComplete [unnamed] +23:00:30.343 (1) <=BE NoData +23:00:30.344 (1) <=BE CommandStatus(DROP TABLE) +23:00:30.344 (1) <=BE ReadyForQuery(I) +23:00:30.344 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@2cdf8d8a, maxRows=0, fetchSize=0, flags=21 +23:00:30.344 (1) FE=> Parse(stmt=null,query="CREATE TABLE prep (a integer, b integer) ",oids={}) +23:00:30.344 (1) FE=> Bind(stmt=null,portal=null) +23:00:30.344 (1) FE=> Describe(portal=null) +23:00:30.344 (1) FE=> Execute(portal=null,limit=1) +23:00:30.344 (1) FE=> Sync +23:00:30.345 (1) <=BE ParseComplete [null] +23:00:30.345 (1) <=BE BindComplete [unnamed] +23:00:30.345 (1) <=BE NoData +23:00:30.345 (1) <=BE CommandStatus(CREATE TABLE) +23:00:30.346 (1) <=BE ReadyForQuery(I) +23:00:30.346 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@30946e09, maxRows=0, fetchSize=0, flags=1 +23:00:30.346 (1) FE=> Parse(stmt=null,query="BEGIN",oids={}) +23:00:30.346 (1) FE=> Bind(stmt=null,portal=null) +23:00:30.346 (1) 
FE=> Execute(portal=null,limit=0) +23:00:30.347 (1) FE=> Parse(stmt=null,query="BEGIN",oids={}) +23:00:30.347 (1) FE=> Bind(stmt=null,portal=null) +23:00:30.347 (1) FE=> Describe(portal=null) +23:00:30.347 (1) FE=> Execute(portal=null,limit=0) +23:00:30.347 (1) FE=> Sync +23:00:30.348 (1) <=BE ParseComplete [null] +23:00:30.348 (1) <=BE BindComplete [unnamed] +23:00:30.348 (1) <=BE CommandStatus(BEGIN) +23:00:30.348 (1) <=BE ParseComplete [null] +23:00:30.348 (1) <=BE BindComplete [unnamed] +23:00:30.348 (1) <=BE NoData +23:00:30.348 (1) <=BE NoticeResponse(WARNING: there is already a transaction in progress +Location: File: xact.c, Routine: BeginTransactionBlock, Line: 3279 +Server SQLState: 25001) +23:00:30.348 (1) <=BE CommandStatus(BEGIN) +23:00:30.348 (1) <=BE ReadyForQuery(T) +23:00:30.351 (1) batch execute 6 queries, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$BatchResultHandler@5cb0d902, maxRows=0, fetchSize=0, flags=516 +23:00:30.351 (1) FE=> Parse(stmt=S_1,query="insert into prep(a,b) values($1::int4,$2)",oids={23,23}) +23:00:30.351 (1) FE=> Bind(stmt=S_1,portal=null,$1=<2>,$2=<2>) +23:00:30.351 (1) FE=> Describe(portal=null) +23:00:30.351 (1) FE=> Execute(portal=null,limit=1) +23:00:30.351 (1) FE=> Bind(stmt=S_1,portal=null,$1=<2>,$2=<2>) +23:00:30.351 (1) FE=> Describe(portal=null) +23:00:30.351 (1) FE=> Execute(portal=null,limit=1) +23:00:30.352 (1) FE=> Bind(stmt=S_1,portal=null,$1=<2>,$2=<2>) +23:00:30.352 (1) FE=> Describe(portal=null) +23:00:30.352 (1) FE=> Execute(portal=null,limit=1) +23:00:30.352 (1) FE=> Bind(stmt=S_1,portal=null,$1=<2>,$2=<2>) +23:00:30.352 (1) FE=> Describe(portal=null) +23:00:30.352 (1) FE=> Execute(portal=null,limit=1) +23:00:30.352 (1) FE=> Bind(stmt=S_1,portal=null,$1=<2>,$2=<2>) +23:00:30.352 (1) FE=> Describe(portal=null) +23:00:30.352 (1) FE=> Execute(portal=null,limit=1) +23:00:30.352 (1) FE=> Parse(stmt=S_2,query="insert into prep(a,b) values($1::int4,$2)",oids={1043,23}) +23:00:30.353 (1) FE=> 
Bind(stmt=S_2,portal=null,$1=<'1'>,$2=<2>) +23:00:30.353 (1) FE=> Describe(portal=null) +23:00:30.353 (1) FE=> Execute(portal=null,limit=1) +23:00:30.353 (1) FE=> Sync +23:00:30.354 (1) <=BE ParseComplete [S_2] +23:00:30.354 (1) <=BE BindComplete [unnamed] +23:00:30.354 (1) <=BE NoData +23:00:30.354 (1) <=BE CommandStatus(INSERT 0 1) +23:00:30.355 (1) <=BE BindComplete [unnamed] +23:00:30.355 (1) <=BE NoData +23:00:30.355 (1) <=BE CommandStatus(INSERT 0 1) +23:00:30.355 (1) <=BE BindComplete [unnamed] +23:00:30.355 (1) <=BE NoData +23:00:30.355 (1) <=BE CommandStatus(INSERT 0 1) +23:00:30.355 (1) <=BE BindComplete [unnamed] +23:00:30.355 (1) <=BE NoData +23:00:30.355 (1) <=BE CommandStatus(INSERT 0 1) +23:00:30.355 (1) <=BE BindComplete [unnamed] +23:00:30.355 (1) <=BE NoData +23:00:30.356 (1) <=BE CommandStatus(INSERT 0 1) +23:00:30.356 (1) <=BE ParseComplete [S_2] +23:00:30.356 (1) <=BE BindComplete [unnamed] +23:00:30.356 (1) <=BE NoData +23:00:30.356 (1) <=BE CommandStatus(INSERT 0 1) +23:00:30.356 (1) <=BE ReadyForQuery(T) +23:00:30.356 (1) batch execute 1 queries, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$BatchResultHandler@5ef04b5, maxRows=0, fetchSize=0, flags=516 +23:00:30.356 (1) FE=> CloseStatement(S_2) +23:00:30.356 (1) FE=> Bind(stmt=S_2,portal=null,$1=<'2'>,$2=<2>) +23:00:30.356 (1) FE=> Describe(portal=null) +23:00:30.356 (1) FE=> Execute(portal=null,limit=1) +23:00:30.357 (1) FE=> Sync +23:00:30.357 (1) <=BE CloseComplete +23:00:30.357 (1) <=BE ErrorMessage(ERROR: prepared statement "S_2" does not exist +Location: File: prepare.c, Routine: FetchPreparedStatement, Line: 505 +Server SQLState: 26000) +23:00:30.358 (1) <=BE ReadyForQuery(E) +23:00:30.358 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Connection$TransactionCommandHandler@5f4da5c3, maxRows=0, fetchSize=0, flags=22 +23:00:30.358 (1) FE=> Parse(stmt=S_3,query="COMMIT",oids={}) +23:00:30.358 (1) FE=> Bind(stmt=S_3,portal=null) +23:00:30.358 (1) FE=> 
Execute(portal=null,limit=1) +23:00:30.358 (1) FE=> Sync +23:00:30.359 (1) <=BE ParseComplete [S_3] +23:00:30.359 (1) <=BE BindComplete [unnamed] +23:00:30.359 (1) <=BE CommandStatus(ROLLBACK) +23:00:30.359 (1) <=BE ReadyForQuery(I) +23:00:30.359 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@14514713, maxRows=0, fetchSize=0, flags=21 +23:00:30.359 (1) FE=> Parse(stmt=null,query="DROP TABLE testbatch CASCADE ",oids={}) +23:00:30.359 (1) FE=> Bind(stmt=null,portal=null) +23:00:30.359 (1) FE=> Describe(portal=null) +23:00:30.359 (1) FE=> Execute(portal=null,limit=1) +23:00:30.359 (1) FE=> Sync +23:00:30.360 (1) <=BE ParseComplete [null] +23:00:30.360 (1) <=BE BindComplete [unnamed] +23:00:30.360 (1) <=BE NoData +23:00:30.361 (1) <=BE CommandStatus(DROP TABLE) +23:00:30.361 (1) <=BE ReadyForQuery(I) +23:00:30.361 (1) FE=> Terminate + +org.postgresql.util.PSQLException: ERROR: prepared statement "S_2" does not exist +Location: File: prepare.c, Routine: FetchPreparedStatement, Line: 505 +Server SQLState: 26000 + +at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2183) +at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:1912) +at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:338) +at org.postgresql.jdbc2.AbstractJdbc2Statement.executeBatch(AbstractJdbc2Statement.java:2959) +at org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes(BatchExecuteTest.java:457) + */ + + /* + Trace after the fix: +23:15:33.776 (1) PostgreSQL 9.4 JDBC4.1 (build 1206) +23:15:33.785 (1) Trying to establish a protocol version 3 connection to localhost:5432 +23:15:33.804 (1) Receive Buffer Size is 408300 +23:15:33.804 (1) Send Buffer Size is 146988 +23:15:33.813 (1) FE=> StartupPacket(user=postgres, database=vladimirsitnikov, client_encoding=UTF8, DateStyle=ISO, TimeZone=Europe/Volgograd, extra_float_digits=2) +23:15:33.816 (1) <=BE 
AuthenticationOk +23:15:33.827 (1) <=BE ParameterStatus(application_name = ) +23:15:33.827 (1) <=BE ParameterStatus(client_encoding = UTF8) +23:15:33.827 (1) <=BE ParameterStatus(DateStyle = ISO, DMY) +23:15:33.827 (1) <=BE ParameterStatus(integer_datetimes = on) +23:15:33.827 (1) <=BE ParameterStatus(IntervalStyle = postgres) +23:15:33.827 (1) <=BE ParameterStatus(is_superuser = on) +23:15:33.827 (1) <=BE ParameterStatus(server_encoding = SQL_ASCII) +23:15:33.828 (1) <=BE ParameterStatus(server_version = 9.4.5) +23:15:33.828 (1) <=BE ParameterStatus(session_authorization = postgres) +23:15:33.828 (1) <=BE ParameterStatus(standard_conforming_strings = on) +23:15:33.828 (1) <=BE ParameterStatus(TimeZone = Europe/Volgograd) +23:15:33.828 (1) <=BE BackendKeyData(pid=82726,ckey=1081936502) +23:15:33.828 (1) <=BE ReadyForQuery(I) +23:15:33.832 (1) simple execute, handler=org.postgresql.core.SetupQueryRunner$SimpleResultHandler@531d72ca, maxRows=0, fetchSize=0, flags=23 +23:15:33.832 (1) FE=> Parse(stmt=null,query="SET extra_float_digits = 3",oids={}) +23:15:33.833 (1) FE=> Bind(stmt=null,portal=null) +23:15:33.833 (1) FE=> Execute(portal=null,limit=1) +23:15:33.834 (1) FE=> Sync +23:15:33.836 (1) <=BE ParseComplete [null] +23:15:33.836 (1) <=BE BindComplete [unnamed] +23:15:33.836 (1) <=BE CommandStatus(SET) +23:15:33.836 (1) <=BE ReadyForQuery(I) +23:15:33.837 (1) compatible = 90400 +23:15:33.837 (1) loglevel = 10 +23:15:33.837 (1) prepare threshold = 5 +23:15:33.839 (1) types using binary send = TIMESTAMPTZ,UUID,INT2_ARRAY,INT4_ARRAY,BYTEA,TEXT_ARRAY,TIMETZ,INT8,INT2,INT4,VARCHAR_ARRAY,INT8_ARRAY,POINT,TIMESTAMP,TIME,BOX,FLOAT4,FLOAT8,FLOAT4_ARRAY,FLOAT8_ARRAY +23:15:33.841 (1) types using binary receive = TIMESTAMPTZ,UUID,INT2_ARRAY,INT4_ARRAY,BYTEA,TEXT_ARRAY,TIMETZ,INT8,INT2,INT4,VARCHAR_ARRAY,INT8_ARRAY,POINT,DATE,TIMESTAMP,TIME,BOX,FLOAT4,FLOAT8,FLOAT4_ARRAY,FLOAT8_ARRAY +23:15:33.841 (1) integer date/time = true +23:15:33.899 (1) simple execute, 
handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@255316f2, maxRows=0, fetchSize=0, flags=21 +23:15:33.899 (1) FE=> Parse(stmt=null,query="DROP TABLE testbatch CASCADE ",oids={}) +23:15:33.899 (1) FE=> Bind(stmt=null,portal=null) +23:15:33.899 (1) FE=> Describe(portal=null) +23:15:33.900 (1) FE=> Execute(portal=null,limit=1) +23:15:33.900 (1) FE=> Sync +23:15:33.900 (1) <=BE ParseComplete [null] +23:15:33.900 (1) <=BE BindComplete [unnamed] +23:15:33.900 (1) <=BE NoData +23:15:33.905 (1) <=BE ErrorMessage(ERROR: table "testbatch" does not exist +Location: File: tablecmds.c, Routine: DropErrorMsgNonExistent, Line: 727 +Server SQLState: 42P01) +23:15:33.906 (1) <=BE ReadyForQuery(I) +23:15:33.906 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@4b9af9a9, maxRows=0, fetchSize=0, flags=21 +23:15:33.906 (1) FE=> Parse(stmt=null,query="CREATE TABLE testbatch (pk INTEGER, col1 INTEGER) ",oids={}) +23:15:33.907 (1) FE=> Bind(stmt=null,portal=null) +23:15:33.907 (1) FE=> Describe(portal=null) +23:15:33.907 (1) FE=> Execute(portal=null,limit=1) +23:15:33.907 (1) FE=> Sync +23:15:33.911 (1) <=BE ParseComplete [null] +23:15:33.912 (1) <=BE BindComplete [unnamed] +23:15:33.912 (1) <=BE NoData +23:15:33.912 (1) <=BE CommandStatus(CREATE TABLE) +23:15:33.912 (1) <=BE ReadyForQuery(I) +23:15:33.912 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@5387f9e0, maxRows=0, fetchSize=0, flags=21 +23:15:33.912 (1) FE=> Parse(stmt=null,query="INSERT INTO testbatch VALUES (1, 0)",oids={}) +23:15:33.913 (1) FE=> Bind(stmt=null,portal=null) +23:15:33.913 (1) FE=> Describe(portal=null) +23:15:33.913 (1) FE=> Execute(portal=null,limit=1) +23:15:33.913 (1) FE=> Sync +23:15:33.914 (1) <=BE ParseComplete [null] +23:15:33.914 (1) <=BE BindComplete [unnamed] +23:15:33.914 (1) <=BE NoData +23:15:33.914 (1) <=BE CommandStatus(INSERT 0 1) +23:15:33.914 (1) <=BE ReadyForQuery(I) 
+23:15:33.914 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@6e5e91e4, maxRows=0, fetchSize=0, flags=21 +23:15:33.914 (1) FE=> Parse(stmt=null,query="DROP TABLE prep CASCADE ",oids={}) +23:15:33.914 (1) FE=> Bind(stmt=null,portal=null) +23:15:33.914 (1) FE=> Describe(portal=null) +23:15:33.914 (1) FE=> Execute(portal=null,limit=1) +23:15:33.915 (1) FE=> Sync +23:15:33.916 (1) <=BE ParseComplete [null] +23:15:33.916 (1) <=BE BindComplete [unnamed] +23:15:33.916 (1) <=BE NoData +23:15:33.917 (1) <=BE CommandStatus(DROP TABLE) +23:15:33.917 (1) <=BE ReadyForQuery(I) +23:15:33.917 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@2cdf8d8a, maxRows=0, fetchSize=0, flags=21 +23:15:33.917 (1) FE=> Parse(stmt=null,query="CREATE TABLE prep (a integer, b integer) ",oids={}) +23:15:33.917 (1) FE=> Bind(stmt=null,portal=null) +23:15:33.917 (1) FE=> Describe(portal=null) +23:15:33.917 (1) FE=> Execute(portal=null,limit=1) +23:15:33.917 (1) FE=> Sync +23:15:33.919 (1) <=BE ParseComplete [null] +23:15:33.919 (1) <=BE BindComplete [unnamed] +23:15:33.919 (1) <=BE NoData +23:15:33.919 (1) <=BE CommandStatus(CREATE TABLE) +23:15:33.919 (1) <=BE ReadyForQuery(I) +23:15:33.919 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@30946e09, maxRows=0, fetchSize=0, flags=1 +23:15:33.919 (1) FE=> Parse(stmt=null,query="BEGIN",oids={}) +23:15:33.920 (1) FE=> Bind(stmt=null,portal=null) +23:15:33.920 (1) FE=> Execute(portal=null,limit=0) +23:15:33.920 (1) FE=> Parse(stmt=null,query="BEGIN",oids={}) +23:15:33.920 (1) FE=> Bind(stmt=null,portal=null) +23:15:33.920 (1) FE=> Describe(portal=null) +23:15:33.920 (1) FE=> Execute(portal=null,limit=0) +23:15:33.921 (1) FE=> Sync +23:15:33.921 (1) <=BE ParseComplete [null] +23:15:33.921 (1) <=BE BindComplete [unnamed] +23:15:33.921 (1) <=BE CommandStatus(BEGIN) +23:15:33.921 (1) <=BE ParseComplete [null] 
+23:15:33.921 (1) <=BE BindComplete [unnamed] +23:15:33.921 (1) <=BE NoData +23:15:33.921 (1) <=BE NoticeResponse(WARNING: there is already a transaction in progress +Location: File: xact.c, Routine: BeginTransactionBlock, Line: 3279 +Server SQLState: 25001) +23:15:33.922 (1) <=BE CommandStatus(BEGIN) +23:15:33.922 (1) <=BE ReadyForQuery(T) +23:15:33.924 (1) batch execute 6 queries, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$BatchResultHandler@5cb0d902, maxRows=0, fetchSize=0, flags=516 +23:15:33.924 (1) FE=> Parse(stmt=S_1,query="insert into prep(a,b) values($1::int4,$2)",oids={23,23}) +23:15:33.925 (1) FE=> Bind(stmt=S_1,portal=null,$1=<2>,$2=<2>) +23:15:33.925 (1) FE=> Describe(portal=null) +23:15:33.925 (1) FE=> Execute(portal=null,limit=1) +23:15:33.925 (1) FE=> Bind(stmt=S_1,portal=null,$1=<2>,$2=<2>) +23:15:33.925 (1) FE=> Describe(portal=null) +23:15:33.925 (1) FE=> Execute(portal=null,limit=1) +23:15:33.925 (1) FE=> Bind(stmt=S_1,portal=null,$1=<2>,$2=<2>) +23:15:33.925 (1) FE=> Describe(portal=null) +23:15:33.925 (1) FE=> Execute(portal=null,limit=1) +23:15:33.925 (1) FE=> Bind(stmt=S_1,portal=null,$1=<2>,$2=<2>) +23:15:33.926 (1) FE=> Describe(portal=null) +23:15:33.926 (1) FE=> Execute(portal=null,limit=1) +23:15:33.926 (1) FE=> Bind(stmt=S_1,portal=null,$1=<2>,$2=<2>) +23:15:33.926 (1) FE=> Describe(portal=null) +23:15:33.926 (1) FE=> Execute(portal=null,limit=1) +23:15:33.926 (1) FE=> CloseStatement(S_1) +23:15:33.926 (1) FE=> Parse(stmt=S_2,query="insert into prep(a,b) values($1::int4,$2)",oids={1043,23}) +23:15:33.926 (1) FE=> Bind(stmt=S_2,portal=null,$1=<'1'>,$2=<2>) +23:15:33.927 (1) FE=> Describe(portal=null) +23:15:33.927 (1) FE=> Execute(portal=null,limit=1) +23:15:33.927 (1) FE=> Sync +23:15:33.928 (1) <=BE ParseComplete [S_2] +23:15:33.928 (1) <=BE BindComplete [unnamed] +23:15:33.928 (1) <=BE NoData +23:15:33.928 (1) <=BE CommandStatus(INSERT 0 1) +23:15:33.928 (1) <=BE BindComplete [unnamed] +23:15:33.928 (1) <=BE NoData 
+23:15:33.928 (1) <=BE CommandStatus(INSERT 0 1) +23:15:33.929 (1) <=BE BindComplete [unnamed] +23:15:33.929 (1) <=BE NoData +23:15:33.929 (1) <=BE CommandStatus(INSERT 0 1) +23:15:33.929 (1) <=BE BindComplete [unnamed] +23:15:33.929 (1) <=BE NoData +23:15:33.929 (1) <=BE CommandStatus(INSERT 0 1) +23:15:33.929 (1) <=BE BindComplete [unnamed] +23:15:33.929 (1) <=BE NoData +23:15:33.929 (1) <=BE CommandStatus(INSERT 0 1) +23:15:33.929 (1) <=BE CloseComplete +23:15:33.929 (1) <=BE ParseComplete [S_2] +23:15:33.929 (1) <=BE BindComplete [unnamed] +23:15:33.929 (1) <=BE NoData +23:15:33.930 (1) <=BE CommandStatus(INSERT 0 1) +23:15:33.930 (1) <=BE ReadyForQuery(T) +23:15:33.930 (1) batch execute 1 queries, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$BatchResultHandler@5ef04b5, maxRows=0, fetchSize=0, flags=516 +23:15:33.930 (1) FE=> Bind(stmt=S_2,portal=null,$1=<'2'>,$2=<2>) +23:15:33.930 (1) FE=> Describe(portal=null) +23:15:33.930 (1) FE=> Execute(portal=null,limit=1) +23:15:33.930 (1) FE=> Sync +23:15:33.930 (1) <=BE BindComplete [unnamed] +23:15:33.931 (1) <=BE NoData +23:15:33.931 (1) <=BE CommandStatus(INSERT 0 1) +23:15:33.931 (1) <=BE ReadyForQuery(T) +23:15:33.931 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@5f4da5c3, maxRows=0, fetchSize=0, flags=1 +23:15:33.931 (1) FE=> Parse(stmt=null,query="COMMIT",oids={}) +23:15:33.931 (1) FE=> Bind(stmt=null,portal=null) +23:15:33.931 (1) FE=> Describe(portal=null) +23:15:33.931 (1) FE=> Execute(portal=null,limit=0) +23:15:33.931 (1) FE=> Sync +23:15:33.932 (1) <=BE ParseComplete [null] +23:15:33.932 (1) <=BE BindComplete [unnamed] +23:15:33.932 (1) <=BE NoData +23:15:33.932 (1) <=BE CommandStatus(COMMIT) +23:15:33.932 (1) <=BE ReadyForQuery(I) +23:15:33.932 (1) simple execute, handler=org.postgresql.jdbc2.AbstractJdbc2Statement$StatementResultHandler@443b7951, maxRows=0, fetchSize=0, flags=21 +23:15:33.932 (1) FE=> Parse(stmt=null,query="DROP TABLE testbatch 
CASCADE ",oids={}) +23:15:33.933 (1) FE=> Bind(stmt=null,portal=null) +23:15:33.933 (1) FE=> Describe(portal=null) +23:15:33.933 (1) FE=> Execute(portal=null,limit=1) +23:15:33.933 (1) FE=> Sync +23:15:33.934 (1) <=BE ParseComplete [null] +23:15:33.934 (1) <=BE BindComplete [unnamed] +23:15:33.934 (1) <=BE NoData +23:15:33.934 (1) <=BE CommandStatus(DROP TABLE) +23:15:33.934 (1) <=BE ReadyForQuery(I) +23:15:33.934 (1) FE=> Terminate + */ + } + + @Test + public void testSmallBatchUpdateFailureSimple() throws SQLException { + con.setAutoCommit(true); + + // update as batch + PreparedStatement batchSt = con.prepareStatement("INSERT INTO batchUpdCnt(id) VALUES (?)"); + batchSt.setString(1, "key-1"); + batchSt.addBatch(); + + batchSt.setString(1, "key-2"); + batchSt.addBatch(); + + int[] batchResult; + try { + batchResult = batchSt.executeBatch(); + Assert.fail("Expecting BatchUpdateException as key-2 is duplicated in batchUpdCnt.id. " + + " executeBatch returned " + Arrays.toString(batchResult)); + } catch (BatchUpdateException ex) { + batchResult = ex.getUpdateCounts(); + } finally { + TestUtil.closeQuietly(batchSt); + } + + int newCount = getBatchUpdCount(); + if (newCount == 2) { + // key-1 did succeed + Assert.assertTrue("batchResult[0] should be 1 or SUCCESS_NO_INFO since 'key-1' was inserted," + + " actual result is " + Arrays.toString(batchResult), + batchResult[0] == 1 || batchResult[0] == Statement.SUCCESS_NO_INFO); + } else { + Assert.assertTrue("batchResult[0] should be 0 or EXECUTE_FAILED since 'key-1' was NOT inserted," + + " actual result is " + Arrays.toString(batchResult), + batchResult[0] == 0 || batchResult[0] == Statement.EXECUTE_FAILED); + } + + Assert.assertEquals("'key-2' insertion should have failed", + Statement.EXECUTE_FAILED, batchResult[1]); + } + + private int getBatchUpdCount() throws SQLException { + PreparedStatement ps = con.prepareStatement("select count(*) from batchUpdCnt"); + ResultSet rs = ps.executeQuery(); + 
Assert.assertTrue("count(*) must return 1 row", rs.next()); + return rs.getInt(1); + } + + /** + * Check batching using two individual statements that are both the same type. + * Test coverage to check default behaviour is not broken. + * @throws SQLException for issues during test + */ + @Test + public void testBatchWithRepeatedInsertStatement() throws SQLException { + PreparedStatement pstmt = null; + /* Optimization to re-write insert statements is disabled by default. + * Do nothing here. + */ + try { + pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?)"); + pstmt.setInt(1, 1); + pstmt.setInt(2, 1); + pstmt.addBatch(); //statement one + pstmt.setInt(1, 2); + pstmt.setInt(2, 2); + pstmt.addBatch();//statement two + int[] outcome = pstmt.executeBatch(); + + Assert.assertNotNull(outcome); + Assert.assertEquals(2, outcome.length); + int rowsInserted = insertRewrite ? Statement.SUCCESS_NO_INFO : 1; + Assert.assertEquals(rowsInserted, outcome[0]); + Assert.assertEquals(rowsInserted, outcome[1]); + } catch (SQLException sqle) { + Assert.fail("Failed to execute two statements added to a batch. Reason:" + sqle.getMessage()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * Test case to make sure the update counter is correct for the + * one statement executed. Test coverage to check default behaviour is + * not broken. + * @throws SQLException for issues during test + */ + @Test + public void testBatchWithMultiInsert() throws SQLException { + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?),(?,?)"); + pstmt.setInt(1, 1); + pstmt.setInt(2, 1); + pstmt.setInt(3, 2); + pstmt.setInt(4, 2); + pstmt.addBatch();//statement one + int[] outcome = pstmt.executeBatch(); + Assert.assertNotNull(outcome); + Assert.assertEquals(1, outcome.length); + Assert.assertEquals(2, outcome[0]); + } catch (SQLException sqle) { + Assert.fail("Failed to execute two statements added to a batch. 
Reason:" + sqle.getMessage()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * Test case to make sure the update counter is correct for the + * two double-row statements executed. Test coverage to check default behaviour is + * not broken. + * @throws SQLException for issues during test + */ + @Test + public void testBatchWithTwoMultiInsertStatements() throws SQLException { + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?),(?,?)"); + pstmt.setInt(1, 1); + pstmt.setInt(2, 1); + pstmt.setInt(3, 2); + pstmt.setInt(4, 2); + pstmt.addBatch(); //statement one + pstmt.setInt(1, 3); + pstmt.setInt(2, 3); + pstmt.setInt(3, 4); + pstmt.setInt(4, 4); + pstmt.addBatch(); //statement two + int[] outcome = pstmt.executeBatch(); + int rowsInserted = insertRewrite ? Statement.SUCCESS_NO_INFO : 2; + Assert.assertEquals( + "Inserting two multi-valued statements with two rows each. Expecting {2, 2} rows inserted (or SUCCESS_NO_INFO)", + Arrays.toString(new int[]{rowsInserted, rowsInserted}), + Arrays.toString(outcome)); + } catch (SQLException sqle) { + Assert.fail("Failed to execute two statements added to a batch. 
Reason:" + sqle.getMessage()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + public static void assertSimpleInsertBatch(int n, int[] actual) { + int[] expected = new int[n]; + Arrays.fill(expected, 1); + assertBatchResult(n + " addBatch, 1 row each", expected, actual); + } + + public static void assertBatchResult(String message, int[] expected, int[] actual) { + int[] clone = expected.clone(); + boolean hasChanges = false; + for (int i = 0; i < actual.length; i++) { + int a = actual[i]; + if (a == Statement.SUCCESS_NO_INFO && expected[i] >= 0) { + clone[i] = a; + hasChanges = true; + } + } + if (hasChanges) { + message += ", original expectation: " + Arrays.toString(expected); + } + Assert.assertEquals( + message, + Arrays.toString(clone), + Arrays.toString(actual)); + } + + @Test + public void testServerPrepareMultipleRows() throws SQLException { + PreparedStatement ps = null; + try { + ps = con.prepareStatement("INSERT INTO prep(a) VALUES (?)"); + // 2 is not enough for insertRewrite=true case since it would get executed as a single multi-insert statement + for (int i = 0; i < 3; i++) { + ps.setInt(1, i); + ps.addBatch(); + } + int[] actual = ps.executeBatch(); + Assert.assertTrue( + "More than 1 row is inserted via executeBatch, it should lead to multiple server statements, thus the statements should be server-prepared", + ((PGStatement) ps).isUseServerPrepare()); + assertBatchResult("3 rows inserted via batch", new int[]{1, 1, 1}, actual); + } finally { + TestUtil.closeQuietly(ps); + } + } + + @Test + public void testNoServerPrepareOneRow() throws SQLException { + PreparedStatement ps = null; + try { + ps = con.prepareStatement("INSERT INTO prep(a) VALUES (?)"); + ps.setInt(1, 1); + ps.addBatch(); + int[] actual = ps.executeBatch(); + int prepareThreshold = ((PGStatement) ps).getPrepareThreshold(); + if (prepareThreshold == 1) { + Assert.assertTrue( + "prepareThreshold=" + prepareThreshold + + " thus the statement should be server-prepared", + 
((PGStatement) ps).isUseServerPrepare()); + } else { + Assert.assertFalse( + "Just one row inserted via executeBatch, prepareThreshold=" + prepareThreshold + + " thus the statement should not be server-prepared", + ((PGStatement) ps).isUseServerPrepare()); + } + assertBatchResult("1 rows inserted via batch", new int[]{1}, actual); + } finally { + TestUtil.closeQuietly(ps); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchFailureTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchFailureTest.java new file mode 100644 index 0000000..3d25615 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchFailureTest.java @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.BatchUpdateException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; + +@RunWith(Parameterized.class) +public class BatchFailureTest extends BaseTest4 { + private final BatchType batchType; + private final AutoCommit autoCommit; + private final FailMode failMode; + private final FailPosition failPosition; + private final BinaryMode binaryMode; + private final boolean insertRewrite; + + enum BatchType { + SIMPLE { + @Override + public Statement createStatement(Connection con) throws SQLException { + return con.createStatement(); + } + }, + PREPARED { + @Override + public Statement createStatement(Connection con) 
throws SQLException { + return con.prepareStatement("INSERT INTO batchUpdCnt(id) VALUES (?)"); + } + }, + PREPARED_WITH_GENERATED { + @Override + public Statement createStatement(Connection con) throws SQLException { + return con.prepareStatement("INSERT INTO batchUpdCnt(id) VALUES (?)", new String[]{"id"}); + } + }; + + public abstract Statement createStatement(Connection con) throws SQLException; + + public void addRow(Statement statement, String value) throws SQLException { + switch (this) { + case SIMPLE: + statement.addBatch("INSERT INTO batchUpdCnt(id) VALUES ('" + value + "')"); + break; + case PREPARED: + case PREPARED_WITH_GENERATED: + PreparedStatement ps = (PreparedStatement) statement; + ps.setString(1, value); + ps.addBatch(); + break; + } + } + } + + private enum FailMode { + NO_FAIL_JUST_INSERTS, NO_FAIL_SELECT, + FAIL_VIA_SELECT_PARSE, FAIL_VIA_SELECT_RUNTIME, + FAIL_VIA_DUP_KEY; + + public boolean supports(BatchType batchType) { + return batchType != BatchType.SIMPLE ^ this.name().contains("SELECT"); + } + + public void injectFailure(Statement statement, BatchType batchType) throws SQLException { + switch (this) { + case NO_FAIL_JUST_INSERTS: + break; + case NO_FAIL_SELECT: + statement.addBatch("select 1 union all select 2"); + break; + case FAIL_VIA_SELECT_RUNTIME: + statement.addBatch("select 0/count(*) where 1=2"); + break; + case FAIL_VIA_SELECT_PARSE: + statement.addBatch("seeeeleeeect 1"); + break; + case FAIL_VIA_DUP_KEY: + batchType.addRow(statement, "key-2"); + break; + default: + throw new IllegalArgumentException("Unexpected value " + this); + } + } + } + + private enum FailPosition { + NONE, FIRST_ROW, SECOND_ROW, MIDDLE, ALMOST_LAST_ROW, LAST_ROW; + + public boolean supports(FailMode mode) { + return this == NONE ^ mode.name().startsWith("FAIL"); + } + } + + public BatchFailureTest(BatchType batchType, AutoCommit autoCommit, + FailMode failMode, FailPosition failPosition, BinaryMode binaryMode, + boolean insertRewrite) { + 
this.batchType = batchType; + this.autoCommit = autoCommit; + this.failMode = failMode; + this.failPosition = failPosition; + this.binaryMode = binaryMode; + this.insertRewrite = insertRewrite; + } + + @Parameterized.Parameters(name = "{index}: batchTest(mode={2}, position={3}, autoCommit={1}, batchType={0}, generateKeys={1}, binary={4}, insertRewrite={5})") + public static Iterable data() { + Collection ids = new ArrayList<>(); + boolean[] booleans = new boolean[]{true, false}; + for (BatchType batchType : BatchType.values()) { + for (FailMode failMode : FailMode.values()) { + if (!failMode.supports(batchType)) { + continue; + } + for (FailPosition failPosition : FailPosition.values()) { + if (!failPosition.supports(failMode)) { + continue; + } + for (AutoCommit autoCommit : AutoCommit.values()) { + for (BinaryMode binaryMode : BinaryMode.values()) { + for (boolean insertRewrite : booleans) { + ids.add(new Object[]{batchType, autoCommit, failMode, failPosition, binaryMode, insertRewrite}); + } + } + } + } + } + } + return ids; + } + + @Override + protected void updateProperties(Properties props) { + if (binaryMode == BinaryMode.FORCE) { + forceBinary(props); + } + PGProperty.REWRITE_BATCHED_INSERTS.set(props, insertRewrite); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTempTable(con, "batchUpdCnt", "id varchar(512) primary key, data varchar(512)"); + Statement stmt = con.createStatement(); + stmt.executeUpdate("INSERT INTO batchUpdCnt(id) VALUES ('key-2')"); + stmt.close(); + con.setAutoCommit(autoCommit == AutoCommit.YES); + } + + @Test + public void run() throws SQLException { + Statement statement = batchType.createStatement(con); + + int minBatchResults = 0; + int pos = 0; + if (failPosition == FailPosition.FIRST_ROW) { + failMode.injectFailure(statement, batchType); + pos++; + minBatchResults = pos; + } + + batchType.addRow(statement, "key-1"); + pos++; + + if (failPosition == FailPosition.SECOND_ROW) { + 
failMode.injectFailure(statement, batchType); + pos++; + minBatchResults = pos; + } + + for (int i = 0; i < 1000; i++) { + batchType.addRow(statement, "key_" + i); + pos++; + if (failPosition == FailPosition.ALMOST_LAST_ROW && i == 997 + || failPosition == FailPosition.MIDDLE && i == 500) { + failMode.injectFailure(statement, batchType); + pos++; + minBatchResults = pos; + } + } + + if (failPosition == FailPosition.LAST_ROW) { + failMode.injectFailure(statement, batchType); + pos++; + minBatchResults = pos; + } + + List keys = new ArrayList<>(); + int[] batchResult; + int expectedRows = 1; + try { + batchResult = statement.executeBatch(); + Assert.assertTrue("Expecting BatchUpdateException due to " + failMode + + ", executeBatch returned " + Arrays.toString(batchResult), + failPosition == FailPosition.NONE); + expectedRows = pos + 1; // +1 since key-2 is already in the DB + } catch (BatchUpdateException ex) { + batchResult = ex.getUpdateCounts(); + Assert.assertTrue("Should not fail since fail mode should be " + failMode + + ", executeBatch returned " + Arrays.toString(batchResult), + failPosition != FailPosition.NONE); + + for (int i : batchResult) { + if (i != Statement.EXECUTE_FAILED) { + expectedRows++; + } + } + + Assert.assertTrue("Batch should fail at row " + minBatchResults + + ", thus at least " + minBatchResults + + " items should be returned, actual result is " + batchResult.length + " items, " + + Arrays.toString(batchResult), + batchResult.length >= minBatchResults); + } finally { + if (batchType == BatchType.PREPARED_WITH_GENERATED) { + ResultSet rs = statement.getGeneratedKeys(); + while (rs.next()) { + keys.add(rs.getString(1)); + } + } + statement.close(); + } + + if (!con.getAutoCommit()) { + con.commit(); + } + + int finalCount = getBatchUpdCount(); + Assert.assertEquals( + "Number of new rows in batchUpdCnt should match number of non-error batchResult items" + + Arrays.toString(batchResult), + expectedRows - 1, finalCount - 1); + + if (batchType 
!= BatchType.PREPARED_WITH_GENERATED) { + return; + } + + if (finalCount > 1) { + Assert.assertFalse((finalCount - 1) + " rows were inserted, thus expecting generated keys", + keys.isEmpty()); + } + Set uniqueKeys = new HashSet<>(keys); + Assert.assertEquals("Generated keys should be unique: " + keys, keys.size(), uniqueKeys.size()); + Assert.assertEquals("Number of generated keys should match the number of inserted rows" + keys, + keys.size(), finalCount - 1); + } + + private int getBatchUpdCount() throws SQLException { + PreparedStatement ps = con.prepareStatement("select count(*) from batchUpdCnt"); + ResultSet rs = ps.executeQuery(); + Assert.assertTrue("count(*) must return 1 row", rs.next()); + return rs.getInt(1); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchedInsertReWriteEnabledTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchedInsertReWriteEnabledTest.java new file mode 100644 index 0000000..b804e05 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchedInsertReWriteEnabledTest.java @@ -0,0 +1,449 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.PGProperty; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.BatchUpdateException; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Properties; + +@RunWith(Parameterized.class) +public class BatchedInsertReWriteEnabledTest extends BaseTest4 { + private final AutoCommit autoCommit; + + public BatchedInsertReWriteEnabledTest(AutoCommit autoCommit, + BinaryMode binaryMode) { + this.autoCommit = autoCommit; + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "{index}: autoCommit={0}, binary={1}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (AutoCommit autoCommit : AutoCommit.values()) { + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{autoCommit, binaryMode}); + } + } + return ids; + } + + /* Set up the fixture for this testcase: a connection to a database with + a table for this test. */ + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "testbatch", "pk INTEGER, col1 VARCHAR, col2 INTEGER"); + con.setAutoCommit(autoCommit == AutoCommit.YES); + } + + // Tear down the fixture for this test case. + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "testbatch"); + super.tearDown(); + } + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + PGProperty.REWRITE_BATCHED_INSERTS.set(props, true); + } + + /** + * Check batching using two individual statements that are both the same type. + * Test to check the re-write optimisation behaviour. 
+ */ + + @Test + public void testBatchWithReWrittenRepeatedInsertStatementOptimizationEnabled() + throws SQLException { + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?)"); + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.addBatch(); + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.addBatch(); + pstmt.setInt(1, 5); + pstmt.setInt(2, 6); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch()); + TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted"); + + /* + * Now check the ps can be reused. The batched statement should be reset + * and have no knowledge of prior re-written batch. This test uses a + * different batch size. To test if the driver detects the different size + * and prepares the statement on with the backend. If not then an + * exception will be thrown for an unknown prepared statement. + */ + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.addBatch(); + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.addBatch(); + pstmt.setInt(1, 5); + pstmt.setInt(2, 6); + pstmt.addBatch(); + pstmt.setInt(1, 7); + pstmt.setInt(2, 8); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch()); + TestUtil.assertNumberOfRows(con, "testbatch", 7, "3+4 rows inserted"); + + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.addBatch(); + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.addBatch(); + pstmt.setInt(1, 5); + pstmt.setInt(2, 6); + pstmt.addBatch(); + pstmt.setInt(1, 7); + pstmt.setInt(2, 8); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch()); + TestUtil.assertNumberOfRows(con, "testbatch", 11, "3+4+4 rows inserted"); + + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * Check batching using a statement with fixed parameter. 
+ */ + @Test + public void testBatchWithReWrittenBatchStatementWithFixedParameter() + throws SQLException { + String[] odd = new String[]{ + "INSERT INTO testbatch VALUES (?, '1, (, $1234, a''n?d )' /*xxxx)*/, ?) -- xxx", + // "INSERT /*xxx*/INTO testbatch VALUES (?, '1, (, $1234, a''n?d )' /*xxxx)*/, ?) -- xxx", + }; + for (String s : odd) { + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement(s); + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.addBatch(); + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.addBatch(); + pstmt.setInt(1, 5); + pstmt.setInt(2, 6); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch()); + TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted"); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + } + + /** + * Check batching using a statement with fixed parameters only. + */ + @Test + public void testBatchWithReWrittenBatchStatementWithFixedParametersOnly() + throws SQLException { + String[] odd = new String[]{ + "INSERT INTO testbatch VALUES (9, '1, (, $1234, a''n?d )' /*xxxx)*/, 7) -- xxx", + // "INSERT /*xxx*/INTO testbatch VALUES (?, '1, (, $1234, a''n?d )' /*xxxx)*/, ?) -- xxx", + }; + for (String s : odd) { + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement(s); + pstmt.addBatch(); + pstmt.addBatch(); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch()); + TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted"); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + } + + /** + * Test to make sure a statement with a semicolon is not broken. 
+ */ + private void simpleRewriteBatch(String values, String suffix) + throws SQLException { + PreparedStatement pstmt = null; + try { + PreparedStatement clean = con.prepareStatement("truncate table testbatch"); + clean.execute(); + clean.close(); + + pstmt = con.prepareStatement("INSERT INTO testbatch " + values + "(?,?,?)" + suffix); + pstmt.setInt(1, 1); + pstmt.setString(2, "a"); + pstmt.setInt(3, 2); + pstmt.addBatch(); + pstmt.setInt(1, 3); + pstmt.setString(2, "b"); + pstmt.setInt(3, 4); + pstmt.addBatch(); + pstmt.setInt(1, 5); + pstmt.setString(2, "c"); + pstmt.setInt(3, 6); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch()); + TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted"); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * Test to make sure a statement with a semicolon is not broken. + */ + @Test + public void testBatchWithReWrittenBatchStatementWithSemiColon() + throws SQLException { + simpleRewriteBatch("values", ";"); + } + + /** + * Test to make sure a statement with a semicolon is not broken. + */ + @Test + public void testBatchWithReWrittenSpaceAfterValues() + throws SQLException { + simpleRewriteBatch("values ", ""); + simpleRewriteBatch("values ", ""); + simpleRewriteBatch("values\t", ""); + } + + /** + * Test VALUES word with mixed case. + */ + @Test + public void testBatchWithReWrittenMixedCaseValues() + throws SQLException { + simpleRewriteBatch("vAlues", ""); + simpleRewriteBatch("vaLUES", ""); + simpleRewriteBatch("VALUES", ""); + } + + /** + * Test to make sure a statement with a semicolon is not broken. 
+ */ + @Test + public void testBindsInNestedParens() + throws SQLException { + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement("INSERT INTO testbatch VALUES ((?),((?)),?);"); + pstmt.setInt(1, 1); + pstmt.setString(2, "a"); + pstmt.setInt(3, 2); + pstmt.addBatch(); + pstmt.setInt(1, 3); + pstmt.setString(2, "b"); + pstmt.setInt(3, 4); + pstmt.addBatch(); + pstmt.setInt(1, 5); + pstmt.setString(2, "c"); + pstmt.setInt(3, 6); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * Test to make sure a statement with a semicolon is not broken. + */ + @Test + public void testMultiValues1bind() + throws SQLException { + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement("INSERT INTO testbatch (pk) VALUES (?), (?)"); + pstmt.setInt(1, 100); + pstmt.setInt(2, 200); + pstmt.addBatch(); + pstmt.setInt(1, 300); + pstmt.setInt(2, 400); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * Test case to check the outcome for a batch with a single row/batch is + * consistent across calls to executeBatch. Especially after a batch + * has been re-written. 
+ */ + @Test + public void testConsistentOutcome() throws SQLException { + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?,?);"); + pstmt.setInt(1, 1); + pstmt.setString(2, "a"); + pstmt.setInt(3, 2); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch()); + + pstmt.setInt(1, 1); + pstmt.setString(2, "b"); + pstmt.setInt(3, 2); + pstmt.addBatch(); + pstmt.setInt(1, 3); + pstmt.setString(2, "c"); + pstmt.setInt(3, 4); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch()); + + pstmt.setInt(1, 1); + pstmt.setString(2, "d"); + pstmt.setInt(3, 2); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * Test to check statement with named columns still work as expected. + */ + @Test + public void testINSERTwithNamedColumnsNotBroken() throws SQLException { + PreparedStatement pstmt = null; + try { + pstmt = con + .prepareStatement("INSERT INTO testbatch (pk, col1, col2) VALUES (?,?,?);"); + pstmt.setInt(1, 1); + pstmt.setString(2, "a"); + pstmt.setInt(3, 2); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + @Test + public void testMixedCaseInSeRtStatement() throws SQLException { + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement("InSeRt INTO testbatch VALUES (?,?,?);"); + pstmt.setInt(1, 1); + pstmt.setString(2, "a"); + pstmt.setInt(3, 2); + pstmt.addBatch(); + pstmt.setInt(1, 3); + pstmt.setString(2, "b"); + pstmt.setInt(3, 4); + pstmt.addBatch(); + BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch()); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + @Test + public void testReWriteDisabledForPlainBatch() throws Exception { + Statement stmt = null; + try { + con = TestUtil.openDB(new Properties()); + stmt = 
con.createStatement(); + stmt.addBatch("INSERT INTO testbatch VALUES (100,'a',200);"); + stmt.addBatch("INSERT INTO testbatch VALUES (300,'b',400);"); + Assert.assertEquals( + "Expected outcome not returned by batch execution. The driver" + + " allowed re-write in combination with plain statements.", + Arrays.toString(new int[]{1, 1}), Arrays.toString(stmt.executeBatch())); + } finally { + TestUtil.closeQuietly(stmt); + } + } + + @Test + public void test32767Binds() throws Exception { + testNBinds(32767); + } + + @Test + public void test32768Binds() throws Exception { + testNBinds(32768); + } + + @Test + public void test65535Binds() throws Exception { + testNBinds(65535); + } + + public void testNBinds(int nBinds) throws Exception { + PreparedStatement pstmt = null; + try { + StringBuilder sb = new StringBuilder(); + sb.append("INSERT INTO testbatch(pk) VALUES (coalesce(?"); + for (int i = 0; i < nBinds - 1 /* note one ? above */; i++) { + sb.append(",?"); + } + sb.append("))"); + pstmt = con.prepareStatement(sb.toString()); + for (int k = 0; k < 2; k++) { + for (int i = 1; i <= nBinds; i++) { + pstmt.setInt(i, i + k * nBinds); + } + pstmt.addBatch(); + } + if (nBinds * 2 <= 65535 || preferQueryMode == PreferQueryMode.SIMPLE) { + Assert.assertEquals( + "Insert with " + nBinds + " binds should be rewritten into multi-value insert" + + ", so expecting Statement.SUCCESS_NO_INFO == -2", + Arrays.toString(new int[]{Statement.SUCCESS_NO_INFO, Statement.SUCCESS_NO_INFO}), + Arrays.toString(pstmt.executeBatch())); + } else { + Assert.assertEquals( + "Insert with " + nBinds + " binds can't be rewritten into multi-value insert" + + " since write format allows 65535 binds maximum" + + ", so expecting batch to be executed as individual statements", + Arrays.toString(new int[]{1, 1}), + Arrays.toString(pstmt.executeBatch())); + } + } catch (BatchUpdateException be) { + SQLException e = be; + while (true) { + e.printStackTrace(); + SQLException next = e.getNextException(); + if 
(next == null) { + break; + } + e = next; + } + throw e; + } finally { + TestUtil.closeQuietly(pstmt); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTest.java new file mode 100644 index 0000000..2f835d9 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTest.java @@ -0,0 +1,538 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.largeobject.LargeObject; +import org.postgresql.largeobject.LargeObjectManager; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.InputStream; +import java.io.OutputStream; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; + +/** + * Some simple tests based on problems reported by users. 
Hopefully these will help prevent previous + * problems from re-occurring ;-) + */ +class BlobTest { + private static final String TEST_FILE = "/test-file.xml"; + + private static final int LOOP = 0; // LargeObject API using loop + private static final int NATIVE_STREAM = 1; // LargeObject API using OutputStream + + private Connection con; + + /* + Only do this once + */ + @BeforeAll + static void createLargeBlob() throws Exception { + try (Connection con = TestUtil.openDB()) { + TestUtil.createTable(con, "testblob", "id name,lo oid"); + con.setAutoCommit(false); + LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI(); + long oid = lom.createLO(LargeObjectManager.READWRITE); + LargeObject blob = lom.open(oid); + + byte[] buf = new byte[256]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) i; + } + // I want to create a large object + int i = 1024 / buf.length; + for (int j = i; j > 0; j--) { + blob.write(buf, 0, buf.length); + } + assertEquals(1024, blob.size()); + blob.close(); + try (PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(id, lo) VALUES(?,?)")) { + pstmt.setString(1, "l1"); + pstmt.setLong(2, oid); + pstmt.executeUpdate(); + } + con.commit(); + } + } + + @AfterAll + static void cleanup() throws Exception { + try (Connection con = TestUtil.openDB()) { + try (Statement stmt = con.createStatement()) { + stmt.execute("SELECT lo_unlink(lo) FROM testblob where id = 'l1'"); + } finally { + TestUtil.dropTable(con, "testblob"); + } + } + } + + @BeforeEach + void setUp() throws Exception { + con = TestUtil.openDB(); + con.setAutoCommit(false); + } + + @AfterEach + void tearDown() throws Exception { + con.setAutoCommit(true); + try (Statement stmt = con.createStatement()) { + stmt.execute("SELECT lo_unlink(lo) FROM testblob where id != 'l1'"); + stmt.execute("delete from testblob where id != 'l1'"); + } finally { + TestUtil.closeDB(con); + } + } + + @Test + void setNull() throws Exception { + try (PreparedStatement pstmt 
= con.prepareStatement("INSERT INTO testblob(lo) VALUES (?)")) { + + pstmt.setBlob(1, (Blob) null); + pstmt.executeUpdate(); + + pstmt.setNull(1, Types.BLOB); + pstmt.executeUpdate(); + + pstmt.setObject(1, null, Types.BLOB); + pstmt.executeUpdate(); + + pstmt.setClob(1, (Clob) null); + pstmt.executeUpdate(); + + pstmt.setNull(1, Types.CLOB); + pstmt.executeUpdate(); + + pstmt.setObject(1, null, Types.CLOB); + pstmt.executeUpdate(); + } + } + + @Test + void set() throws SQLException { + try (Statement stmt = con.createStatement()) { + stmt.execute("INSERT INTO testblob(id,lo) VALUES ('1', lo_creat(-1))"); + ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '1'"); + assertTrue(rs.next()); + + PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(id, lo) VALUES(?,?)"); + + Blob blob = rs.getBlob(1); + pstmt.setString(1, "setObjectTypeBlob"); + pstmt.setObject(2, blob, Types.BLOB); + assertEquals(1, pstmt.executeUpdate()); + + blob = rs.getBlob(1); + pstmt.setString(1, "setObjectBlob"); + pstmt.setObject(2, blob); + assertEquals(1, pstmt.executeUpdate()); + + blob = rs.getBlob(1); + pstmt.setString(1, "setBlob"); + pstmt.setBlob(2, blob); + assertEquals(1, pstmt.executeUpdate()); + + Clob clob = rs.getClob(1); + pstmt.setString(1, "setObjectTypeClob"); + pstmt.setObject(2, clob, Types.CLOB); + assertEquals(1, pstmt.executeUpdate()); + + clob = rs.getClob(1); + pstmt.setString(1, "setObjectClob"); + pstmt.setObject(2, clob); + assertEquals(1, pstmt.executeUpdate()); + + clob = rs.getClob(1); + pstmt.setString(1, "setClob"); + pstmt.setClob(2, clob); + assertEquals(1, pstmt.executeUpdate()); + } + } + + /* + * Tests one method of uploading a blob to the database + */ + @Test + void uploadBlob_LOOP() throws Exception { + assertTrue(uploadFile(TEST_FILE, LOOP) > 0); + + // Now compare the blob & the file. Note this actually tests the + // InputStream implementation! 
+ assertTrue(compareBlobsLOAPI(TEST_FILE)); + assertTrue(compareBlobs(TEST_FILE)); + assertTrue(compareClobs(TEST_FILE)); + } + + /* + * Tests one method of uploading a blob to the database + */ + @Test + void uploadBlob_NATIVE() throws Exception { + assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0); + + // Now compare the blob & the file. Note this actually tests the + // InputStream implementation! + assertTrue(compareBlobs(TEST_FILE)); + } + + @Test + void markResetStream() throws Exception { + assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0); + + try (Statement stmt = con.createStatement()) { + try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) { + assertTrue(rs.next()); + + LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI(); + + long oid = rs.getLong(1); + LargeObject blob = lom.open(oid); + InputStream bis = blob.getInputStream(); + + assertEquals('<', bis.read()); + bis.mark(4); + assertEquals('?', bis.read()); + assertEquals('x', bis.read()); + assertEquals('m', bis.read()); + assertEquals('l', bis.read()); + bis.reset(); + assertEquals('?', bis.read()); + } + } + } + + @Test + void getBytesOffset() throws Exception { + assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0); + + try (Statement stmt = con.createStatement()) { + try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) { + + assertTrue(rs.next()); + + Blob lob = rs.getBlob(1); + byte[] data = lob.getBytes(2, 4); + assertEquals(4, data.length); + assertEquals('?', data[0]); + assertEquals('x', data[1]); + assertEquals('m', data[2]); + assertEquals('l', data[3]); + } + } + } + + @Test + void multipleStreams() throws Exception { + assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0); + + try (Statement stmt = con.createStatement()) { + try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) { + assertTrue(rs.next()); + + Blob lob = rs.getBlob(1); + byte[] data 
= new byte[2]; + + InputStream is = lob.getBinaryStream(); + assertEquals(data.length, is.read(data)); + assertEquals('<', data[0]); + assertEquals('?', data[1]); + is.close(); + + is = lob.getBinaryStream(); + assertEquals(data.length, is.read(data)); + assertEquals('<', data[0]); + assertEquals('?', data[1]); + is.close(); + } + } + } + + @Test + void parallelStreams() throws Exception { + assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0); + + try (Statement stmt = con.createStatement()) { + try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) { + assertTrue(rs.next()); + + Blob lob = rs.getBlob(1); + InputStream is1 = lob.getBinaryStream(); + InputStream is2 = lob.getBinaryStream(); + + while (true) { + int i1 = is1.read(); + int i2 = is2.read(); + assertEquals(i1, i2); + if (i1 == -1) { + break; + } + } + + is1.close(); + is2.close(); + } + } + } + + @Test + void largeLargeObject() throws Exception { + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_3)) { + return; + } + + try (Statement stmt = con.createStatement()) { + stmt.execute("INSERT INTO testblob(id,lo) VALUES ('1', lo_creat(-1))"); + try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id ='1'")) { + assertTrue(rs.next()); + + Blob lob = rs.getBlob(1); + long length = ((long) Integer.MAX_VALUE) + 1024; + lob.truncate(length); + assertEquals(length, lob.length()); + } + } + } + + @Test + void largeObjectRead() throws Exception { + con.setAutoCommit(false); + LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI(); + try (Statement stmt = con.createStatement()) { + try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id='l1'")) { + assertTrue(rs.next()); + + long oid = rs.getLong(1); + try (InputStream lois = lom.open(oid).getInputStream()) { + // read half of the data with read + for (int j = 0; j < 512; j++) { + lois.read(); + } + byte[] buf2 = new byte[512]; + lois.read(buf2, 0, 512); + } + } + } + 
con.commit(); + } + + @Test + void largeObjectRead1() throws Exception { + con.setAutoCommit(false); + LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI(); + try (Statement stmt = con.createStatement()) { + try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id='l1'")) { + assertTrue(rs.next()); + + long oid = rs.getLong(1); + try (InputStream lois = lom.open(oid).getInputStream(512, 1024)) { + // read one byte + assertEquals(0, lois.read()); + byte[] buf2 = new byte[1024]; + int bytesRead = lois.read(buf2, 0, buf2.length); + assertEquals(1023, bytesRead); + assertEquals(1, buf2[0]); + } + } + } + con.commit(); + } + + /* + * Helper - uploads a file into a blob using old style methods. We use this because it always + * works, and we can use it as a base to test the new methods. + */ + private long uploadFile(String file, int method) throws Exception { + LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI(); + + InputStream fis = getClass().getResourceAsStream(file); + + long oid = lom.createLO(LargeObjectManager.READWRITE); + LargeObject blob = lom.open(oid); + + int s; + int t; + byte[] buf; + OutputStream os; + + switch (method) { + case LOOP: + buf = new byte[2048]; + t = 0; + while ((s = fis.read(buf, 0, buf.length)) > 0) { + t += s; + blob.write(buf, 0, s); + } + break; + + case NATIVE_STREAM: + os = blob.getOutputStream(); + s = fis.read(); + while (s > -1) { + os.write(s); + s = fis.read(); + } + os.close(); + break; + + default: + fail("Unknown method in uploadFile"); + } + + blob.close(); + fis.close(); + + // Insert into the table + Statement st = con.createStatement(); + st.executeUpdate(TestUtil.insertSQL("testblob", "id,lo", "'" + file + "'," + oid)); + con.commit(); + st.close(); + + return oid; + } + + /* + * Helper - compares the blobs in a table with a local file. 
Note this uses the postgresql + * specific Large Object API + */ + private boolean compareBlobsLOAPI(String id) throws Exception { + boolean result = true; + + LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI(); + + try (Statement st = con.createStatement()) { + try (ResultSet rs = st.executeQuery(TestUtil.selectSQL("testblob", "id,lo", "id = '" + id + "'"))) { + assertNotNull(rs); + + while (rs.next()) { + String file = rs.getString(1); + long oid = rs.getLong(2); + + InputStream fis = getClass().getResourceAsStream(file); + LargeObject blob = lom.open(oid); + InputStream bis = blob.getInputStream(); + + int f = fis.read(); + int b = bis.read(); + int c = 0; + while (f >= 0 && b >= 0 & result) { + result = f == b; + f = fis.read(); + b = bis.read(); + c++; + } + result = result && f == -1 && b == -1; + + if (!result) { + fail("Large Object API Blob compare failed at " + c + " of " + blob.size()); + } + + blob.close(); + fis.close(); + } + } + } + return result; + } + + /* + * Helper - compares the blobs in a table with a local file. This uses the jdbc java.sql.Blob api + */ + private boolean compareBlobs(String id) throws Exception { + boolean result = true; + + try (Statement st = con.createStatement()) { + try (ResultSet rs = st.executeQuery(TestUtil.selectSQL("testblob", "id,lo", "id = '" + id + "'"))) { + assertNotNull(rs); + + while (rs.next()) { + String file = rs.getString(1); + Blob blob = rs.getBlob(2); + + InputStream fis = getClass().getResourceAsStream(file); + InputStream bis = blob.getBinaryStream(); + + int f = fis.read(); + int b = bis.read(); + int c = 0; + while (f >= 0 && b >= 0 & result) { + result = f == b; + f = fis.read(); + b = bis.read(); + c++; + } + result = result && f == -1 && b == -1; + + if (!result) { + fail("JDBC API Blob compare failed at " + c + " of " + blob.length()); + } + + bis.close(); + fis.close(); + } + } + } + return result; + } + + /* + * Helper - compares the clobs in a table with a local file. 
+ */ + private boolean compareClobs(String id) throws Exception { + boolean result = true; + + try (Statement st = con.createStatement()) { + try (ResultSet rs = st.executeQuery(TestUtil.selectSQL("testblob", "id,lo", "id = '" + id + "'"))) { + assertNotNull(rs); + + while (rs.next()) { + String file = rs.getString(1); + Clob clob = rs.getClob(2); + + InputStream fis = getClass().getResourceAsStream(file); + InputStream bis = clob.getAsciiStream(); + + int f = fis.read(); + int b = bis.read(); + int c = 0; + while (f >= 0 && b >= 0 & result) { + result = f == b; + f = fis.read(); + b = bis.read(); + c++; + } + result = result && f == -1 && b == -1; + + if (!result) { + fail("Clob compare failed at " + c + " of " + clob.length()); + } + + bis.close(); + fis.close(); + } + } + } + + return result; + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTransactionTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTransactionTest.java new file mode 100644 index 0000000..281874d --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTransactionTest.java @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.io.InputStream; +import java.sql.Blob; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.Arrays; + +import javax.sql.rowset.serial.SerialBlob; + +/** + * Test that oid/lob are accessible in concurrent connection, in presence of the lo_manage trigger. + * Require the lo module accessible in $libdir + */ +class BlobTransactionTest { + private Connection con; + private Connection con2; + + @BeforeEach + void setUp() throws Exception { + con = TestUtil.openDB(); + con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + con2 = TestUtil.openDB(); + con2.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + + TestUtil.createTable(con, "testblob", "id name,lo oid"); + + String sql; + + /* + * this would have to be executed using the postgres user in order to get access to a C function + * + */ + Connection privilegedCon = TestUtil.openPrivilegedDB(); + Statement st = privilegedCon.createStatement(); + try { + sql = + "CREATE OR REPLACE FUNCTION lo_manage() RETURNS pg_catalog.trigger AS '$libdir/lo' LANGUAGE C"; + st.executeUpdate(sql); + } finally { + st.close(); + } + + st = privilegedCon.createStatement(); + try { + sql = + "CREATE TRIGGER testblob_lomanage BEFORE UPDATE OR DELETE ON testblob FOR EACH ROW EXECUTE PROCEDURE lo_manage(lo)"; + st.executeUpdate(sql); + } finally { + st.close(); + } + TestUtil.closeDB(privilegedCon); + + con.setAutoCommit(false); + con2.setAutoCommit(false); + } + + @AfterEach + void tearDown() throws Exception { + 
TestUtil.closeDB(con2); + + con.setAutoCommit(true); + try { + Statement stmt = con.createStatement(); + try { + stmt.execute("SELECT lo_unlink(lo) FROM testblob"); + } finally { + try { + stmt.close(); + } catch (Exception e) { + } + } + } finally { + TestUtil.dropTable(con, "testblob"); + TestUtil.closeDB(con); + } + } + + private byte[] randomData() { + byte[] data = new byte[64 * 1024 * 8]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) (Math.random() * 256); + } + return data; + } + + private byte[] readInputStream(InputStream is) throws IOException { + byte[] result = new byte[1024]; + int readPos = 0; + int d; + while ((d = is.read()) != -1) { + if (readPos == result.length) { + result = Arrays.copyOf(result, result.length * 2); + } + result[readPos++] = (byte) d; + } + + return Arrays.copyOf(result, readPos); + } + + @Test + void concurrentReplace() throws SQLException, IOException { + // Statement stmt = con.createStatement(); + // stmt.execute("INSERT INTO testblob(id,lo) VALUES ('1', lo_creat(-1))"); + // ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob"); + // assertTrue(rs.next()); + + PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(id, lo) VALUES(?,?)"); + + byte[] initialData = randomData(); + + pstmt.setString(1, "testConcurrentReplace"); + pstmt.setObject(2, new SerialBlob(initialData), Types.BLOB); + assertEquals(1, pstmt.executeUpdate()); + + con.commit(); + + con2.rollback(); + + // con2 access the blob + PreparedStatement pstmt2 = con2.prepareStatement("SELECT lo FROM testblob WHERE id=?"); + pstmt2.setString(1, "testConcurrentReplace"); + ResultSet rs2 = pstmt2.executeQuery(); + assertTrue(rs2.next()); + + // con replace the blob + byte[] newData = randomData(); + pstmt = con.prepareStatement("UPDATE testblob SET lo=? 
where id=?"); + pstmt.setObject(1, new SerialBlob(newData), Types.BLOB); + pstmt.setString(2, "testConcurrentReplace"); + assertEquals(1, pstmt.executeUpdate()); + + // con2 read the blob content + Blob initContentBlob = rs2.getBlob(1); + byte[] initialContentReRead = readInputStream(initContentBlob.getBinaryStream()); + assertEquals(initialContentReRead.length, initialData.length); + for (int i = 0; i < initialContentReRead.length; i++) { + assertEquals(initialContentReRead[i], initialData[i]); + } + + con2.rollback(); + pstmt2 = con2.prepareStatement("SELECT lo FROM testblob WHERE id=?"); + pstmt2.setString(1, "testConcurrentReplace"); + rs2 = pstmt2.executeQuery(); + assertTrue(rs2.next()); + + // con commit + con.commit(); + + initContentBlob = rs2.getBlob(1); + initialContentReRead = readInputStream(initContentBlob.getBinaryStream()); + assertEquals(initialContentReRead.length, initialData.length); + for (int i = 0; i < initialContentReRead.length; i++) { + assertEquals(initialContentReRead[i], initialData[i]); + } + + con2.commit(); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CallableStmtTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CallableStmtTest.java new file mode 100644 index 0000000..fa4d0e7 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CallableStmtTest.java @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.postgresql.test.TestUtil; + +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.Array; +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.Statement; +import java.sql.Types; + +/* + * CallableStatement tests. + * + * @author Paul Bethe + */ +public class CallableStmtTest extends BaseTest4 { + @BeforeClass + public static void beforeClass() throws Exception { + try (Connection con = TestUtil.openDB()) { + assumeCallableStatementsSupported(con); + } + } + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "int_table", "id int"); + Statement stmt = con.createStatement(); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getString (varchar) " + + "RETURNS varchar AS ' DECLARE inString alias for $1; begin " + + "return ''bob''; end; ' LANGUAGE plpgsql;"); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getDouble (float) " + + "RETURNS float AS ' DECLARE inString alias for $1; begin " + + "return 42.42; end; ' LANGUAGE plpgsql;"); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getVoid (float) " + + "RETURNS void AS ' DECLARE inString alias for $1; begin " + + " return; end; ' LANGUAGE plpgsql;"); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getInt (int) RETURNS int " + + " AS 'DECLARE inString alias for $1; begin " + + "return 42; end;' LANGUAGE plpgsql;"); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getShort (int2) RETURNS int2 " + + " AS 'DECLARE inString alias for $1; begin " + + "return 42; end;' LANGUAGE plpgsql;"); + stmt.execute( + "CREATE OR REPLACE FUNCTION 
testspg__getNumeric (numeric) " + + "RETURNS numeric AS ' DECLARE inString alias for $1; " + + "begin return 42; end; ' LANGUAGE plpgsql;"); + + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getNumericWithoutArg() " + + "RETURNS numeric AS ' " + + "begin return 42; end; ' LANGUAGE plpgsql;"); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getarray() RETURNS int[] as " + + "'SELECT ''{1,2}''::int[];' LANGUAGE sql"); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__raisenotice() RETURNS int as " + + "'BEGIN RAISE NOTICE ''hello''; RAISE NOTICE ''goodbye''; RETURN 1; END;' LANGUAGE plpgsql"); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__insertInt(int) RETURNS int as " + + "'BEGIN INSERT INTO int_table(id) VALUES ($1); RETURN 1; END;' LANGUAGE plpgsql"); + stmt.close(); + } + + @Override + public void tearDown() throws SQLException { + Statement stmt = con.createStatement(); + TestUtil.dropTable(con, "int_table"); + stmt.execute("drop FUNCTION testspg__getString (varchar);"); + stmt.execute("drop FUNCTION testspg__getDouble (float);"); + stmt.execute("drop FUNCTION testspg__getVoid(float);"); + stmt.execute("drop FUNCTION testspg__getInt (int);"); + stmt.execute("drop FUNCTION testspg__getShort(int2)"); + stmt.execute("drop FUNCTION testspg__getNumeric (numeric);"); + + stmt.execute("drop FUNCTION testspg__getNumericWithoutArg ();"); + stmt.execute("DROP FUNCTION testspg__getarray();"); + stmt.execute("DROP FUNCTION testspg__raisenotice();"); + stmt.execute("DROP FUNCTION testspg__insertInt(int);"); + super.tearDown(); + } + + final String func = "{ ? = call "; + final String pkgName = "testspg__"; + + @Test + public void testGetUpdateCount() throws SQLException { + CallableStatement call = con.prepareCall(func + pkgName + "getDouble (?) 
}"); + call.setDouble(2, 3.04); + call.registerOutParameter(1, Types.DOUBLE); + call.execute(); + assertEquals(-1, call.getUpdateCount()); + assertNull(call.getResultSet()); + assertEquals(42.42, call.getDouble(1), 0.00001); + call.close(); + + // test without an out parameter + call = con.prepareCall("{ call " + pkgName + "getDouble(?) }"); + call.setDouble(1, 3.04); + call.execute(); + assertEquals(-1, call.getUpdateCount()); + ResultSet rs = call.getResultSet(); + assertNotNull(rs); + assertTrue(rs.next()); + assertEquals(42.42, rs.getDouble(1), 0.00001); + assertTrue(!rs.next()); + rs.close(); + + assertEquals(-1, call.getUpdateCount()); + assertTrue(!call.getMoreResults()); + call.close(); + } + + @Test + public void testGetDouble() throws Throwable { + CallableStatement call = con.prepareCall(func + pkgName + "getDouble (?) }"); + call.setDouble(2, 3.04); + call.registerOutParameter(1, Types.DOUBLE); + call.execute(); + assertEquals(42.42, call.getDouble(1), 0.00001); + + // test without an out parameter + call = con.prepareCall("{ call " + pkgName + "getDouble(?) }"); + call.setDouble(1, 3.04); + call.execute(); + + call = con.prepareCall("{ call " + pkgName + "getVoid(?) }"); + call.setDouble(1, 3.04); + call.execute(); + } + + @Test + public void testGetInt() throws Throwable { + CallableStatement call = con.prepareCall(func + pkgName + "getInt (?) }"); + call.setInt(2, 4); + call.registerOutParameter(1, Types.INTEGER); + call.execute(); + assertEquals(42, call.getInt(1)); + } + + @Test + public void testGetShort() throws Throwable { + CallableStatement call = con.prepareCall(func + pkgName + "getShort (?) }"); + call.setShort(2, (short) 4); + call.registerOutParameter(1, Types.SMALLINT); + call.execute(); + assertEquals(42, call.getShort(1)); + } + + @Test + public void testGetNumeric() throws Throwable { + CallableStatement call = con.prepareCall(func + pkgName + "getNumeric (?) 
}"); + call.setBigDecimal(2, new java.math.BigDecimal(4)); + call.registerOutParameter(1, Types.NUMERIC); + call.execute(); + assertEquals(new java.math.BigDecimal(42), call.getBigDecimal(1)); + } + + @Test + public void testGetNumericWithoutArg() throws Throwable { + CallableStatement call = con.prepareCall(func + pkgName + "getNumericWithoutArg () }"); + call.registerOutParameter(1, Types.NUMERIC); + call.execute(); + assertEquals(new java.math.BigDecimal(42), call.getBigDecimal(1)); + } + + @Test + public void testGetString() throws Throwable { + CallableStatement call = con.prepareCall(func + pkgName + "getString (?) }"); + call.setString(2, "foo"); + call.registerOutParameter(1, Types.VARCHAR); + call.execute(); + assertEquals("bob", call.getString(1)); + + } + + @Test + public void testGetArray() throws SQLException { + CallableStatement call = con.prepareCall(func + pkgName + "getarray()}"); + call.registerOutParameter(1, Types.ARRAY); + call.execute(); + Array arr = call.getArray(1); + ResultSet rs = arr.getResultSet(); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertTrue(!rs.next()); + } + + @Test + public void testRaiseNotice() throws SQLException { + Statement statement = con.createStatement(); + statement.execute("SET SESSION client_min_messages = 'NOTICE'"); + CallableStatement call = con.prepareCall(func + pkgName + "raisenotice()}"); + call.registerOutParameter(1, Types.INTEGER); + call.execute(); + SQLWarning warn = call.getWarnings(); + assertNotNull(warn); + assertEquals("hello", warn.getMessage()); + warn = warn.getNextWarning(); + assertNotNull(warn); + assertEquals("goodbye", warn.getMessage()); + assertEquals(1, call.getInt(1)); + } + + @Test + public void testWasNullBeforeFetch() throws SQLException { + CallableStatement cs = con.prepareCall("{? 
= call lower(?)}"); + cs.registerOutParameter(1, Types.VARCHAR); + cs.setString(2, "Hi"); + try { + cs.wasNull(); + fail("expected exception"); + } catch (Exception e) { + assertTrue(e instanceof SQLException); + } + } + + @Test + public void testFetchBeforeExecute() throws SQLException { + CallableStatement cs = con.prepareCall("{? = call lower(?)}"); + cs.registerOutParameter(1, Types.VARCHAR); + cs.setString(2, "Hi"); + try { + cs.getString(1); + fail("expected exception"); + } catch (Exception e) { + assertTrue(e instanceof SQLException); + } + } + + @Test + public void testFetchWithNoResults() throws SQLException { + CallableStatement cs = con.prepareCall("{call now()}"); + cs.execute(); + try { + cs.getObject(1); + fail("expected exception"); + } catch (Exception e) { + assertTrue(e instanceof SQLException); + } + } + + @Test + public void testBadStmt() throws Throwable { + tryOneBadStmt("{ ?= " + pkgName + "getString (?) }"); + tryOneBadStmt("{ ?= call getString (?) "); + tryOneBadStmt("{ = ? call getString (?); }"); + } + + protected void tryOneBadStmt(String sql) throws SQLException { + try { + con.prepareCall(sql); + fail("Bad statement (" + sql + ") was not caught."); + + } catch (SQLException e) { + } + } + + @Test + public void testBatchCall() throws SQLException { + CallableStatement call = con.prepareCall("{ call " + pkgName + "insertInt(?) 
}"); + call.setInt(1, 1); + call.addBatch(); + call.setInt(1, 2); + call.addBatch(); + call.setInt(1, 3); + call.addBatch(); + call.executeBatch(); + call.close(); + + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT id FROM int_table ORDER BY id"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + assertTrue(!rs.next()); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ClientEncodingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ClientEncodingTest.java new file mode 100644 index 0000000..117b399 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ClientEncodingTest.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLState; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; +import java.util.Properties; + +@RunWith(Parameterized.class) +public class ClientEncodingTest extends BaseTest4 { + + @Parameterized.Parameter(0) + public boolean allowEncodingChanges; + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + PGProperty.ALLOW_ENCODING_CHANGES.set(props, allowEncodingChanges); + } + + @Parameterized.Parameters(name = "allowEncodingChanges={0}") + public static Iterable data() { + return Arrays.asList(new Object[][]{ + {true}, + {false} + }); + } + + @Test + public void setEncodingUtf8() throws SQLException { + // UTF-8 is a default encoding, so it should always be safe to set 
encoding to UTF-8 + setEncoding("UTF-8"); + + checkConnectionSanity(); + } + + @Test + public void setEncodingAscii() throws SQLException { + try { + setEncoding("sql_ascii"); + if (!allowEncodingChanges) { + Assert.fail( + "allowEncodingChanges is false, thus set client_encoding=aql_ascii is expected to fail"); + } + } catch (SQLException e) { + if (!allowEncodingChanges && !PSQLState.CONNECTION_FAILURE.getState() + .equals(e.getSQLState())) { + throw e; + } + Assert.assertTrue("Connection should be closed on client_encoding change", con.isClosed()); + return; + } + + checkConnectionSanity(); + } + + private void checkConnectionSanity() throws SQLException { + Statement st = con.createStatement(); + ResultSet rs = st.executeQuery("select 'abc' as x"); + rs.next(); + Assert.assertEquals("abc", rs.getString(1)); + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(st); + } + + private void setEncoding(String encoding) throws SQLException { + Statement st = con.createStatement(); + st.execute("set client_encoding='" + encoding + "'"); + TestUtil.closeQuietly(st); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserDisabledTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserDisabledTest.java new file mode 100644 index 0000000..7b37286 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserDisabledTest.java @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.core.BaseConnection; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +/* +* This test suite will check the behaviour of the findColumnIndex method. This is testing the +* behaviour when sanitiser is disabled. +*/ +class ColumnSanitiserDisabledTest { + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + Properties props = new Properties(); + props.setProperty("disableColumnSanitiser", Boolean.TRUE.toString()); + conn = TestUtil.openDB(props); + assertTrue(conn instanceof BaseConnection); + BaseConnection bc = (BaseConnection) conn; + assertTrue(bc.isColumnSanitiserDisabled(), + "Expected state [TRUE] of base connection configuration failed test."); + /* + * Quoted columns will be stored with case preserved. Driver will receive column names as + * defined in db server. + */ + TestUtil.createTable(conn, "allmixedup", + "id int primary key, \"DESCRIPTION\" varchar(40), \"fOo\" varchar(3)"); + Statement data = conn.createStatement(); + data.execute(TestUtil.insertSQL("allmixedup", "1,'mixed case test', 'bar'")); + data.close(); + } + + @AfterEach + void tearDown() throws Exception { + TestUtil.dropTable(conn, "allmixedup"); + TestUtil.closeDB(conn); + System.setProperty("disableColumnSanitiser", "false"); + } + + /* + * Test cases checking different combinations of columns origination from database against + * application supplied column names. 
+ */ + + @Test + void tableColumnLowerNowFindFindLowerCaseColumn() throws SQLException { + findColumn("id", true); + } + + @Test + void tableColumnLowerNowFindFindUpperCaseColumn() throws SQLException { + findColumn("ID", true); + } + + @Test + void tableColumnLowerNowFindFindMixedCaseColumn() throws SQLException { + findColumn("Id", false); + } + + @Test + void tableColumnUpperNowFindFindLowerCaseColumn() throws SQLException { + findColumn("description", true); + } + + @Test + void tableColumnUpperNowFindFindUpperCaseColumn() throws SQLException { + findColumn("DESCRIPTION", true); + } + + @Test + void tableColumnUpperNowFindFindMixedCaseColumn() throws SQLException { + findColumn("Description", false); + } + + @Test + void tableColumnMixedNowFindLowerCaseColumn() throws SQLException { + findColumn("foo", false); + } + + @Test + void tableColumnMixedNowFindFindUpperCaseColumn() throws SQLException { + findColumn("FOO", false); + } + + @Test + void tableColumnMixedNowFindFindMixedCaseColumn() throws SQLException { + findColumn("fOo", true); + } + + private void findColumn(String label, boolean failOnNotFound) throws SQLException { + PreparedStatement query = conn.prepareStatement("select * from allmixedup"); + if ((TestUtil.findColumn(query, label) == 0) && failOnNotFound) { + fail(String.format("Expected to find the column with the label [%1$s].", label)); + } + query.close(); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserEnabledTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserEnabledTest.java new file mode 100644 index 0000000..b68a35f --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserEnabledTest.java @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.core.BaseConnection; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +/* +* This test suite will check the behaviour of the findColumnIndex method. The tests will check the +* behaviour of the method when the sanitiser is enabled. Default behaviour of the driver. +*/ +class ColumnSanitiserEnabledTest { + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + Properties props = new Properties(); + props.setProperty("disableColumnSanitiser", Boolean.FALSE.toString()); + conn = TestUtil.openDB(props); + assertTrue(conn instanceof BaseConnection); + BaseConnection bc = (BaseConnection) conn; + assertFalse(bc.isColumnSanitiserDisabled(), + "Expected state [FALSE] of base connection configuration failed test."); + TestUtil.createTable(conn, "allmixedup", + "id int primary key, \"DESCRIPTION\" varchar(40), \"fOo\" varchar(3)"); + Statement data = conn.createStatement(); + data.execute(TestUtil.insertSQL("allmixedup", "1,'mixed case test', 'bar'")); + data.close(); + } + + protected void tearDown() throws Exception { + TestUtil.dropTable(conn, "allmixedup"); + TestUtil.closeDB(conn); + } + + /* + * Test cases checking different combinations of columns origination from database against + * application supplied column names. 
+ */ + + @Test + void tableColumnLowerNowFindFindLowerCaseColumn() throws SQLException { + findColumn("id", true); + } + + @Test + void tableColumnLowerNowFindFindUpperCaseColumn() throws SQLException { + findColumn("ID", true); + } + + @Test + void tableColumnLowerNowFindFindMixedCaseColumn() throws SQLException { + findColumn("Id", true); + } + + @Test + void tableColumnUpperNowFindFindLowerCaseColumn() throws SQLException { + findColumn("description", true); + } + + @Test + void tableColumnUpperNowFindFindUpperCaseColumn() throws SQLException { + findColumn("DESCRIPTION", true); + } + + @Test + void tableColumnUpperNowFindFindMixedCaseColumn() throws SQLException { + findColumn("Description", true); + } + + @Test + void tableColumnMixedNowFindLowerCaseColumn() throws SQLException { + findColumn("foo", true); + } + + @Test + void tableColumnMixedNowFindFindUpperCaseColumn() throws SQLException { + findColumn("FOO", true); + } + + @Test + void tableColumnMixedNowFindFindMixedCaseColumn() throws SQLException { + findColumn("fOo", true); + } + + private void findColumn(String label, boolean failOnNotFound) throws SQLException { + PreparedStatement query = conn.prepareStatement("select * from allmixedup"); + if ((TestUtil.findColumn(query, label) == 0) && failOnNotFound) { + fail(String.format("Expected to find the column with the label [%1$s].", label)); + } + query.close(); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConcurrentStatementFetch.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConcurrentStatementFetch.java new file mode 100644 index 0000000..53b5b52 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConcurrentStatementFetch.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; + +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.util.ArrayList; +import java.util.Collection; + +@RunWith(Parameterized.class) +public class ConcurrentStatementFetch extends BaseTest4 { + + private final AutoCommit autoCommit; + private final int fetchSize; + + public ConcurrentStatementFetch(AutoCommit autoCommit, int fetchSize, BinaryMode binaryMode) { + this.autoCommit = autoCommit; + this.fetchSize = fetchSize; + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "{index}: fetch(autoCommit={0}, fetchSize={1}, binaryMode={2})") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (AutoCommit autoCommit : AutoCommit.values()) { + for (int fetchSize : new int[]{1, 2, 20}) { + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{autoCommit, fetchSize, binaryMode}); + } + } + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + con.setAutoCommit(autoCommit == AutoCommit.YES); + } + + @Test + public void testFetchTwoStatements() throws Exception { + // This test definitely fails at 8.2 in autocommit=false, and works with 8.4+ + Assume.assumeTrue(autoCommit == AutoCommit.YES + || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)); + PreparedStatement ps1 = null; + PreparedStatement ps2 = null; + try { + ps1 = con.prepareStatement("select * from generate_series(0, 9)"); + ps1.setFetchSize(fetchSize); + ResultSet rs1 = ps1.executeQuery(); + ps2 = con.prepareStatement("select * from generate_series(10, 19)"); + ps2.setFetchSize(fetchSize); + ResultSet rs2 = ps2.executeQuery(); + + for (int i = 0; i < 10; i++) { + Assert.assertTrue(rs1.next()); + 
Assert.assertTrue(rs2.next()); + Assert.assertEquals("Row#" + i + ", resultset 1", i, rs1.getInt(1)); + Assert.assertEquals("Row#" + i + ", resultset 2", i + 10, rs2.getInt(1)); + } + Assert.assertFalse(rs1.next()); + Assert.assertFalse(rs2.next()); + } finally { + TestUtil.closeQuietly(ps1); + TestUtil.closeQuietly(ps2); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectTimeoutTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectTimeoutTest.java new file mode 100644 index 0000000..aa37f02 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectTimeoutTest.java @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.net.NoRouteToHostException; +import java.net.SocketTimeoutException; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; + +class ConnectTimeoutTest { + // The IP below is non-routable (see http://stackoverflow.com/a/904609/1261287) + private static final String UNREACHABLE_HOST = "10.255.255.1"; + private static final String UNREACHABLE_URL = "jdbc:postgresql://" + UNREACHABLE_HOST + ":5432/test"; + private static final int CONNECT_TIMEOUT = 5; + + @BeforeEach + void setUp() throws Exception { + TestUtil.initDriver(); + } + + @Test + void timeout() { + final Properties props = new Properties(); + PGProperty.USER.set(props, TestUtil.getUser()); + PGProperty.PASSWORD.set(props, TestUtil.getPassword()); + // with 0 (default value) it hangs for about 60 seconds (platform dependent) + 
PGProperty.CONNECT_TIMEOUT.set(props, CONNECT_TIMEOUT); + + final long startTime = System.currentTimeMillis(); + try { + DriverManager.getConnection(UNREACHABLE_URL, props); + } catch (SQLException e) { + final long interval = System.currentTimeMillis() - startTime; + final long connectTimeoutMillis = CONNECT_TIMEOUT * 1000; + final long maxDeviation = connectTimeoutMillis / 10; + + /* + * If the platform fast-fails the unroutable address connection then this + * test may not time out, instead throwing + * java.net.NoRouteToHostException. The test has failed in that the connection + * attempt did not time out. + * + * We treat this as a skipped test, as the test didn't really "succeed" + * in testing the original behaviour, but it didn't fail either. + */ + Assumptions.assumeFalse(e.getCause() instanceof NoRouteToHostException + && interval < connectTimeoutMillis, + "Host fast-failed connection to unreachable address " + + UNREACHABLE_HOST + " after " + interval + " ms, " + + " before timeout should have triggered."); + + assertTrue(e.getCause() instanceof SocketTimeoutException, + "Unexpected " + e.toString() + " with cause " + e.getCause()); + // check that it was not a default system timeout, an approximate value is used + assertTrue(Math.abs(interval - connectTimeoutMillis) < maxDeviation); + return; + } + fail("SQLException expected"); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectionTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectionTest.java new file mode 100644 index 0000000..c6f3f96 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectionTest.java @@ -0,0 +1,554 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGConnection; +import org.postgresql.PGProperty; +import org.postgresql.core.PGStream; +import org.postgresql.core.QueryExecutor; +import org.postgresql.jdbc.PgConnection; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.lang.reflect.Field; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +/** + * TestCase to test the internal functionality of org.postgresql.jdbc2.Connection and it's + * superclass. + */ +class ConnectionTest { + private Connection con; + + // Set up the fixture for this testcase: the tables for this test. + @BeforeEach + void setUp() throws Exception { + con = TestUtil.openDB(); + + TestUtil.createTable(con, "test_a", "imagename name,image oid,id int4"); + TestUtil.createTable(con, "test_c", "source text,cost money,imageid int4"); + + TestUtil.closeDB(con); + } + + // Tear down the fixture for this test case. 
+ @AfterEach + void tearDown() throws Exception { + TestUtil.closeDB(con); + + con = TestUtil.openDB(); + + TestUtil.dropTable(con, "test_a"); + TestUtil.dropTable(con, "test_c"); + + TestUtil.closeDB(con); + } + + /* + * Tests the two forms of createStatement() + */ + @Test + void createStatement() throws Exception { + con = TestUtil.openDB(); + + // A standard Statement + Statement stat = con.createStatement(); + assertNotNull(stat); + stat.close(); + + // Ask for Updateable ResultSets + stat = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + assertNotNull(stat); + stat.close(); + } + + /* + * Tests the two forms of prepareStatement() + */ + @Test + void prepareStatement() throws Exception { + con = TestUtil.openDB(); + + String sql = "select source,cost,imageid from test_c"; + + // A standard Statement + PreparedStatement stat = con.prepareStatement(sql); + assertNotNull(stat); + stat.close(); + + // Ask for Updateable ResultSets + stat = con.prepareStatement(sql, ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + assertNotNull(stat); + stat.close(); + } + + /* + * Put the test for createPrepareCall here + */ + @Test + void prepareCall() { + } + + /* + * Test nativeSQL + */ + @Test + void nativeSQL() throws Exception { + // test a simple escape + con = TestUtil.openDB(); + assertEquals("DATE '2005-01-24'", con.nativeSQL("{d '2005-01-24'}")); + } + + /* + * Test autoCommit (both get & set) + */ + @Test + void transactions() throws Exception { + con = TestUtil.openDB(); + Statement st; + ResultSet rs; + + // Turn it off + con.setAutoCommit(false); + assertFalse(con.getAutoCommit()); + + // Turn it back on + con.setAutoCommit(true); + assertTrue(con.getAutoCommit()); + + // Now test commit + st = con.createStatement(); + st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)"); + + con.setAutoCommit(false); + + // Now update image to 9876 and commit + st.executeUpdate("update 
test_a set image=9876 where id=5678"); + con.commit(); + rs = st.executeQuery("select image from test_a where id=5678"); + assertTrue(rs.next()); + assertEquals(9876, rs.getInt(1)); + rs.close(); + + // Now try to change it but rollback + st.executeUpdate("update test_a set image=1111 where id=5678"); + con.rollback(); + rs = st.executeQuery("select image from test_a where id=5678"); + assertTrue(rs.next()); + assertEquals(9876, rs.getInt(1)); // Should not change! + rs.close(); + + TestUtil.closeDB(con); + } + + /* + * Tests for session and transaction read only behavior with "always" read only mode. + */ + @Test + void readOnly_always() throws Exception { + final Properties props = new Properties(); + PGProperty.READ_ONLY_MODE.set(props, "always"); + con = TestUtil.openDB(props); + Statement st; + ResultSet rs; + + con.setAutoCommit(true); + con.setReadOnly(true); + assertTrue(con.getAutoCommit()); + assertTrue(con.isReadOnly()); + + // Now test insert with auto commit true and read only + st = con.createStatement(); + try { + st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)"); + fail("insert should have failed when read only"); + } catch (SQLException e) { + assertStringContains(e.getMessage(), "read-only"); + } + + con.setAutoCommit(false); + + // auto commit false and read only + try { + st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)"); + fail("insert should have failed when read only"); + } catch (SQLException e) { + assertStringContains(e.getMessage(), "read-only"); + } + + try { + con.setReadOnly(false); + fail("cannot set read only during transaction"); + } catch (SQLException e) { + assertEquals(PSQLState.ACTIVE_SQL_TRANSACTION.getState(), e.getSQLState(), "Expecting <>"); + } + + // end the transaction + con.rollback(); + + // disable read only + con.setReadOnly(false); + + assertEquals(1, st.executeUpdate("insert into test_a (imagename,image,id) values 
('comttest',1234,5678)")); + + // Now update image to 9876 and commit + st.executeUpdate("update test_a set image=9876 where id=5678"); + con.commit(); + + // back to read only for successful query + con.setReadOnly(true); + rs = st.executeQuery("select image from test_a where id=5678"); + assertTrue(rs.next()); + assertEquals(9876, rs.getInt(1)); + rs.close(); + + // Now try to change with auto commit false + try { + st.executeUpdate("update test_a set image=1111 where id=5678"); + fail("update should fail when read only"); + } catch (SQLException e) { + assertStringContains(e.getMessage(), "read-only"); + con.rollback(); + } + + // test that value did not change + rs = st.executeQuery("select image from test_a where id=5678"); + assertTrue(rs.next()); + assertEquals(9876, rs.getInt(1)); // Should not change! + rs.close(); + + // repeat attempt to change with auto commit true + con.setAutoCommit(true); + + try { + st.executeUpdate("update test_a set image=1111 where id=5678"); + fail("update should fail when read only"); + } catch (SQLException e) { + assertStringContains(e.getMessage(), "read-only"); + } + + // test that value did not change + rs = st.executeQuery("select image from test_a where id=5678"); + assertTrue(rs.next()); + assertEquals(9876, rs.getInt(1)); // Should not change! + rs.close(); + + TestUtil.closeDB(con); + } + + /* + * Tests for session and transaction read only behavior with "ignore" read only mode. 
+ */ + @Test + void readOnly_ignore() throws Exception { + final Properties props = new Properties(); + PGProperty.READ_ONLY_MODE.set(props, "ignore"); + con = TestUtil.openDB(props); + Statement st; + ResultSet rs; + + con.setAutoCommit(true); + con.setReadOnly(true); + assertTrue(con.getAutoCommit()); + assertTrue(con.isReadOnly()); + + // Now test insert with auto commit true and read only + st = con.createStatement(); + assertEquals(1, st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)")); + con.setAutoCommit(false); + + // Now update image to 9876 and commit + st.executeUpdate("update test_a set image=9876 where id=5678"); + + // back to read only for successful query + rs = st.executeQuery("select image from test_a where id=5678"); + assertTrue(rs.next()); + assertEquals(9876, rs.getInt(1)); + rs.close(); + + con.rollback(); + + // test that value did not change + rs = st.executeQuery("select image from test_a where id=5678"); + assertTrue(rs.next()); + assertEquals(1234, rs.getInt(1)); // Should not change! + rs.close(); + + TestUtil.closeDB(con); + } + + /* + * Tests for session and transaction read only behavior with "transaction" read only mode. 
+ */ + @Test + void readOnly_transaction() throws Exception { + final Properties props = new Properties(); + PGProperty.READ_ONLY_MODE.set(props, "transaction"); + con = TestUtil.openDB(props); + Statement st; + ResultSet rs; + + con.setAutoCommit(false); + con.setReadOnly(true); + assertFalse(con.getAutoCommit()); + assertTrue(con.isReadOnly()); + + // Test insert with auto commit false and read only + st = con.createStatement(); + try { + st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)"); + fail("insert should have failed when read only"); + } catch (SQLException e) { + assertStringContains(e.getMessage(), "read-only"); + } + + con.rollback(); + + con.setAutoCommit(true); + assertTrue(con.isReadOnly()); + //with autocommit true and read only, can still insert + assertEquals(1, st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)")); + + // Now update image to 9876 + st.executeUpdate("update test_a set image=9876 where id=5678"); + + //successful query + rs = st.executeQuery("select image from test_a where id=5678"); + assertTrue(rs.next()); + assertEquals(9876, rs.getInt(1)); + rs.close(); + + con.setAutoCommit(false); + // Now try to change with auto commit false + try { + st.executeUpdate("update test_a set image=1111 where id=5678"); + fail("update should fail when read only"); + } catch (SQLException e) { + assertStringContains(e.getMessage(), "read-only"); + } + + con.rollback(); + + // test that value did not change + rs = st.executeQuery("select image from test_a where id=5678"); + assertTrue(rs.next()); + assertEquals(9876, rs.getInt(1)); // Should not change! 
+ rs.close(); + + // repeat attempt to change with auto commit true + con.setAutoCommit(true); + + assertEquals(1, st.executeUpdate("update test_a set image=1111 where id=5678")); + + // test that value did not change + rs = st.executeQuery("select image from test_a where id=5678"); + assertTrue(rs.next()); + assertEquals(1111, rs.getInt(1)); // Should not change! + rs.close(); + + TestUtil.closeDB(con); + } + + /* + * Simple test to see if isClosed works. + */ + @Test + void isClosed() throws Exception { + con = TestUtil.openDB(); + + // Should not say closed + assertFalse(con.isClosed()); + + TestUtil.closeDB(con); + + // Should now say closed + assertTrue(con.isClosed()); + } + + /* + * Test the warnings system + */ + @Test + void warnings() throws Exception { + con = TestUtil.openDB(); + + String testStr = "This Is OuR TeSt message"; + + // The connection must be ours! + assertTrue(con instanceof PGConnection); + + // Clear any existing warnings + con.clearWarnings(); + + // Set the test warning + ((PgConnection) con).addWarning(new SQLWarning(testStr)); + + // Retrieve it + SQLWarning warning = con.getWarnings(); + assertNotNull(warning); + assertEquals(testStr, warning.getMessage()); + + // Finally test clearWarnings() this time there must be something to delete + con.clearWarnings(); + assertNull(con.getWarnings()); + + TestUtil.closeDB(con); + } + + /* + * Transaction Isolation Levels + */ + @Test + void transactionIsolation() throws Exception { + con = TestUtil.openDB(); + + int defaultLevel = con.getTransactionIsolation(); + + // Begin a transaction + con.setAutoCommit(false); + + // The isolation level should not have changed + assertEquals(defaultLevel, con.getTransactionIsolation()); + + // Now run some tests with autocommit enabled. 
+ con.setAutoCommit(true); + + assertEquals(defaultLevel, con.getTransactionIsolation()); + + con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation()); + + con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + assertEquals(Connection.TRANSACTION_READ_COMMITTED, con.getTransactionIsolation()); + + // Test if a change of isolation level before beginning the + // transaction affects the isolation level inside the transaction. + con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation()); + con.setAutoCommit(false); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation()); + con.setAutoCommit(true); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation()); + con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + assertEquals(Connection.TRANSACTION_READ_COMMITTED, con.getTransactionIsolation()); + con.setAutoCommit(false); + assertEquals(Connection.TRANSACTION_READ_COMMITTED, con.getTransactionIsolation()); + con.commit(); + + // Test that getTransactionIsolation() does not actually start a new txn. + // Shouldn't start a new transaction. + con.getTransactionIsolation(); + // Should be ok -- we're not in a transaction. + con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + // Should still be ok. + con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + + // Test that we can't change isolation mid-transaction + Statement stmt = con.createStatement(); + stmt.executeQuery("SELECT 1"); // Start transaction. + stmt.close(); + + try { + con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + fail("Expected an exception when changing transaction isolation mid-transaction"); + } catch (SQLException e) { + // Ok. 
+ } + + con.rollback(); + TestUtil.closeDB(con); + } + + /* + * JDBC2 Type mappings + */ + @Test + void typeMaps() throws Exception { + con = TestUtil.openDB(); + + // preserve the current map + Map> oldmap = con.getTypeMap(); + + // now change it for an empty one + Map> newmap = new HashMap<>(); + con.setTypeMap(newmap); + assertEquals(newmap, con.getTypeMap()); + + // restore the old one + con.setTypeMap(oldmap); + assertEquals(oldmap, con.getTypeMap()); + + TestUtil.closeDB(con); + } + + /** + * Closing a Connection more than once is not an error. + */ + @Test + void doubleClose() throws Exception { + con = TestUtil.openDB(); + con.close(); + con.close(); + } + + /** + * Make sure that type map is empty and not null + */ + @Test + void getTypeMapEmpty() throws Exception { + con = TestUtil.openDB(); + Map typeMap = con.getTypeMap(); + assertNotNull(typeMap); + assertTrue(typeMap.isEmpty(), "TypeMap should be empty"); + con.close(); + } + + @Test + void pGStreamSettings() throws Exception { + con = TestUtil.openDB(); + QueryExecutor queryExecutor = ((PgConnection) con).getQueryExecutor(); + + Field f = queryExecutor.getClass().getSuperclass().getDeclaredField("pgStream"); + f.setAccessible(true); + PGStream pgStream = (PGStream) f.get(queryExecutor); + pgStream.setNetworkTimeout(1000); + pgStream.getSocket().setKeepAlive(true); + pgStream.getSocket().setSendBufferSize(8192); + pgStream.getSocket().setReceiveBufferSize(2048); + PGStream newStream = new PGStream(pgStream, 10); + assertEquals(1000, newStream.getSocket().getSoTimeout()); + assertEquals(2048, newStream.getSocket().getReceiveBufferSize()); + assertEquals(8192, newStream.getSocket().getSendBufferSize()); + assertTrue(newStream.getSocket().getKeepAlive()); + + TestUtil.closeDB(con); + } + + private static void assertStringContains(String orig, String toContain) { + if (!orig.contains(toContain)) { + fail("expected [" + orig + ']' + "to contain [" + toContain + "]."); + } + } +} diff --git 
a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyLargeFileTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyLargeFileTest.java new file mode 100644 index 0000000..af0ef92 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyLargeFileTest.java @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.PGConnection; +import org.postgresql.copy.CopyManager; +import org.postgresql.test.TestUtil; +import org.postgresql.test.util.BufferGenerator; +import org.postgresql.test.util.StrangeInputStream; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.io.FileInputStream; +import java.io.InputStream; +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.util.Random; + +/** + * @author amozhenin on 30.09.2015. 
+ */ +class CopyLargeFileTest { + + private static final int FEED_COUNT = 10; + + private Connection con; + private CopyManager copyAPI; + + @BeforeEach + void setUp() throws Exception { + con = TestUtil.openDB(); + + TestUtil.createTable(con, "pgjdbc_issue366_test_glossary", + "id SERIAL, text_id VARCHAR(1000) NOT NULL UNIQUE, name VARCHAR(10) NOT NULL UNIQUE"); + TestUtil.createTable(con, "pgjdbc_issue366_test_data", + "id SERIAL,\n" + + "data_text_id VARCHAR(1000) NOT NULL /*UNIQUE <-- it slows down inserts due to additional index */,\n" + + "glossary_text_id VARCHAR(1000) NOT NULL /* REFERENCES pgjdbc_issue366_test_glossary(text_id) */,\n" + + "value DOUBLE PRECISION NOT NULL"); + + feedTable(); + BufferGenerator.main(new String[]{}); + copyAPI = ((PGConnection) con).getCopyAPI(); + } + + private void feedTable() throws Exception { + PreparedStatement stmt = con.prepareStatement( + TestUtil.insertSQL("pgjdbc_issue366_test_glossary", "text_id, name", "?, ?")); + for (int i = 0; i < 26; i++) { + char ch = (char) ('A' + i); // black magic + insertData(stmt, "VERY_LONG_STRING_TO_REPRODUCE_ISSUE_366_" + ch + ch + ch, + "" + ch + ch + ch); + } + } + + private void insertData(PreparedStatement stmt, String textId, String name) throws SQLException { + stmt.setString(1, textId); + stmt.setString(2, name); + stmt.executeUpdate(); + } + + @AfterEach + void tearDown() throws Exception { + try { + TestUtil.dropTable(con, "pgjdbc_issue366_test_data"); + TestUtil.dropTable(con, "pgjdbc_issue366_test_glossary"); + new File("target/buffer.txt").delete(); + } finally { + con.close(); + } + } + + @Test + void feedTableSeveralTimesTest() throws Throwable { + for (int i = 1; i <= FEED_COUNT; i++) { + feedTableAndCheckTableFeedIsOk(con); + cleanupTable(con); + } + } + + private void feedTableAndCheckTableFeedIsOk(Connection conn) throws Throwable { + Long seed = Long.getLong("StrangeInputStream.seed"); + if (seed == null) { + seed = new Random().nextLong(); + } + InputStream in = 
null; + try { + in = new StrangeInputStream(new FileInputStream("target/buffer.txt"), seed); + long size = copyAPI.copyIn( + "COPY pgjdbc_issue366_test_data(data_text_id, glossary_text_id, value) FROM STDIN", in); + assertEquals(BufferGenerator.ROW_COUNT, size); + } catch (Throwable t) { + String message = "Using seed = " + seed + " for StrangeInputStream. Set -DStrangeInputStream.seed=" + + seed + " to reproduce the test"; + t.addSuppressed(new Throwable(message) { + @Override + public Throwable fillInStackTrace() { + return this; + } + }); + } finally { + if (in != null) { + in.close(); + } + } + } + + private void cleanupTable(Connection conn) throws Exception { + CallableStatement stmt = null; + try { + stmt = conn.prepareCall("TRUNCATE pgjdbc_issue366_test_data;"); + stmt.execute(); + } finally { + if (stmt != null) { + stmt.close(); + } + } + + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyTest.java new file mode 100644 index 0000000..3850e61 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyTest.java @@ -0,0 +1,502 @@ +/* + * Copyright (c) 2008, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGConnection; +import org.postgresql.copy.CopyIn; +import org.postgresql.copy.CopyManager; +import org.postgresql.copy.CopyOut; +import org.postgresql.copy.PGCopyOutputStream; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.util.ByteBufferByteStreamWriter; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.PrintStream; +import java.io.StringReader; +import java.nio.ByteBuffer; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Locale; + +/** + * @author kato@iki.fi + */ +class CopyTest { + private Connection con; + private CopyManager copyAPI; + private String copyParams; + // 0's required to match DB output for numeric(5,2) + private final String[] origData = + {"First Row\t1\t1.10\n", + "Second Row\t2\t-22.20\n", + "\\N\t\\N\t\\N\n", + "\t4\t444.40\n"}; + private final int dataRows = origData.length; + + private byte[] getData(String[] origData) { + ByteArrayOutputStream buf = new ByteArrayOutputStream(); + PrintStream ps = new PrintStream(buf); + for (String anOrigData : origData) { + ps.print(anOrigData); + } + return buf.toByteArray(); + } + + @BeforeEach + void setUp() throws Exception { + con = TestUtil.openDB(); + + TestUtil.createTempTable(con, "copytest", "stringvalue 
text, intvalue int, numvalue numeric(5,2)"); + + copyAPI = ((PGConnection) con).getCopyAPI(); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + copyParams = "(FORMAT CSV, HEADER false)"; + } else { + copyParams = "CSV"; + } + } + + @AfterEach + void tearDown() throws Exception { + TestUtil.closeDB(con); + + // one of the tests will render the existing connection broken, + // so we need to drop the table on a fresh one. + con = TestUtil.openDB(); + try { + TestUtil.dropTable(con, "copytest"); + } finally { + con.close(); + } + } + + private int getCount() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT count(*) FROM copytest"); + rs.next(); + int result = rs.getInt(1); + rs.close(); + return result; + } + + @Test + void copyInByRow() throws SQLException { + String sql = "COPY copytest FROM STDIN"; + CopyIn cp = copyAPI.copyIn(sql); + for (String anOrigData : origData) { + byte[] buf = anOrigData.getBytes(); + cp.writeToCopy(buf, 0, buf.length); + } + + long count1 = cp.endCopy(); + long count2 = cp.getHandledRowCount(); + assertEquals(dataRows, count1); + assertEquals(dataRows, count2); + + try { + cp.cancelCopy(); + } catch (SQLException se) { // should fail with obsolete operation + if (!PSQLState.OBJECT_NOT_IN_STATE.getState().equals(se.getSQLState())) { + fail("should have thrown object not in state exception."); + } + } + int rowCount = getCount(); + assertEquals(dataRows, rowCount); + } + + @Test + void copyInAsOutputStream() throws SQLException, IOException { + String sql = "COPY copytest FROM STDIN"; + OutputStream os = new PGCopyOutputStream((PGConnection) con, sql, 1000); + for (String anOrigData : origData) { + byte[] buf = anOrigData.getBytes(); + os.write(buf); + } + os.close(); + int rowCount = getCount(); + assertEquals(dataRows, rowCount); + } + + @Test + void copyInAsOutputStreamClosesAfterEndCopy() throws SQLException, IOException { + String sql = "COPY copytest FROM 
STDIN"; + PGCopyOutputStream os = new PGCopyOutputStream((PGConnection) con, sql, 1000); + try { + for (String anOrigData : origData) { + byte[] buf = anOrigData.getBytes(); + os.write(buf); + } + os.endCopy(); + } finally { + os.close(); + } + assertFalse(os.isActive()); + int rowCount = getCount(); + assertEquals(dataRows, rowCount); + } + + @Test + void copyInAsOutputStreamFailsOnFlushAfterEndCopy() throws SQLException, IOException { + String sql = "COPY copytest FROM STDIN"; + PGCopyOutputStream os = new PGCopyOutputStream((PGConnection) con, sql, 1000); + try { + for (String anOrigData : origData) { + byte[] buf = anOrigData.getBytes(); + os.write(buf); + } + os.endCopy(); + } finally { + os.close(); + } + try { + os.flush(); + fail("should have failed flushing an inactive copy stream."); + } catch (IOException e) { + // We expect "This copy stream is closed", however, the message is locale-dependent + if (Locale.getDefault().getLanguage().equals(new Locale("en").getLanguage()) + && !e.toString().contains("This copy stream is closed.")) { + fail("has failed not due to checkClosed(): " + e); + } + } + } + + @Test + void copyInFromInputStream() throws SQLException, IOException { + String sql = "COPY copytest FROM STDIN"; + copyAPI.copyIn(sql, new ByteArrayInputStream(getData(origData)), 3); + int rowCount = getCount(); + assertEquals(dataRows, rowCount); + } + + @Test + void copyInFromStreamFail() throws SQLException { + String sql = "COPY copytest FROM STDIN"; + try { + copyAPI.copyIn(sql, new InputStream() { + @Override + public int read() { + throw new RuntimeException("COPYTEST"); + } + }, 3); + } catch (Exception e) { + if (!e.toString().contains("COPYTEST")) { + fail("should have failed trying to read from our bogus stream."); + } + } + int rowCount = getCount(); + assertEquals(0, rowCount); + } + + @Test + void copyInFromReader() throws SQLException, IOException { + String sql = "COPY copytest FROM STDIN"; + copyAPI.copyIn(sql, new StringReader(new 
String(getData(origData))), 3); + int rowCount = getCount(); + assertEquals(dataRows, rowCount); + } + + @Test + void copyInFromByteStreamWriter() throws SQLException, IOException { + String sql = "COPY copytest FROM STDIN"; + copyAPI.copyIn(sql, new ByteBufferByteStreamWriter(ByteBuffer.wrap(getData(origData)))); + int rowCount = getCount(); + assertEquals(dataRows, rowCount); + } + + /** + * Tests writing to a COPY ... FROM STDIN using both the standard OutputStream API + * write(byte[]) and the driver specific write(ByteStreamWriter) API interleaved. + */ + @Test + void copyMultiApi() throws SQLException, IOException { + TestUtil.execute(con, "CREATE TABLE pg_temp.copy_api_test (data text)"); + String sql = "COPY pg_temp.copy_api_test (data) FROM STDIN"; + PGCopyOutputStream out = new PGCopyOutputStream(copyAPI.copyIn(sql)); + try { + out.write("a".getBytes()); + out.writeToCopy(new ByteBufferByteStreamWriter(ByteBuffer.wrap("b".getBytes()))); + out.write("c".getBytes()); + out.writeToCopy(new ByteBufferByteStreamWriter(ByteBuffer.wrap("d".getBytes()))); + out.write("\n".getBytes()); + } finally { + out.close(); + } + String data = TestUtil.queryForString(con, "SELECT data FROM pg_temp.copy_api_test"); + assertEquals("abcd", data, "The writes to the COPY should be in order"); + } + + @Test + void skipping() { + String sql = "COPY copytest FROM STDIN"; + String at = "init"; + int rowCount = -1; + int skip = 0; + int skipChar = 1; + try { + while (skipChar > 0) { + at = "buffering"; + InputStream ins = new ByteArrayInputStream(getData(origData)); + at = "skipping"; + ins.skip(skip++); + skipChar = ins.read(); + at = "copying"; + copyAPI.copyIn(sql, ins, 3); + at = "using connection after writing copy"; + rowCount = getCount(); + } + } catch (Exception e) { + if (skipChar != '\t') { + // error expected when field separator consumed + fail("testSkipping at " + at + " round " + skip + ": " + e.toString()); + } + } + assertEquals(dataRows * (skip - 1), rowCount); + } 
+ + @Test + void copyOutByRow() throws SQLException, IOException { + copyInByRow(); // ensure we have some data. + String sql = "COPY copytest TO STDOUT"; + CopyOut cp = copyAPI.copyOut(sql); + int count = 0; + byte[] buf; + while ((buf = cp.readFromCopy()) != null) { + count++; + } + assertFalse(cp.isActive()); + assertEquals(dataRows, count); + + long rowCount = cp.getHandledRowCount(); + + assertEquals(dataRows, rowCount); + + assertEquals(dataRows, getCount()); + } + + @Test + void copyOut() throws SQLException, IOException { + copyInByRow(); // ensure we have some data. + String sql = "COPY copytest TO STDOUT"; + ByteArrayOutputStream copydata = new ByteArrayOutputStream(); + copyAPI.copyOut(sql, copydata); + assertEquals(dataRows, getCount()); + // deep comparison of data written and read + byte[] copybytes = copydata.toByteArray(); + assertNotNull(copybytes); + for (int i = 0, l = 0; i < origData.length; i++) { + byte[] origBytes = origData[i].getBytes(); + assertTrue(copybytes.length >= l + origBytes.length, "Copy is shorter than original"); + for (int j = 0; j < origBytes.length; j++, l++) { + assertEquals(origBytes[j], copybytes[l], "content changed at byte#" + j + ": " + origBytes[j] + copybytes[l]); + } + } + } + + @Test + void nonCopyOut() throws SQLException, IOException { + String sql = "SELECT 1"; + try { + copyAPI.copyOut(sql, new ByteArrayOutputStream()); + fail("Can't use a non-copy query."); + } catch (SQLException sqle) { + } + // Ensure connection still works. + assertEquals(0, getCount()); + } + + @Test + void nonCopyIn() throws SQLException, IOException { + String sql = "SELECT 1"; + try { + copyAPI.copyIn(sql, new ByteArrayInputStream(new byte[0])); + fail("Can't use a non-copy query."); + } catch (SQLException sqle) { + } + // Ensure connection still works. 
+ assertEquals(0, getCount()); + } + + @Test + void statementCopyIn() throws SQLException { + Statement stmt = con.createStatement(); + try { + stmt.execute("COPY copytest FROM STDIN"); + fail("Should have failed because copy doesn't work from a Statement."); + } catch (SQLException sqle) { + } + stmt.close(); + + assertEquals(0, getCount()); + } + + @Test + void statementCopyOut() throws SQLException { + copyInByRow(); // ensure we have some data. + + Statement stmt = con.createStatement(); + try { + stmt.execute("COPY copytest TO STDOUT"); + fail("Should have failed because copy doesn't work from a Statement."); + } catch (SQLException sqle) { + } + stmt.close(); + + assertEquals(dataRows, getCount()); + } + + @Test + void copyQuery() throws SQLException, IOException { + copyInByRow(); // ensure we have some data. + + long count = copyAPI.copyOut("COPY (SELECT generate_series(1,1000)) TO STDOUT", + new ByteArrayOutputStream()); + assertEquals(1000, count); + } + + @Test + void copyRollback() throws SQLException { + con.setAutoCommit(false); + copyInByRow(); + con.rollback(); + assertEquals(0, getCount()); + } + + @Test + void changeDateStyle() throws SQLException { + try { + con.setAutoCommit(false); + con.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); + CopyManager manager = con.unwrap(PGConnection.class).getCopyAPI(); + + Statement stmt = con.createStatement(); + + stmt.execute("SET DateStyle = 'ISO, DMY'"); + + // I expect an SQLException + String sql = "COPY copytest FROM STDIN with xxx " + copyParams; + CopyIn cp = manager.copyIn(sql); + for (String anOrigData : origData) { + byte[] buf = anOrigData.getBytes(); + cp.writeToCopy(buf, 0, buf.length); + } + + long count1 = cp.endCopy(); + long count2 = cp.getHandledRowCount(); + con.commit(); + } catch (SQLException ex) { + + // the with xxx is a syntax error which should return a state of 42601 + // if this fails the 'S' command is not being handled in the copy manager query handler + 
assertEquals("42601", ex.getSQLState()); + con.rollback(); + } + } + + @Test + void lockReleaseOnCancelFailure() throws SQLException, InterruptedException { + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) { + // pg_backend_pid() requires PostgreSQL 8.4+ + return; + } + + // This is a fairly complex test because it is testing a + // deadlock that only occurs when the connection to postgres + // is broken during a copy operation. We'll start a copy + // operation, use pg_terminate_backend to rudely break it, + // and then cancel. The test passes if a subsequent operation + // on the Connection object fails to deadlock. + con.setAutoCommit(false); + + CopyManager manager = con.unwrap(PGConnection.class).getCopyAPI(); + CopyIn copyIn = manager.copyIn("COPY copytest FROM STDIN with " + copyParams); + TestUtil.terminateBackend(con); + try { + byte[] bunchOfNulls = ",,\n".getBytes(); + while (true) { + copyIn.writeToCopy(bunchOfNulls, 0, bunchOfNulls.length); + } + } catch (SQLException e) { + acceptIOCause(e); + } finally { + if (copyIn.isActive()) { + try { + copyIn.cancelCopy(); + fail("cancelCopy should have thrown an exception"); + } catch (SQLException e) { + acceptIOCause(e); + } + } + } + + // Now we'll execute rollback on another thread so that if the + // deadlock _does_ occur the case doesn't just hange forever. 
+ Rollback rollback = new Rollback(con); + rollback.start(); + rollback.join(1000); + if (rollback.isAlive()) { + fail("rollback did not terminate"); + } + SQLException rollbackException = rollback.exception(); + if (rollbackException == null) { + fail("rollback should have thrown an exception"); + } + acceptIOCause(rollbackException); + } + + private static class Rollback extends Thread { + private final Connection con; + private SQLException rollbackException; + + Rollback(Connection con) { + setName("Asynchronous rollback"); + setDaemon(true); + this.con = con; + } + + @Override + public void run() { + try { + con.rollback(); + } catch (SQLException e) { + rollbackException = e; + } + } + + public SQLException exception() { + return rollbackException; + } + } + + private void acceptIOCause(SQLException e) throws SQLException { + if (e.getSQLState().equals(PSQLState.CONNECTION_FAILURE.getState()) + || e.getSQLState().equals(PSQLState.CONNECTION_DOES_NOT_EXIST.getState())) { + // The test expects network exception, so CONNECTION_FAILURE looks good + return; + } + if (!(e.getCause() instanceof IOException)) { + throw e; + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CursorFetchTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CursorFetchTest.java new file mode 100644 index 0000000..794e54d --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CursorFetchTest.java @@ -0,0 +1,540 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.postgresql.test.TestUtil; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collection; + +/* + * Tests for using non-zero setFetchSize(). + */ +@RunWith(Parameterized.class) +public class CursorFetchTest extends BaseTest4 { + + public CursorFetchTest(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "test_fetch", "value integer"); + con.setAutoCommit(false); + } + + @Override + public void tearDown() throws SQLException { + if (!con.getAutoCommit()) { + con.rollback(); + } + + con.setAutoCommit(true); + TestUtil.dropTable(con, "test_fetch"); + super.tearDown(); + } + + protected void createRows(int count) throws Exception { + PreparedStatement stmt = con.prepareStatement("insert into test_fetch(value) values(?)"); + for (int i = 0; i < count; i++) { + stmt.setInt(1, i); + stmt.executeUpdate(); + } + } + + // Test various fetchsizes. 
+ @Test + public void testBasicFetch() throws Exception { + createRows(100); + + PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value"); + int[] testSizes = {0, 1, 49, 50, 51, 99, 100, 101}; + for (int testSize : testSizes) { + stmt.setFetchSize(testSize); + assertEquals(testSize, stmt.getFetchSize()); + + ResultSet rs = stmt.executeQuery(); + assertEquals(testSize, rs.getFetchSize()); + + int count = 0; + while (rs.next()) { + assertEquals("query value error with fetch size " + testSize, count, rs.getInt(1)); + ++count; + } + + assertEquals("total query size error with fetch size " + testSize, 100, count); + } + } + + // Similar, but for scrollable resultsets. + @Test + public void testScrollableFetch() throws Exception { + createRows(100); + + PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value", + ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + + int[] testSizes = {0, 1, 49, 50, 51, 99, 100, 101}; + for (int testSize : testSizes) { + stmt.setFetchSize(testSize); + assertEquals(testSize, stmt.getFetchSize()); + + ResultSet rs = stmt.executeQuery(); + assertEquals(testSize, rs.getFetchSize()); + + for (int j = 0; j <= 50; j++) { + assertTrue("ran out of rows at position " + j + " with fetch size " + testSize, rs.next()); + assertEquals("query value error with fetch size " + testSize, j, rs.getInt(1)); + } + + int position = 50; + for (int j = 1; j < 100; j++) { + for (int k = 0; k < j; k++) { + if (j % 2 == 0) { + ++position; + assertTrue("ran out of rows doing a forward fetch on iteration " + j + "/" + k + + " at position " + position + " with fetch size " + testSize, rs.next()); + } else { + --position; + assertTrue( + "ran out of rows doing a reverse fetch on iteration " + j + "/" + k + + " at position " + position + " with fetch size " + testSize, + rs.previous()); + } + + assertEquals( + "query value error on iteration " + j + "/" + k + " with fetch size " + testSize, + 
position, rs.getInt(1)); + } + } + } + } + + @Test + public void testScrollableAbsoluteFetch() throws Exception { + createRows(100); + + PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value", + ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + + int[] testSizes = {0, 1, 49, 50, 51, 99, 100, 101}; + for (int testSize : testSizes) { + stmt.setFetchSize(testSize); + assertEquals(testSize, stmt.getFetchSize()); + + ResultSet rs = stmt.executeQuery(); + assertEquals(testSize, rs.getFetchSize()); + + int position = 50; + assertTrue("ran out of rows doing an absolute fetch at " + position + " with fetch size " + + testSize, rs.absolute(position + 1)); + assertEquals("query value error with fetch size " + testSize, position, rs.getInt(1)); + + for (int j = 1; j < 100; j++) { + if (j % 2 == 0) { + position += j; + } else { + position -= j; + } + + assertTrue("ran out of rows doing an absolute fetch at " + position + " on iteration " + j + + " with fetchsize" + testSize, rs.absolute(position + 1)); + assertEquals("query value error with fetch size " + testSize, position, rs.getInt(1)); + } + } + } + + // + // Tests for ResultSet.setFetchSize(). + // + + // test one: + // -set fetchsize = 0 + // -run query (all rows should be fetched) + // -set fetchsize = 50 (should have no effect) + // -process results + @Test + public void testResultSetFetchSizeOne() throws Exception { + createRows(100); + + PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value"); + stmt.setFetchSize(0); + ResultSet rs = stmt.executeQuery(); + rs.setFetchSize(50); // Should have no effect. 
+ + int count = 0; + while (rs.next()) { + assertEquals(count, rs.getInt(1)); + ++count; + } + + assertEquals(100, count); + } + + // test two: + // -set fetchsize = 25 + // -run query (25 rows fetched) + // -set fetchsize = 0 + // -process results: + // --process 25 rows + // --should do a FETCH ALL to get more data + // --process 75 rows + @Test + public void testResultSetFetchSizeTwo() throws Exception { + createRows(100); + + PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value"); + stmt.setFetchSize(25); + ResultSet rs = stmt.executeQuery(); + rs.setFetchSize(0); + + int count = 0; + while (rs.next()) { + assertEquals(count, rs.getInt(1)); + ++count; + } + + assertEquals(100, count); + } + + // test three: + // -set fetchsize = 25 + // -run query (25 rows fetched) + // -set fetchsize = 50 + // -process results: + // --process 25 rows. should NOT hit end-of-results here. + // --do a FETCH FORWARD 50 + // --process 50 rows + // --do a FETCH FORWARD 50 + // --process 25 rows. end of results. + @Test + public void testResultSetFetchSizeThree() throws Exception { + createRows(100); + + PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value"); + stmt.setFetchSize(25); + ResultSet rs = stmt.executeQuery(); + rs.setFetchSize(50); + + int count = 0; + while (rs.next()) { + assertEquals(count, rs.getInt(1)); + ++count; + } + + assertEquals(100, count); + } + + // test four: + // -set fetchsize = 50 + // -run query (50 rows fetched) + // -set fetchsize = 25 + // -process results: + // --process 50 rows. + // --do a FETCH FORWARD 25 + // --process 25 rows + // --do a FETCH FORWARD 25 + // --process 25 rows. end of results. 
  // Fetch size is set to 50 on the statement before execution and lowered to 25
  // on the result set afterwards; all 100 rows must still arrive, in order,
  // despite the mid-stream fetch-size change.
  @Test
  public void testResultSetFetchSizeFour() throws Exception {
    createRows(100);

    PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
    stmt.setFetchSize(50);
    ResultSet rs = stmt.executeQuery();
    rs.setFetchSize(25);

    int count = 0;
    while (rs.next()) {
      assertEquals(count, rs.getInt(1));
      ++count;
    }

    assertEquals(100, count);
  }

  // Exercises isBeforeFirst/isFirst/isLast/isAfterLast around a one-row result,
  // for several fetch sizes (0 = fetch-all, 1 and 10 = cursor-based fetching).
  @Test
  public void testSingleRowResultPositioning() throws Exception {
    String msg;
    createRows(1);

    int[] sizes = {0, 1, 10};
    for (int size : sizes) {
      Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
      stmt.setFetchSize(size);

      // Create a one row result set.
      ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");

      // Before the first next(): positioned before-first, nothing else.
      msg = "before-first row positioning error with fetchsize=" + size;
      assertTrue(msg, rs.isBeforeFirst());
      assertTrue(msg, !rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());
      assertTrue(msg, !rs.isLast());

      // On the only row: simultaneously first and last.
      msg = "row 1 positioning error with fetchsize=" + size;
      assertTrue(msg, rs.next());

      assertTrue(msg, !rs.isBeforeFirst());
      assertTrue(msg, !rs.isAfterLast());
      assertTrue(msg, rs.isFirst());
      assertTrue(msg, rs.isLast());
      assertEquals(msg, 0, rs.getInt(1));

      // After the failed next(): positioned after-last, nothing else.
      msg = "after-last row positioning error with fetchsize=" + size;
      assertTrue(msg, !rs.next());

      assertTrue(msg, !rs.isBeforeFirst());
      assertTrue(msg, rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());
      assertTrue(msg, !rs.isLast());

      rs.close();
      stmt.close();
    }
  }

  // Same positioning contract as above, but across a 100-row result so that
  // isFirst()/isLast() flip exactly on rows 0 and 99.
  @Test
  public void testMultiRowResultPositioning() throws Exception {
    String msg;

    createRows(100);

    int[] sizes = {0, 1, 10, 100};
    for (int size : sizes) {
      Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
      stmt.setFetchSize(size);

      ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
      msg = "before-first row positioning error with fetchsize=" + size;
      assertTrue(msg, rs.isBeforeFirst());
      assertTrue(msg, !rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());
      assertTrue(msg, !rs.isLast());

      for (int j = 0; j < 100; j++) {
        msg = "row " + j + " positioning error with fetchsize=" + size;
        assertTrue(msg, rs.next());
        assertEquals(msg, j, rs.getInt(1));

        assertTrue(msg, !rs.isBeforeFirst());
        assertTrue(msg, !rs.isAfterLast());
        // isFirst() only on row 0, isLast() only on row 99.
        if (j == 0) {
          assertTrue(msg, rs.isFirst());
        } else {
          assertTrue(msg, !rs.isFirst());
        }

        if (j == 99) {
          assertTrue(msg, rs.isLast());
        } else {
          assertTrue(msg, !rs.isLast());
        }
      }

      msg = "after-last row positioning error with fetchsize=" + size;
      assertTrue(msg, !rs.next());

      assertTrue(msg, !rs.isBeforeFirst());
      assertTrue(msg, rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());
      assertTrue(msg, !rs.isLast());

      rs.close();
      stmt.close();
    }
  }

  // Test odd queries that should not be transformed into cursor-based fetches.
  @Test
  public void testInsert() throws Exception {
    // INSERT should not be transformed.
    PreparedStatement stmt = con.prepareStatement("insert into test_fetch(value) values(1)");
    stmt.setFetchSize(100); // Should be meaningless.
    stmt.executeUpdate();
  }

  // A multi-statement string must not be rewritten into a cursor fetch; the
  // INSERT's effect (row 100) must be visible to the SELECT in the same batch,
  // so 101 rows come back in total.
  @Test
  public void testMultistatement() throws Exception {
    // Queries with multiple statements should not be transformed.

    createRows(100); // 0 .. 99
    PreparedStatement stmt = con.prepareStatement(
        "insert into test_fetch(value) values(100); select * from test_fetch order by value");
    stmt.setFetchSize(10);

    assertTrue(!stmt.execute()); // INSERT
    assertTrue(stmt.getMoreResults()); // SELECT
    ResultSet rs = stmt.getResultSet();
    int count = 0;
    while (rs.next()) {
      assertEquals(count, rs.getInt(1));
      ++count;
    }

    assertEquals(101, count);
  }

  // if the driver tries to use a cursor with autocommit on
  // it will fail because the cursor will disappear partway
  // through execution
  @Test
  public void testNoCursorWithAutoCommit() throws Exception {
    createRows(10); // 0 .. 9
    con.setAutoCommit(true);
    Statement stmt = con.createStatement();
    stmt.setFetchSize(3);
    ResultSet rs = stmt.executeQuery("SELECT * FROM test_fetch ORDER BY value");
    int count = 0;
    while (rs.next()) {
      assertEquals(count++, rs.getInt(1));
    }

    assertEquals(10, count);
  }

  // getRow() must report absolute row numbers even when fetchSize=1 forces a
  // new server round trip for every row.
  @Test
  public void testGetRow() throws SQLException {
    Statement stmt = con.createStatement();
    stmt.setFetchSize(1);
    ResultSet rs = stmt.executeQuery("SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3");
    int count = 0;
    while (rs.next()) {
      count++;
      assertEquals(count, rs.getInt(1));
      assertEquals(count, rs.getRow());
    }
    assertEquals(3, count);
  }

  // isLast() may change the results of other positioning methods as it has to
  // buffer some more results. These tests avoid using it so as to test the
  // robustness of the other positioning methods.
  @Test
  public void testRowResultPositioningWithoutIsLast() throws Exception {
    String msg;

    int rowCount = 4;
    createRows(rowCount);

    // Fetch sizes straddle the row count (below, equal, above).
    int[] sizes = {1, 2, 3, 4, 5};
    for (int size : sizes) {
      Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
      stmt.setFetchSize(size);

      ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
      msg = "before-first row positioning error with fetchsize=" + size;
      assertTrue(msg, rs.isBeforeFirst());
      assertTrue(msg, !rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());

      for (int j = 0; j < rowCount; j++) {
        msg = "row " + j + " positioning error with fetchsize=" + size;
        assertTrue(msg, rs.next());
        assertEquals(msg, j, rs.getInt(1));

        assertTrue(msg, !rs.isBeforeFirst());
        assertTrue(msg, !rs.isAfterLast());
        if (j == 0) {
          assertTrue(msg, rs.isFirst());
        } else {
          assertTrue(msg, !rs.isFirst());
        }
      }

      msg = "after-last row positioning error with fetchsize=" + size;
      assertTrue(msg, !rs.next());

      assertTrue(msg, !rs.isBeforeFirst());
      assertTrue(msg, rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());
      assertTrue(msg, !rs.isLast());

      rs.close();
      stmt.close();
    }
  }

  // Empty resultsets require all row positioning methods to return false
  // (note: no createRows() call here — the table is empty).
  @Test
  public void testNoRowResultPositioning() throws Exception {
    int[] sizes = {0, 1, 50, 100};
    for (int size : sizes) {
      Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
      stmt.setFetchSize(size);

      ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
      String msg = "no row (empty resultset) positioning error with fetchsize=" + size;
      assertTrue(msg, !rs.isBeforeFirst());
      assertTrue(msg, !rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());
      assertTrue(msg, !rs.isLast());

      assertTrue(msg, !rs.next());

      assertTrue(msg, !rs.isBeforeFirst());
      assertTrue(msg, !rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());
      assertTrue(msg, !rs.isLast());

      rs.close();
      stmt.close();
    }
  }

  // Empty resultsets require all row positioning methods to return false,
  // including first() on a scrollable result set.
  @Test
  public void testScrollableNoRowResultPositioning() throws Exception {
    int[] sizes = {0, 1, 50, 100};
    for (int size : sizes) {
      Statement stmt =
          con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
      stmt.setFetchSize(size);

      ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
      String msg = "no row (empty resultset) positioning error with fetchsize=" + size;
      assertTrue(msg, !rs.isBeforeFirst());
      assertTrue(msg, !rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());
      assertTrue(msg, !rs.isLast());

      assertTrue(msg, !rs.first());
      assertTrue(msg, !rs.isBeforeFirst());
      assertTrue(msg, !rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());
      assertTrue(msg, !rs.isLast());

      assertTrue(msg, !rs.next());
      assertTrue(msg, !rs.isBeforeFirst());
      assertTrue(msg, !rs.isAfterLast());
      assertTrue(msg, !rs.isFirst());
      assertTrue(msg, !rs.isLast());

      rs.close();
      stmt.close();
    }
  }
}
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.postgresql.PGConnection; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.Oid; +import org.postgresql.core.QueryExecutor; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PGBinaryObject; +import org.postgresql.util.PGobject; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collection; + +/** + * TestCase to test handling of binary types for custom objects. + */ +@RunWith(Parameterized.class) +public class CustomTypeWithBinaryTransferTest extends BaseTest4 { + // define an oid of a binary type for testing, POINT is used here as it already exists in the + // database and requires no complex own type definition + private static final int CUSTOM_TYPE_OID = Oid.POINT; + + public CustomTypeWithBinaryTransferTest(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + /** + * Set up the fixture for this testcase: the tables for this test. 
+ * + * @throws SQLException if a database error occurs + */ + @BeforeClass + public static void createTestTable() throws SQLException { + try (Connection con = TestUtil.openDB()) { + TestUtil.createTable(con, "test_binary_pgobject", "id integer,name text,geom point"); + } + } + + /** + * Tear down the fixture for this test case. + * + * @throws SQLException if a database error occurs + */ + @AfterClass + public static void dropTestTable() throws SQLException { + try (Connection con = TestUtil.openDB()) { + TestUtil.dropTable(con, "test_binary_pgobject"); + } + } + + @Override + public void setUp() throws Exception { + super.setUp(); + QueryExecutor queryExecutor = con.unwrap(BaseConnection.class).getQueryExecutor(); + queryExecutor.removeBinarySendOid(CUSTOM_TYPE_OID); + queryExecutor.removeBinaryReceiveOid(CUSTOM_TYPE_OID); + assertBinaryForReceive(CUSTOM_TYPE_OID, false, + () -> "Binary transfer for point type should be disabled since we've deactivated it in " + + "updateProperties"); + + assertBinaryForSend(CUSTOM_TYPE_OID, false, + () -> "Binary transfer for point type should be disabled since we've deactivated it in " + + "updateProperties"); + try (Statement st = con.createStatement()) { + st.execute("DELETE FROM test_binary_pgobject"); + st.execute("INSERT INTO test_binary_pgobject(id,name,geom) values(1,'Test',Point(1,2))"); + } + } + + /** + * Make sure custom binary types are handled automatically. 
+ * + * @throws SQLException if a database error occurs + */ + @Test + public void testCustomBinaryTypes() throws SQLException { + PGConnection pgconn = con.unwrap(PGConnection.class); + + // make sure the test type implements PGBinaryObject + assertTrue("test type should implement PGBinaryObject", + PGBinaryObject.class.isAssignableFrom(TestCustomType.class)); + + // now define a custom type, which will add it to the binary sent/received OIDs (if the type + // implements PGBinaryObject) + pgconn.addDataType("point", TestCustomType.class); + // check if the type was marked for binary transfer + if (preferQueryMode != PreferQueryMode.SIMPLE) { + assertBinaryForReceive(CUSTOM_TYPE_OID, true, + () -> "Binary transfer for point type should be activated by addDataType(..., " + + "TestCustomType.class)"); + assertBinaryForSend(CUSTOM_TYPE_OID, true, + () -> "Binary transfer for point type should be activated by addDataType(..., " + + "TestCustomType.class)"); + } + + TestCustomType co; + // Try with PreparedStatement + try (PreparedStatement pst = con.prepareStatement("SELECT geom FROM test_binary_pgobject WHERE id=?")) { + pst.setInt(1, 1); + try (ResultSet rs = pst.executeQuery()) { + assertTrue("rs.next()", rs.next()); + Object o = rs.getObject(1); + co = (TestCustomType) o; + // now binary transfer should be working + if (preferQueryMode == PreferQueryMode.SIMPLE) { + assertEquals( + "reading via prepared statement: TestCustomType.wasReadBinary() should use text encoding since preferQueryMode=SIMPLE", + "text", + co.wasReadBinary() ? "binary" : "text"); + } else { + assertEquals( + "reading via prepared statement: TestCustomType.wasReadBinary() should use match binary mode requested by the test", + binaryMode == BinaryMode.FORCE ? "binary" : "text", + co.wasReadBinary() ? 
"binary" : "text"); + } + } + } + + // ensure flag is still unset + assertFalse("wasWrittenBinary should be false since we have not written the object yet", + co.wasWrittenBinary()); + // now try to write it + try (PreparedStatement pst = + con.prepareStatement("INSERT INTO test_binary_pgobject(id,geom) VALUES(?,?)")) { + pst.setInt(1, 2); + pst.setObject(2, co); + pst.executeUpdate(); + // make sure transfer was binary + if (preferQueryMode == PreferQueryMode.SIMPLE) { + assertEquals( + "writing via prepared statement: TestCustomType.wasWrittenBinary() should use text encoding since preferQueryMode=SIMPLE", + "text", + co.wasWrittenBinary() ? "binary" : "text"); + } else { + assertEquals( + "writing via prepared statement: TestCustomType.wasWrittenBinary() should use match binary mode requested by the test", + binaryMode == BinaryMode.FORCE ? "binary" : "text", + co.wasWrittenBinary() ? "binary" : "text"); + } + } + } + + /** + * Custom type that supports binary format. + */ + @SuppressWarnings("serial") + public static class TestCustomType extends PGobject implements PGBinaryObject { + private byte[] byteValue; + private boolean wasReadBinary; + private boolean wasWrittenBinary; + + @Override + public String getValue() { + // set flag + this.wasWrittenBinary = false; + return super.getValue(); + } + + @Override + public int lengthInBytes() { + if (byteValue != null) { + return byteValue.length; + } else { + return 0; + } + } + + @Override + public void setByteValue(byte[] value, int offset) throws SQLException { + this.wasReadBinary = true; + // remember the byte value + byteValue = new byte[value.length - offset]; + System.arraycopy(value, offset, byteValue, 0, byteValue.length); + } + + @Override + public void setValue(String value) throws SQLException { + super.setValue(value); + // set flag + this.wasReadBinary = false; + } + + @Override + public void toBytes(byte[] bytes, int offset) { + if (byteValue != null) { + // make sure array is large enough + if 
((bytes.length - offset) <= byteValue.length) { + // copy data + System.arraycopy(byteValue, 0, bytes, offset, byteValue.length); + } else { + throw new IllegalArgumentException( + "byte array is too small, expected: " + byteValue.length + " got: " + + (bytes.length - offset)); + } + } else { + throw new IllegalStateException("no geometry has been set"); + } + // set flag + this.wasWrittenBinary = true; + } + + /** + * Checks, if this type was read in binary mode. + * + * @return true for binary mode, else false + */ + public boolean wasReadBinary() { + return this.wasReadBinary; + } + + /** + * Checks, if this type was written in binary mode. + * + * @return true for binary mode, else false + */ + public boolean wasWrittenBinary() { + return this.wasWrittenBinary; + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseEncodingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseEncodingTest.java new file mode 100644 index 0000000..c070936 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseEncodingTest.java @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.core.Encoding; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.Arrays; + +/* +* Test case for various encoding problems. +* +* Ensure that we can do a round-trip of all server-supported unicode values without trashing them, +* and that bad encodings are detected. 
+*/ +class DatabaseEncodingTest { + private static final int STEP = 100; + + private Connection con; + + // Set up the fixture for this testcase: a connection to a database with + // a table for this test. + @BeforeEach + void setUp() throws Exception { + con = TestUtil.openDB(); + TestUtil.createTempTable(con, "testdbencoding", + "unicode_ordinal integer primary key not null, unicode_string varchar(" + STEP + ")"); + // disabling auto commit makes the test run faster + // by not committing each insert individually. + con.setAutoCommit(false); + } + + // Tear down the fixture for this test case. + @AfterEach + void tearDown() throws Exception { + con.setAutoCommit(true); + TestUtil.closeDB(con); + } + + private static String dumpString(String s) { + StringBuffer sb = new StringBuffer(s.length() * 6); + for (int i = 0; i < s.length(); i++) { + sb.append("\\u"); + char c = s.charAt(i); + sb.append(Integer.toHexString((c >> 12) & 15)); + sb.append(Integer.toHexString((c >> 8) & 15)); + sb.append(Integer.toHexString((c >> 4) & 15)); + sb.append(Integer.toHexString(c & 15)); + } + return sb.toString(); + } + + @Test + void encoding() throws Exception { + String databaseEncoding = TestUtil.queryForString(con, "SELECT getdatabaseencoding()"); + Assumptions.assumeTrue("UTF8".equals(databaseEncoding), "Database encoding must be UTF8"); + + boolean testHighUnicode = true; + + // Create data. + // NB: we avoid d800-dfff as those are reserved for surrogates in UTF-16 + PreparedStatement insert = con.prepareStatement( + "INSERT INTO testdbencoding(unicode_ordinal, unicode_string) VALUES (?,?)"); + for (int i = 1; i < 0xd800; i += STEP) { + int count = (i + STEP) > 0xd800 ? 
0xd800 - i : STEP; + char[] testChars = new char[count]; + for (int j = 0; j < count; j++) { + testChars[j] = (char) (i + j); + } + + String testString = new String(testChars); + + insert.setInt(1, i); + insert.setString(2, testString); + assertEquals(1, insert.executeUpdate()); + } + + for (int i = 0xe000; i < 0x10000; i += STEP) { + int count = (i + STEP) > 0x10000 ? 0x10000 - i : STEP; + char[] testChars = new char[count]; + for (int j = 0; j < count; j++) { + testChars[j] = (char) (i + j); + } + + String testString = new String(testChars); + + insert.setInt(1, i); + insert.setString(2, testString); + assertEquals(1, insert.executeUpdate()); + } + + if (testHighUnicode) { + for (int i = 0x10000; i < 0x110000; i += STEP) { + int count = (i + STEP) > 0x110000 ? 0x110000 - i : STEP; + char[] testChars = new char[count * 2]; + for (int j = 0; j < count; j++) { + testChars[j * 2] = (char) (0xd800 + ((i + j - 0x10000) >> 10)); + testChars[j * 2 + 1] = (char) (0xdc00 + ((i + j - 0x10000) & 0x3ff)); + } + + String testString = new String(testChars); + + insert.setInt(1, i); + insert.setString(2, testString); + + // System.err.println("Inserting: " + dumpString(testString)); + + assertEquals(1, insert.executeUpdate()); + } + } + + con.commit(); + + // Check data. + Statement stmt = con.createStatement(); + stmt.setFetchSize(1); + ResultSet rs = stmt.executeQuery( + "SELECT unicode_ordinal, unicode_string FROM testdbencoding ORDER BY unicode_ordinal"); + for (int i = 1; i < 0xd800; i += STEP) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + + int count = (i + STEP) > 0xd800 ? 
0xd800 - i : STEP; + char[] testChars = new char[count]; + for (int j = 0; j < count; j++) { + testChars[j] = (char) (i + j); + } + + String testString = new String(testChars); + + assertEquals(dumpString(testString), + dumpString(rs.getString(2)), + "Test string: " + dumpString(testString)); + } + + for (int i = 0xe000; i < 0x10000; i += STEP) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + + int count = (i + STEP) > 0x10000 ? 0x10000 - i : STEP; + char[] testChars = new char[count]; + for (int j = 0; j < count; j++) { + testChars[j] = (char) (i + j); + } + + String testString = new String(testChars); + + assertEquals(dumpString(testString), + dumpString(rs.getString(2)), + "Test string: " + dumpString(testString)); + } + + if (testHighUnicode) { + for (int i = 0x10000; i < 0x110000; i += STEP) { + assertTrue(rs.next()); + assertEquals(i, rs.getInt(1)); + + int count = (i + STEP) > 0x110000 ? 0x110000 - i : STEP; + char[] testChars = new char[count * 2]; + for (int j = 0; j < count; j++) { + testChars[j * 2] = (char) (0xd800 + ((i + j - 0x10000) >> 10)); + testChars[j * 2 + 1] = (char) (0xdc00 + ((i + j - 0x10000) & 0x3ff)); + } + + String testString = new String(testChars); + + assertEquals(dumpString(testString), + dumpString(rs.getString(2)), + "Test string: " + dumpString(testString)); + } + } + } + + @Test + void uTF8Decode() throws Exception { + // Tests for our custom UTF-8 decoder. + + Encoding utf8Encoding = Encoding.getJVMEncoding("UTF-8"); + + for (int ch = 0; ch < 0x110000; ch++) { + if (ch >= 0xd800 && ch < 0xe000) { + continue; // Surrogate range. 
+ } + + String testString; + if (ch >= 0x10000) { + testString = new String(new char[]{(char) (0xd800 + ((ch - 0x10000) >> 10)), + (char) (0xdc00 + ((ch - 0x10000) & 0x3ff))}); + } else { + testString = new String(new char[]{(char) ch}); + } + + byte[] jvmEncoding = testString.getBytes("UTF-8"); + String jvmDecoding = new String(jvmEncoding, 0, jvmEncoding.length, "UTF-8"); + String ourDecoding = utf8Encoding.decode(jvmEncoding, 0, jvmEncoding.length); + + assertEquals(dumpString(testString), + dumpString(jvmDecoding), + "Test string: " + dumpString(testString)); + assertEquals(dumpString(testString), + dumpString(ourDecoding), + "Test string: " + dumpString(testString)); + } + } + + /** + * Tests that invalid utf-8 values are replaced with the unicode replacement chart. + */ + @Test + void truncatedUTF8Decode() throws Exception { + Encoding utf8Encoding = Encoding.getJVMEncoding("UTF-8"); + + byte[][] shortSequences = new byte[][]{{(byte) 0xc0}, // Second byte must be present + + {(byte) 0xe0}, // Second byte must be present + {(byte) 0xe0, (byte) 0x80}, // Third byte must be present + + {(byte) 0xf0}, // Second byte must be present + {(byte) 0xf0, (byte) 0x80}, // Third byte must be present + {(byte) 0xf0, (byte) 0x80, (byte) 0x80}, // Fourth byte must be present + }; + + byte[] paddedSequence = new byte[32]; + for (int i = 0; i < shortSequences.length; i++) { + byte[] sequence = shortSequences[i]; + String expected = "\uFFFD"; + for (int j = 1; j < sequence.length; j++) { + expected += "\uFFFD"; + } + + String str = utf8Encoding.decode(sequence, 0, sequence.length); + assertEquals(expected, str, "itr:" + i); + + // Try it with padding and a truncated length. 
+ Arrays.fill(paddedSequence, (byte) 0); + System.arraycopy(sequence, 0, paddedSequence, 0, sequence.length); + + str = utf8Encoding.decode(paddedSequence, 0, sequence.length); + assertEquals(expected, str, "itr:" + i); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataCacheTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataCacheTest.java new file mode 100644 index 0000000..55814a3 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataCacheTest.java @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.core.TypeInfo; +import org.postgresql.jdbc.PgConnection; +import org.postgresql.test.TestUtil; +import org.postgresql.util.TestLogHandler; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.SQLException; +import java.util.List; +import java.util.logging.Level; +import java.util.logging.LogManager; +import java.util.logging.LogRecord; +import java.util.logging.Logger; +import java.util.regex.Pattern; + +/* +* Tests for caching of DatabaseMetadata +* +*/ +class DatabaseMetaDataCacheTest { + private PgConnection con; + private TestLogHandler log; + private Logger driverLogger; + private Level driverLogLevel; + + private static final Pattern SQL_TYPE_QUERY_LOG_FILTER = Pattern.compile("querying SQL typecode for pg type"); + private static final Pattern SQL_TYPE_CACHE_LOG_FILTER = Pattern.compile("caching all SQL typecodes"); + + @BeforeEach + void setUp() throws Exception { + con = (PgConnection) TestUtil.openDB(); + log = new TestLogHandler(); + driverLogger = LogManager.getLogManager().getLogger("org.postgresql"); + driverLogger.addHandler(log); + driverLogLevel = 
driverLogger.getLevel(); + driverLogger.setLevel(Level.ALL); + } + + @AfterEach + void tearDown() throws Exception { + TestUtil.closeDB(con); + driverLogger.removeHandler(log); + driverLogger.setLevel(driverLogLevel); + log = null; + } + + @Test + void getSQLTypeQueryCache() throws SQLException { + TypeInfo ti = con.getTypeInfo(); + + List typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER); + assertEquals(0, typeQueries.size()); + + ti.getSQLType("xid"); // this must be a type not in the hardcoded 'types' list + typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER); + assertEquals(1, typeQueries.size()); + + ti.getSQLType("xid"); // this time it should be retrieved from the cache + typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER); + assertEquals(1, typeQueries.size()); + } + + @Test + void getTypeInfoUsesCache() throws SQLException { + con.getMetaData().getTypeInfo(); + + List typeCacheQuery = log.getRecordsMatching(SQL_TYPE_CACHE_LOG_FILTER); + assertEquals(1, typeCacheQuery.size(), "PgDatabaseMetadata.getTypeInfo() did not cache SQL typecodes"); + + List typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER); + assertEquals(0, typeQueries.size(), "PgDatabaseMetadata.getTypeInfo() resulted in individual queries for SQL typecodes"); + } + + @Test + void typeForAlias() { + TypeInfo ti = con.getTypeInfo(); + assertEquals("bool", ti.getTypeForAlias("boolean")); + assertEquals("bool", ti.getTypeForAlias("Boolean")); + assertEquals("bool", ti.getTypeForAlias("Bool")); + assertEquals("bogus", ti.getTypeForAlias("bogus")); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataPropertiesTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataPropertiesTest.java new file mode 100644 index 0000000..ebb44a8 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataPropertiesTest.java @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development 
Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.Driver; +import org.postgresql.PGConnection; +import org.postgresql.test.TestUtil; +import org.postgresql.util.DriverInfo; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.SQLException; + +/* +* TestCase to test the internal functionality of org.postgresql.jdbc2.DatabaseMetaData's various +* properties. Methods which return a ResultSet are tested elsewhere. This avoids a complicated +* setUp/tearDown for something like assertTrue(dbmd.nullPlusNonNullIsNull()); +*/ +class DatabaseMetaDataPropertiesTest { + private Connection con; + + @BeforeEach + void setUp() throws Exception { + con = TestUtil.openDB(); + } + + @AfterEach + void tearDown() throws Exception { + TestUtil.closeDB(con); + } + + /* + * The spec says this may return null, but we always do! 
+ */ + @Test + void getMetaData() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + } + + /* + * Test default capabilities + */ + @Test + void capabilities() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + assertTrue(dbmd.allProceduresAreCallable()); + assertTrue(dbmd.allTablesAreSelectable()); // not true all the time + + // This should always be false for postgresql (at least for 7.x) + assertFalse(dbmd.isReadOnly()); + + // we support multiple resultsets via multiple statements in one execute() now + assertTrue(dbmd.supportsMultipleResultSets()); + + // yes, as multiple backends can have transactions open + assertTrue(dbmd.supportsMultipleTransactions()); + + assertTrue(dbmd.supportsMinimumSQLGrammar()); + assertFalse(dbmd.supportsCoreSQLGrammar()); + assertFalse(dbmd.supportsExtendedSQLGrammar()); + assertTrue(dbmd.supportsANSI92EntryLevelSQL()); + assertFalse(dbmd.supportsANSI92IntermediateSQL()); + assertFalse(dbmd.supportsANSI92FullSQL()); + + assertTrue(dbmd.supportsIntegrityEnhancementFacility()); + + } + + @Test + void joins() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + assertTrue(dbmd.supportsOuterJoins()); + assertTrue(dbmd.supportsFullOuterJoins()); + assertTrue(dbmd.supportsLimitedOuterJoins()); + } + + @Test + void cursors() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + assertFalse(dbmd.supportsPositionedDelete()); + assertFalse(dbmd.supportsPositionedUpdate()); + } + + @Test + void values() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + int indexMaxKeys = dbmd.getMaxColumnsInIndex(); + assertEquals(32, indexMaxKeys); + } + + @Test + void nulls() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + assertFalse(dbmd.nullsAreSortedAtStart()); + assertFalse(dbmd.nullsAreSortedAtEnd()); 
+ assertTrue(dbmd.nullsAreSortedHigh()); + assertFalse(dbmd.nullsAreSortedLow()); + + assertTrue(dbmd.nullPlusNonNullIsNull()); + + assertTrue(dbmd.supportsNonNullableColumns()); + } + + @Test + void localFiles() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + assertFalse(dbmd.usesLocalFilePerTable()); + assertFalse(dbmd.usesLocalFiles()); + } + + @Test + void identifiers() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + assertFalse(dbmd.supportsMixedCaseIdentifiers()); + assertTrue(dbmd.supportsMixedCaseQuotedIdentifiers()); + + assertFalse(dbmd.storesUpperCaseIdentifiers()); + assertTrue(dbmd.storesLowerCaseIdentifiers()); + assertFalse(dbmd.storesUpperCaseQuotedIdentifiers()); + assertFalse(dbmd.storesLowerCaseQuotedIdentifiers()); + assertFalse(dbmd.storesMixedCaseQuotedIdentifiers()); + + assertEquals("\"", dbmd.getIdentifierQuoteString()); + + } + + @Test + void tables() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + // we can add columns + assertTrue(dbmd.supportsAlterTableWithAddColumn()); + + // we can only drop columns in >= 7.3 + assertTrue(dbmd.supportsAlterTableWithDropColumn()); + } + + @Test + void select() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + // yes we can?: SELECT col a FROM a; + assertTrue(dbmd.supportsColumnAliasing()); + + // yes we can have expressions in ORDERBY + assertTrue(dbmd.supportsExpressionsInOrderBy()); + + // Yes, an ORDER BY clause can contain columns that are not in the + // SELECT clause. 
+ assertTrue(dbmd.supportsOrderByUnrelated()); + + assertTrue(dbmd.supportsGroupBy()); + assertTrue(dbmd.supportsGroupByUnrelated()); + assertTrue(dbmd.supportsGroupByBeyondSelect()); // needs checking + } + + @Test + void dBParams() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + assertEquals(TestUtil.getURL(), dbmd.getURL()); + assertEquals(TestUtil.getUser(), dbmd.getUserName()); + } + + @Test + void dbProductDetails() throws SQLException { + assertTrue(con instanceof PGConnection); + + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + assertEquals("PostgreSQL", dbmd.getDatabaseProductName()); + assertTrue(dbmd.getDatabaseMajorVersion() >= 8); + assertTrue(dbmd.getDatabaseMinorVersion() >= 0); + assertTrue(dbmd.getDatabaseProductVersion().startsWith(String.valueOf(dbmd.getDatabaseMajorVersion()))); + } + + @Test + void driverVersioning() throws SQLException { + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + assertEquals("PostgreSQL JDBC Driver", dbmd.getDriverName()); + assertEquals(DriverInfo.DRIVER_VERSION, dbmd.getDriverVersion()); + assertEquals(new Driver().getMajorVersion(), dbmd.getDriverMajorVersion()); + assertEquals(new Driver().getMinorVersion(), dbmd.getDriverMinorVersion()); + assertTrue(dbmd.getJDBCMajorVersion() >= 4); + assertTrue(dbmd.getJDBCMinorVersion() >= 0); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTest.java new file mode 100644 index 0000000..5c16fae --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTest.java @@ -0,0 +1,1817 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.hamcrest.CoreMatchers.allOf; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItem; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.PGProperty; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4.BinaryMode; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; + +/* + * TestCase to test the internal functionality of org.postgresql.jdbc2.DatabaseMetaData + * + */ +public class DatabaseMetaDataTest { + private Connection con; + private BinaryMode binaryMode; + + public void initDatabaseMetaDataTest(BinaryMode binaryMode) { + this.binaryMode = binaryMode; + } + + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @BeforeEach + void setUp() throws Exception { + if (binaryMode == 
BinaryMode.FORCE) { + final Properties props = new Properties(); + PGProperty.PREPARE_THRESHOLD.set(props, -1); + con = TestUtil.openDB(props); + } else { + con = TestUtil.openDB(); + } + TestUtil.createTable(con, "metadatatest", + "id int4, name text, updated timestamptz, colour text, quest text"); + TestUtil.createTable(con, "precision_test", "implicit_precision numeric"); + TestUtil.dropSequence(con, "sercoltest_b_seq"); + TestUtil.dropSequence(con, "sercoltest_c_seq"); + TestUtil.createTable(con, "sercoltest", "a int, b serial, c bigserial"); + TestUtil.createTable(con, "\"a\\\"", "a int4"); + TestUtil.createTable(con, "\"a'\"", "a int4"); + TestUtil.createTable(con, "arraytable", "a numeric(5,2)[], b varchar(100)[]"); + TestUtil.createTable(con, "intarraytable", "a int4[], b int4[][]"); + TestUtil.createView(con, "viewtest", "SELECT id, quest FROM metadatatest"); + TestUtil.dropType(con, "custom"); + TestUtil.dropType(con, "_custom"); + TestUtil.createCompositeType(con, "custom", "i int", false); + TestUtil.createCompositeType(con, "_custom", "f float", false); + + // create a table and multiple comments on it + TestUtil.createTable(con, "duplicate", "x text"); + TestUtil.execute(con, "comment on table duplicate is 'duplicate table'"); + TestUtil.execute(con, "create or replace function bar() returns integer language sql as $$ select 1 $$"); + TestUtil.execute(con, "comment on function bar() is 'bar function'"); + try (Connection conPriv = TestUtil.openPrivilegedDB()) { + TestUtil.execute(conPriv, "update pg_description set objoid = 'duplicate'::regclass where objoid = 'bar'::regproc"); + } + + // 8.2 does not support arrays of composite types + TestUtil.createTable(con, "customtable", "c1 custom, c2 _custom" + + (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3) ? ", c3 custom[], c4 _custom[]" : "")); + + Statement stmt = con.createStatement(); + // we add the following comments to ensure the joins to the comments + // are done correctly. 
This ensures we correctly test that case. + stmt.execute("comment on table metadatatest is 'this is a table comment'"); + stmt.execute("comment on column metadatatest.id is 'this is a column comment'"); + + stmt.execute( + "CREATE OR REPLACE FUNCTION f1(int, varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL"); + stmt.execute( + "CREATE OR REPLACE FUNCTION f2(a int, b varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL"); + stmt.execute( + "CREATE OR REPLACE FUNCTION f3(IN a int, INOUT b varchar, OUT c timestamptz) AS $f$ BEGIN b := 'a'; c := now(); return; END; $f$ LANGUAGE plpgsql"); + stmt.execute( + "CREATE OR REPLACE FUNCTION f4(int) RETURNS metadatatest AS 'SELECT 1, ''a''::text, now(), ''c''::text, ''q''::text' LANGUAGE SQL"); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) { + // RETURNS TABLE requires PostgreSQL 8.4+ + stmt.execute( + "CREATE OR REPLACE FUNCTION f5() RETURNS TABLE (i int) LANGUAGE sql AS 'SELECT 1'"); + } + + // create a custom `&` operator, which caused failure with `&` usage in getIndexInfo() + stmt.execute( + "CREATE OR REPLACE FUNCTION f6(numeric, integer) returns integer as 'BEGIN return $1::integer & $2;END;' language plpgsql immutable;"); + stmt.execute("DROP OPERATOR IF EXISTS & (numeric, integer)"); + stmt.execute("CREATE OPERATOR & (LEFTARG = numeric, RIGHTARG = integer, PROCEDURE = f6)"); + + TestUtil.createDomain(con, "nndom", "int not null"); + TestUtil.createDomain(con, "varbit2", "varbit(3)"); + TestUtil.createDomain(con, "float83", "numeric(8,3)"); + + TestUtil.createTable(con, "domaintable", "id nndom, v varbit2, f float83"); + stmt.close(); + + if ( TestUtil.haveMinimumServerVersion(con, ServerVersion.v12) ) { + TestUtil.createTable(con, "employee", "id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, hours_per_week decimal(3,2), rate_per_hour decimal(3,2), gross_pay decimal GENERATED ALWAYS AS (hours_per_week * rate_per_hour) STORED"); + } + } + + @AfterEach + void tearDown() throws Exception { + // Drop 
function first because it depends on the + // metadatatest table's type + Statement stmt = con.createStatement(); + stmt.execute("DROP FUNCTION f4(int)"); + TestUtil.execute(con, "drop function bar()"); + TestUtil.dropTable(con, "duplicate"); + + TestUtil.dropView(con, "viewtest"); + TestUtil.dropTable(con, "metadatatest"); + TestUtil.dropTable(con, "sercoltest"); + TestUtil.dropSequence(con, "sercoltest_b_seq"); + TestUtil.dropSequence(con, "sercoltest_c_seq"); + TestUtil.dropTable(con, "precision_test"); + TestUtil.dropTable(con, "\"a\\\""); + TestUtil.dropTable(con, "\"a'\""); + TestUtil.dropTable(con, "arraytable"); + TestUtil.dropTable(con, "intarraytable"); + TestUtil.dropTable(con, "customtable"); + TestUtil.dropType(con, "custom"); + TestUtil.dropType(con, "_custom"); + + stmt.execute("DROP FUNCTION f1(int, varchar)"); + stmt.execute("DROP FUNCTION f2(int, varchar)"); + stmt.execute("DROP FUNCTION f3(int, varchar)"); + stmt.execute("DROP OPERATOR IF EXISTS & (numeric, integer)"); + stmt.execute("DROP FUNCTION f6(numeric, integer)"); + TestUtil.dropTable(con, "domaintable"); + TestUtil.dropDomain(con, "nndom"); + TestUtil.dropDomain(con, "varbit2"); + TestUtil.dropDomain(con, "float83"); + + if ( TestUtil.haveMinimumServerVersion(con, ServerVersion.v12) ) { + TestUtil.dropTable(con, "employee"); + } + + TestUtil.closeDB(con); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void arrayTypeInfo(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getColumns(null, null, "intarraytable", "a"); + assertTrue(rs.next()); + assertEquals("_int4", rs.getString("TYPE_NAME")); + con.createArrayOf("integer", new Integer[]{}); + TestUtil.closeQuietly(rs); + rs = dbmd.getColumns(null, null, "intarraytable", "a"); + assertTrue(rs.next()); + assertEquals("_int4", rs.getString("TYPE_NAME")); + TestUtil.closeQuietly(rs); + } + + @MethodSource("data") + 
@ParameterizedTest(name = "binary = {0}") + void arrayInt4DoubleDim(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getColumns(null, null, "intarraytable", "b"); + assertTrue(rs.next()); + assertEquals("_int4", rs.getString("TYPE_NAME")); // even int4[][] is represented as _int4 + con.createArrayOf("int4", new int[][]{{1, 2}, {3, 4}}); + rs = dbmd.getColumns(null, null, "intarraytable", "b"); + assertTrue(rs.next()); + assertEquals("_int4", rs.getString("TYPE_NAME")); // even int4[][] is represented as _int4 + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void customArrayTypeInfo(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet res = dbmd.getColumns(null, null, "customtable", null); + assertTrue(res.next()); + assertEquals("custom", res.getString("TYPE_NAME")); + assertTrue(res.next()); + assertEquals("_custom", res.getString("TYPE_NAME")); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) { + assertTrue(res.next()); + assertEquals("__custom", res.getString("TYPE_NAME")); + assertTrue(res.next()); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v16)) { + assertEquals("__custom_1", res.getString("TYPE_NAME")); + } else { + assertEquals("___custom", res.getString("TYPE_NAME")); + } + } + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) { + con.createArrayOf("custom", new Object[]{}); + res = dbmd.getColumns(null, null, "customtable", null); + assertTrue(res.next()); + assertEquals("custom", res.getString("TYPE_NAME")); + assertTrue(res.next()); + assertEquals("_custom", res.getString("TYPE_NAME")); + assertTrue(res.next()); + assertEquals("__custom", res.getString("TYPE_NAME")); + assertTrue(res.next()); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v16)) { + assertEquals("__custom_1", 
res.getString("TYPE_NAME")); + } else { + assertEquals("___custom", res.getString("TYPE_NAME")); + } + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void tables(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + ResultSet rs = dbmd.getTables(null, null, "metadatates%", new String[]{"TABLE"}); + assertTrue(rs.next()); + String tableName = rs.getString("TABLE_NAME"); + assertEquals("metadatatest", tableName); + String tableType = rs.getString("TABLE_TYPE"); + assertEquals("TABLE", tableType); + assertEquals(5, rs.findColumn("REMARKS")); + assertEquals(6, rs.findColumn("TYPE_CAT")); + assertEquals(7, rs.findColumn("TYPE_SCHEM")); + assertEquals(8, rs.findColumn("TYPE_NAME")); + assertEquals(9, rs.findColumn("SELF_REFERENCING_COL_NAME")); + assertEquals(10, rs.findColumn("REF_GENERATION")); + + // There should only be one row returned + assertFalse(rs.next(), "getTables() returned too many rows"); + + rs.close(); + + rs = dbmd.getColumns("", "", "meta%", "%"); + assertTrue(rs.next()); + assertEquals("metadatatest", rs.getString("TABLE_NAME")); + assertEquals("id", rs.getString("COLUMN_NAME")); + assertEquals(Types.INTEGER, rs.getInt("DATA_TYPE")); + + assertTrue(rs.next()); + assertEquals("metadatatest", rs.getString("TABLE_NAME")); + assertEquals("name", rs.getString("COLUMN_NAME")); + assertEquals(Types.VARCHAR, rs.getInt("DATA_TYPE")); + + assertTrue(rs.next()); + assertEquals("metadatatest", rs.getString("TABLE_NAME")); + assertEquals("updated", rs.getString("COLUMN_NAME")); + assertEquals(Types.TIMESTAMP, rs.getInt("DATA_TYPE")); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void crossReference(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + Connection con1 = TestUtil.openDB(); + + TestUtil.createTable(con1, "vv", "a int not null, b int not null, constraint vv_pkey 
primary key ( a, b )"); + + TestUtil.createTable(con1, "ww", + "m int not null, n int not null, constraint m_pkey primary key ( m, n ), constraint ww_m_fkey foreign key ( m, n ) references vv ( a, b )"); + + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + ResultSet rs = dbmd.getCrossReference(null, "", "vv", null, "", "ww"); + String[] expectedPkColumnNames = new String[]{"a", "b"}; + String[] expectedFkColumnNames = new String[]{"m", "n"}; + int numRows = 0; + + for (int j = 1; rs.next(); j++) { + + String pkTableName = rs.getString("PKTABLE_NAME"); + assertEquals("vv", pkTableName); + + String pkColumnName = rs.getString("PKCOLUMN_NAME"); + assertEquals(expectedPkColumnNames[j - 1], pkColumnName); + + String fkTableName = rs.getString("FKTABLE_NAME"); + assertEquals("ww", fkTableName); + + String fkColumnName = rs.getString("FKCOLUMN_NAME"); + assertEquals(expectedFkColumnNames[j - 1], fkColumnName); + + String fkName = rs.getString("FK_NAME"); + assertEquals("ww_m_fkey", fkName); + + String pkName = rs.getString("PK_NAME"); + assertEquals("vv_pkey", pkName); + + int keySeq = rs.getInt("KEY_SEQ"); + assertEquals(j, keySeq); + numRows += 1; + } + assertEquals(2, numRows); + + TestUtil.dropTable(con1, "vv"); + TestUtil.dropTable(con1, "ww"); + TestUtil.closeDB(con1); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void foreignKeyActions(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + Connection conn = TestUtil.openDB(); + TestUtil.createTable(conn, "pkt", "id int primary key"); + TestUtil.createTable(conn, "fkt1", + "id int references pkt on update restrict on delete cascade"); + TestUtil.createTable(conn, "fkt2", + "id int references pkt on update set null on delete set default"); + DatabaseMetaData dbmd = conn.getMetaData(); + + ResultSet rs = dbmd.getImportedKeys(null, "", "fkt1"); + assertTrue(rs.next()); + assertEquals(DatabaseMetaData.importedKeyRestrict, 
// NOTE(review): span begins mid-statement in foreignKeyActions(); the
// assertEquals(DatabaseMetaData.importedKeyRestrict, ...) call opened on the
// previous chunk completes on the next line.
rs.getInt("UPDATE_RULE"));
    assertEquals(DatabaseMetaData.importedKeyCascade, rs.getInt("DELETE_RULE"));
    rs.close();

    rs = dbmd.getImportedKeys(null, "", "fkt2");
    assertTrue(rs.next());
    assertEquals(DatabaseMetaData.importedKeySetNull, rs.getInt("UPDATE_RULE"));
    assertEquals(DatabaseMetaData.importedKeySetDefault, rs.getInt("DELETE_RULE"));
    rs.close();

    TestUtil.dropTable(conn, "fkt2");
    TestUtil.dropTable(conn, "fkt1");
    TestUtil.dropTable(conn, "pkt");
    TestUtil.closeDB(conn);
  }

  /**
   * A foreign key may reference a UNIQUE constraint rather than the primary
   * key; getImportedKeys() must then report the unique constraint's name
   * (pkt_un_b) and column (b) instead of the primary key's.
   */
  @MethodSource("data")
  @ParameterizedTest(name = "binary = {0}")
  void foreignKeysToUniqueIndexes(BinaryMode binaryMode) throws Exception {
    initDatabaseMetaDataTest(binaryMode);
    Connection con1 = TestUtil.openDB();
    TestUtil.createTable(con1, "pkt",
        "a int not null, b int not null, CONSTRAINT pkt_pk_a PRIMARY KEY (a), CONSTRAINT pkt_un_b UNIQUE (b)");
    TestUtil.createTable(con1, "fkt",
        "c int, d int, CONSTRAINT fkt_fk_c FOREIGN KEY (c) REFERENCES pkt(b)");

    DatabaseMetaData dbmd = con.getMetaData();
    ResultSet rs = dbmd.getImportedKeys("", "", "fkt");
    int j = 0;
    for (; rs.next(); j++) {
      assertEquals("pkt", rs.getString("PKTABLE_NAME"));
      assertEquals("fkt", rs.getString("FKTABLE_NAME"));
      assertEquals("pkt_un_b", rs.getString("PK_NAME"));
      assertEquals("b", rs.getString("PKCOLUMN_NAME"));
    }
    // exactly one imported-key row expected for the single-column FK
    assertEquals(1, j);

    TestUtil.dropTable(con1, "fkt");
    TestUtil.dropTable(con1, "pkt");
    con1.close();
  }

  /**
   * A two-column foreign key whose column order (b,a) reverses the referenced
   * key's declaration order (a,b): KEY_SEQ must follow the FK declaration.
   */
  @MethodSource("data")
  @ParameterizedTest(name = "binary = {0}")
  void multiColumnForeignKeys(BinaryMode binaryMode) throws Exception {
    initDatabaseMetaDataTest(binaryMode);
    Connection con1 = TestUtil.openDB();
    TestUtil.createTable(con1, "pkt",
        "a int not null, b int not null, CONSTRAINT pkt_pk PRIMARY KEY (a,b)");
    TestUtil.createTable(con1, "fkt",
        "c int, d int, CONSTRAINT fkt_fk_pkt FOREIGN KEY (c,d) REFERENCES pkt(b,a)");

    DatabaseMetaData dbmd = con.getMetaData();
    // NOTE(review): chunk ends mid-statement; the table-name argument "fkt"
    // follows on the next chunk.
    ResultSet rs = dbmd.getImportedKeys("", "",
"fkt"); + int j = 0; + for (; rs.next(); j++) { + assertEquals("pkt", rs.getString("PKTABLE_NAME")); + assertEquals("fkt", rs.getString("FKTABLE_NAME")); + assertEquals(j + 1, rs.getInt("KEY_SEQ")); + if (j == 0) { + assertEquals("b", rs.getString("PKCOLUMN_NAME")); + assertEquals("c", rs.getString("FKCOLUMN_NAME")); + } else { + assertEquals("a", rs.getString("PKCOLUMN_NAME")); + assertEquals("d", rs.getString("FKCOLUMN_NAME")); + } + } + assertEquals(2, j); + + TestUtil.dropTable(con1, "fkt"); + TestUtil.dropTable(con1, "pkt"); + con1.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void sameTableForeignKeys(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + Connection con1 = TestUtil.openDB(); + + TestUtil.createTable(con1, "person", + "FIRST_NAME character varying(100) NOT NULL," + "LAST_NAME character varying(100) NOT NULL," + + "FIRST_NAME_PARENT_1 character varying(100)," + + "LAST_NAME_PARENT_1 character varying(100)," + + "FIRST_NAME_PARENT_2 character varying(100)," + + "LAST_NAME_PARENT_2 character varying(100)," + + "CONSTRAINT PERSON_pkey PRIMARY KEY (FIRST_NAME , LAST_NAME )," + + "CONSTRAINT PARENT_1_fkey FOREIGN KEY (FIRST_NAME_PARENT_1, LAST_NAME_PARENT_1)" + + "REFERENCES PERSON (FIRST_NAME, LAST_NAME) MATCH SIMPLE " + + "ON UPDATE CASCADE ON DELETE CASCADE," + + "CONSTRAINT PARENT_2_fkey FOREIGN KEY (FIRST_NAME_PARENT_2, LAST_NAME_PARENT_2)" + + "REFERENCES PERSON (FIRST_NAME, LAST_NAME) MATCH SIMPLE " + + "ON UPDATE CASCADE ON DELETE CASCADE"); + + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + ResultSet rs = dbmd.getImportedKeys(null, "", "person"); + + final List fkNames = new ArrayList<>(); + + int lastFieldCount = -1; + while (rs.next()) { + // destination table (all foreign keys point to the same) + String pkTableName = rs.getString("PKTABLE_NAME"); + assertEquals("person", pkTableName); + + // destination fields + String pkColumnName = 
rs.getString("PKCOLUMN_NAME"); + assertTrue("first_name".equals(pkColumnName) || "last_name".equals(pkColumnName)); + + // source table (all foreign keys are in the same) + String fkTableName = rs.getString("FKTABLE_NAME"); + assertEquals("person", fkTableName); + + // foreign key name + String fkName = rs.getString("FK_NAME"); + // sequence number within the foreign key + int seq = rs.getInt("KEY_SEQ"); + if (seq == 1) { + // begin new foreign key + assertFalse(fkNames.contains(fkName)); + fkNames.add(fkName); + // all foreign keys have 2 fields + assertTrue(lastFieldCount < 0 || lastFieldCount == 2); + } else { + // continue foreign key, i.e. fkName matches the last foreign key + assertEquals(fkNames.get(fkNames.size() - 1), fkName); + // see always increases by 1 + assertEquals(seq, lastFieldCount + 1); + } + lastFieldCount = seq; + } + // there's more than one foreign key from a table to another + assertEquals(2, fkNames.size()); + + TestUtil.dropTable(con1, "person"); + TestUtil.closeDB(con1); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void foreignKeys(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + Connection con1 = TestUtil.openDB(); + TestUtil.createTable(con1, "people", "id int4 primary key, name text"); + TestUtil.createTable(con1, "policy", "id int4 primary key, name text"); + + TestUtil.createTable(con1, "users", + "id int4 primary key, people_id int4, policy_id int4," + + "CONSTRAINT people FOREIGN KEY (people_id) references people(id)," + + "constraint policy FOREIGN KEY (policy_id) references policy(id)"); + + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + ResultSet rs = dbmd.getImportedKeys(null, "", "users"); + int j = 0; + for (; rs.next(); j++) { + + String pkTableName = rs.getString("PKTABLE_NAME"); + assertTrue("people".equals(pkTableName) || "policy".equals(pkTableName)); + + String pkColumnName = rs.getString("PKCOLUMN_NAME"); + assertEquals("id", 
// NOTE(review): span begins mid-statement in foreignKeys(); pkColumnName
// completes the assertEquals("id", ...) call opened on the previous chunk.
pkColumnName);

      String fkTableName = rs.getString("FKTABLE_NAME");
      assertEquals("users", fkTableName);

      String fkColumnName = rs.getString("FKCOLUMN_NAME");
      assertTrue("people_id".equals(fkColumnName) || "policy_id".equals(fkColumnName));

      String fkName = rs.getString("FK_NAME");
      assertTrue(fkName.startsWith("people") || fkName.startsWith("policy"));

      String pkName = rs.getString("PK_NAME");
      assertTrue("people_pkey".equals(pkName) || "policy_pkey".equals(pkName));

    }

    assertEquals(2, j);

    rs = dbmd.getExportedKeys(null, "", "people");

    // this is hacky, but it will serve the purpose
    assertTrue(rs.next());

    assertEquals("people", rs.getString("PKTABLE_NAME"));
    assertEquals("id", rs.getString("PKCOLUMN_NAME"));

    assertEquals("users", rs.getString("FKTABLE_NAME"));
    assertEquals("people_id", rs.getString("FKCOLUMN_NAME"));

    assertTrue(rs.getString("FK_NAME").startsWith("people"));

    TestUtil.dropTable(con1, "users");
    TestUtil.dropTable(con1, "people");
    TestUtil.dropTable(con1, "policy");
    TestUtil.closeDB(con1);
  }

  /** A numeric column declared without precision must report COLUMN_SIZE 0. */
  @MethodSource("data")
  @ParameterizedTest(name = "binary = {0}")
  void numericPrecision(BinaryMode binaryMode) throws SQLException {
    initDatabaseMetaDataTest(binaryMode);
    DatabaseMetaData dbmd = con.getMetaData();
    assertNotNull(dbmd);
    ResultSet rs = dbmd.getColumns(null, "public", "precision_test", "%");
    assertTrue(rs.next(), "It should have a row for the first column");
    assertEquals(0, rs.getInt("COLUMN_SIZE"), "The column size should be zero");
    assertFalse(rs.next(), "It should have a single column");
  }

  /**
   * getColumns() must expose the JDBC-specified column set in the
   * JDBC-specified order (positions 1..24).
   */
  @MethodSource("data")
  @ParameterizedTest(name = "binary = {0}")
  void columns(BinaryMode binaryMode) throws SQLException {
    initDatabaseMetaDataTest(binaryMode);
    // At the moment just test that no exceptions are thrown KJ
    String [] metadataColumns = {"TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME",
        "DATA_TYPE", "TYPE_NAME", "COLUMN_SIZE", "BUFFER_LENGTH",
        "DECIMAL_DIGITS", "NUM_PREC_RADIX", "NULLABLE", "REMARKS",
        "COLUMN_DEF", "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "CHAR_OCTET_LENGTH",
        "ORDINAL_POSITION", "IS_NULLABLE", "SCOPE_CATALOG", "SCOPE_SCHEMA",
        "SCOPE_TABLE", "SOURCE_DATA_TYPE", "IS_AUTOINCREMENT", "IS_GENERATEDCOLUMN"};

    DatabaseMetaData dbmd = con.getMetaData();
    assertNotNull(dbmd);
    ResultSet rs = dbmd.getColumns(null, null, "pg_class", null);
    if ( rs.next() ) {
      for (int i = 0; i < metadataColumns.length; i++) {
        assertEquals(i + 1, rs.findColumn(metadataColumns[i]));
      }
    }
    rs.close();
  }

  /**
   * Dropped columns (8.4+) must not appear in getColumns(), and the surviving
   * columns must be renumbered with consecutive ORDINAL_POSITIONs.
   */
  @MethodSource("data")
  @ParameterizedTest(name = "binary = {0}")
  void droppedColumns(BinaryMode binaryMode) throws SQLException {
    initDatabaseMetaDataTest(binaryMode);
    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
      return;
    }

    Statement stmt = con.createStatement();
    stmt.execute("ALTER TABLE metadatatest DROP name");
    stmt.execute("ALTER TABLE metadatatest DROP colour");
    stmt.close();

    DatabaseMetaData dbmd = con.getMetaData();
    ResultSet rs = dbmd.getColumns(null, null, "metadatatest", null);

    assertTrue(rs.next());
    assertEquals("id", rs.getString("COLUMN_NAME"));
    assertEquals(1, rs.getInt("ORDINAL_POSITION"));

    assertTrue(rs.next());
    assertEquals("updated", rs.getString("COLUMN_NAME"));
    assertEquals(2, rs.getInt("ORDINAL_POSITION"));

    assertTrue(rs.next());
    assertEquals("quest", rs.getString("COLUMN_NAME"));
    assertEquals(3, rs.getInt("ORDINAL_POSITION"));

    rs.close();

    rs = dbmd.getColumns(null, null, "metadatatest", "quest");
    assertTrue(rs.next());
    assertEquals("quest", rs.getString("COLUMN_NAME"));
    assertEquals(3, rs.getInt("ORDINAL_POSITION"));
    assertFalse(rs.next());
    rs.close();

    /* getFunctionColumns also has to be aware of dropped columns
       add this in here to make sure it can deal with them
     */
    rs = dbmd.getFunctionColumns(null, null, "f4", null);
    // first row is skipped unchecked — presumably f4's return-value row;
    // TODO(review) confirm against getFunctionColumns semantics
    assertTrue(rs.next());

    assertTrue(rs.next());
    assertEquals("id", rs.getString(4));

    assertTrue(rs.next());
    assertEquals("updated", rs.getString(4));

    rs.close();
  }

  /** serial/bigserial columns must be flagged IS_AUTOINCREMENT by getColumns(). */
  @MethodSource("data")
  @ParameterizedTest(name = "binary = {0}")
  void serialColumns(BinaryMode binaryMode) throws SQLException {
    initDatabaseMetaDataTest(binaryMode);
    DatabaseMetaData dbmd = con.getMetaData();
    ResultSet rs = dbmd.getColumns(null, null, "sercoltest", null);
    int rownum = 0;
    while (rs.next()) {
      assertEquals("sercoltest", rs.getString("TABLE_NAME"));
      assertEquals(rownum + 1, rs.getInt("ORDINAL_POSITION"));
      if (rownum == 0) {
        assertEquals("int4", rs.getString("TYPE_NAME"));

      } else if (rownum == 1) {
        assertEquals("serial", rs.getString("TYPE_NAME"));
        assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
      } else if (rownum == 2) {
        assertEquals("bigserial", rs.getString("TYPE_NAME"));
        assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
      }

      rownum++;
    }
    assertEquals(3, rownum);
    rs.close();
  }

  @MethodSource("data")
  @ParameterizedTest(name = "binary = {0}")
  void columnPrivileges(BinaryMode binaryMode) throws SQLException {
    initDatabaseMetaDataTest(binaryMode);
    // At the moment just test that no exceptions are thrown KJ
    DatabaseMetaData dbmd = con.getMetaData();
    assertNotNull(dbmd);
    ResultSet rs = dbmd.getColumnPrivileges(null, null, "pg_statistic", null);
    rs.close();
  }

  /*
   * Helper function - this logic is used several times to test relation privileges
   */
  public void relationPrivilegesHelper(String relationName) throws SQLException {
    // Query PG catalog for privileges
    DatabaseMetaData dbmd = con.getMetaData();
    assertNotNull(dbmd);
    ResultSet rs = dbmd.getTablePrivileges(null, null, relationName);

    // Parse result to check if table/view owner has select privileges
    boolean foundSelect = false;
    while (rs.next()) {
      if (rs.getString("GRANTEE").equals(TestUtil.getUser())
          && "SELECT".equals(rs.getString("PRIVILEGE"))) {
        foundSelect = true;
      }
    }
rs.close(); + + // Check test condition + assertTrue(foundSelect, + "Couldn't find SELECT priv on relation " + + relationName + " for " + TestUtil.getUser()); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void tablePrivileges(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + relationPrivilegesHelper("metadatatest"); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void viewPrivileges(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + relationPrivilegesHelper("viewtest"); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void materializedViewPrivileges(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_3)); + TestUtil.createMaterializedView(con, "matviewtest", "SELECT id, quest FROM metadatatest"); + try { + relationPrivilegesHelper("matviewtest"); + } finally { + TestUtil.dropMaterializedView(con, "matviewtest"); + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void noTablePrivileges(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + Statement stmt = con.createStatement(); + stmt.execute("REVOKE ALL ON metadatatest FROM PUBLIC"); + stmt.execute("REVOKE ALL ON metadatatest FROM " + TestUtil.getUser()); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getTablePrivileges(null, null, "metadatatest"); + assertFalse(rs.next()); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void primaryKeys(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + // At the moment just test that no exceptions are thrown KJ + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + ResultSet rs = dbmd.getPrimaryKeys(null, null, "pg_class"); + rs.close(); + } + + 
@MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void indexInfo(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + Statement stmt = con.createStatement(); + stmt.execute("create index idx_id on metadatatest (id)"); + stmt.execute("create index idx_func_single on metadatatest (upper(colour))"); + stmt.execute("create unique index idx_un_id on metadatatest(id)"); + stmt.execute("create index idx_func_multi on metadatatest (upper(colour), upper(quest))"); + stmt.execute("create index idx_func_mixed on metadatatest (colour, upper(quest))"); + + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false); + + assertTrue(rs.next()); + assertEquals("idx_un_id", rs.getString("INDEX_NAME")); + assertEquals(1, rs.getInt("ORDINAL_POSITION")); + assertEquals("id", rs.getString("COLUMN_NAME")); + assertFalse(rs.getBoolean("NON_UNIQUE")); + + assertTrue(rs.next()); + assertEquals("idx_func_mixed", rs.getString("INDEX_NAME")); + assertEquals(1, rs.getInt("ORDINAL_POSITION")); + assertEquals("colour", rs.getString("COLUMN_NAME")); + + assertTrue(rs.next()); + assertEquals("idx_func_mixed", rs.getString("INDEX_NAME")); + assertEquals(2, rs.getInt("ORDINAL_POSITION")); + assertEquals("upper(quest)", rs.getString("COLUMN_NAME")); + + assertTrue(rs.next()); + assertEquals("idx_func_multi", rs.getString("INDEX_NAME")); + assertEquals(1, rs.getInt("ORDINAL_POSITION")); + assertEquals("upper(colour)", rs.getString("COLUMN_NAME")); + + assertTrue(rs.next()); + assertEquals("idx_func_multi", rs.getString("INDEX_NAME")); + assertEquals(2, rs.getInt("ORDINAL_POSITION")); + assertEquals("upper(quest)", rs.getString("COLUMN_NAME")); + + assertTrue(rs.next()); + assertEquals("idx_func_single", rs.getString("INDEX_NAME")); + assertEquals(1, rs.getInt("ORDINAL_POSITION")); + assertEquals("upper(colour)", rs.getString("COLUMN_NAME")); + + assertTrue(rs.next()); 
+ assertEquals("idx_id", rs.getString("INDEX_NAME")); + assertEquals(1, rs.getInt("ORDINAL_POSITION")); + assertEquals("id", rs.getString("COLUMN_NAME")); + assertTrue(rs.getBoolean("NON_UNIQUE")); + + assertFalse(rs.next()); + + rs.close(); + } + + /** + * Order defined at + * https://docs.oracle.com/javase/8/docs/api/java/sql/DatabaseMetaData.html#getIndexInfo-java.lang.String-java.lang.String-java.lang.String-boolean-boolean- + */ + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void indexInfoColumnOrder(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false); + assertEquals(1, rs.findColumn("TABLE_CAT")); + assertEquals(2, rs.findColumn("TABLE_SCHEM")); + assertEquals(3, rs.findColumn("TABLE_NAME")); + assertEquals(4, rs.findColumn("NON_UNIQUE")); + assertEquals(5, rs.findColumn("INDEX_QUALIFIER")); + assertEquals(6, rs.findColumn("INDEX_NAME")); + assertEquals(7, rs.findColumn("TYPE")); + assertEquals(8, rs.findColumn("ORDINAL_POSITION")); + assertEquals(9, rs.findColumn("COLUMN_NAME")); + assertEquals(10, rs.findColumn("ASC_OR_DESC")); + assertEquals(11, rs.findColumn("CARDINALITY")); + assertEquals(12, rs.findColumn("PAGES")); + assertEquals(13, rs.findColumn("FILTER_CONDITION")); + + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void indexInfoColumnCase(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + try (ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false)) { + ResultSetMetaData rsmd = rs.getMetaData(); + for (int i = 1; i < rsmd.getColumnCount() + 1; i++) { + char[] chars = rsmd.getColumnName(i).toCharArray(); + for (int j = 0; j < chars.length; j++) { + if (Character.isAlphabetic(chars[j])) { 
+ assertTrue(Character.isUpperCase(chars[j]), "Column: " + rsmd.getColumnName(i) + " is not UPPER CASE"); + } + } + } + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void notNullDomainColumn(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getColumns("", "", "domaintable", ""); + assertTrue(rs.next()); + assertEquals("id", rs.getString("COLUMN_NAME")); + assertEquals("NO", rs.getString("IS_NULLABLE")); + assertTrue(rs.next()); + assertTrue(rs.next()); + assertFalse(rs.next()); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void domainColumnSize(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getColumns("", "", "domaintable", ""); + assertTrue(rs.next()); + assertEquals("id", rs.getString("COLUMN_NAME")); + assertEquals(10, rs.getInt("COLUMN_SIZE")); + assertTrue(rs.next()); + assertEquals("v", rs.getString("COLUMN_NAME")); + assertEquals(3, rs.getInt("COLUMN_SIZE")); + assertTrue(rs.next()); + assertEquals("f", rs.getString("COLUMN_NAME")); + assertEquals(8, rs.getInt("COLUMN_SIZE")); + assertEquals(3, rs.getInt("DECIMAL_DIGITS")); + + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void ascDescIndexInfo(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) { + return; + } + + Statement stmt = con.createStatement(); + stmt.execute("CREATE INDEX idx_a_d ON metadatatest (id ASC, quest DESC)"); + stmt.close(); + + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false); + + assertTrue(rs.next()); + assertEquals("idx_a_d", rs.getString("INDEX_NAME")); + assertEquals("id", rs.getString("COLUMN_NAME")); + assertEquals("A", 
rs.getString("ASC_OR_DESC")); + + assertTrue(rs.next()); + assertEquals("idx_a_d", rs.getString("INDEX_NAME")); + assertEquals("quest", rs.getString("COLUMN_NAME")); + assertEquals("D", rs.getString("ASC_OR_DESC")); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void partialIndexInfo(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + Statement stmt = con.createStatement(); + stmt.execute("create index idx_p_name_id on metadatatest (name) where id > 5"); + stmt.close(); + + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false); + + assertTrue(rs.next()); + assertEquals("idx_p_name_id", rs.getString("INDEX_NAME")); + assertEquals(1, rs.getInt("ORDINAL_POSITION")); + assertEquals("name", rs.getString("COLUMN_NAME")); + assertEquals("(id > 5)", rs.getString("FILTER_CONDITION")); + assertTrue(rs.getBoolean("NON_UNIQUE")); + + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void tableTypes(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + final List expectedTableTypes = new ArrayList<>(Arrays.asList("FOREIGN TABLE", "INDEX", "PARTITIONED INDEX", + "MATERIALIZED VIEW", "PARTITIONED TABLE", "SEQUENCE", "SYSTEM INDEX", "SYSTEM TABLE", "SYSTEM TOAST INDEX", + "SYSTEM TOAST TABLE", "SYSTEM VIEW", "TABLE", "TEMPORARY INDEX", "TEMPORARY SEQUENCE", "TEMPORARY TABLE", + "TEMPORARY VIEW", "TYPE", "VIEW")); + final List foundTableTypes = new ArrayList<>(); + + // Test that no exceptions are thrown + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + // Test that the table types returned are the same as those expected + ResultSet rs = dbmd.getTableTypes(); + while (rs.next()) { + String tableType = new String(rs.getBytes(1)); + foundTableTypes.add(tableType); + } + rs.close(); + Collections.sort(expectedTableTypes); + Collections.sort(foundTableTypes); + 
assertEquals(foundTableTypes, expectedTableTypes, "The table types received from DatabaseMetaData should match the 18 expected types"); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void funcWithoutNames(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + ResultSet rs = dbmd.getProcedureColumns(null, null, "f1", null); + + assertTrue(rs.next()); + assertEquals("returnValue", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnReturn, rs.getInt(5)); + + assertTrue(rs.next()); + assertEquals("$1", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5)); + assertEquals(Types.INTEGER, rs.getInt(6)); + + assertTrue(rs.next()); + assertEquals("$2", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5)); + assertEquals(Types.VARCHAR, rs.getInt(6)); + + assertFalse(rs.next()); + + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void funcWithNames(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getProcedureColumns(null, null, "f2", null); + + assertTrue(rs.next()); + + assertTrue(rs.next()); + assertEquals("a", rs.getString(4)); + + assertTrue(rs.next()); + assertEquals("b", rs.getString(4)); + + assertFalse(rs.next()); + + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void funcWithDirection(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getProcedureColumns(null, null, "f3", null); + + assertTrue(rs.next()); + assertEquals("a", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5)); + assertEquals(Types.INTEGER, rs.getInt(6)); + + assertTrue(rs.next()); + 
assertEquals("b", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnInOut, rs.getInt(5)); + assertEquals(Types.VARCHAR, rs.getInt(6)); + + assertTrue(rs.next()); + assertEquals("c", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnOut, rs.getInt(5)); + assertEquals(Types.TIMESTAMP, rs.getInt(6)); + + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void funcReturningComposite(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getProcedureColumns(null, null, "f4", null); + + assertTrue(rs.next()); + assertEquals("$1", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5)); + assertEquals(Types.INTEGER, rs.getInt(6)); + + assertTrue(rs.next()); + assertEquals("id", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5)); + assertEquals(Types.INTEGER, rs.getInt(6)); + + assertTrue(rs.next()); + assertEquals("name", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5)); + assertEquals(Types.VARCHAR, rs.getInt(6)); + + assertTrue(rs.next()); + assertEquals("updated", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5)); + assertEquals(Types.TIMESTAMP, rs.getInt(6)); + + assertTrue(rs.next()); + assertEquals("colour", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5)); + assertEquals(Types.VARCHAR, rs.getInt(6)); + + assertTrue(rs.next()); + assertEquals("quest", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5)); + assertEquals(Types.VARCHAR, rs.getInt(6)); + + assertFalse(rs.next()); + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void funcReturningTable(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + if 
(!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) { + return; + } + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getProcedureColumns(null, null, "f5", null); + assertTrue(rs.next()); + assertEquals("returnValue", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnReturn, rs.getInt(5)); + assertEquals(Types.INTEGER, rs.getInt(6)); + assertTrue(rs.next()); + assertEquals("i", rs.getString(4)); + assertEquals(DatabaseMetaData.procedureColumnReturn, rs.getInt(5)); + assertEquals(Types.INTEGER, rs.getInt(6)); + assertFalse(rs.next()); + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void versionColumns(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + // At the moment just test that no exceptions are thrown KJ + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + ResultSet rs = dbmd.getVersionColumns(null, null, "pg_class"); + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void bestRowIdentifier(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + // At the moment just test that no exceptions are thrown KJ + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + ResultSet rs = + dbmd.getBestRowIdentifier(null, null, "pg_type", DatabaseMetaData.bestRowSession, false); + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void procedures(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + // At the moment just test that no exceptions are thrown KJ + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + ResultSet rs = dbmd.getProcedures(null, null, null); + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void catalogs(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = 
con.getMetaData(); + try (ResultSet rs = dbmd.getCatalogs()) { + List catalogs = new ArrayList<>(); + while (rs.next()) { + catalogs.add(rs.getString("TABLE_CAT")); + } + List sortedCatalogs = new ArrayList<>(catalogs); + Collections.sort(sortedCatalogs); + + assertThat( + catalogs, + allOf( + hasItem("test"), + hasItem("postgres"), + equalTo(sortedCatalogs) + ) + ); + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void schemas(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + assertNotNull(dbmd); + + ResultSet rs = dbmd.getSchemas(); + boolean foundPublic = false; + boolean foundEmpty = false; + boolean foundPGCatalog = false; + int count; + + for (count = 0; rs.next(); count++) { + String schema = rs.getString("TABLE_SCHEM"); + if ("public".equals(schema)) { + foundPublic = true; + } else if ("".equals(schema)) { + foundEmpty = true; + } else if ("pg_catalog".equals(schema)) { + foundPGCatalog = true; + } + } + rs.close(); + assertTrue(count >= 2); + assertTrue(foundPublic); + assertTrue(foundPGCatalog); + assertFalse(foundEmpty); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void escaping(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getTables(null, null, "a'", new String[]{"TABLE"}); + assertTrue(rs.next()); + rs = dbmd.getTables(null, null, "a\\\\", new String[]{"TABLE"}); + assertTrue(rs.next()); + rs = dbmd.getTables(null, null, "a\\", new String[]{"TABLE"}); + assertFalse(rs.next()); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void searchStringEscape(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + String pattern = dbmd.getSearchStringEscape() + "_"; + PreparedStatement pstmt = con.prepareStatement("SELECT 
'a' LIKE ?, '_' LIKE ?"); + pstmt.setString(1, pattern); + pstmt.setString(2, pattern); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertFalse(rs.getBoolean(1)); + assertTrue(rs.getBoolean(2)); + rs.close(); + pstmt.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void getUDTQualified(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + Statement stmt = null; + try { + stmt = con.createStatement(); + stmt.execute("create schema jdbc"); + stmt.execute("create type jdbc.testint8 as (i int8)"); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getUDTs(null, null, "jdbc.testint8", null); + assertTrue(rs.next()); + String cat; + String schema; + String typeName; + String remarks; + String className; + int dataType; + int baseType; + + cat = rs.getString("type_cat"); + schema = rs.getString("type_schem"); + typeName = rs.getString("type_name"); + className = rs.getString("class_name"); + dataType = rs.getInt("data_type"); + remarks = rs.getString("remarks"); + baseType = rs.getInt("base_type"); + assertEquals("testint8", typeName, "type name "); + assertEquals("jdbc", schema, "schema name "); + + // now test to see if the fully qualified stuff works as planned + rs = dbmd.getUDTs("catalog", "public", "catalog.jdbc.testint8", null); + assertTrue(rs.next()); + cat = rs.getString("type_cat"); + schema = rs.getString("type_schem"); + typeName = rs.getString("type_name"); + className = rs.getString("class_name"); + dataType = rs.getInt("data_type"); + remarks = rs.getString("remarks"); + baseType = rs.getInt("base_type"); + assertEquals("testint8", typeName, "type name "); + assertEquals("jdbc", schema, "schema name "); + } finally { + try { + if (stmt != null) { + stmt.close(); + } + stmt = con.createStatement(); + stmt.execute("drop type jdbc.testint8"); + stmt.execute("drop schema jdbc"); + } catch (Exception ex) { + } + } + + } + + @MethodSource("data") + 
@ParameterizedTest(name = "binary = {0}") + void getUDT1(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + try { + Statement stmt = con.createStatement(); + stmt.execute("create domain testint8 as int8"); + stmt.execute("comment on domain testint8 is 'jdbc123'"); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getUDTs(null, null, "testint8", null); + assertTrue(rs.next()); + + String cat = rs.getString("type_cat"); + String schema = rs.getString("type_schem"); + String typeName = rs.getString("type_name"); + String className = rs.getString("class_name"); + int dataType = rs.getInt("data_type"); + String remarks = rs.getString("remarks"); + + int baseType = rs.getInt("base_type"); + assertEquals(Types.BIGINT, baseType, "base type"); + assertEquals(Types.DISTINCT, dataType, "data type"); + assertEquals("testint8", typeName, "type name "); + assertEquals("jdbc123", remarks, "remarks"); + } finally { + try { + Statement stmt = con.createStatement(); + stmt.execute("drop domain testint8"); + } catch (Exception ex) { + } + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void getUDT2(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + try { + Statement stmt = con.createStatement(); + stmt.execute("create domain testint8 as int8"); + stmt.execute("comment on domain testint8 is 'jdbc123'"); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getUDTs(null, null, "testint8", new int[]{Types.DISTINCT, Types.STRUCT}); + assertTrue(rs.next()); + String typeName; + + String cat = rs.getString("type_cat"); + String schema = rs.getString("type_schem"); + typeName = rs.getString("type_name"); + String className = rs.getString("class_name"); + int dataType = rs.getInt("data_type"); + String remarks = rs.getString("remarks"); + + int baseType = rs.getInt("base_type"); + assertEquals(Types.BIGINT, baseType, "base type"); + assertEquals(Types.DISTINCT, 
dataType, "data type"); + assertEquals("testint8", typeName, "type name "); + assertEquals("jdbc123", remarks, "remarks"); + } finally { + try { + Statement stmt = con.createStatement(); + stmt.execute("drop domain testint8"); + } catch (Exception ex) { + } + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void getUDT3(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + try { + Statement stmt = con.createStatement(); + stmt.execute("create domain testint8 as int8"); + stmt.execute("comment on domain testint8 is 'jdbc123'"); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getUDTs(null, null, "testint8", new int[]{Types.DISTINCT}); + assertTrue(rs.next()); + + String cat = rs.getString("type_cat"); + String schema = rs.getString("type_schem"); + String typeName = rs.getString("type_name"); + String className = rs.getString("class_name"); + int dataType = rs.getInt("data_type"); + String remarks = rs.getString("remarks"); + + int baseType = rs.getInt("base_type"); + assertEquals(Types.BIGINT, baseType, "base type"); + assertEquals(Types.DISTINCT, dataType, "data type"); + assertEquals("testint8", typeName, "type name "); + assertEquals("jdbc123", remarks, "remarks"); + } finally { + try { + Statement stmt = con.createStatement(); + stmt.execute("drop domain testint8"); + } catch (Exception ex) { + } + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void getUDT4(BinaryMode binaryMode) throws Exception { + initDatabaseMetaDataTest(binaryMode); + try { + Statement stmt = con.createStatement(); + stmt.execute("create type testint8 as (i int8)"); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getUDTs(null, null, "testint8", null); + assertTrue(rs.next()); + + String cat = rs.getString("type_cat"); + String schema = rs.getString("type_schem"); + String typeName = rs.getString("type_name"); + String className = rs.getString("class_name"); + int 
dataType = rs.getInt("data_type"); + String remarks = rs.getString("remarks"); + + int baseType = rs.getInt("base_type"); + assertTrue(rs.wasNull(), "base type"); + assertEquals(Types.STRUCT, dataType, "data type"); + assertEquals("testint8", typeName, "type name "); + } finally { + try { + Statement stmt = con.createStatement(); + stmt.execute("drop type testint8"); + } catch (Exception ex) { + } + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void types(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + // https://www.postgresql.org/docs/8.2/static/datatype.html + List stringTypeList = new ArrayList<>(); + stringTypeList.addAll(Arrays.asList("bit", + "bool", + "box", + "bytea", + "char", + "cidr", + "circle", + "date", + "float4", + "float8", + "inet", + "int2", + "int4", + "int8", + "interval", + "line", + "lseg", + "macaddr", + "money", + "numeric", + "path", + "point", + "polygon", + "text", + "time", + "timestamp", + "timestamptz", + "timetz", + "varbit", + "varchar")); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) { + stringTypeList.add("tsquery"); + stringTypeList.add("tsvector"); + stringTypeList.add("txid_snapshot"); + stringTypeList.add("uuid"); + stringTypeList.add("xml"); + } + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)) { + stringTypeList.add("json"); + } + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4)) { + stringTypeList.add("jsonb"); + stringTypeList.add("pg_lsn"); + } + + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getTypeInfo(); + List types = new ArrayList<>(); + + while (rs.next()) { + types.add(rs.getString("TYPE_NAME")); + } + for (String typeName : stringTypeList) { + assertTrue(types.contains(typeName)); + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void typeInfoSigned(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + 
DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getTypeInfo(); + while (rs.next()) { + if ("int4".equals(rs.getString("TYPE_NAME"))) { + assertFalse(rs.getBoolean("UNSIGNED_ATTRIBUTE")); + } else if ("float8".equals(rs.getString("TYPE_NAME"))) { + assertFalse(rs.getBoolean("UNSIGNED_ATTRIBUTE")); + } else if ("text".equals(rs.getString("TYPE_NAME"))) { + assertTrue(rs.getBoolean("UNSIGNED_ATTRIBUTE")); + } + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void typeInfoQuoting(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getTypeInfo(); + while (rs.next()) { + if ("int4".equals(rs.getString("TYPE_NAME"))) { + assertNull(rs.getString("LITERAL_PREFIX")); + } else if ("text".equals(rs.getString("TYPE_NAME"))) { + assertEquals("'", rs.getString("LITERAL_PREFIX")); + assertEquals("'", rs.getString("LITERAL_SUFFIX")); + } + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void informationAboutArrayTypes(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getColumns("", "", "arraytable", ""); + assertTrue(rs.next()); + assertEquals("a", rs.getString("COLUMN_NAME")); + assertEquals(5, rs.getInt("COLUMN_SIZE")); + assertEquals(2, rs.getInt("DECIMAL_DIGITS")); + assertTrue(rs.next()); + assertEquals("b", rs.getString("COLUMN_NAME")); + assertEquals(100, rs.getInt("COLUMN_SIZE")); + assertFalse(rs.next()); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void partitionedTablesIndex(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) { + Statement stmt = null; + try { + stmt = con.createStatement(); + stmt.execute( + "CREATE TABLE measurement (logdate date not null primary 
key,peaktemp int,unitsales int ) PARTITION BY RANGE (logdate);"); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getPrimaryKeys("", "", "measurement"); + assertTrue(rs.next()); + assertEquals("measurement_pkey", rs.getString(6)); + + } finally { + if (stmt != null) { + stmt.execute("drop table if exists measurement"); + stmt.close(); + } + } + } + + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void partitionedTables(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) { + Statement stmt = null; + try { + stmt = con.createStatement(); + stmt.execute( + "CREATE TABLE measurement (logdate date not null primary key,peaktemp int,unitsales int ) PARTITION BY RANGE (logdate);"); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getTables("", "", "measurement", new String[]{"PARTITIONED TABLE"}); + assertTrue(rs.next()); + assertEquals("measurement", rs.getString("table_name")); + rs.close(); + rs = dbmd.getPrimaryKeys("", "", "measurement"); + assertTrue(rs.next()); + assertEquals("measurement_pkey", rs.getString(6)); + + } finally { + if (stmt != null) { + stmt.execute("drop table if exists measurement"); + stmt.close(); + } + } + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void identityColumns(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + if ( TestUtil.haveMinimumServerVersion(con, ServerVersion.v10) ) { + Statement stmt = null; + try { + stmt = con.createStatement(); + stmt.execute("CREATE TABLE test_new (" + + "id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY," + + "payload text)"); + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getColumns("", "", "test_new", "id"); + assertTrue(rs.next()); + assertEquals("id", rs.getString("COLUMN_NAME")); + assertTrue(rs.getBoolean("IS_AUTOINCREMENT")); + + } finally { + if ( 
stmt != null ) { + stmt.execute("drop table test_new"); + stmt.close(); + } + } + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void generatedColumns(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + if ( TestUtil.haveMinimumServerVersion(con, ServerVersion.v12) ) { + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getColumns("", "", "employee", "gross_pay"); + assertTrue(rs.next()); + assertEquals("gross_pay", rs.getString("COLUMN_NAME")); + assertTrue(rs.getBoolean("IS_GENERATEDCOLUMN")); + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void getSQLKeywords(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + DatabaseMetaData dbmd = con.getMetaData(); + String keywords = dbmd.getSQLKeywords(); + + // We don't want SQL:2003 keywords returned, so check for that. + String sql2003 = "a,abs,absolute,action,ada,add,admin,after,all,allocate,alter,always,and,any,are," + + "array,as,asc,asensitive,assertion,assignment,asymmetric,at,atomic,attribute,attributes," + + "authorization,avg,before,begin,bernoulli,between,bigint,binary,blob,boolean,both,breadth,by," + + "c,call,called,cardinality,cascade,cascaded,case,cast,catalog,catalog_name,ceil,ceiling,chain," + + "char,char_length,character,character_length,character_set_catalog,character_set_name," + + "character_set_schema,characteristics,characters,check,checked,class_origin,clob,close," + + "coalesce,cobol,code_units,collate,collation,collation_catalog,collation_name,collation_schema," + + "collect,column,column_name,command_function,command_function_code,commit,committed,condition," + + "condition_number,connect,connection_name,constraint,constraint_catalog,constraint_name," + + "constraint_schema,constraints,constructors,contains,continue,convert,corr,corresponding,count," + + "covar_pop,covar_samp,create,cross,cube,cume_dist,current,current_collation,current_date," + + 
"current_default_transform_group,current_path,current_role,current_time,current_timestamp," + + "current_transform_group_for_type,current_user,cursor,cursor_name,cycle,data,date,datetime_interval_code," + + "datetime_interval_precision,day,deallocate,dec,decimal,declare,default,defaults,deferrable," + + "deferred,defined,definer,degree,delete,dense_rank,depth,deref,derived,desc,describe," + + "descriptor,deterministic,diagnostics,disconnect,dispatch,distinct,domain,double,drop,dynamic," + + "dynamic_function,dynamic_function_code,each,element,else,end,end-exec,equals,escape,every," + + "except,exception,exclude,excluding,exec,execute,exists,exp,external,extract,false,fetch,filter," + + "final,first,float,floor,following,for,foreign,fortran,found,free,from,full,function,fusion," + + "g,general,get,global,go,goto,grant,granted,group,grouping,having,hierarchy,hold,hour,identity," + + "immediate,implementation,in,including,increment,indicator,initially,inner,inout,input," + + "insensitive,insert,instance,instantiable,int,integer,intersect,intersection,interval,into," + + "invoker,is,isolation,join,k,key,key_member,key_type,language,large,last,lateral,leading,left," + + "length,level,like,ln,local,localtime,localtimestamp,locator,lower,m,map,match,matched,max," + + "maxvalue,member,merge,message_length,message_octet_length,message_text,method,min,minute," + + "minvalue,mod,modifies,module,month,more,multiset,mumps,name,names,national,natural,nchar," + + "nclob,nesting,new,next,no,none,normalize,normalized,not,null,nullable,nullif,nulls,number," + + "numeric,object,octet_length,octets,of,old,on,only,open,option,options,or,order,ordering," + + "ordinality,others,out,outer,output,over,overlaps,overlay,overriding,pad,parameter,parameter_mode," + + "parameter_name,parameter_ordinal_position,parameter_specific_catalog,parameter_specific_name," + + "parameter_specific_schema,partial,partition,pascal,path,percent_rank,percentile_cont," + + 
"percentile_disc,placing,pli,position,power,preceding,precision,prepare,preserve,primary," + + "prior,privileges,procedure,public,range,rank,read,reads,real,recursive,ref,references," + + "referencing,regr_avgx,regr_avgy,regr_count,regr_intercept,regr_r2,regr_slope,regr_sxx," + + "regr_sxy,regr_syy,relative,release,repeatable,restart,result,return,returned_cardinality," + + "returned_length,returned_octet_length,returned_sqlstate,returns,revoke,right,role,rollback," + + "rollup,routine,routine_catalog,routine_name,routine_schema,row,row_count,row_number,rows," + + "savepoint,scale,schema,schema_name,scope_catalog,scope_name,scope_schema,scroll,search,second," + + "section,security,select,self,sensitive,sequence,serializable,server_name,session,session_user," + + "set,sets,similar,simple,size,smallint,some,source,space,specific,specific_name,specifictype,sql," + + "sqlexception,sqlstate,sqlwarning,sqrt,start,state,statement,static,stddev_pop,stddev_samp," + + "structure,style,subclass_origin,submultiset,substring,sum,symmetric,system,system_user,table," + + "table_name,tablesample,temporary,then,ties,time,timestamp,timezone_hour,timezone_minute,to," + + "top_level_count,trailing,transaction,transaction_active,transactions_committed," + + "transactions_rolled_back,transform,transforms,translate,translation,treat,trigger,trigger_catalog," + + "trigger_name,trigger_schema,trim,true,type,uescape,unbounded,uncommitted,under,union,unique," + + "unknown,unnamed,unnest,update,upper,usage,user,user_defined_type_catalog,user_defined_type_code," + + "user_defined_type_name,user_defined_type_schema,using,value,values,var_pop,var_samp,varchar," + + "varying,view,when,whenever,where,width_bucket,window,with,within,without,work,write,year,zone"; + + String[] excludeSQL2003 = sql2003.split(","); + String[] returned = keywords.split(","); + Set returnedSet = new HashSet<>(Arrays.asList(returned)); + assertEquals(returnedSet.size(), returned.length, "Returned keywords should be 
unique"); + + for (String s : excludeSQL2003) { + assertFalse(returnedSet.contains(s), "Keyword from SQL:2003 \"" + s + "\" found"); + } + + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + assertTrue(returnedSet.contains("reindex"), "reindex should be in keywords"); + } + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void functionColumns(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) { + return; + } + + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getFunctionColumns(null, null, "f1", null); + + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(17, rsmd.getColumnCount()); + assertEquals("FUNCTION_CAT", rsmd.getColumnName(1)); + assertEquals("FUNCTION_SCHEM", rsmd.getColumnName(2)); + assertEquals("FUNCTION_NAME", rsmd.getColumnName(3)); + assertEquals("COLUMN_NAME", rsmd.getColumnName(4)); + assertEquals("COLUMN_TYPE", rsmd.getColumnName(5)); + assertEquals("DATA_TYPE", rsmd.getColumnName(6)); + assertEquals("TYPE_NAME", rsmd.getColumnName(7)); + assertEquals("PRECISION", rsmd.getColumnName(8)); + assertEquals("LENGTH", rsmd.getColumnName(9)); + assertEquals("SCALE", rsmd.getColumnName(10)); + assertEquals("RADIX", rsmd.getColumnName(11)); + assertEquals("NULLABLE", rsmd.getColumnName(12)); + assertEquals("REMARKS", rsmd.getColumnName(13)); + assertEquals("CHAR_OCTET_LENGTH", rsmd.getColumnName(14)); + assertEquals("ORDINAL_POSITION", rsmd.getColumnName(15)); + assertEquals("IS_NULLABLE", rsmd.getColumnName(16)); + assertEquals("SPECIFIC_NAME", rsmd.getColumnName(17)); + + assertTrue(rs.next()); + assertNull(rs.getString(1)); + assertEquals("public", rs.getString(2)); + assertEquals("f1", rs.getString(3)); + assertEquals("returnValue", rs.getString(4)); + assertEquals(DatabaseMetaData.functionReturn, rs.getInt(5)); + assertEquals(Types.INTEGER, rs.getInt(6)); + assertEquals("int4", 
rs.getString(7)); + assertEquals(0, rs.getInt(15)); + + assertTrue(rs.next()); + assertNull(rs.getString(1)); + assertEquals("public", rs.getString(2)); + assertEquals("f1", rs.getString(3)); + assertEquals("$1", rs.getString(4)); + assertEquals(DatabaseMetaData.functionColumnIn, rs.getInt(5)); + assertEquals(Types.INTEGER, rs.getInt(6)); + assertEquals("int4", rs.getString(7)); + assertEquals(1, rs.getInt(15)); + + assertTrue(rs.next()); + assertNull(rs.getString(1)); + assertEquals("public", rs.getString(2)); + assertEquals("f1", rs.getString(3)); + assertEquals("$2", rs.getString(4)); + assertEquals(DatabaseMetaData.functionColumnIn, rs.getInt(5)); + assertEquals(Types.VARCHAR, rs.getInt(6)); + assertEquals("varchar", rs.getString(7)); + assertEquals(2, rs.getInt(15)); + + assertFalse(rs.next()); + + rs.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void smallSerialColumns(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)); + TestUtil.createTable(con, "smallserial_test", "a smallserial"); + + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getColumns(null, null, "smallserial_test", "a"); + assertTrue(rs.next()); + assertEquals("smallserial_test", rs.getString("TABLE_NAME")); + assertEquals("a", rs.getString("COLUMN_NAME")); + assertEquals(Types.SMALLINT, rs.getInt("DATA_TYPE")); + assertEquals("smallserial", rs.getString("TYPE_NAME")); + assertTrue(rs.getBoolean("IS_AUTOINCREMENT")); + assertEquals("nextval('smallserial_test_a_seq'::regclass)", rs.getString("COLUMN_DEF")); + assertFalse(rs.next()); + rs.close(); + + TestUtil.dropTable(con, "smallserial_test"); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void smallSerialSequenceLikeColumns(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + Statement stmt = 
con.createStatement(); + // This is the equivalent of the smallserial, not the actual smallserial + stmt.execute("CREATE SEQUENCE smallserial_test_a_seq;\n" + + "CREATE TABLE smallserial_test (\n" + + " a smallint NOT NULL DEFAULT nextval('smallserial_test_a_seq')\n" + + ");\n" + + "ALTER SEQUENCE smallserial_test_a_seq OWNED BY smallserial_test.a;"); + + DatabaseMetaData dbmd = con.getMetaData(); + ResultSet rs = dbmd.getColumns(null, null, "smallserial_test", "a"); + assertTrue(rs.next()); + assertEquals("smallserial_test", rs.getString("TABLE_NAME")); + assertEquals("a", rs.getString("COLUMN_NAME")); + assertEquals(Types.SMALLINT, rs.getInt("DATA_TYPE")); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)) { + // in Pg 9.2+ it behaves like smallserial + assertEquals("smallserial", rs.getString("TYPE_NAME")); + } else { + assertEquals("int2", rs.getString("TYPE_NAME")); + } + assertTrue(rs.getBoolean("IS_AUTOINCREMENT")); + assertEquals("nextval('smallserial_test_a_seq'::regclass)", rs.getString("COLUMN_DEF")); + assertFalse(rs.next()); + rs.close(); + + stmt.execute("DROP TABLE smallserial_test"); + stmt.close(); + } + + @MethodSource("data") + @ParameterizedTest(name = "binary = {0}") + void upperCaseMetaDataLabels(BinaryMode binaryMode) throws SQLException { + initDatabaseMetaDataTest(binaryMode); + ResultSet rs = con.getMetaData().getTables(null, null, null, null); + ResultSetMetaData rsmd = rs.getMetaData(); + + assertEquals("TABLE_CAT", rsmd.getColumnName(1)); + assertEquals("TABLE_SCHEM", rsmd.getColumnName(2)); + assertEquals("TABLE_NAME", rsmd.getColumnName(3)); + assertEquals("TABLE_TYPE", rsmd.getColumnName(4)); + assertEquals("REMARKS", rsmd.getColumnName(5)); + + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTransactionIsolationTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTransactionIsolationTest.java new file mode 100644 index 0000000..1b52f8c --- /dev/null +++ 
b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTransactionIsolationTest.java @@ -0,0 +1,144 @@
/*
 * Copyright (c) 2023, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc2;

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.postgresql.test.TestUtil;

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.function.Supplier;

/**
 * Verifies that the driver reports transaction isolation consistently, both the database
 * default (via {@code DatabaseMetaData.getDefaultTransactionIsolation()}) and the current
 * connection level (via {@code Connection.getTransactionIsolation()}), including after
 * ALTER DATABASE / SET TRANSACTION / setTransactionIsolation() changes.
 */
class DatabaseMetaDataTransactionIsolationTest {
  // Shared connection for the whole test class; individual tests that need a pristine
  // connection open their own.
  static Connection con;

  @BeforeAll
  static void setup() throws SQLException {
    con = TestUtil.openDB();
  }

  @AfterAll
  static void teardown() throws SQLException {
    TestUtil.closeDB(con);
  }

  @BeforeEach
  void resetTransactionIsolation() throws SQLException {
    // Restore to defaults
    con.setAutoCommit(true);
    try (Statement st = con.createStatement()) {
      st.execute("alter database test set default_transaction_isolation to DEFAULT");
    }
  }

  @Test
  void connectionTransactionIsolation() throws SQLException {
    // We use a new connection to avoid any side effects from other tests as we need to test
    // the default transaction isolation level.
    try (Connection con = TestUtil.openDB()) {
      assertIsolationEquals(
          "read committed",
          con.getTransactionIsolation(),
          () -> "Default connection transaction isolation in PostgreSQL is read committed");
    }
  }

  @Test
  void metadataDefaultTransactionIsolation() throws SQLException {
    assertIsolationEquals(
        "read committed",
        getDefaultTransactionIsolation(),
        () -> "Default database transaction isolation in PostgreSQL is read committed");
  }

  @ParameterizedTest
  @ValueSource(strings = {"read committed", "read uncommitted", "repeatable read", "serializable"})
  void alterDatabaseDefaultTransactionIsolation(String isolationLevel) throws SQLException {
    try (Statement st = con.createStatement()) {
      st.execute(
          "alter database test set default_transaction_isolation to '" + isolationLevel + "'");
    }

    assertIsolationEquals(
        isolationLevel,
        getDefaultTransactionIsolation(),
        () -> "Default transaction isolation should be " + isolationLevel);
  }

  /**
   * PostgreSQL does not seem to update the value in
   * pg_catalog.pg_settings WHERE name='default_transaction_isolation'
   * when changing default_transaction_isolation, so we reconnect to get the new value.
   */
  static int getDefaultTransactionIsolation() throws SQLException {
    try (Connection con = TestUtil.openDB()) {
      return con.getMetaData().getDefaultTransactionIsolation();
    }
  }

  @ParameterizedTest
  @ValueSource(strings = {"read committed", "read uncommitted", "repeatable read", "serializable"})
  void alterConnectionTransactionIsolation(String isolationLevel) throws SQLException {
    // SET TRANSACTION only affects the current transaction, hence autocommit off.
    con.setAutoCommit(false);
    try (Statement st = con.createStatement()) {
      st.execute("set transaction ISOLATION LEVEL " + isolationLevel);
    }

    assertIsolationEquals(
        isolationLevel,
        con.getTransactionIsolation(),
        () -> "Connection transaction isolation should be " + isolationLevel);
  }

  @ParameterizedTest
  @ValueSource(ints = {
      Connection.TRANSACTION_SERIALIZABLE,
      Connection.TRANSACTION_REPEATABLE_READ,
      Connection.TRANSACTION_READ_COMMITTED,
      Connection.TRANSACTION_READ_UNCOMMITTED})
  void setConnectionTransactionIsolation(int isolationLevel) throws SQLException {
    con.setAutoCommit(false);
    con.setTransactionIsolation(isolationLevel);

    assertIsolationEquals(
        mapJdbcIsolationToPg(isolationLevel),
        con.getTransactionIsolation(),
        () -> "Connection transaction isolation should be " + isolationLevel);
  }

  /**
   * Asserts that the JDBC isolation constant {@code actual} maps to the PostgreSQL level
   * name {@code expected}. Typed as {@code Supplier<String>} (was a raw Supplier).
   */
  private static void assertIsolationEquals(String expected, int actual, Supplier<String> message) {
    assertEquals(
        expected,
        mapJdbcIsolationToPg(actual),
        message);
  }

  /** Maps a {@link Connection} isolation constant to PostgreSQL's textual level name. */
  private static String mapJdbcIsolationToPg(int isolationLevel) {
    switch (isolationLevel) {
      case Connection.TRANSACTION_READ_COMMITTED:
        return "read committed";
      case Connection.TRANSACTION_READ_UNCOMMITTED:
        return "read uncommitted";
      case Connection.TRANSACTION_REPEATABLE_READ:
        return "repeatable read";
      case Connection.TRANSACTION_SERIALIZABLE:
        return "serializable";
      case Connection.TRANSACTION_NONE:
        return "none";
      default:
        return "Unknown isolation level " + isolationLevel;
    }
  }
}
diff --git
a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateStyleTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateStyleTest.java new file mode 100644 index 0000000..b3f4d75 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateStyleTest.java @@ -0,0 +1,61 @@
/*
 * Copyright (c) 2018, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc2;

import org.postgresql.test.TestUtil;
import org.postgresql.util.PSQLState;

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;

/**
 * Verifies which DateStyle settings the driver accepts: ISO-based styles must succeed,
 * while non-ISO styles (e.g. "PostgreSQL") must fail with CONNECTION_FAILURE
 * (presumably because the driver can only parse ISO-formatted date output —
 * NOTE(review): confirm against the driver's DateStyle handling).
 */
@RunWith(Parameterized.class)
public class DateStyleTest extends BaseTest4 {

  /** DateStyle value applied via SET. */
  @Parameterized.Parameter(0)
  public String dateStyle;

  /** Whether the SET command is expected to succeed. */
  @Parameterized.Parameter(1)
  public boolean shouldPass;

  @Parameterized.Parameters(name = "dateStyle={0}, shouldPass={1}")
  public static Iterable<Object[]> data() {
    return Arrays.asList(new Object[][]{
        {"iso, mdy", true},
        {"ISO", true},
        {"ISO,ymd", true},
        {"PostgreSQL", false}
    });
  }

  @Test
  public void connect() throws SQLException {
    Statement st = con.createStatement();
    try {
      st.execute("set DateStyle='" + dateStyle + "'");
      if (!shouldPass) {
        Assert.fail("Set DateStyle=" + dateStyle + " should not be allowed");
      }
    } catch (SQLException e) {
      if (shouldPass) {
        throw new IllegalStateException("Set DateStyle=" + dateStyle
            + " should be fine, however received " + e.getMessage(), e);
      }
      // Rejection is only acceptable as a CONNECTION_FAILURE; anything else is a bug.
      if (PSQLState.CONNECTION_FAILURE.getState().equals(e.getSQLState())) {
        return;
      }
      throw new IllegalStateException("Set DateStyle=" + dateStyle
          + " should result in CONNECTION_FAILURE error, however received " + e.getMessage(), e);
    } finally {
      TestUtil.closeQuietly(st);
    }
  }
}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateTest.java
b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateTest.java new file mode 100644 index 0000000..7b875e0 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateTest.java @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import org.postgresql.test.TestUtil; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.TimeZone; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +/* + * Some simple tests based on problems reported by users. 
Hopefully these will help prevent previous + * problems from re-occurring ;-) + * + */ +@RunWith(Parameterized.class) +public class DateTest extends BaseTest4 { + private static final TimeZone saveTZ = TimeZone.getDefault(); + + private final String type; + private final String zoneId; + + public DateTest(String type, String zoneId, BinaryMode binaryMode) { + this.type = type; + this.zoneId = zoneId; + TimeZone.setDefault(TimeZone.getTimeZone(zoneId)); + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "type = {0}, zoneId = {1}, binary = {2}") + public static Iterable data() { + final List data = new ArrayList<>(); + for (String type : Arrays.asList("date", "timestamp", "timestamptz")) { + Stream tzIds = Stream.of("Africa/Casablanca", "America/New_York", "America/Toronto", + "Europe/Berlin", "Europe/Moscow", "Pacific/Apia", "America/Los_Angeles"); + // some selection of static GMT offsets (not all, as this takes too long): + tzIds = Stream.concat(tzIds, IntStream.of(-12, -11, -5, -1, 0, 1, 3, 12, 13) + .mapToObj(i -> String.format(Locale.ROOT, "GMT%+02d", i))); + for (String tzId : (Iterable) tzIds::iterator) { + for (BinaryMode binaryMode : BinaryMode.values()) { + data.add(new Object[]{type, tzId, binaryMode}); + } + } + } + return data; + } + + @Before + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "test", "dt ".concat(type)); + } + + @After + public void tearDown() throws SQLException { + TimeZone.setDefault(saveTZ); + TestUtil.dropTable(con, "test"); + super.tearDown(); + } + + /* + * Tests the time methods in ResultSet + */ + @Test + public void testGetDate() throws SQLException { + assumeTrue("TODO: Test fails on some server versions with local time zones (not GMT based)", + false == Objects.equals(type, "timestamptz") || zoneId.startsWith("GMT")); + try (Statement stmt = con.createStatement()) { + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1950-02-07'"))); + assertEquals(1, 
stmt.executeUpdate(TestUtil.insertSQL("test", "'1970-06-02'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1999-08-11'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2001-02-13'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1950-04-02'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1970-11-30'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1988-01-01'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2003-07-09'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1934-02-28'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1969-04-03'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1982-08-03'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2012-03-15'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1912-05-01'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1971-12-15'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1984-12-03'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2000-01-01'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'3456-01-01'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0101-01-01 BC'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0001-01-01'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0001-01-01 BC'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0001-12-31 BC'"))); + + /* dateTest() contains all of the tests */ + dateTest(); + + assertEquals(21, stmt.executeUpdate("DELETE FROM test")); + } + } + + /* + * Tests the time methods in PreparedStatement + */ + @Test + public void testSetDate() throws SQLException { + try (Statement stmt = con.createStatement()) { + PreparedStatement ps = 
con.prepareStatement(TestUtil.insertSQL("test", "?")); + + ps.setDate(1, makeDate(1950, 2, 7)); + assertEquals(1, ps.executeUpdate()); + + ps.setDate(1, makeDate(1970, 6, 2)); + assertEquals(1, ps.executeUpdate()); + + ps.setDate(1, makeDate(1999, 8, 11)); + assertEquals(1, ps.executeUpdate()); + + ps.setDate(1, makeDate(2001, 2, 13)); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, java.sql.Timestamp.valueOf("1950-04-02 12:00:00"), java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, java.sql.Timestamp.valueOf("1970-11-30 3:00:00"), java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, java.sql.Timestamp.valueOf("1988-01-01 13:00:00"), java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, java.sql.Timestamp.valueOf("2003-07-09 12:00:00"), java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, "1934-02-28", java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, "1969-04-03", java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, "1982-08-03", java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, "2012-03-15", java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, java.sql.Date.valueOf("1912-05-01"), java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, java.sql.Date.valueOf("1971-12-15"), java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, java.sql.Date.valueOf("1984-12-03"), java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, java.sql.Date.valueOf("2000-01-01"), java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, java.sql.Date.valueOf("3456-01-01"), java.sql.Types.DATE); + assertEquals(1, ps.executeUpdate()); + + // We can't use valueOf on BC dates. 
+ ps.setObject(1, makeDate(-100, 1, 1)); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, makeDate(1, 1, 1)); + assertEquals(1, ps.executeUpdate()); + + // Note: Year 0 in Java is year '0001-01-01 BC' in PostgreSQL. + ps.setObject(1, makeDate(0, 1, 1)); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, makeDate(0, 12, 31)); + assertEquals(1, ps.executeUpdate()); + + ps.close(); + + dateTest(); + + assertEquals(21, stmt.executeUpdate("DELETE FROM test")); + } + } + + /* + * Helper for the date tests. It tests what should be in the db + */ + private void dateTest() throws SQLException { + Statement st = con.createStatement(); + ResultSet rs; + java.sql.Date d; + + rs = st.executeQuery(TestUtil.selectSQL("test", "dt")); + assertNotNull(rs); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1950, 2, 7), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1970, 6, 2), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1999, 8, 11), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(2001, 2, 13), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1950, 4, 2), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1970, 11, 30), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1988, 1, 1), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(2003, 7, 9), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1934, 2, 28), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1969, 4, 3), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1982, 8, 3), d); + + assertTrue(rs.next()); + 
d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(2012, 3, 15), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1912, 5, 1), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1971, 12, 15), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1984, 12, 3), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(2000, 1, 1), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(3456, 1, 1), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(-100, 1, 1), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(1, 1, 1), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(0, 1, 1), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(0, 12, 31), d); + + assertTrue(!rs.next()); + + rs.close(); + st.close(); + } + + private java.sql.Date makeDate(int y, int m, int d) { + return new java.sql.Date(y - 1900, m - 1, d); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DriverTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DriverTest.java new file mode 100644 index 0000000..74169df --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DriverTest.java @@ -0,0 +1,528 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.Driver; +import org.postgresql.PGEnvironment; +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; +import org.postgresql.util.StubEnvironmentAndProperties; +import org.postgresql.util.URLCoder; + +import org.junit.jupiter.api.Test; +import uk.org.webcompere.systemstubs.environment.EnvironmentVariables; +import uk.org.webcompere.systemstubs.properties.SystemProperties; +import uk.org.webcompere.systemstubs.resource.Resources; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.lang.reflect.Method; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Properties; + +/* + * Tests the dynamically created class org.postgresql.Driver + * + */ +@StubEnvironmentAndProperties +class DriverTest { + + @Test + void urlIsNotForPostgreSQL() throws SQLException { + Driver driver = new Driver(); + + assertNull(driver.connect("jdbc:otherdb:database", new Properties())); + } + + /** + * According to the javadoc of java.sql.Driver.connect(...), calling abort when the {@code executor} is {@code null} + * results in SQLException + */ + @Test + void urlIsNull() throws SQLException { + Driver driver = new Driver(); + + assertThrows(SQLException.class, () -> driver.connect(null, new Properties())); + } + + /* + * This tests the acceptsURL() method with a couple of 
well and poorly formed jdbc urls. + */ + @Test + void acceptsURL() throws Exception { + TestUtil.initDriver(); // Set up log levels, etc. + + // Load the driver (note clients should never do it this way!) + Driver drv = new Driver(); + assertNotNull(drv); + + // These are always correct + verifyUrl(drv, "jdbc:postgresql:test", "localhost", "5432", "test"); + verifyUrl(drv, "jdbc:postgresql://localhost/test", "localhost", "5432", "test"); + verifyUrl(drv, "jdbc:postgresql://localhost,locahost2/test", "localhost,locahost2", "5432,5432", "test"); + verifyUrl(drv, "jdbc:postgresql://localhost:5433,locahost2:5434/test", "localhost,locahost2", "5433,5434", "test"); + verifyUrl(drv, "jdbc:postgresql://[::1]:5433,:5434,[::1]/test", "[::1],localhost,[::1]", "5433,5434,5432", "test"); + verifyUrl(drv, "jdbc:postgresql://localhost/test?port=8888", "localhost", "8888", "test"); + verifyUrl(drv, "jdbc:postgresql://localhost:5432/test", "localhost", "5432", "test"); + verifyUrl(drv, "jdbc:postgresql://localhost:5432/test?dbname=test2", "localhost", "5432", "test2"); + verifyUrl(drv, "jdbc:postgresql://127.0.0.1/anydbname", "127.0.0.1", "5432", "anydbname"); + verifyUrl(drv, "jdbc:postgresql://127.0.0.1:5433/hidden", "127.0.0.1", "5433", "hidden"); + verifyUrl(drv, "jdbc:postgresql://127.0.0.1:5433/hidden?port=7777", "127.0.0.1", "7777", "hidden"); + verifyUrl(drv, "jdbc:postgresql://[::1]:5740/db", "[::1]", "5740", "db"); + verifyUrl(drv, "jdbc:postgresql://[::1]:5740/my%20data%23base%251?loggerFile=C%3A%5Cdir%5Cfile.log", "[::1]", "5740", "my data#base%1"); + + // tests for service syntax + URL urlFileProps = getClass().getResource("/pg_service/pgservicefileProps.conf"); + assertNotNull(urlFileProps); + Resources.with( + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), urlFileProps.getFile()) + ).execute(() -> { + // correct cases + verifyUrl(drv, "jdbc:postgresql://?service=driverTestService1", "test-host1", "5444", "testdb1"); + verifyUrl(drv, 
"jdbc:postgresql://?service=driverTestService1&host=other-host", "other-host", "5444", "testdb1"); + verifyUrl(drv, "jdbc:postgresql:///?service=driverTestService1", "test-host1", "5444", "testdb1"); + verifyUrl(drv, "jdbc:postgresql:///?service=driverTestService1&port=3333&dbname=other-db", "test-host1", "3333", "other-db"); + verifyUrl(drv, "jdbc:postgresql://localhost:5432/test?service=driverTestService1", "localhost", "5432", "test"); + verifyUrl(drv, "jdbc:postgresql://localhost:5432/test?port=7777&dbname=other-db&service=driverTestService1", "localhost", "7777", "other-db"); + verifyUrl(drv, "jdbc:postgresql://[::1]:5740/?service=driverTestService1", "[::1]", "5740", "testdb1"); + verifyUrl(drv, "jdbc:postgresql://:5740/?service=driverTestService1", "localhost", "5740", "testdb1"); + verifyUrl(drv, "jdbc:postgresql://[::1]/?service=driverTestService1", "[::1]", "5432", "testdb1"); + verifyUrl(drv, "jdbc:postgresql://localhost/?service=driverTestService2", "localhost", "5432", "testdb1"); + // fail cases + assertFalse(drv.acceptsURL("jdbc:postgresql://?service=driverTestService2")); + }); + + // Badly formatted url's + assertFalse(drv.acceptsURL("jdbc:postgres:test")); + assertFalse(drv.acceptsURL("jdbc:postgresql:/test")); + assertFalse(drv.acceptsURL("jdbc:postgresql:////")); + assertFalse(drv.acceptsURL("jdbc:postgresql:///?service=my data#base%1")); + assertFalse(drv.acceptsURL("jdbc:postgresql://[::1]:5740/my data#base%1")); + assertFalse(drv.acceptsURL("jdbc:postgresql://localhost/dbname?loggerFile=C%3A%5Cdir%5Cfile.%log")); + assertFalse(drv.acceptsURL("postgresql:test")); + assertFalse(drv.acceptsURL("db")); + assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:5432a/test")); + assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:500000/test")); + assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:0/test")); + assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:-2/test")); + + // failover urls + verifyUrl(drv, 
"jdbc:postgresql://localhost,127.0.0.1:5432/test", "localhost,127.0.0.1", + "5432,5432", "test"); + verifyUrl(drv, "jdbc:postgresql://localhost:5433,127.0.0.1:5432/test", "localhost,127.0.0.1", + "5433,5432", "test"); + verifyUrl(drv, "jdbc:postgresql://[::1],[::1]:5432/db", "[::1],[::1]", "5432,5432", "db"); + verifyUrl(drv, "jdbc:postgresql://[::1]:5740,127.0.0.1:5432/db", "[::1],127.0.0.1", "5740,5432", + "db"); + } + + private void verifyUrl(Driver drv, String url, String hosts, String ports, String dbName) + throws Exception { + assertTrue(drv.acceptsURL(url), url); + Method parseMethod = + drv.getClass().getDeclaredMethod("parseURL", String.class, Properties.class); + parseMethod.setAccessible(true); + Properties p = (Properties) parseMethod.invoke(drv, url, null); + assertEquals(dbName, p.getProperty(PGProperty.PG_DBNAME.getName()), url); + assertEquals(hosts, p.getProperty(PGProperty.PG_HOST.getName()), url); + assertEquals(ports, p.getProperty(PGProperty.PG_PORT.getName()), url); + } + + /** + * Tests the connect method by connecting to the test database. + */ + @Test + void connect() throws Exception { + TestUtil.initDriver(); // Set up log levels, etc. + + // Test with the url, username & password + Connection con = + DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword()); + assertNotNull(con); + con.close(); + + // Test with the username in the url + con = DriverManager.getConnection( + TestUtil.getURL() + + "&user=" + URLCoder.encode(TestUtil.getUser()) + + "&password=" + URLCoder.encode(TestUtil.getPassword())); + assertNotNull(con); + con.close(); + + // Test with failover url + } + + /** + * Tests the connect method by connecting to the test database. + */ + @Test + void connectService() throws Exception { + TestUtil.initDriver(); // Set up log levels, etc. 
+ String wrongPort = "65536"; + + // Create temporary pg_service.conf file + Path tempDirWithPrefix = Files.createTempDirectory("junit"); + Path tempFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf"); + try { + // Write service section + String testService1 = "testService1"; // with correct port + String testService2 = "testService2"; // with wrong port + try (PrintStream ps = new PrintStream(Files.newOutputStream(tempFile))) { + ps.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword()); + ps.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService2, TestUtil.getServer(), wrongPort, TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword()); + } + // consume service + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), tempFile.toString(), PGEnvironment.PGSYSCONFDIR.getName(), ""), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent") + ).execute(() -> { + // + // testing that properties overriding priority is correct (POSITIVE cases) + // + // service=correct port + Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1)); + assertNotNull(con); + con.close(); + // service=wrong port; Properties=correct port + Properties info = new Properties(); + info.setProperty("PGPORT", String.valueOf(TestUtil.getPort())); + con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService2), info); + assertNotNull(con); + con.close(); + // service=wrong port; Properties=wrong port; URL port=correct + info.setProperty("PGPORT", wrongPort); + con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s", TestUtil.getPort(), testService2), info); + assertNotNull(con); + con.close(); + // service=wrong port; 
Properties=wrong port; URL port=wrong; URL argument=correct port + con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s&port=%s", wrongPort, testService2, TestUtil.getPort()), info); + assertNotNull(con); + con.close(); + + // + // testing that properties overriding priority is correct (NEGATIVE cases) + // + // service=wrong port + try { + con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService2)); + fail("Expected an SQLException because port is out of range"); + } catch (SQLException e) { + // Expected exception. + } + // service=correct port; Properties=wrong port + info.setProperty("PGPORT", wrongPort); + try { + con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1), info); + fail("Expected an SQLException because port is out of range"); + } catch (SQLException e) { + // Expected exception. + } + // service=correct port; Properties=correct port; URL port=wrong + info.setProperty("PGPORT", String.valueOf(TestUtil.getPort())); + try { + con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s", wrongPort, testService1), info); + fail("Expected an SQLException because port is out of range"); + } catch (SQLException e) { + // Expected exception. + } + // service=correct port; Properties=correct port; URL port=correct; URL argument=wrong port + try { + con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s&port=%s", TestUtil.getPort(), testService1, wrongPort), info); + fail("Expected an SQLException because port is out of range"); + } catch (SQLException e) { + // Expected exception. + } + }); + } finally { + // cleanup + Files.delete(tempFile); + Files.delete(tempDirWithPrefix); + } + } + + /** + * Tests the password by connecting to the test database. + * password from .pgpass (correct) + */ + @Test + void connectPassword01() throws Exception { + TestUtil.initDriver(); // Set up log levels, etc. 
+ + // Create temporary .pgpass file + Path tempDirWithPrefix = Files.createTempDirectory("junit"); + Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf"); + try { + try (PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) { + psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword()); + } + // ignore pg_service.conf, use .pgpass + Resources.with( + new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), ""), + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent", + PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString()) + ).execute(() -> { + // password from .pgpass (correct) + Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://%s:%s/%s?user=%s", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser())); + assertNotNull(con); + con.close(); + }); + } finally { + // cleanup + Files.delete(tempPgPassFile); + Files.delete(tempDirWithPrefix); + } + } + + /** + * Tests the password by connecting to the test database. + * password from service (correct) and .pgpass (wrong) + */ + @Test + void connectPassword02() throws Exception { + TestUtil.initDriver(); // Set up log levels, etc. 
+ String wrongPassword = "random wrong"; + + // Create temporary pg_service.conf and .pgpass file + Path tempDirWithPrefix = Files.createTempDirectory("junit"); + Path tempPgServiceFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf"); + Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf"); + try { + // Write service section + String testService1 = "testService1"; + try (PrintStream psService = new PrintStream(Files.newOutputStream(tempPgServiceFile)); + PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) { + psService.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword()); + psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword); + } + // ignore pg_service.conf, use .pgpass + Resources.with( + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), tempPgServiceFile.toString(), PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString()) + ).execute(() -> { + // password from service (correct) and .pgpass (wrong) + Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1)); + assertNotNull(con); + con.close(); + }); + } finally { + // cleanup + Files.delete(tempPgPassFile); + Files.delete(tempPgServiceFile); + Files.delete(tempDirWithPrefix); + } + } + + /** + * Tests the password by connecting to the test database. + * password from java property (correct) and service (wrong) and .pgpass (wrong) + */ + @Test + void connectPassword03() throws Exception { + TestUtil.initDriver(); // Set up log levels, etc. 
+ String wrongPassword = "random wrong"; + + // Create temporary pg_service.conf and .pgpass file + Path tempDirWithPrefix = Files.createTempDirectory("junit"); + Path tempPgServiceFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf"); + Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf"); + try { + // Write service section + String testService1 = "testService1"; + try (PrintStream psService = new PrintStream(Files.newOutputStream(tempPgServiceFile)); + PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) { + psService.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword); + psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword); + } + // ignore pg_service.conf, use .pgpass + Resources.with( + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), tempPgServiceFile.toString(), PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString()) + ).execute(() -> { + // password from java property (correct) and service (wrong) and .pgpass (wrong) + Properties info = new Properties(); + PGProperty.PASSWORD.set(info, TestUtil.getPassword()); + Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1), info); + assertNotNull(con); + con.close(); + }); + } finally { + // cleanup + Files.delete(tempPgPassFile); + Files.delete(tempPgServiceFile); + Files.delete(tempDirWithPrefix); + } + } + + /** + * Tests the password by connecting to the test database. + * password from URL parameter (correct) and java property (wrong) and service (wrong) and .pgpass (wrong) + */ + @Test + void connectPassword04() throws Exception { + TestUtil.initDriver(); // Set up log levels, etc. 
+ String wrongPassword = "random wrong"; + + // Create temporary pg_service.conf and .pgpass file + Path tempDirWithPrefix = Files.createTempDirectory("junit"); + Path tempPgServiceFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf"); + Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf"); + try { + // Write service section + String testService1 = "testService1"; + try (PrintStream psService = new PrintStream(Files.newOutputStream(tempPgServiceFile)); + PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) { + psService.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword); + psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword); + } + // ignore pg_service.conf, use .pgpass + Resources.with( + new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), tempPgServiceFile.toString(), PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString()) + ).execute(() -> { + // + Properties info = new Properties(); + PGProperty.PASSWORD.set(info, wrongPassword); + Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s&password=%s", testService1, TestUtil.getPassword()), info); + assertNotNull(con); + con.close(); + }); + } finally { + // cleanup + Files.delete(tempPgPassFile); + Files.delete(tempPgServiceFile); + Files.delete(tempDirWithPrefix); + } + } + + /** + * Tests that pgjdbc performs connection failover if unable to connect to the first host in the + * URL. 
+ * + * @throws Exception if something wrong happens + */ + @Test + void connectFailover() throws Exception { + String url = "jdbc:postgresql://invalidhost.not.here," + TestUtil.getServer() + ":" + + TestUtil.getPort() + "/" + TestUtil.getDatabase() + "?connectTimeout=5"; + Connection con = DriverManager.getConnection(url, TestUtil.getUser(), TestUtil.getPassword()); + assertNotNull(con); + con.close(); + } + + /* + * Test that the readOnly property works. + */ + @Test + void readOnly() throws Exception { + TestUtil.initDriver(); // Set up log levels, etc. + + Connection con = DriverManager.getConnection(TestUtil.getURL() + "&readOnly=true", + TestUtil.getUser(), TestUtil.getPassword()); + assertNotNull(con); + assertTrue(con.isReadOnly()); + con.close(); + + con = DriverManager.getConnection(TestUtil.getURL() + "&readOnly=false", TestUtil.getUser(), + TestUtil.getPassword()); + assertNotNull(con); + assertFalse(con.isReadOnly()); + con.close(); + + con = + DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword()); + assertNotNull(con); + assertFalse(con.isReadOnly()); + con.close(); + } + + @Test + void registration() throws Exception { + TestUtil.initDriver(); + + // Driver is initially registered because it is automatically done when class is loaded + assertTrue(Driver.isRegistered()); + + ArrayList drivers = Collections.list(DriverManager.getDrivers()); + searchInstanceOf: { + + for (java.sql.Driver driver : drivers) { + if (driver instanceof Driver) { + break searchInstanceOf; + } + } + fail("Driver has not been found in DriverManager's list but it should be registered"); + } + + // Deregister the driver + Driver.deregister(); + assertFalse(Driver.isRegistered()); + + drivers = Collections.list(DriverManager.getDrivers()); + for (java.sql.Driver driver : drivers) { + if (driver instanceof Driver) { + fail("Driver should be deregistered but it is still present in DriverManager's list"); + } + } + + // register again the driver + 
Driver.register(); + assertTrue(Driver.isRegistered()); + + drivers = Collections.list(DriverManager.getDrivers()); + for (java.sql.Driver driver : drivers) { + if (driver instanceof Driver) { + return; + } + } + fail("Driver has not been found in DriverManager's list but it should be registered"); + } + + @Test + void systemErrIsNotClosedWhenCreatedMultipleConnections() throws Exception { + TestUtil.initDriver(); + PrintStream err = System.err; + PrintStream buffer = new PrintStream(new ByteArrayOutputStream()); + System.setErr(buffer); + try { + Connection con = DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword()); + try { + assertNotNull(con); + } finally { + con.close(); + } + con = DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword()); + try { + assertNotNull(con); + System.err.println(); + assertFalse(System.err.checkError(), "The System.err should not be closed."); + } finally { + con.close(); + } + } finally { + System.setErr(err); + } + } + + private void setProperty(String key, String value) { + if (value == null) { + System.clearProperty(key); + } else { + System.setProperty(key, value); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EncodingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EncodingTest.java new file mode 100644 index 0000000..5ad1ffb --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EncodingTest.java @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.core.Encoding; + +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.io.Reader; +import java.util.Locale; + +/** + * Tests for the Encoding class. + */ +class EncodingTest { + + @Test + void creation() throws Exception { + Encoding encoding = Encoding.getDatabaseEncoding("UTF8"); + assertEquals("UTF", encoding.name().substring(0, 3).toUpperCase(Locale.US)); + encoding = Encoding.getDatabaseEncoding("SQL_ASCII"); + assertTrue(encoding.name().toUpperCase(Locale.US).contains("ASCII")); + assertEquals(Encoding.defaultEncoding(), Encoding.getDatabaseEncoding("UNKNOWN"), "When encoding is unknown the default encoding should be used"); + } + + @Test + void transformations() throws Exception { + Encoding encoding = Encoding.getDatabaseEncoding("UTF8"); + assertEquals("ab", encoding.decode(new byte[]{97, 98})); + + assertEquals(2, encoding.encode("ab").length); + assertEquals(97, encoding.encode("a")[0]); + assertEquals(98, encoding.encode("b")[0]); + + encoding = Encoding.defaultEncoding(); + assertEquals("a".getBytes()[0], encoding.encode("a")[0]); + assertEquals(new String(new byte[]{97}), encoding.decode(new byte[]{97})); + } + + @Test + void reader() throws Exception { + Encoding encoding = Encoding.getDatabaseEncoding("SQL_ASCII"); + InputStream stream = new ByteArrayInputStream(new byte[]{97, 98}); + Reader reader = encoding.getDecodingReader(stream); + assertEquals(97, reader.read()); + assertEquals(98, reader.read()); + assertEquals(-1, reader.read()); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EnumTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EnumTest.java new file mode 100644 index 0000000..b6bc331 --- /dev/null +++ 
/*
 * Copyright (c) 2020, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc2;

import org.postgresql.test.TestUtil;

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.sql.Array;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

/**
 * Checks that enum values and nested enum arrays come back from the server as plain
 * strings, in both text and binary transfer modes.
 */
@RunWith(Parameterized.class)
public class EnumTest extends BaseTest4 {

  public EnumTest(BinaryMode binaryMode) {
    setBinaryMode(binaryMode);
  }

  @Parameterized.Parameters(name = "binary = {0}")
  public static Iterable<Object[]> data() {
    // run every test once per transfer mode
    Collection<Object[]> ids = new ArrayList<>();
    for (BinaryMode mode : BinaryMode.values()) {
      ids.add(new Object[]{mode});
    }
    return ids;
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    // enum type shared by all tests below
    TestUtil.createEnumType(con, "flag", "'duplicate','spike','new'");
  }

  @Override
  public void tearDown() throws SQLException {
    TestUtil.dropType(con, "flag");
    super.tearDown();
  }

  @Test
  public void enumArray() throws SQLException {
    PreparedStatement ps = con.prepareStatement("SELECT '{duplicate,new}'::flag[]");
    ResultSet result = ps.executeQuery();
    result.next();
    Array sqlArray = result.getArray(1);
    Assert.assertNotNull("{duplicate,new} should come up as a non-null array", sqlArray);
    Object[] values = (Object[]) sqlArray.getArray();
    Assert.assertEquals(
        "{duplicate,new} should come up as Java array with two entries",
        "[duplicate, new]",
        Arrays.deepToString(values)
    );

    Assert.assertEquals(
        "Enum array entries should come up as strings",
        "java.lang.String, java.lang.String",
        values[0].getClass().getName() + ", " + values[1].getClass().getName()
    );
    result.close();
    ps.close();
  }

  @Test
  public void enumArrayArray() throws SQLException {
    String value = "{{duplicate,new},{spike,spike}}";
    PreparedStatement ps = con.prepareStatement("SELECT '" + value + "'::flag[][]");
    ResultSet result = ps.executeQuery();
    result.next();
    Array sqlArray = result.getArray(1);
    Assert.assertNotNull(value + " should come up as a non-null array", sqlArray);
    Object[] values = (Object[]) sqlArray.getArray();
    Assert.assertEquals(
        value + " should come up as Java array with two entries",
        "[[duplicate, new], [spike, spike]]",
        Arrays.deepToString(values)
    );

    // a two-dimensional enum array maps to String[] elements
    Assert.assertEquals(
        "Enum array entries should come up as strings",
        "[Ljava.lang.String;, [Ljava.lang.String;",
        values[0].getClass().getName() + ", " + values[1].getClass().getName()
    );
    result.close();
    ps.close();
  }
}
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.postgresql.core.ServerVersion; +import org.postgresql.geometric.PGbox; +import org.postgresql.geometric.PGcircle; +import org.postgresql.geometric.PGline; +import org.postgresql.geometric.PGlseg; +import org.postgresql.geometric.PGpath; +import org.postgresql.geometric.PGpoint; +import org.postgresql.geometric.PGpolygon; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PGobject; +import org.postgresql.util.PSQLException; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +/* + * Test case for geometric type I/O + */ +@RunWith(Parameterized.class) +public class GeometricTest extends BaseTest4 { + + public GeometricTest(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "testgeometric", + "boxval box, circleval circle, lsegval lseg, pathval path, polygonval polygon, pointval point, lineval line"); + } + + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "testgeometric"); + super.tearDown(); + } + + private void checkReadWrite(PGobject obj, String column) throws Exception { + PreparedStatement insert = + con.prepareStatement("INSERT INTO testgeometric(" + column + ") VALUES (?)"); + insert.setObject(1, obj); + insert.executeUpdate(); + insert.close(); + + Statement 
stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT " + column + " FROM testgeometric"); + assertTrue(rs.next()); + assertEquals("PGObject#equals(rs.getObject)", obj, rs.getObject(1)); + PGobject obj2 = (PGobject) obj.clone(); + obj2.setValue(rs.getString(1)); + assertEquals("PGobject.toString vs rs.getString", obj, obj2); + rs.close(); + + stmt.executeUpdate("DELETE FROM testgeometric"); + stmt.close(); + } + + @Test + public void testPGbox() throws Exception { + checkReadWrite(new PGbox(1.0, 2.0, 3.0, 4.0), "boxval"); + checkReadWrite(new PGbox(-1.0, 2.0, 3.0, 4.0), "boxval"); + checkReadWrite(new PGbox(1.0, -2.0, 3.0, 4.0), "boxval"); + checkReadWrite(new PGbox(1.0, 2.0, -3.0, 4.0), "boxval"); + checkReadWrite(new PGbox(1.0, 2.0, 3.0, -4.0), "boxval"); + } + + @Test + public void testPGcircle() throws Exception { + checkReadWrite(new PGcircle(1.0, 2.0, 3.0), "circleval"); + checkReadWrite(new PGcircle(-1.0, 2.0, 3.0), "circleval"); + checkReadWrite(new PGcircle(1.0, -2.0, 3.0), "circleval"); + } + + @Test + public void testPGlseg() throws Exception { + checkReadWrite(new PGlseg(1.0, 2.0, 3.0, 4.0), "lsegval"); + checkReadWrite(new PGlseg(-1.0, 2.0, 3.0, 4.0), "lsegval"); + checkReadWrite(new PGlseg(1.0, -2.0, 3.0, 4.0), "lsegval"); + checkReadWrite(new PGlseg(1.0, 2.0, -3.0, 4.0), "lsegval"); + checkReadWrite(new PGlseg(1.0, 2.0, 3.0, -4.0), "lsegval"); + } + + @Test + public void testPGpath() throws Exception { + PGpoint[] points = + new PGpoint[]{new PGpoint(0.0, 0.0), new PGpoint(0.0, 5.0), new PGpoint(5.0, 5.0), + new PGpoint(5.0, -5.0), new PGpoint(-5.0, -5.0), new PGpoint(-5.0, 5.0),}; + + checkReadWrite(new PGpath(points, true), "pathval"); + checkReadWrite(new PGpath(points, false), "pathval"); + } + + @Test + public void testPGpolygon() throws Exception { + PGpoint[] points = + new PGpoint[]{new PGpoint(0.0, 0.0), new PGpoint(0.0, 5.0), new PGpoint(5.0, 5.0), + new PGpoint(5.0, -5.0), new PGpoint(-5.0, -5.0), new PGpoint(-5.0, 
5.0),}; + + checkReadWrite(new PGpolygon(points), "polygonval"); + } + + @Test + public void testPGline() throws Exception { + final String columnName = "lineval"; + + // PostgreSQL versions older than 9.4 support creating columns with the LINE datatype, but + // not actually writing to those columns. Only try to write if the version if at least 9.4 + final boolean roundTripToDatabase = TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4); + + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4)) { + + // Apparently the driver requires public no-args constructor, and postgresql doesn't accept + // lines with A and B + // coefficients both being zero... so assert a no-arg instantiated instance throws an + // exception. + if (roundTripToDatabase) { + try { + checkReadWrite(new PGline(), columnName); + fail("Expected a PSQLException to be thrown"); + } catch (PSQLException e) { + assertEquals("22P02", e.getSQLState()); + } + } + + // Generate a dataset for testing. + List linesToTest = new ArrayList<>(); + for (double i = 1; i <= 3; i += 0.25) { + // Test the 3-arg constructor (coefficients+constant) + linesToTest.add(new PGline(i, (0 - i), (1 / i))); + linesToTest.add(new PGline("{" + i + "," + (0 - i) + "," + (1 / i) + "}")); + // Test the 4-arg constructor (x/y coords of two points on the line) + linesToTest.add(new PGline(i, (0 - i), (1 / i), (1 / i / i))); + linesToTest.add(new PGline(i, (0 - i), i, (1 / i / i))); // tests vertical line + // Test 2-arg constructor (2 PGpoints on the line); + linesToTest.add(new PGline(new PGpoint(i, (0 - i)), new PGpoint((1 / i), (1 / i / i)))); + // tests vertical line + linesToTest.add(new PGline(new PGpoint(i, (0 - i)), new PGpoint(i, (1 / i / i)))); + // Test 1-arg constructor (PGlseg on the line); + linesToTest.add(new PGline(new PGlseg(i, (0 - i), (1 / i), (1 / i / i)))); + linesToTest.add(new PGline(new PGlseg(i, (0 - i), i, (1 / i / i)))); + linesToTest.add( + new PGline(new PGlseg(new PGpoint(i, (0 - i)), 
new PGpoint((1 / i), (1 / i / i))))); + linesToTest + .add(new PGline(new PGlseg(new PGpoint(i, (0 - i)), new PGpoint(i, (1 / i / i))))); + } + + // Include persistence an querying if the postgresql version supports it. + if (roundTripToDatabase) { + for (PGline testLine : linesToTest) { + checkReadWrite(testLine, columnName); + } + } + + } + } + + @Test + public void testPGpoint() throws Exception { + checkReadWrite(new PGpoint(1.0, 2.0), "pointval"); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GetXXXTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GetXXXTest.java new file mode 100644 index 0000000..31e8cd7 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GetXXXTest.java @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.test.TestUtil; +import org.postgresql.util.PGInterval; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.HashMap; + +/* +* Test for getObject +*/ +class GetXXXTest { + private Connection con; + + @BeforeEach + void setUp() throws Exception { + con = TestUtil.openDB(); + TestUtil.createTempTable(con, "test_interval", + "initial timestamp with time zone, final timestamp with time zone"); + PreparedStatement pstmt = con.prepareStatement("insert into test_interval values (?,?)"); + Calendar cal = Calendar.getInstance(); + cal.add(Calendar.DAY_OF_YEAR, -1); + + 
pstmt.setTimestamp(1, new Timestamp(cal.getTime().getTime())); + pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis())); + assertEquals(1, pstmt.executeUpdate()); + pstmt.close(); + } + + @AfterEach + void tearDown() throws Exception { + TestUtil.dropTable(con, "test_interval"); + con.close(); + } + + @Test + void getObject() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("select (final-initial) as diff from test_interval"); + while (rs.next()) { + String str = rs.getString(1); + + assertNotNull(str); + Object obj = rs.getObject(1); + assertNotNull(obj); + } + } + + @Test + void getUDT() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("select (final-initial) as diff from test_interval"); + + while (rs.next()) { + // make this return a PGobject + Object obj = rs.getObject(1, new HashMap<>()); + + // it should not be an instance of PGInterval + assertTrue(obj instanceof PGInterval); + + } + + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/IntervalTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/IntervalTest.java new file mode 100644 index 0000000..f15e822 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/IntervalTest.java @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2005, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.test.TestUtil; +import org.postgresql.util.PGInterval; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Calendar; +import java.util.Date; +import java.util.GregorianCalendar; +import java.util.Locale; + +class IntervalTest { + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + TestUtil.createTable(conn, "testinterval", "v interval"); + TestUtil.createTable(conn, "testdate", "v date"); + } + + @AfterEach + void tearDown() throws Exception { + TestUtil.dropTable(conn, "testinterval"); + TestUtil.dropTable(conn, "testdate"); + + TestUtil.closeDB(conn); + } + + @Test + void onlineTests() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("INSERT INTO testinterval VALUES (?)"); + pstmt.setObject(1, new PGInterval(2004, 13, 28, 0, 0, 43000.9013)); + pstmt.executeUpdate(); + pstmt.close(); + + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT v FROM testinterval"); + assertTrue(rs.next()); + PGInterval pgi = (PGInterval) rs.getObject(1); + assertEquals(2005, pgi.getYears()); + assertEquals(1, pgi.getMonths()); + assertEquals(28, pgi.getDays()); + assertEquals(11, pgi.getHours()); + assertEquals(56, pgi.getMinutes()); + assertEquals(40.9013, pgi.getSeconds(), 0.000001); + assertFalse(rs.next()); + rs.close(); + stmt.close(); + } + + @Test + void stringToIntervalCoercion() throws SQLException { + Statement stmt = 
conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-01'")); + stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-02'")); + stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-04'")); + stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-05'")); + stmt.close(); + + PreparedStatement pstmt = conn.prepareStatement( + "SELECT v FROM testdate WHERE v < (?::timestamp with time zone + ? * ?::interval) ORDER BY v"); + pstmt.setObject(1, makeDate(2010, 1, 1)); + pstmt.setObject(2, 2); + pstmt.setObject(3, "1 day"); + ResultSet rs = pstmt.executeQuery(); + + assertNotNull(rs); + + java.sql.Date d; + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(2010, 1, 1), d); + + assertTrue(rs.next()); + d = rs.getDate(1); + assertNotNull(d); + assertEquals(makeDate(2010, 1, 2), d); + + assertFalse(rs.next()); + + rs.close(); + pstmt.close(); + } + + @Test + void intervalToStringCoercion() throws SQLException { + PGInterval interval = new PGInterval("1 year 3 months"); + String coercedStringValue = interval.toString(); + + assertEquals("1 years 3 mons 0 days 0 hours 0 mins 0.0 secs", coercedStringValue); + } + + @Test + void daysHours() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT '101:12:00'::interval"); + assertTrue(rs.next()); + PGInterval i = (PGInterval) rs.getObject(1); + // 8.1 servers store hours and days separately. 
+ assertEquals(0, i.getDays()); + assertEquals(101, i.getHours()); + + assertEquals(12, i.getMinutes()); + } + + @Test + void addRounding() { + PGInterval pgi = new PGInterval(0, 0, 0, 0, 0, 0.6006); + Calendar cal = Calendar.getInstance(); + long origTime = cal.getTime().getTime(); + pgi.add(cal); + long newTime = cal.getTime().getTime(); + assertEquals(601, newTime - origTime); + pgi.setSeconds(-0.6006); + pgi.add(cal); + assertEquals(origTime, cal.getTime().getTime()); + } + + @Test + void offlineTests() throws Exception { + PGInterval pgi = new PGInterval(2004, 4, 20, 15, 57, 12.1); + + assertEquals(2004, pgi.getYears()); + assertEquals(4, pgi.getMonths()); + assertEquals(20, pgi.getDays()); + assertEquals(15, pgi.getHours()); + assertEquals(57, pgi.getMinutes()); + assertEquals(12.1, pgi.getSeconds(), 0); + + PGInterval pgi2 = new PGInterval("@ 2004 years 4 mons 20 days 15 hours 57 mins 12.1 secs"); + assertEquals(pgi, pgi2); + + // Singular units + PGInterval pgi3 = new PGInterval("@ 2004 year 4 mon 20 day 15 hour 57 min 12.1 sec"); + assertEquals(pgi, pgi3); + + PGInterval pgi4 = new PGInterval("2004 years 4 mons 20 days 15:57:12.1"); + assertEquals(pgi, pgi4); + + // Ago test + pgi = new PGInterval("@ 2004 years 4 mons 20 days 15 hours 57 mins 12.1 secs ago"); + assertEquals(-2004, pgi.getYears()); + assertEquals(-4, pgi.getMonths()); + assertEquals(-20, pgi.getDays()); + assertEquals(-15, pgi.getHours()); + assertEquals(-57, pgi.getMinutes()); + assertEquals(-12.1, pgi.getSeconds(), 0); + + // Char test + pgi = new PGInterval("@ +2004 years -4 mons +20 days -15 hours +57 mins -12.1 secs"); + assertEquals(2004, pgi.getYears()); + assertEquals(-4, pgi.getMonths()); + assertEquals(20, pgi.getDays()); + assertEquals(-15, pgi.getHours()); + assertEquals(57, pgi.getMinutes()); + assertEquals(-12.1, pgi.getSeconds(), 0); + + // Unjustified interval test + pgi = new PGInterval("@ 0 years 0 mons 0 days 900 hours 0 mins 0.00 secs"); + assertEquals(0, 
pgi.getYears()); + assertEquals(0, pgi.getMonths()); + assertEquals(0, pgi.getDays()); + assertEquals(900, pgi.getHours()); + assertEquals(0, pgi.getMinutes()); + assertEquals(0, pgi.getSeconds(), 0); + } + + private Calendar getStartCalendar() { + Calendar cal = new GregorianCalendar(); + cal.set(Calendar.YEAR, 2005); + cal.set(Calendar.MONTH, 4); + cal.set(Calendar.DAY_OF_MONTH, 29); + cal.set(Calendar.HOUR_OF_DAY, 15); + cal.set(Calendar.MINUTE, 35); + cal.set(Calendar.SECOND, 42); + cal.set(Calendar.MILLISECOND, 100); + + return cal; + } + + @Test + void calendar() throws Exception { + Calendar cal = getStartCalendar(); + + PGInterval pgi = new PGInterval("@ 1 year 1 mon 1 day 1 hour 1 minute 1 secs"); + pgi.add(cal); + + assertEquals(2006, cal.get(Calendar.YEAR)); + assertEquals(5, cal.get(Calendar.MONTH)); + assertEquals(30, cal.get(Calendar.DAY_OF_MONTH)); + assertEquals(16, cal.get(Calendar.HOUR_OF_DAY)); + assertEquals(36, cal.get(Calendar.MINUTE)); + assertEquals(43, cal.get(Calendar.SECOND)); + assertEquals(100, cal.get(Calendar.MILLISECOND)); + + pgi = new PGInterval("@ 1 year 1 mon 1 day 1 hour 1 minute 1 secs ago"); + pgi.add(cal); + + assertEquals(2005, cal.get(Calendar.YEAR)); + assertEquals(4, cal.get(Calendar.MONTH)); + assertEquals(29, cal.get(Calendar.DAY_OF_MONTH)); + assertEquals(15, cal.get(Calendar.HOUR_OF_DAY)); + assertEquals(35, cal.get(Calendar.MINUTE)); + assertEquals(42, cal.get(Calendar.SECOND)); + assertEquals(100, cal.get(Calendar.MILLISECOND)); + + cal = getStartCalendar(); + + pgi = new PGInterval("@ 1 year -23 hours -3 mins -3.30 secs"); + pgi.add(cal); + + assertEquals(2006, cal.get(Calendar.YEAR)); + assertEquals(4, cal.get(Calendar.MONTH)); + assertEquals(28, cal.get(Calendar.DAY_OF_MONTH)); + assertEquals(16, cal.get(Calendar.HOUR_OF_DAY)); + assertEquals(32, cal.get(Calendar.MINUTE)); + assertEquals(38, cal.get(Calendar.SECOND)); + assertEquals(800, cal.get(Calendar.MILLISECOND)); + + pgi = new PGInterval("@ 1 year -23 hours 
-3 mins -3.30 secs ago"); + pgi.add(cal); + + assertEquals(2005, cal.get(Calendar.YEAR)); + assertEquals(4, cal.get(Calendar.MONTH)); + assertEquals(29, cal.get(Calendar.DAY_OF_MONTH)); + assertEquals(15, cal.get(Calendar.HOUR_OF_DAY)); + assertEquals(35, cal.get(Calendar.MINUTE)); + assertEquals(42, cal.get(Calendar.SECOND)); + assertEquals(100, cal.get(Calendar.MILLISECOND)); + } + + @Test + void date() throws Exception { + Date date = getStartCalendar().getTime(); + Date date2 = getStartCalendar().getTime(); + + PGInterval pgi = new PGInterval("@ +2004 years -4 mons +20 days -15 hours +57 mins -12.1 secs"); + pgi.add(date); + + PGInterval pgi2 = + new PGInterval("@ +2004 years -4 mons +20 days -15 hours +57 mins -12.1 secs ago"); + pgi2.add(date); + + assertEquals(date2, date); + } + + @Test + void postgresDate() throws Exception { + Date date = getStartCalendar().getTime(); + Date date2 = getStartCalendar().getTime(); + + PGInterval pgi = new PGInterval("+2004 years -4 mons +20 days -15:57:12.1"); + pgi.add(date); + + PGInterval pgi2 = new PGInterval("-2004 years 4 mons -20 days 15:57:12.1"); + pgi2.add(date); + + assertEquals(date2, date); + } + + @Test + void iSO8601() throws Exception { + PGInterval pgi = new PGInterval("P1Y2M3DT4H5M6S"); + assertEquals(1, pgi.getYears()); + assertEquals(2, pgi.getMonths()); + assertEquals(3, pgi.getDays()); + assertEquals(4, pgi.getHours()); + assertEquals(5, pgi.getMinutes()); + assertEquals(6, pgi.getSeconds(), .1); + + pgi = new PGInterval("P-1Y2M3DT4H5M6S"); + assertEquals(-1, pgi.getYears()); + + pgi = new PGInterval("P1Y2M"); + assertEquals(1, pgi.getYears()); + assertEquals(2, pgi.getMonths()); + assertEquals(0, pgi.getDays()); + + pgi = new PGInterval("P3DT4H5M6S"); + assertEquals(0, pgi.getYears()); + + pgi = new PGInterval("P-1Y-2M3DT-4H-5M-6S"); + assertEquals(-1, pgi.getYears()); + assertEquals(-2, pgi.getMonths()); + assertEquals(-4, pgi.getHours()); + + pgi = new PGInterval("PT6.123456S"); + 
assertEquals(6.123456, pgi.getSeconds(), .0); + assertEquals(6, pgi.getWholeSeconds()); + assertEquals(123456, pgi.getMicroSeconds()); + + pgi = new PGInterval("PT-6.123456S"); + assertEquals(-6.123456, pgi.getSeconds(), .0); + assertEquals(-6, pgi.getWholeSeconds()); + assertEquals(-123456, pgi.getMicroSeconds()); + } + + @Test + void smallValue() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("INSERT INTO testinterval VALUES (?)"); + pstmt.setObject(1, new PGInterval("0.0001 seconds")); + pstmt.executeUpdate(); + pstmt.close(); + + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT v FROM testinterval"); + assertTrue(rs.next()); + PGInterval pgi = (PGInterval) rs.getObject(1); + assertEquals(0, pgi.getYears()); + assertEquals(0, pgi.getMonths()); + assertEquals(0, pgi.getDays()); + assertEquals(0, pgi.getHours()); + assertEquals(0, pgi.getMinutes()); + assertEquals(0, pgi.getWholeSeconds()); + assertEquals(100, pgi.getMicroSeconds()); + assertFalse(rs.next()); + rs.close(); + stmt.close(); + } + + @Test + void getValueForSmallValue() throws SQLException { + PGInterval orig = new PGInterval("0.0001 seconds"); + PGInterval copy = new PGInterval(orig.getValue()); + + assertEquals(orig, copy); + } + + @Test + void getValueForSmallValueWithCommaAsDecimalSeparatorInDefaultLocale() throws SQLException { + Locale originalLocale = Locale.getDefault(); + Locale.setDefault(Locale.GERMANY); + try { + PGInterval orig = new PGInterval("0.0001 seconds"); + PGInterval copy = new PGInterval(orig.getValue()); + + assertEquals(orig, copy); + } finally { + Locale.setDefault(originalLocale); + } + } + + @Test + void getSecondsForSmallValue() throws SQLException { + PGInterval pgi = new PGInterval("0.000001 seconds"); + + assertEquals(0.000001, pgi.getSeconds(), 0.000000001); + } + + @Test + void microSecondsAreRoundedToNearest() throws SQLException { + PGInterval pgi = new PGInterval("0.0000007 seconds"); + + assertEquals(1, 
pgi.getMicroSeconds()); + } + + private java.sql.Date makeDate(int y, int m, int d) { + return new java.sql.Date(y - 1900, m - 1, d); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/JBuilderTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/JBuilderTest.java new file mode 100644 index 0000000..ad3f2ee --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/JBuilderTest.java @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertNotNull; + +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; + +/* +* Some simple tests to check that the required components needed for JBuilder stay working +* +*/ +class JBuilderTest { + + // Set up the fixture for this testcase: the tables for this test. + @BeforeEach + void setUp() throws Exception { + Connection con = TestUtil.openDB(); + + TestUtil.createTable(con, "test_c", "source text,cost money,imageid int4"); + + TestUtil.closeDB(con); + } + + // Tear down the fixture for this test case. + @AfterEach + void tearDown() throws Exception { + Connection con = TestUtil.openDB(); + TestUtil.dropTable(con, "test_c"); + TestUtil.closeDB(con); + } + + /* + * This tests that Money types work. JDBCExplorer barfs if this fails. 
+ */ + @Test + void money() throws Exception { + Connection con = TestUtil.openDB(); + + Statement st = con.createStatement(); + ResultSet rs = st.executeQuery("select cost from test_c"); + assertNotNull(rs); + + while (rs.next()) { + rs.getDouble(1); + } + + rs.close(); + st.close(); + + TestUtil.closeDB(con); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/LoginTimeoutTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/LoginTimeoutTest.java new file mode 100644 index 0000000..5878abf --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/LoginTimeoutTest.java @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2005, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.UnknownHostException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; + +class LoginTimeoutTest { + + @BeforeEach + void setUp() throws Exception { + TestUtil.initDriver(); // Set up log levels, etc. 
+ } + + @Test + void intTimeout() throws Exception { + Properties props = new Properties(); + PGProperty.USER.set(props, TestUtil.getUser()); + PGProperty.PASSWORD.set(props, TestUtil.getPassword()); + PGProperty.LOGIN_TIMEOUT.set(props, 10); + + Connection conn = DriverManager.getConnection(TestUtil.getURL(), props); + conn.close(); + } + + @Test + void floatTimeout() throws Exception { + Properties props = new Properties(); + props.setProperty("user", TestUtil.getUser()); + props.setProperty("password", TestUtil.getPassword()); + props.setProperty("loginTimeout", "10.0"); + + Connection conn = DriverManager.getConnection(TestUtil.getURL(), props); + conn.close(); + } + + @Test + void zeroTimeout() throws Exception { + Properties props = new Properties(); + props.setProperty("user", TestUtil.getUser()); + props.setProperty("password", TestUtil.getPassword()); + props.setProperty("loginTimeout", "0"); + + Connection conn = DriverManager.getConnection(TestUtil.getURL(), props); + conn.close(); + } + + @Test + void negativeTimeout() throws Exception { + Properties props = new Properties(); + props.setProperty("user", TestUtil.getUser()); + props.setProperty("password", TestUtil.getPassword()); + props.setProperty("loginTimeout", "-1"); + + Connection conn = DriverManager.getConnection(TestUtil.getURL(), props); + conn.close(); + } + + @Test + void badTimeout() throws Exception { + Properties props = new Properties(); + props.setProperty("user", TestUtil.getUser()); + props.setProperty("password", TestUtil.getPassword()); + props.setProperty("loginTimeout", "zzzz"); + + Connection conn = DriverManager.getConnection(TestUtil.getURL(), props); + conn.close(); + } + + private static class TimeoutHelper implements Runnable { + TimeoutHelper() throws IOException { + InetAddress localAddr; + try { + localAddr = InetAddress.getLocalHost(); + } catch (UnknownHostException ex) { + System.err.println("WARNING: Could not resolve local host name, trying 'localhost'. 
" + ex); + localAddr = InetAddress.getByName("localhost"); + } + this.listenSocket = new ServerSocket(0, 1, localAddr); + } + + String getHost() { + return listenSocket.getInetAddress().getHostAddress(); + } + + int getPort() { + return listenSocket.getLocalPort(); + } + + @Override + public void run() { + try { + Socket newSocket = listenSocket.accept(); + try { + Thread.sleep(30000); + } catch (InterruptedException e) { + // Ignore it. + } + newSocket.close(); + } catch (IOException e) { + // Ignore it. + } + } + + void kill() { + try { + listenSocket.close(); + } catch (IOException e) { + } + } + + private final ServerSocket listenSocket; + } + + @Test + void timeoutOccurs() throws Exception { + // Spawn a helper thread to accept a connection and do nothing with it; + // this should trigger a timeout. + TimeoutHelper helper = new TimeoutHelper(); + new Thread(helper, "timeout listen helper").start(); + + try { + String url = "jdbc:postgresql://" + helper.getHost() + ":" + helper.getPort() + "/dummy"; + Properties props = new Properties(); + props.setProperty("user", "dummy"); + props.setProperty("loginTimeout", "5"); + + // This is a pretty crude check, but should help distinguish + // "can't connect" from "timed out". + long startTime = System.nanoTime(); + Connection conn = null; + try { + conn = DriverManager.getConnection(url, props); + fail("connection was unexpectedly successful"); + } catch (SQLException e) { + // Ignored. 
+ } finally { + if (conn != null) { + conn.close(); + } + } + + long endTime = System.nanoTime(); + assertTrue(endTime > startTime + (2500L * 1E6), "Connection timed before 2500ms"); + } finally { + helper.kill(); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/MiscTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/MiscTest.java new file mode 100644 index 0000000..5cceb39 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/MiscTest.java @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayOutputStream; +import java.io.ObjectOutputStream; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.Statement; + +/* +* Some simple tests based on problems reported by users. Hopefully these will help prevent previous +* problems from re-occurring ;-) +* +*/ +class MiscTest { + + /* + * Some versions of the driver would return rs as a null? + * + * Sasha was having this problem. + * + * Added Feb 13 2001 + */ + @Test + void databaseSelectNullBug() throws Exception { + Connection con = TestUtil.openDB(); + + Statement st = con.createStatement(); + ResultSet rs = st.executeQuery("select datname from pg_database"); + assertNotNull(rs); + + while (rs.next()) { + rs.getString(1); + } + + rs.close(); + st.close(); + + TestUtil.closeDB(con); + } + + /** + * Ensure the cancel call does not return before it has completed. Previously it did which + * cancelled future queries. 
+ */ + @Test + void singleThreadCancel() throws Exception { + Connection con = TestUtil.openDB(); + Statement stmt = con.createStatement(); + for (int i = 0; i < 100; i++) { + ResultSet rs = stmt.executeQuery("SELECT 1"); + rs.close(); + stmt.cancel(); + } + TestUtil.closeDB(con); + } + + @Test + void error() throws Exception { + Connection con = TestUtil.openDB(); + try { + + // transaction mode + con.setAutoCommit(false); + Statement stmt = con.createStatement(); + stmt.execute("select 1/0"); + fail("Should not execute this, as a SQLException s/b thrown"); + con.commit(); + } catch (SQLException ex) { + // Verify that the SQLException is serializable. + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(ex); + oos.close(); + } + + con.commit(); + con.close(); + } + + @Test + void warning() throws Exception { + Connection con = TestUtil.openDB(); + Statement stmt = con.createStatement(); + stmt.execute("CREATE TEMP TABLE t(a int primary key)"); + SQLWarning warning = stmt.getWarnings(); + // We should get a warning about primary key index creation + // it's possible we won't depending on the server's + // client_min_messages setting. + while (warning != null) { + // Verify that the SQLWarning is serializable. 
+ ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(warning); + oos.close(); + warning = warning.getNextWarning(); + } + + stmt.close(); + con.close(); + } + + @Disabled + @Test + void xtestLocking() throws Exception { + Connection con = TestUtil.openDB(); + Connection con2 = TestUtil.openDB(); + + TestUtil.createTable(con, "test_lock", "name text"); + Statement st = con.createStatement(); + Statement st2 = con2.createStatement(); + con.setAutoCommit(false); + st.execute("lock table test_lock"); + st2.executeUpdate("insert into test_lock ( name ) values ('hello')"); + con.commit(); + TestUtil.dropTable(con, "test_lock"); + con.close(); + con2.close(); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NotifyTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NotifyTest.java new file mode 100644 index 0000000..4740a5a --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NotifyTest.java @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGConnection; +import org.postgresql.PGNotification; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; + +class NotifyTest { + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + } + + @AfterEach + void tearDown() throws SQLException { + TestUtil.closeDB(conn); + } + + @Test + @Timeout(60) + void testNotify() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("LISTEN mynotification"); + stmt.executeUpdate("NOTIFY mynotification"); + + PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(); + assertNotNull(notifications); + assertEquals(1, notifications.length); + assertEquals("mynotification", notifications[0].getName()); + assertEquals("", notifications[0].getParameter()); + + stmt.close(); + } + + @Test + @Timeout(60) + void notifyArgument() throws Exception { + if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_0)) { + return; + } + + Statement stmt = conn.createStatement(); + stmt.executeUpdate("LISTEN mynotification"); + stmt.executeUpdate("NOTIFY mynotification, 'message'"); + + PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(); + assertNotNull(notifications); + assertEquals(1, notifications.length); + assertEquals("mynotification", notifications[0].getName()); + assertEquals("message", 
notifications[0].getParameter()); + + stmt.close(); + } + + @Test + @Timeout(60) + void asyncNotify() throws Exception { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("LISTEN mynotification"); + + // Notify on a separate connection to get an async notify on the first. + connectAndNotify("mynotification"); + + // Wait a bit to let the notify come through... Changed this so the test takes ~2 seconds + // less to run and is still as effective. + PGNotification[] notifications = null; + PGConnection connection = conn.unwrap(PGConnection.class); + for (int i = 0; i < 3000; i++) { + notifications = connection.getNotifications(); + if (notifications.length > 0) { + break; + } + Thread.sleep(10); + } + + assertNotNull(notifications, "Notification is expected to be delivered when subscription was created" + + " before sending notification"); + assertEquals(1, notifications.length); + assertEquals("mynotification", notifications[0].getName()); + assertEquals("", notifications[0].getParameter()); + + stmt.close(); + } + + /** + * To test timeouts we have to send the notification from another thread, because we + * listener is blocking. + */ + @Test + @Timeout(60) + void asyncNotifyWithTimeout() throws Exception { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("LISTEN mynotification"); + + // Here we let the getNotifications() timeout. + long startMillis = System.currentTimeMillis(); + PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(500); + long endMillis = System.currentTimeMillis(); + long runtime = endMillis - startMillis; + assertEquals("[]", Arrays.asList(notifications).toString(), "There have been notifications, although none have been expected."); + assertTrue(runtime > 450, "We didn't wait long enough! 
runtime=" + runtime); + + stmt.close(); + } + + @Test + @Timeout(60) + void asyncNotifyWithTimeoutAndMessagesAvailableWhenStartingListening() throws Exception { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("LISTEN mynotification"); + + // Now we check the case where notifications are already available while we are starting to + // listen for notifications + connectAndNotify("mynotification"); + + PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(10000); + assertNotNull(notifications); + assertEquals(1, notifications.length); + assertEquals("mynotification", notifications[0].getName()); + assertEquals("", notifications[0].getParameter()); + + stmt.close(); + } + + @Test + @Timeout(60) + void asyncNotifyWithEndlessTimeoutAndMessagesAvailableWhenStartingListening() throws Exception { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("LISTEN mynotification"); + + // Now we check the case where notifications are already available while we are waiting forever + connectAndNotify("mynotification"); + + PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(0); + assertNotNull(notifications); + assertEquals(1, notifications.length); + assertEquals("mynotification", notifications[0].getName()); + assertEquals("", notifications[0].getParameter()); + + stmt.close(); + } + + @Test + @Timeout(60) + void asyncNotifyWithTimeoutAndMessagesSendAfter() throws Exception { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("LISTEN mynotification"); + + // Now we check the case where notifications are send after we have started to listen for + // notifications + new Thread( new Runnable() { + @Override + public void run() { + try { + Thread.sleep(200); + } catch (InterruptedException ie) { + } + connectAndNotify("mynotification"); + } + }).start(); + + PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(10000); + assertNotNull(notifications); + 
assertEquals(1, notifications.length); + assertEquals("mynotification", notifications[0].getName()); + assertEquals("", notifications[0].getParameter()); + + stmt.close(); + } + + @Test + @Timeout(60) + void asyncNotifyWithEndlessTimeoutAndMessagesSendAfter() throws Exception { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("LISTEN mynotification"); + + // Now we check the case where notifications are send after we have started to listen for + // notifications forever + new Thread( new Runnable() { + @Override + public void run() { + try { + Thread.sleep(200); + } catch (InterruptedException ie) { + } + connectAndNotify("mynotification"); + } + }).start(); + + PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(0); + assertNotNull(notifications); + assertEquals(1, notifications.length); + assertEquals("mynotification", notifications[0].getName()); + assertEquals("", notifications[0].getParameter()); + + stmt.close(); + } + + @Test + @Timeout(60) + void asyncNotifyWithTimeoutAndSocketThatBecomesClosed() throws Exception { + Statement stmt = conn.createStatement(); + stmt.executeUpdate("LISTEN mynotification"); + + // Here we check what happens when the connection gets closed from another thread. This + // should be able, and this test ensures that no synchronized statements will stop the + // connection from becoming closed. + new Thread( new Runnable() { + @Override + public void run() { + try { + Thread.sleep(500); + } catch (InterruptedException ie) { + } + try { + conn.close(); + } catch (SQLException e) { + } + } + }).start(); + + try { + conn.unwrap(PGConnection.class).getNotifications(40000); + fail("The getNotifications(...) 
call didn't return when the socket closed."); + } catch (SQLException e) { + // We expected that + } + + stmt.close(); + } + + private static void connectAndNotify(String channel) { + Connection conn2 = null; + try { + conn2 = TestUtil.openDB(); + Statement stmt2 = conn2.createStatement(); + stmt2.executeUpdate("NOTIFY " + channel); + stmt2.close(); + } catch (Exception e) { + throw new RuntimeException("Couldn't notify '" + channel + "'.", e); + } finally { + try { + conn2.close(); + } catch (SQLException e) { + } + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest.java new file mode 100644 index 0000000..d1cea18 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest.java @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; + +import org.postgresql.PGProperty; +import org.postgresql.core.Oid; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.math.BigDecimal; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Properties; + +@RunWith(Parameterized.class) +public class NumericTransferTest extends BaseTest4 { + public NumericTransferTest(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + PGProperty.BINARY_TRANSFER_ENABLE.set(props, Oid.NUMERIC); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) 
{ + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Test + public void receive100000() throws SQLException { + Statement statement = con.createStatement(); + for (String sign : new String[]{"", "-"}) { + for (int i = 0; i < 100; i++) { + final String sql = "SELECT " + sign + "1E+" + i + "::numeric"; + ResultSet rs = statement.executeQuery(sql); + rs.next(); + if (i == 0) { + final String expected = sign + "1"; + assertEquals("getString for " + sql, expected, rs.getString(1)); + assertEquals("getBigDecimal for " + sql, expected, rs.getBigDecimal(1).toString()); + } else { + final String expected = sign + String.format("1%0" + i + "d", 0); + assertEquals("getString for " + sql, expected, rs.getString(1)); + assertEquals("getBigDecimal for " + sql, expected, rs.getBigDecimal(1).toString()); + } + rs.close(); + } + } + statement.close(); + } + + @Test + public void sendReceive100000() throws SQLException { + PreparedStatement statement = con.prepareStatement("select ?::numeric"); + for (String sign : new String[]{"", "-"}) { + for (int i = 0; i < 100; i++) { + final String expected = sign + (i == 0 ? 1 : String.format("1%0" + i + "d", 0)); + statement.setBigDecimal(1, new BigDecimal(expected)); + ResultSet rs = statement.executeQuery(); + rs.next(); + assertEquals("getString for " + expected, expected, rs.getString(1)); + assertEquals("getBigDecimal for " + expected, expected, rs.getBigDecimal(1).toString()); + rs.close(); + } + } + statement.close(); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest2.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest2.java new file mode 100644 index 0000000..7a2962a --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest2.java @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.postgresql.PGProperty; +import org.postgresql.core.Oid; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Properties; + +@RunWith(Parameterized.class) +public class NumericTransferTest2 extends BaseTest4 { + + final BigDecimal value; + + public NumericTransferTest2(BinaryMode binaryMode, BigDecimal value) { + setBinaryMode(binaryMode); + this.value = value; + } + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + PGProperty.BINARY_TRANSFER_ENABLE.set(props, Oid.NUMERIC); + } + + @Parameterized.Parameters(name = "binary = {0}, value = {1,number,#,###.##################################################}") + public static Iterable data() { + Collection numbers = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + numbers.add(new Object[]{binaryMode, new BigDecimal("1.0")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("0.000000000000000000000000000000000000000000000000000")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("0.100000000000000000000000000000000000000000000009900")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-1.0")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-1")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("1.2")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-2.05")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("0.000000000000000000000000000990")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-0.000000000000000000000000000990")}); + 
numbers.add(new Object[]{binaryMode, new BigDecimal("10.0000000000099")}); + numbers.add(new Object[]{binaryMode, new BigDecimal(".10000000000000")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("1.10000000000000")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("99999.2")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("99999")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-99999.2")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-99999")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("2147483647")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-2147483648")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("2147483648")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-2147483649")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("9223372036854775807")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-9223372036854775808")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("9223372036854775808")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-9223372036854775809")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("10223372036850000000")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("19223372036854775807")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("19223372036854775807.300")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("-19223372036854775807.300")}); + numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(1234567890987654321L), -1)}); + numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(1234567890987654321L), -5)}); + numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(-1234567890987654321L), -3)}); + numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(6), -8)}); + numbers.add(new Object[]{binaryMode, new BigDecimal("30000")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("40000").setScale(15)}); + numbers.add(new 
Object[]{binaryMode, new BigDecimal("20000.00000000000000000000")}); + numbers.add(new Object[]{binaryMode, new BigDecimal("9990000").setScale(10)}); + numbers.add(new Object[]{binaryMode, new BigDecimal("1000000").setScale(20)}); + numbers.add(new Object[]{binaryMode, new BigDecimal("10000000000000000000000000000000000000").setScale(20)}); + numbers.add(new Object[]{binaryMode, new BigDecimal("90000000000000000000000000000000000000")}); + } + return numbers; + } + + @Test + public void receiveValue() throws SQLException { + final String valString = value.toPlainString(); + try (Statement statement = con.createStatement()) { + final String sql = "SELECT " + valString + "::numeric"; + try (ResultSet rs = statement.executeQuery(sql)) { + assertTrue(rs.next()); + assertEquals("getBigDecimal for " + sql, valString, rs.getBigDecimal(1).toPlainString()); + } + } + } + + @Test + public void sendReceiveValue() throws SQLException { + final String valString = value.toPlainString(); + try (PreparedStatement statement = con.prepareStatement("select ?::numeric")) { + statement.setBigDecimal(1, value); + try (ResultSet rs = statement.executeQuery()) { + rs.next(); + assertEquals("getBigDecimal for " + valString, valString, rs.getBigDecimal(1).toPlainString()); + } + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/OuterJoinSyntaxTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/OuterJoinSyntaxTest.java new file mode 100644 index 0000000..1511545 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/OuterJoinSyntaxTest.java @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.test.TestUtil; + +import org.junit.Assert; +import org.junit.Test; + +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.Arrays; +import java.util.List; + +/** + * Executes the query with the outer join syntax. The joined tables are encapsulated with parenthesis. + * Note: This queries worked up to driver version 9.4.1211 (postgresql-9.4.1211.jre7.jar). + * Encapsulation with parenthesis is used by third party like CrystalReports. + */ +public class OuterJoinSyntaxTest extends BaseTest4 { + + @Test + public void testOuterJoinSyntaxWithSingleJoinAndWithoutOj() throws Exception { + testOuterJoinSyntax( + "select t1.id as t1_id, t1.text as t1_text," + + " t2.id as t2_id, t2.text as t2_text" + + " from (values (1, 'one'), (2, 'two')) as t1 (id, text)" + + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)", + Arrays.asList("1,one,1,a", "2,two,null,null")); + } + + @Test + public void testOuterJoinSyntaxWithMultipleJoinsAndWithoutOj() throws Exception { + testOuterJoinSyntax( + "select t1.id as t1_id, t1.text as t1_text," + + " t2.id as t2_id, t2.text as t2_text," + + " t3.id as t3_id, t3.text as t3_text" + + " from (values (1, 'one'), (2, 'two')) as t1 (id, text)" + + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)" + + " left outer join (values (4, '1'), (2, '2')) as t3 (id, text) on (t2.id = t3.id)", + Arrays.asList("1,one,1,a,null,null", "2,two,null,null,null,null")); + } + + @Test + public void testOuterJoinSyntaxWithSingleJoinAndWithOj() throws Exception { + testOuterJoinSyntax( + "select t1.id as t1_id, t1.text as t1_text," + + " t2.id as t2_id, t2.text as t2_text" + + " from {oj (values (1, 'one'), (2, 'two')) as t1 (id, text)" + + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id) }", + Arrays.asList("1,one,1,a", "2,two,null,null")); + } + + @Test + public void 
testOuterJoinSyntaxWithMultipleJoinsAndWithOj() throws Exception { + testOuterJoinSyntax( + "select t1.id as t1_id, t1.text as t1_text," + + " t2.id as t2_id, t2.text as t2_text," + + " t3.id as t3_id, t3.text as t3_text" + + " from {oj (values (1, 'one'), (2, 'two')) as t1 (id, text)" + + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)" + + " left outer join (values (1, '1'), (2, '2')) as t3 (id, text) on (t2.id = t3.id)}", + Arrays.asList("1,one,1,a,1,1", "2,two,null,null,null,null")); + } + + @Test + public void testOuterJoinSyntaxWithMultipleJoinsAndWithOj2() throws Exception { + // multiple joins with oj and missing space character after oj + testOuterJoinSyntax( + "select t1.id as t1_id, t1.text as t1_text," + + " t2.id as t2_id, t2.text as t2_text," + + " t3.id as t3_id, t3.text as t3_text" + + " from {oj(values (1, 'one'), (2, 'two')) as t1 (id, text)" + + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)" + + " left outer join (values (4, '1'), (2, '2')) as t3 (id, text) on (t2.id = t3.id)}", + Arrays.asList("1,one,1,a,null,null", "2,two,null,null,null,null")); + } + + @Test + public void testOuterJoinSyntaxWithMultipleJoinsAndWithOj3() throws Exception { + // multiple joins with oj and missing space character after oj and some more parenthesis + testOuterJoinSyntax( + "select t1.id as t1_id, t1.text as t1_text," + + " t2.id as t2_id, t2.text as t2_text," + + " t3.id as t3_id, t3.text as t3_text" + + " from {oj(((values (1, 'one'), (2, 'two')) as t1 (id, text)" + + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id))" + + " left outer join (values (1, '1'), (4, '2')) as t3 (id, text) on (t2.id = t3.id))}", + Arrays.asList("1,one,1,a,1,1", "2,two,null,null,null,null")); + } + + /** + * Executes the statement. 
+ * + * @param theQuery the query to execute + * @param expectedResult the expected columns in result set + * @throws Exception on error + */ + private void testOuterJoinSyntax(String theQuery, List expectedResult) throws Exception { + final Statement st = con.createStatement(); + try { + final ResultSet rs = st.executeQuery(theQuery); + try { + Assert.assertEquals("SQL " + theQuery, TestUtil.join(TestUtil.resultSetToLines(rs)), TestUtil.join(expectedResult)); + } finally { + TestUtil.closeQuietly(rs); + } + } finally { + TestUtil.closeQuietly(st); + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectGetTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectGetTest.java new file mode 100644 index 0000000..4245f50 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectGetTest.java @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import org.postgresql.geometric.PGbox; +import org.postgresql.geometric.PGcircle; +import org.postgresql.geometric.PGline; +import org.postgresql.geometric.PGlseg; +import org.postgresql.geometric.PGpath; +import org.postgresql.geometric.PGpoint; +import org.postgresql.geometric.PGpolygon; +import org.postgresql.util.PGInterval; +import org.postgresql.util.PGmoney; +import org.postgresql.util.PGobject; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; + +@RunWith(Parameterized.class) +public class PGObjectGetTest extends BaseTest4 { + private final String sqlExpression; + private final Class type; + private final String expected; + private final 
String stringValue; + + public PGObjectGetTest(BinaryMode binaryMode, String sqlExpression, + Class type, String expected, String stringValue) { + setBinaryMode(binaryMode); + this.sqlExpression = sqlExpression; + this.type = type; + this.expected = expected; + this.stringValue = stringValue; + } + + @Parameterized.Parameters(name = "binary = {0}, sql = {1}, type = {2}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode, "null::inet", PGobject.class, + "PGobject(type=inet, value=null)", null}); + ids.add(new Object[]{binaryMode, "null::box", PGbox.class, + "PGbox(type=box, value=null)", null}); + ids.add(new Object[]{binaryMode, "null::circle", PGcircle.class, + "PGcircle(type=circle, value=null)", null}); + ids.add(new Object[]{binaryMode, "null::line", PGline.class, + "PGline(type=line, value=null)", null}); + ids.add(new Object[]{binaryMode, "null::lseg", PGlseg.class, + "PGlseg(type=lseg, value=null)", null}); + ids.add(new Object[]{binaryMode, "null::path", PGpath.class, + "PGpath(type=path, value=null)", null}); + ids.add(new Object[]{binaryMode, "null::point", PGpoint.class, + "PGpoint(type=point, value=null)", null}); + ids.add(new Object[]{binaryMode, "null::polygon", PGpolygon.class, + "PGpolygon(type=polygon, value=null)", null}); + ids.add(new Object[]{binaryMode, "null::money", PGmoney.class, + "PGmoney(type=money, value=null)", null}); + ids.add(new Object[]{binaryMode, "null::interval", PGInterval.class, + "PGInterval(type=interval, value=null)", null}); + } + return ids; + } + + @Test + public void getAsPGobject() throws SQLException { + testGet(sqlExpression, expected, PGobject.class); + } + + @Test + public void getAsPGobjectSubtype() throws SQLException { + testGet(sqlExpression, expected, type); + } + + @Test + public void getAsString() throws SQLException { + PreparedStatement ps = con.prepareStatement("select " + sqlExpression); + 
ResultSet rs = ps.executeQuery(); + rs.next(); + assertEquals( + "'" + sqlExpression + "'.getString(1)", + stringValue, + rs.getString(1) + ); + } + + private void testGet(final String s, String expected, Class type) throws SQLException { + PreparedStatement ps = con.prepareStatement("select " + s); + ResultSet rs = ps.executeQuery(); + rs.next(); + assertEquals( + "'" + s + "'.getObject(1, " + type.getSimpleName() + ".class)", + expected, + printObject(rs.getObject(1, type)) + ); + if (expected.contains("value=null)")) { + // For some reason we return objects as nulls + assertNull( + "'select " + s + "'.getObject(1)", + rs.getObject(1) + ); + } else { + assertEquals( + "'select " + s + "'.getObject(1)", + expected, + printObject(rs.getObject(1)) + ); + } + } + + String printObject(Object object) { + if (!(object instanceof PGobject)) { + return String.valueOf(object); + } + PGobject pg = (PGobject) object; + return pg.getClass().getSimpleName() + "(type=" + pg.getType() + ", value=" + pg.getValue() + ")"; + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectSetTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectSetTest.java new file mode 100644 index 0000000..f3aca34 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectSetTest.java @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import org.postgresql.geometric.PGbox; +import org.postgresql.geometric.PGcircle; +import org.postgresql.geometric.PGline; +import org.postgresql.geometric.PGlseg; +import org.postgresql.geometric.PGpath; +import org.postgresql.geometric.PGpoint; +import org.postgresql.geometric.PGpolygon; +import org.postgresql.util.PGInterval; +import org.postgresql.util.PGmoney; +import org.postgresql.util.PGobject; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.lang.reflect.InvocationTargetException; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; + +@RunWith(Parameterized.class) +public class PGObjectSetTest extends BaseTest4 { + private final String typeName; + private final String expected; + private final Class type; + + public PGObjectSetTest(BinaryMode binaryMode, Class type, + String typeName, String expected) { + setBinaryMode(binaryMode); + this.expected = expected; + this.type = type; + this.typeName = typeName; + } + + @Parameterized.Parameters(name = "binary = {0}, sql = {2}, type = {1}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode, PGobject.class, "inet", + "PGobject(type=inet, value=null)"}); + ids.add(new Object[]{binaryMode, PGbox.class, "box", + "PGbox(type=box, value=null)"}); + ids.add(new Object[]{binaryMode, PGcircle.class, "circle", + "PGcircle(type=circle, value=null)"}); + ids.add(new Object[]{binaryMode, PGline.class, "line", + "PGline(type=line, value=null)"}); + ids.add(new Object[]{binaryMode, PGlseg.class, "lseg", + "PGlseg(type=lseg, value=null)"}); + ids.add(new Object[]{binaryMode, 
PGpath.class, "path", + "PGpath(type=path, value=null)"}); + ids.add(new Object[]{binaryMode, PGpoint.class, "point", + "PGpoint(type=point, value=null)"}); + ids.add(new Object[]{binaryMode, PGpolygon.class, "polygon", + "PGpolygon(type=polygon, value=null)"}); + ids.add(new Object[]{binaryMode, PGmoney.class, "money", + "PGmoney(type=money, value=null)"}); + ids.add(new Object[]{binaryMode, PGInterval.class, "interval", + "PGInterval(type=interval, value=null)"}); + } + return ids; + } + + @Test + public void setNullAsPGobject() throws SQLException { + PGobject object = new PGobject(); + object.setType(typeName); + object.setValue(null); + assertTrue("IsNull should return true", object.isNull()); + testSet(object, expected, PGobject.class); + } + + @Test + public void setNullAsPGobjectSubtype() throws SQLException, NoSuchMethodException, + IllegalAccessException, InvocationTargetException, InstantiationException { + if (type == PGobject.class) { + // We can't use PGobject without setType + return; + } + PGobject object = type.getConstructor().newInstance(); + object.setValue(null); + testSet(object, expected, type); + } + + private void testSet(PGobject value, String expected, Class type) throws SQLException { + PreparedStatement ps = con.prepareStatement("select ?::" + value.getType()); + ps.setObject(1, value); + ResultSet rs = ps.executeQuery(); + rs.next(); + assertEquals( + "'select ?::" + value.getType() + "'.withParam(" + printObject(value) + ").getObject(1, " + type.getSimpleName() + ".class)", + expected, + printObject(rs.getObject(1, type)) + ); + if (expected.contains("value=null)")) { + assertNull( + "'select ?::" + value.getType() + "'.withParam(" + printObject(value) + ").getObject(1)", + rs.getObject(1) + ); + } else { + assertEquals( + "'select ?::" + value.getType() + "'.withParam(" + printObject(value) + ").getObject(1)", + expected, + printObject(rs.getObject(1)) + ); + } + } + + String printObject(Object object) { + if (!(object instanceof 
PGobject)) { + return String.valueOf(object); + } + PGobject pg = (PGobject) object; + return pg.getClass().getSimpleName() + "(type=" + pg.getType() + ", value=" + pg.getValue() + ")"; + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGPropertyTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGPropertyTest.java new file mode 100644 index 0000000..1d727fe --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGPropertyTest.java @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.Driver; +import org.postgresql.PGProperty; +import org.postgresql.ds.PGSimpleDataSource; +import org.postgresql.ds.common.BaseDataSource; +import org.postgresql.jdbc.AutoSave; +import org.postgresql.test.TestUtil; +import org.postgresql.util.URLCoder; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.beans.BeanInfo; +import java.beans.Introspector; +import java.beans.PropertyDescriptor; +import java.sql.DriverPropertyInfo; +import java.util.ArrayList; +import java.util.Locale; +import java.util.Map; +import java.util.Properties; +import java.util.TreeMap; + +class PGPropertyTest { + + /** + * Some tests modify the "ssl" system property. To not disturb other test cases in the suite store + * the value of the property and restore it. 
+ */ + private String bootSSLPropertyValue; + + @BeforeEach + void setUp() { + bootSSLPropertyValue = System.getProperty("ssl"); + } + + @AfterEach + void tearDown() { + if (bootSSLPropertyValue == null) { + System.getProperties().remove("ssl"); + } else { + System.setProperty("ssl", bootSSLPropertyValue); + } + } + + /** + * Test that we can get and set all default values and all choices (if any). + */ + @Test + void getSetAllProperties() { + Properties properties = new Properties(); + for (PGProperty property : PGProperty.values()) { + String value = property.getOrDefault(properties); + assertEquals(property.getDefaultValue(), value); + + property.set(properties, value); + assertEquals(value, property.getOrDefault(properties)); + + if (property.getChoices() != null && property.getChoices().length > 0) { + for (String choice : property.getChoices()) { + property.set(properties, choice); + assertEquals(choice, property.getOrDefault(properties)); + } + } + } + } + + @Test + void sortOrder() { + String prevName = null; + for (PGProperty property : PGProperty.values()) { + String name = property.name(); + if (prevName != null) { + assertTrue(name.compareTo(prevName) > 0, "PGProperty names should be sorted in ascending order: " + name + " < " + prevName); + } + prevName = name; + } + } + + /** + * Test that the enum constant is common with the underlying property name. 
+ */ + @Test + void enumConstantNaming() { + for (PGProperty property : PGProperty.values()) { + String enumName = property.name().replaceAll("_", ""); + assertEquals(property.getName().toLowerCase(Locale.ROOT), enumName.toLowerCase(Locale.ROOT), "Naming of the enum constant [" + property.name() + + "] should follow the naming of its underlying property [" + property.getName() + + "] in PGProperty"); + } + } + + @Test + void driverGetPropertyInfo() { + Driver driver = new Driver(); + DriverPropertyInfo[] infos = driver.getPropertyInfo( + "jdbc:postgresql://localhost/test?user=fred&password=secret&ssl=true", + // this is the example we give in docs + new Properties()); + for (DriverPropertyInfo info : infos) { + if ("user".equals(info.name)) { + assertEquals("fred", info.value); + } else if ("password".equals(info.name)) { + assertEquals("secret", info.value); + } else if ("ssl".equals(info.name)) { + assertEquals("true", info.value); + } + } + } + + /** + * Test if the datasource has getter and setter for all properties. 
+ */ + @Test + void dataSourceProperties() throws Exception { + PGSimpleDataSource dataSource = new PGSimpleDataSource(); + BeanInfo info = Introspector.getBeanInfo(dataSource.getClass()); + + // index PropertyDescriptors by name + Map propertyDescriptors = + new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + for (PropertyDescriptor propertyDescriptor : info.getPropertyDescriptors()) { + propertyDescriptors.put(propertyDescriptor.getName(), propertyDescriptor); + } + + // test for the existence of all read methods (getXXX/isXXX) and write methods (setXXX) for all + // known properties + for (PGProperty property : PGProperty.values()) { + if (!property.getName().startsWith("PG") && property != PGProperty.SERVICE) { + assertTrue(propertyDescriptors.containsKey(property.getName()), "Missing getter/setter for property [" + property.getName() + "] in [" + + BaseDataSource.class + "]"); + + assertNotNull(propertyDescriptors.get(property.getName()).getReadMethod(), + "No getter for property [" + property.getName() + "] in [" + + BaseDataSource.class + "]"); + + assertNotNull(propertyDescriptors.get(property.getName()).getWriteMethod(), + "No setter for property [" + property.getName() + "] in [" + + BaseDataSource.class + "]"); + } + } + + // test readability/writability of default value + for (PGProperty property : PGProperty.values()) { + if (!property.getName().startsWith("PG") && property != PGProperty.SERVICE) { + Object propertyValue = + propertyDescriptors.get(property.getName()).getReadMethod().invoke(dataSource); + propertyDescriptors.get(property.getName()).getWriteMethod().invoke(dataSource, + propertyValue); + } + } + } + + /** + * Test to make sure that setURL doesn't overwrite autosave + * more should be put in but this scratches the current itch + */ + @Test + void overWriteDSProperties() throws Exception { + PGSimpleDataSource dataSource = new PGSimpleDataSource(); + dataSource.setAutosave(AutoSave.CONSERVATIVE); + 
dataSource.setURL("jdbc:postgresql://localhost:5432/postgres"); + assertSame(AutoSave.CONSERVATIVE, dataSource.getAutosave()); + } + + /** + * Test that {@link PGProperty#isPresent(Properties)} returns a correct result in all cases. + */ + @Test + void isPresentWithParseURLResult() throws Exception { + Properties givenProperties = new Properties(); + givenProperties.setProperty("user", TestUtil.getUser()); + givenProperties.setProperty("password", TestUtil.getPassword()); + + Properties sysProperties = System.getProperties(); + sysProperties.remove("ssl"); + System.setProperties(sysProperties); + Properties parsedProperties = Driver.parseURL(TestUtil.getURL(), givenProperties); + assertFalse(PGProperty.SSL.isPresent(parsedProperties), + "SSL property should not be present"); + + System.setProperty("ssl", "true"); + givenProperties.setProperty("ssl", "true"); + parsedProperties = Driver.parseURL(TestUtil.getURL(), givenProperties); + assertTrue(PGProperty.SSL.isPresent(parsedProperties), "SSL property should be present"); + + givenProperties.setProperty("ssl", "anotherValue"); + parsedProperties = Driver.parseURL(TestUtil.getURL(), givenProperties); + assertTrue(PGProperty.SSL.isPresent(parsedProperties), "SSL property should be present"); + + parsedProperties = Driver.parseURL(TestUtil.getURL() + "&ssl=true", null); + assertTrue(PGProperty.SSL.isPresent(parsedProperties), "SSL property should be present"); + } + + /** + * Check whether the isPresent method really works. 
+ */ + @Test + void presenceCheck() { + Properties empty = new Properties(); + Object value = PGProperty.READ_ONLY.getOrDefault(empty); + assertNotNull(value); + assertFalse(PGProperty.READ_ONLY.isPresent(empty)); + } + + @Test + void encodedUrlValues() { + String databaseName = "d&a%ta+base"; + String userName = "&u%ser"; + String password = "p%a&s^s#w!o@r*"; + String url = "jdbc:postgresql://" + + "localhost" + ":" + 5432 + "/" + + URLCoder.encode(databaseName) + + "?user=" + URLCoder.encode(userName) + + "&password=" + URLCoder.encode(password); + Properties parsed = Driver.parseURL(url, new Properties()); + assertEquals(databaseName, PGProperty.PG_DBNAME.getOrDefault(parsed), "database"); + assertEquals(userName, PGProperty.USER.getOrDefault(parsed), "user"); + assertEquals(password, PGProperty.PASSWORD.getOrDefault(parsed), "password"); + } + + @Test + void lowerCamelCase() { + // These are legacy properties excluded for backward compatibility. + ArrayList excluded = new ArrayList<>(); + excluded.add("LOG_LEVEL"); // Remove with PR #722 + excluded.add("PREPARED_STATEMENT_CACHE_SIZE_MIB"); // preparedStatementCacheSizeMi[B] + excluded.add("DATABASE_METADATA_CACHE_FIELDS_MIB"); // databaseMetadataCacheFieldsMi[B] + excluded.add("STRING_TYPE"); // string[t]ype + excluded.add("SSL_MODE"); // ssl[m]ode + excluded.add("SSL_FACTORY"); // ssl[f]actory + excluded.add("SSL_FACTORY_ARG"); // ssl[f]actory[a]rg + excluded.add("SSL_HOSTNAME_VERIFIER"); // ssl[h]ostname[v]erifier + excluded.add("SSL_CERT"); // ssl[c]ert + excluded.add("SSL_KEY"); // ssl[k]ey + excluded.add("SSL_ROOT_CERT"); // ssl[r]oot[c]ert + excluded.add("SSL_PASSWORD"); // ssl[p]assword + excluded.add("SSL_PASSWORD_CALLBACK"); // ssl[p]assword[c]allback + excluded.add("APPLICATION_NAME"); // [A]pplicationName + excluded.add("GSS_LIB"); // gss[l]ib + excluded.add("REWRITE_BATCHED_INSERTS"); // re[W]riteBatchedInserts + + for (PGProperty property : PGProperty.values()) { + if 
(!property.name().startsWith("PG")) { // Ignore all properties that start with PG + String[] words = property.name().split("_"); + if (words.length == 1) { + assertEquals(words[0].toLowerCase(Locale.ROOT), property.getName()); + } else { + if (!excluded.contains(property.name())) { + String word = ""; + for (int i = 0; i < words.length; i++) { + if (i == 0) { + word = words[i].toLowerCase(Locale.ROOT); + } else { + word += words[i].substring(0, 1).toUpperCase(Locale.ROOT) + words[i].substring(1).toLowerCase(Locale.ROOT); + } + } + assertEquals(word, property.getName()); + } + } + } + } + } + + @Test + void encodedUrlValuesFromDataSource() { + String databaseName = "d&a%ta+base"; + String userName = "&u%ser"; + String password = "p%a&s^s#w!o@r*"; + String applicationName = "Laurel&Hardy=Best?Yes"; + PGSimpleDataSource dataSource = new PGSimpleDataSource(); + + dataSource.setDatabaseName(databaseName); + dataSource.setUser(userName); + dataSource.setPassword(password); + dataSource.setApplicationName(applicationName); + + Properties parsed = Driver.parseURL(dataSource.getURL(), new Properties()); + assertEquals(databaseName, PGProperty.PG_DBNAME.getOrDefault(parsed), "database"); + // datasources do not pass username and password as URL parameters + assertFalse(PGProperty.USER.isPresent(parsed), "user"); + assertFalse(PGProperty.PASSWORD.isPresent(parsed), "password"); + assertEquals(applicationName, PGProperty.APPLICATION_NAME.getOrDefault(parsed), "APPLICATION_NAME"); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimeTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimeTest.java new file mode 100644 index 0000000..3463f21 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimeTest.java @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import org.postgresql.test.TestUtil; +import org.postgresql.util.PGInterval; +import org.postgresql.util.PGTime; + +import org.junit.Test; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.text.SimpleDateFormat; +import java.util.Calendar; +import java.util.TimeZone; + +/** + * Tests {@link PGTime} in various scenarios including setTime, setObject for both time with + * time zone and time without time zone data types. + */ +public class PGTimeTest extends BaseTest4 { + /** + * The name of the test table. + */ + private static final String TEST_TABLE = "testtime"; + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTempTable(con, TEST_TABLE, "tm time, tz time with time zone"); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, TEST_TABLE); + super.tearDown(); + } + + /** + * Tests that adding a PGInterval object to a PGTime object when + * performed as a casted string and object. + * + * @throws SQLException if a JDBC or database problem occurs. 
+ */ + @Test + public void testTimeWithInterval() throws SQLException { + assumeTrue(TestUtil.haveIntegerDateTimes(con)); + + Calendar cal = Calendar.getInstance(); + cal.set(1970, Calendar.JANUARY, 1); + + final long now = cal.getTimeInMillis(); + verifyTimeWithInterval(new PGTime(now), new PGInterval(0, 0, 0, 1, 2, 3.14), true); + verifyTimeWithInterval(new PGTime(now), new PGInterval(0, 0, 0, 1, 2, 3.14), false); + + verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))), + new PGInterval(0, 0, 0, 1, 2, 3.14), true); + verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))), + new PGInterval(0, 0, 0, 1, 2, 3.14), false); + + verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))), + new PGInterval(0, 0, 0, 1, 2, 3.456), true); + verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))), + new PGInterval(0, 0, 0, 1, 2, 3.456), false); + } + + /** + * Verifies that adding the given PGInterval object to a PGTime produces + * the correct results when performed as a casted string and object. + * + * @param time the time to test. + * @param interval the time interval. + * @param useSetObject true if the setObject method should be used instead of + * setTime. + * @throws SQLException if a JDBC or database problem occurs. + */ + private void verifyTimeWithInterval(PGTime time, PGInterval interval, boolean useSetObject) + throws SQLException { + // Construct the SQL query. + String sql; + if (time.getCalendar() != null) { + sql = "SELECT ?::time with time zone + ?"; + } else { + sql = "SELECT ?::time + ?"; + } + + SimpleDateFormat sdf = createSimpleDateFormat(time); + + // Execute a query using a casted time string + PGInterval. 
+ PreparedStatement stmt = con.prepareStatement(sql); + stmt.setString(1, sdf.format(time)); + stmt.setObject(2, interval); + + ResultSet rs = stmt.executeQuery(); + assertTrue(rs.next()); + + Time result1 = rs.getTime(1); + // System.out.println(stmt + " = " + sdf.format(result1)); + stmt.close(); + + // Execute a query using with PGTime + PGInterval. + stmt = con.prepareStatement("SELECT ? + ?"); + if (useSetObject) { + stmt.setObject(1, time); + } else { + stmt.setTime(1, time); + } + stmt.setObject(2, interval); + + rs = stmt.executeQuery(); + assertTrue(rs.next()); + + Time result2 = rs.getTime(1); + // System.out.println(stmt + " = " + sdf.format(result2)); + assertEquals(result1, result2); + stmt.close(); + } + + /** + * Tests inserting and selecting PGTime objects with time and time + * with time zone columns. + * + * @throws SQLException if a JDBC or database problem occurs. + */ + @Test + public void testTimeInsertAndSelect() throws SQLException { + Calendar cal = Calendar.getInstance(); + cal.set(1970, Calendar.JANUARY, 1); + + final long now = cal.getTimeInMillis(); + verifyInsertAndSelect(new PGTime(now), true); + verifyInsertAndSelect(new PGTime(now), false); + + verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))), true); + verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))), + false); + + verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))), + true); + verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))), + false); + } + + /** + * Verifies that inserting the given PGTime as a time string and an object produces + * the same results. + * + * @param time the time to test. + * @param useSetObject true if the setObject method should be used instead of + * setTime. + * @throws SQLException if a JDBC or database problem occurs. 
+ */ + private void verifyInsertAndSelect(PGTime time, boolean useSetObject) throws SQLException { + // Construct the INSERT statement of a casted time string. + String sql; + if (time.getCalendar() != null) { + sql = + "INSERT INTO " + TEST_TABLE + " VALUES (?::time with time zone, ?::time with time zone)"; + } else { + sql = "INSERT INTO " + TEST_TABLE + " VALUES (?::time, ?::time)"; + } + + SimpleDateFormat sdf = createSimpleDateFormat(time); + + // Insert the times as casted strings. + PreparedStatement pstmt1 = con.prepareStatement(sql); + pstmt1.setString(1, sdf.format(time)); + pstmt1.setString(2, sdf.format(time)); + assertEquals(1, pstmt1.executeUpdate()); + + // Insert the times as PGTime objects. + PreparedStatement pstmt2 = con.prepareStatement("INSERT INTO " + TEST_TABLE + " VALUES (?, ?)"); + + if (useSetObject) { + pstmt2.setObject(1, time); + pstmt2.setObject(2, time); + } else { + pstmt2.setTime(1, time); + pstmt2.setTime(2, time); + } + + assertEquals(1, pstmt2.executeUpdate()); + + // Query the values back out. + Statement stmt = con.createStatement(); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL(TEST_TABLE, "tm,tz")); + assertNotNull(rs); + + // Read the casted string values. + assertTrue(rs.next()); + + Time tm1 = rs.getTime(1); + Time tz1 = rs.getTime(2); + + // System.out.println(pstmt1 + " -> " + tm1 + ", " + sdf.format(tz1)); + + // Read the PGTime values. + assertTrue(rs.next()); + + Time tm2 = rs.getTime(1); + Time tz2 = rs.getTime(2); + + // System.out.println(pstmt2 + " -> " + tm2 + ", " + sdf.format(tz2)); + + // Verify that the first and second versions match. + assertEquals(tm1, tm2); + assertEquals(tz1, tz2); + + // Clean up. + assertEquals(2, stmt.executeUpdate("DELETE FROM " + TEST_TABLE)); + stmt.close(); + pstmt2.close(); + pstmt1.close(); + } + + /** + * Creates a {@code SimpleDateFormat} that is appropriate for the given time. + * + * @param time the time object. + * @return the new format instance. 
+ */ + private SimpleDateFormat createSimpleDateFormat(PGTime time) { + String pattern = "HH:mm:ss.SSS"; + if (time.getCalendar() != null) { + pattern += " Z"; + } + + SimpleDateFormat sdf = new SimpleDateFormat(pattern); + if (time.getCalendar() != null) { + sdf.setTimeZone(time.getCalendar().getTimeZone()); + } + return sdf; + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimestampTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimestampTest.java new file mode 100644 index 0000000..1cf2c95 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimestampTest.java @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +import org.postgresql.test.TestUtil; +import org.postgresql.util.PGInterval; +import org.postgresql.util.PGTimestamp; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.text.SimpleDateFormat; +import java.util.Calendar; +import java.util.TimeZone; + +/** + * Tests {@link PGTimestamp} in various scenarios including setTimestamp, setObject for both + * {@code timestamp with time zone} and {@code timestamp without time zone} data types. + */ +class PGTimestampTest { + /** + * The name of the test table. 
+ */ + private static final String TEST_TABLE = "testtimestamp"; + + private Connection con; + + @BeforeEach + void setUp() throws Exception { + con = TestUtil.openDB(); + TestUtil.createTable(con, TEST_TABLE, "ts timestamp, tz timestamp with time zone"); + } + + @AfterEach + void tearDown() throws Exception { + TestUtil.dropTable(con, TEST_TABLE); + TestUtil.closeDB(con); + } + + /** + * Tests {@link PGTimestamp} with {@link PGInterval}. + * + * @throws SQLException if a JDBC or database problem occurs. + */ + @Test + void timestampWithInterval() throws SQLException { + assumeTrue(TestUtil.haveIntegerDateTimes(con)); + PGTimestamp timestamp = new PGTimestamp(System.currentTimeMillis()); + PGInterval interval = new PGInterval(0, 0, 0, 1, 2, 3.14); + verifyTimestampWithInterval(timestamp, interval, true); + verifyTimestampWithInterval(timestamp, interval, false); + + timestamp = new PGTimestamp(System.currentTimeMillis(), + Calendar.getInstance(TimeZone.getTimeZone("GMT"))); + interval = new PGInterval(0, 0, 0, 1, 2, 3.14); + verifyTimestampWithInterval(timestamp, interval, true); + verifyTimestampWithInterval(timestamp, interval, false); + + timestamp = new PGTimestamp(System.currentTimeMillis(), + Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))); + interval = new PGInterval(-3, -2, -1, 1, 2, 3.14); + verifyTimestampWithInterval(timestamp, interval, true); + verifyTimestampWithInterval(timestamp, interval, false); + } + + /** + * Executes a test with the given timestamp and interval. + * + * @param timestamp the timestamp under test. + * @param interval the interval. + * @param useSetObject indicates if setObject should be used instead of setTimestamp. + * @throws SQLException if a JDBC or database problem occurs. + */ + private void verifyTimestampWithInterval(PGTimestamp timestamp, PGInterval interval, + boolean useSetObject) throws SQLException { + // Construct the SQL query. 
+ String sql; + if (timestamp.getCalendar() != null) { + sql = "SELECT ?::timestamp with time zone + ?"; + } else { + sql = "SELECT ?::timestamp + ?"; + } + + // Execute a query using a casted timestamp string + PGInterval. + PreparedStatement ps = con.prepareStatement(sql); + SimpleDateFormat sdf = createSimpleDateFormat(timestamp); + final String timestampString = sdf.format(timestamp); + ps.setString(1, timestampString); + ps.setObject(2, interval); + ResultSet rs = ps.executeQuery(); + assertNotNull(rs); + + assertTrue(rs.next()); + Timestamp result1 = rs.getTimestamp(1); + assertNotNull(result1); + ps.close(); + + // Execute a query as PGTimestamp + PGInterval. + ps = con.prepareStatement("SELECT ? + ?"); + if (useSetObject) { + ps.setObject(1, timestamp); + } else { + ps.setTimestamp(1, timestamp); + } + ps.setObject(2, interval); + rs = ps.executeQuery(); + + // Verify that the query produces the same results. + assertTrue(rs.next()); + Timestamp result2 = rs.getTimestamp(1); + assertEquals(result1, result2); + ps.close(); + } + + /** + * Tests inserting and selecting {@code PGTimestamp} objects with {@code timestamp} and + * {@code timestamp with time zone} columns. + * + * @throws SQLException if a JDBC or database problem occurs. 
+ */ + @Test + void timeInsertAndSelect() throws SQLException { + final long now = System.currentTimeMillis(); + verifyInsertAndSelect(new PGTimestamp(now), true); + verifyInsertAndSelect(new PGTimestamp(now), false); + + verifyInsertAndSelect(new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))), + true); + verifyInsertAndSelect(new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))), + false); + + verifyInsertAndSelect( + new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))), true); + verifyInsertAndSelect( + new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))), false); + } + + /** + * Verifies that inserting the given {@code PGTimestamp} as a timestamp string and an object + * produces the same results. + * + * @param timestamp the timestamp to test. + * @param useSetObject {@code true} if the setObject method should be used instead of + * setTimestamp. + * @throws SQLException if a JDBC or database problem occurs. + */ + private void verifyInsertAndSelect(PGTimestamp timestamp, boolean useSetObject) + throws SQLException { + // Construct the INSERT statement of a casted timestamp string. + String sql; + if (timestamp.getCalendar() != null) { + sql = "INSERT INTO " + TEST_TABLE + + " VALUES (?::timestamp with time zone, ?::timestamp with time zone)"; + } else { + sql = "INSERT INTO " + TEST_TABLE + " VALUES (?::timestamp, ?::timestamp)"; + } + + SimpleDateFormat sdf = createSimpleDateFormat(timestamp); + + // Insert the timestamps as casted strings. + PreparedStatement pstmt1 = con.prepareStatement(sql); + pstmt1.setString(1, sdf.format(timestamp)); + pstmt1.setString(2, sdf.format(timestamp)); + assertEquals(1, pstmt1.executeUpdate()); + + // Insert the timestamps as PGTimestamp objects. 
+ PreparedStatement pstmt2 = con.prepareStatement("INSERT INTO " + TEST_TABLE + " VALUES (?, ?)"); + + if (useSetObject) { + pstmt2.setObject(1, timestamp); + pstmt2.setObject(2, timestamp); + } else { + pstmt2.setTimestamp(1, timestamp); + pstmt2.setTimestamp(2, timestamp); + } + + assertEquals(1, pstmt2.executeUpdate()); + + // Query the values back out. + Statement stmt = con.createStatement(); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL(TEST_TABLE, "ts,tz")); + assertNotNull(rs); + + // Read the casted string values. + assertTrue(rs.next()); + + Timestamp ts1 = rs.getTimestamp(1); + Timestamp tz1 = rs.getTimestamp(2); + + // System.out.println(pstmt1 + " -> " + ts1 + ", " + sdf.format(tz1)); + + // Read the PGTimestamp values. + assertTrue(rs.next()); + + Timestamp ts2 = rs.getTimestamp(1); + Timestamp tz2 = rs.getTimestamp(2); + + // System.out.println(pstmt2 + " -> " + ts2 + ", " + sdf.format(tz2)); + + // Verify that the first and second versions match. + assertEquals(ts1, ts2); + assertEquals(tz1, tz2); + + // Clean up. + assertEquals(2, stmt.executeUpdate("DELETE FROM " + TEST_TABLE)); + stmt.close(); + pstmt2.close(); + pstmt1.close(); + } + + /** + * Creates a {@code SimpleDateFormat} that is appropriate for the given timestamp. + * + * @param timestamp the timestamp object. + * @return the new format instance. 
+ */ + private SimpleDateFormat createSimpleDateFormat(PGTimestamp timestamp) { + String pattern = "yyyy-MM-dd HH:mm:ss.SSS"; + if (timestamp.getCalendar() != null) { + pattern += " Z"; + } + + SimpleDateFormat sdf = new SimpleDateFormat(pattern); + if (timestamp.getCalendar() != null) { + sdf.setTimeZone(timestamp.getCalendar().getTimeZone()); + } + return sdf; + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ParameterStatusTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ParameterStatusTest.java new file mode 100644 index 0000000..06902c8 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ParameterStatusTest.java @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2019, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.PGConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; + +import org.hamcrest.MatcherAssert; +import org.hamcrest.core.StringStartsWith; +import org.junit.Assert; +import org.junit.Test; + +import java.sql.Statement; +import java.util.Map; +import java.util.TimeZone; +import java.util.logging.Logger; + +/** + * Test JDBC extension API for server reported parameter status messages. + * + *

This test covers client interface for server ParameterStatus messages + * (GUC_REPORT) parameters via PGConnection.getParameterStatuses() and + * PGConnection.getParameterStatus().

+ */ +public class ParameterStatusTest extends BaseTest4 { + + private final TimeZone tzPlus0800 = TimeZone.getTimeZone("GMT+8:00"); + private final Logger logger = Logger.getLogger(ParameterStatusTest.class.getName()); + + @Override + public void tearDown() { + TimeZone.setDefault(null); + } + + @Test + public void expectedInitialParameters() throws Exception { + TimeZone.setDefault(tzPlus0800); + con = TestUtil.openDB(); + + Map params = ((PGConnection) con).getParameterStatuses(); + + // PgJDBC forces the following parameters + Assert.assertEquals("UTF8", params.get("client_encoding")); + Assert.assertNotNull(params.get("DateStyle")); + MatcherAssert.assertThat(params.get("DateStyle"), StringStartsWith.startsWith("ISO")); + + // PgJDBC sets TimeZone via Java's TimeZone.getDefault() + // Pg reports POSIX timezones which are negated, so: + Assert.assertEquals("GMT-08:00", params.get("TimeZone")); + + // Must be reported. All these exist in 8.2 or above, and we don't bother + // with test coverage older than that. 
+ Assert.assertNotNull(params.get("integer_datetimes")); + Assert.assertNotNull(params.get("is_superuser")); + Assert.assertNotNull(params.get("server_encoding")); + Assert.assertNotNull(params.get("server_version")); + Assert.assertNotNull(params.get("session_authorization")); + Assert.assertNotNull(params.get("standard_conforming_strings")); + + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) { + Assert.assertNotNull(params.get("IntervalStyle")); + } else { + Assert.assertNull(params.get("IntervalStyle")); + } + + // TestUtil forces "ApplicationName=Driver Tests" + // if application_name is supported (9.0 or newer) + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + Assert.assertEquals("Driver Tests", params.get("application_name")); + } else { + Assert.assertNull(params.get("application_name")); + } + + // Not reported + Assert.assertNull(params.get("nonexistent")); + Assert.assertNull(params.get("enable_hashjoin")); + + TestUtil.closeDB(con); + } + + @Test + public void reportUpdatedParameters() throws Exception { + con = TestUtil.openDB(); + + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + /* This test uses application_name which was added in 9.0 */ + return; + } + + con.setAutoCommit(false); + Statement stmt = con.createStatement(); + + stmt.executeUpdate("SET application_name = 'pgjdbc_ParameterStatusTest2';"); + stmt.close(); + + // Parameter status should be reported before the ReadyForQuery so we will + // have already processed it + Assert.assertEquals("pgjdbc_ParameterStatusTest2", ((PGConnection) con).getParameterStatus("application_name")); + + TestUtil.closeDB(con); + } + + // Run a txn-level SET then a txn-level SET LOCAL so we can make sure we keep + // track of the right GUC value at each point. 
+ private void transactionalParametersCommon() throws Exception { + Statement stmt = con.createStatement(); + + // Initial value assigned by TestUtil + Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name")); + + // PgJDBC begins an explicit txn here due to autocommit=off so the effect + // should be lost on rollback but retained on commit per the docs. + stmt.executeUpdate("SET application_name = 'pgjdbc_ParameterStatusTestTxn';"); + Assert.assertEquals("pgjdbc_ParameterStatusTestTxn", ((PGConnection) con).getParameterStatus("application_name")); + + // SET LOCAL is always txn scoped so the effect here will always be + // unwound on txn end. + stmt.executeUpdate("SET LOCAL application_name = 'pgjdbc_ParameterStatusTestLocal';"); + Assert.assertEquals("pgjdbc_ParameterStatusTestLocal", ((PGConnection) con).getParameterStatus("application_name")); + + stmt.close(); + } + + @Test + public void transactionalParametersRollback() throws Exception { + con = TestUtil.openDB(); + + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + /* This test uses application_name which was added in 9.0 */ + return; + } + + con.setAutoCommit(false); + + transactionalParametersCommon(); + + // SET unwinds on ROLLBACK + con.rollback(); + + Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name")); + + TestUtil.closeDB(con); + } + + @Test + public void transactionalParametersCommit() throws Exception { + con = TestUtil.openDB(); + + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + /* This test uses application_name which was added in 9.0 */ + return; + } + + con.setAutoCommit(false); + + transactionalParametersCommon(); + + // SET is retained on commit but SET LOCAL is unwound + con.commit(); + + Assert.assertEquals("pgjdbc_ParameterStatusTestTxn", ((PGConnection) con).getParameterStatus("application_name")); + + TestUtil.closeDB(con); + } + + @Test + public void 
transactionalParametersAutocommit() throws Exception { + con = TestUtil.openDB(); + + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + /* This test uses application_name which was added in 9.0 */ + return; + } + + con.setAutoCommit(true); + Statement stmt = con.createStatement(); + + // A SET LOCAL in autocommit should have no visible effect as we report the reset value too + Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name")); + stmt.executeUpdate("SET LOCAL application_name = 'pgjdbc_ParameterStatusTestLocal';"); + Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name")); + + stmt.close(); + TestUtil.closeDB(con); + } + + @Test(expected = UnsupportedOperationException.class) + public void parameterMapReadOnly() throws Exception { + try { + con = TestUtil.openDB(); + Map params = ((PGConnection) con).getParameterStatuses(); + params.put("DateStyle", "invalid"); + Assert.fail("Attempt to write to exposed parameters map must throw"); + } finally { + TestUtil.closeDB(con); + } + } + + @Test + public void parameterMapIsView() throws Exception { + con = TestUtil.openDB(); + + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + /* This test uses application_name which was added in 9.0 */ + return; + } + + Map params = ((PGConnection) con).getParameterStatuses(); + + Statement stmt = con.createStatement(); + + Assert.assertEquals("Driver Tests", params.get("application_name")); + stmt.executeUpdate("SET application_name = 'pgjdbc_paramstatus_view';"); + Assert.assertEquals("pgjdbc_paramstatus_view", params.get("application_name")); + + stmt.close(); + TestUtil.closeDB(con); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PreparedStatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PreparedStatementTest.java new file mode 100644 index 0000000..d43bf05 --- /dev/null +++ 
b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PreparedStatementTest.java @@ -0,0 +1,1601 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.postgresql.PGStatement; +import org.postgresql.core.ServerVersion; +import org.postgresql.jdbc.PgStatement; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; +import org.postgresql.test.util.BrokenInputStream; + +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.InputStream; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.math.RoundingMode; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collection; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.LogRecord; +import java.util.logging.Logger; + +@RunWith(Parameterized.class) +public class PreparedStatementTest extends BaseTest4 { + + private static final int NUMERIC_MAX_PRECISION = 1000; + private static final int NUMERIC_MAX_DISPLAY_SCALE = NUMERIC_MAX_PRECISION; + + public PreparedStatementTest(BinaryMode binaryMode) { + 
setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "streamtable", "bin bytea, str text"); + TestUtil.createTable(con, "texttable", "ch char(3), te text, vc varchar(3)"); + TestUtil.createTable(con, "intervaltable", "i interval"); + TestUtil.createTable(con, "inttable", "a int"); + TestUtil.createTable(con, "bool_tab", "bool_val boolean, null_val boolean, tf_val boolean, " + + "truefalse_val boolean, yn_val boolean, yesno_val boolean, " + + "onoff_val boolean, onezero_val boolean"); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "streamtable"); + TestUtil.dropTable(con, "texttable"); + TestUtil.dropTable(con, "intervaltable"); + TestUtil.dropTable(con, "inttable"); + TestUtil.dropTable(con, "bool_tab"); + super.tearDown(); + } + + private int getNumberOfServerPreparedStatements(String sql) + throws SQLException { + PreparedStatement pstmt = null; + ResultSet rs = null; + try { + pstmt = con.prepareStatement( + "select count(*) from pg_prepared_statements where statement = ?"); + pstmt.setString(1, sql); + rs = pstmt.executeQuery(); + rs.next(); + return rs.getInt(1); + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(pstmt); + } + } + + @Test + public void testSetBinaryStream() throws SQLException { + assumeByteaSupported(); + ByteArrayInputStream bais; + byte[] buf = new byte[10]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) i; + } + + bais = null; + doSetBinaryStream(bais, 0); + + bais = new ByteArrayInputStream(new byte[0]); + doSetBinaryStream(bais, 0); + + bais = new ByteArrayInputStream(buf); + doSetBinaryStream(bais, 0); + + bais = new ByteArrayInputStream(buf); + 
doSetBinaryStream(bais, 10); + } + + @Test + public void testSetAsciiStream() throws Exception { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PrintWriter pw = new PrintWriter(new OutputStreamWriter(baos, "ASCII")); + pw.println("Hello"); + pw.flush(); + + ByteArrayInputStream bais; + + bais = new ByteArrayInputStream(baos.toByteArray()); + doSetAsciiStream(bais, 0); + + bais = new ByteArrayInputStream(baos.toByteArray()); + doSetAsciiStream(bais, 6); + + bais = new ByteArrayInputStream(baos.toByteArray()); + doSetAsciiStream(bais, 100); + } + + @Test + public void testExecuteStringOnPreparedStatement() throws Exception { + PreparedStatement pstmt = con.prepareStatement("SELECT 1"); + + try { + pstmt.executeQuery("SELECT 2"); + fail("Expected an exception when executing a new SQL query on a prepared statement"); + } catch (SQLException e) { + } + + try { + pstmt.executeUpdate("UPDATE streamtable SET bin=bin"); + fail("Expected an exception when executing a new SQL update on a prepared statement"); + } catch (SQLException e) { + } + + try { + pstmt.execute("UPDATE streamtable SET bin=bin"); + fail("Expected an exception when executing a new SQL statement on a prepared statement"); + } catch (SQLException e) { + } + } + + @Test + public void testBinaryStreamErrorsRestartable() throws SQLException { + byte[] buf = new byte[10]; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte) i; + } + + // InputStream is shorter than the length argument implies. + InputStream is = new ByteArrayInputStream(buf); + runBrokenStream(is, buf.length + 1); + + // InputStream throws an Exception during read. + is = new BrokenInputStream(new ByteArrayInputStream(buf), buf.length / 2); + runBrokenStream(is, buf.length); + + // Invalid length < 0. + is = new ByteArrayInputStream(buf); + runBrokenStream(is, -1); + + // Total Bind message length too long. 
+ is = new ByteArrayInputStream(buf); + runBrokenStream(is, Integer.MAX_VALUE); + } + + private void runBrokenStream(InputStream is, int length) throws SQLException { + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement("INSERT INTO streamtable (bin,str) VALUES (?,?)"); + pstmt.setBinaryStream(1, is, length); + pstmt.setString(2, "Other"); + pstmt.executeUpdate(); + fail("This isn't supposed to work."); + } catch (SQLException sqle) { + // don't need to rollback because we're in autocommit mode + pstmt.close(); + + // verify the connection is still valid and the row didn't go in. + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM streamtable"); + assertTrue(rs.next()); + assertEquals(0, rs.getInt(1)); + rs.close(); + stmt.close(); + } + } + + private void doSetBinaryStream(ByteArrayInputStream bais, int length) throws SQLException { + PreparedStatement pstmt = + con.prepareStatement("INSERT INTO streamtable (bin,str) VALUES (?,?)"); + pstmt.setBinaryStream(1, bais, length); + pstmt.setString(2, null); + pstmt.executeUpdate(); + pstmt.close(); + } + + private void doSetAsciiStream(InputStream is, int length) throws SQLException { + PreparedStatement pstmt = + con.prepareStatement("INSERT INTO streamtable (bin,str) VALUES (?,?)"); + pstmt.setBytes(1, null); + pstmt.setAsciiStream(2, is, length); + pstmt.executeUpdate(); + pstmt.close(); + } + + @Test + public void testTrailingSpaces() throws SQLException { + PreparedStatement pstmt = + con.prepareStatement("INSERT INTO texttable (ch, te, vc) VALUES (?, ?, ?) "); + String str = "a "; + pstmt.setString(1, str); + pstmt.setString(2, str); + pstmt.setString(3, str); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("SELECT ch, te, vc FROM texttable WHERE ch=? AND te=? 
AND vc=?"); + pstmt.setString(1, str); + pstmt.setString(2, str); + pstmt.setString(3, str); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(str, rs.getString(1)); + assertEquals(str, rs.getString(2)); + assertEquals(str, rs.getString(3)); + rs.close(); + pstmt.close(); + } + + @Test + public void testBinds() throws SQLException { + // braces around (42) are required to puzzle the parser + String query = "INSERT INTO inttable(a) VALUES (?);SELECT (42)"; + PreparedStatement ps = con.prepareStatement(query); + ps.setInt(1, 100500); + ps.execute(); + ResultSet rs = ps.getResultSet(); + Assert.assertNull("insert produces no results ==> getResultSet should be null", rs); + Assert.assertTrue("There are two statements => getMoreResults should be true", ps.getMoreResults()); + rs = ps.getResultSet(); + Assert.assertNotNull("select produces results ==> getResultSet should be not null", rs); + Assert.assertTrue("select produces 1 row ==> rs.next should be true", rs.next()); + Assert.assertEquals("second result of query " + query, 42, rs.getInt(1)); + + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(ps); + } + + @Test + public void testSetNull() throws SQLException { + // valid: fully qualified type to setNull() + PreparedStatement pstmt = con.prepareStatement("INSERT INTO texttable (te) VALUES (?)"); + pstmt.setNull(1, Types.VARCHAR); + pstmt.executeUpdate(); + + // valid: fully qualified type to setObject() + pstmt.setObject(1, null, Types.VARCHAR); + pstmt.executeUpdate(); + + // valid: setObject() with partial type info and a typed "null object instance" + org.postgresql.util.PGobject dummy = new org.postgresql.util.PGobject(); + dummy.setType("text"); + dummy.setValue(null); + pstmt.setObject(1, dummy, Types.OTHER); + pstmt.executeUpdate(); + + // setObject() with no type info + pstmt.setObject(1, null); + pstmt.executeUpdate(); + + // setObject() with insufficient type info + pstmt.setObject(1, null, Types.OTHER); + 
pstmt.executeUpdate(); + + // setNull() with insufficient type info + pstmt.setNull(1, Types.OTHER); + pstmt.executeUpdate(); + + pstmt.close(); + + assumeMinimumServerVersion(ServerVersion.v8_3); + pstmt = con.prepareStatement("select 'ok' where ?=? or (? is null) "); + pstmt.setObject(1, UUID.randomUUID(), Types.OTHER); + pstmt.setNull(2, Types.OTHER, "uuid"); + pstmt.setNull(3, Types.OTHER, "uuid"); + ResultSet rs = pstmt.executeQuery(); + + assertTrue(rs.next()); + assertEquals("ok", rs.getObject(1)); + + rs.close(); + pstmt.close(); + + } + + @Test + public void testSingleQuotes() throws SQLException { + String[] testStrings = new String[]{ + "bare ? question mark", + "quoted \\' single quote", + "doubled '' single quote", + "octal \\060 constant", + "escaped \\? question mark", + "double \\\\ backslash", + "double \" quote",}; + + String[] testStringsStdConf = new String[]{ + "bare ? question mark", + "quoted '' single quote", + "doubled '' single quote", + "octal 0 constant", + "escaped ? question mark", + "double \\ backslash", + "double \" quote",}; + + String[] expected = new String[]{ + "bare ? question mark", + "quoted ' single quote", + "doubled ' single quote", + "octal 0 constant", + "escaped ? question mark", + "double \\ backslash", + "double \" quote",}; + + boolean oldStdStrings = TestUtil.getStandardConformingStrings(con); + Statement stmt = con.createStatement(); + + // Test with standard_conforming_strings turned off. + stmt.execute("SET standard_conforming_strings TO off"); + for (int i = 0; i < testStrings.length; i++) { + PreparedStatement pstmt = con.prepareStatement("SELECT '" + testStrings[i] + "'"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(expected[i], rs.getString(1)); + rs.close(); + pstmt.close(); + } + + // Test with standard_conforming_strings turned off... + // ... using the escape string syntax (E''). 
+ stmt.execute("SET standard_conforming_strings TO on"); + for (int i = 0; i < testStrings.length; i++) { + PreparedStatement pstmt = con.prepareStatement("SELECT E'" + testStrings[i] + "'"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(expected[i], rs.getString(1)); + rs.close(); + pstmt.close(); + } + // ... using standard conforming input strings. + for (int i = 0; i < testStrings.length; i++) { + PreparedStatement pstmt = con.prepareStatement("SELECT '" + testStringsStdConf[i] + "'"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(expected[i], rs.getString(1)); + rs.close(); + pstmt.close(); + } + + stmt.execute("SET standard_conforming_strings TO " + (oldStdStrings ? "on" : "off")); + stmt.close(); + } + + @Test + public void testDoubleQuotes() throws SQLException { + String[] testStrings = new String[]{ + "bare ? question mark", + "single ' quote", + "doubled '' single quote", + "doubled \"\" double quote", + "no backslash interpretation here: \\", + }; + + for (String testString : testStrings) { + PreparedStatement pstmt = + con.prepareStatement("CREATE TABLE \"" + testString + "\" (i integer)"); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("DROP TABLE \"" + testString + "\""); + pstmt.executeUpdate(); + pstmt.close(); + } + } + + @Test + public void testDollarQuotes() throws SQLException { + // dollar-quotes are supported in the backend since version 8.0 + PreparedStatement st; + ResultSet rs; + + st = con.prepareStatement("SELECT $$;$$ WHERE $x$?$x$=$_0$?$_0$ AND $$?$$=?"); + st.setString(1, "?"); + rs = st.executeQuery(); + assertTrue(rs.next()); + assertEquals(";", rs.getString(1)); + assertFalse(rs.next()); + st.close(); + + st = con.prepareStatement( + "SELECT $__$;$__$ WHERE ''''=$q_1$'$q_1$ AND ';'=?;" + + "SELECT $x$$a$;$x $a$$x$ WHERE $$;$$=? 
OR ''=$c$c$;$c$;" + + "SELECT ?"); + st.setString(1, ";"); + st.setString(2, ";"); + st.setString(3, "$a$ $a$"); + + assertTrue(st.execute()); + rs = st.getResultSet(); + assertTrue(rs.next()); + assertEquals(";", rs.getString(1)); + assertFalse(rs.next()); + + assertTrue(st.getMoreResults()); + rs = st.getResultSet(); + assertTrue(rs.next()); + assertEquals("$a$;$x $a$", rs.getString(1)); + assertFalse(rs.next()); + + assertTrue(st.getMoreResults()); + rs = st.getResultSet(); + assertTrue(rs.next()); + assertEquals("$a$ $a$", rs.getString(1)); + assertFalse(rs.next()); + st.close(); + } + + @Test + public void testDollarQuotesAndIdentifiers() throws SQLException { + // dollar-quotes are supported in the backend since version 8.0 + PreparedStatement st; + + con.createStatement().execute("CREATE TEMP TABLE a$b$c(a varchar, b varchar)"); + st = con.prepareStatement("INSERT INTO a$b$c (a, b) VALUES (?, ?)"); + st.setString(1, "a"); + st.setString(2, "b"); + st.executeUpdate(); + st.close(); + + con.createStatement().execute("CREATE TEMP TABLE e$f$g(h varchar, e$f$g varchar) "); + st = con.prepareStatement("UPDATE e$f$g SET h = ? 
|| e$f$g"); + st.setString(1, "a"); + st.executeUpdate(); + st.close(); + } + + @Test + public void testComments() throws SQLException { + PreparedStatement st; + ResultSet rs; + + st = con.prepareStatement("SELECT /*?*/ /*/*/*/**/*/*/*/1;SELECT ?;--SELECT ?"); + st.setString(1, "a"); + assertTrue(st.execute()); + assertTrue(st.getMoreResults()); + assertFalse(st.getMoreResults()); + st.close(); + + st = con.prepareStatement("SELECT /**/'?'/*/**/*/ WHERE '?'=/*/*/*?*/*/*/--?\n?"); + st.setString(1, "?"); + rs = st.executeQuery(); + assertTrue(rs.next()); + assertEquals("?", rs.getString(1)); + assertFalse(rs.next()); + st.close(); + } + + @Test + public void testDoubleQuestionMark() throws SQLException { + PreparedStatement st; + ResultSet rs; + + st = con.prepareStatement("select ??- lseg '((-1,0),(1,0))';"); + rs = st.executeQuery(); + assertTrue(rs.next()); + // Bool values in binary mode are first converted to their Java type (Boolean), and then + // converted to String, which means that we receive 'true'. Bool values in text mode are + // returned as the same text value that was returned by the server, i.e. 't'. + assertEquals(binaryMode == BinaryMode.FORCE && preferQueryMode != PreferQueryMode.SIMPLE ? "true" : "t", rs.getString(1)); + assertFalse(rs.next()); + st.close(); + + st = con.prepareStatement("select lseg '((-1,0),(1,0))' ??# box '((-2,-2),(2,2))';"); + rs = st.executeQuery(); + assertTrue(rs.next()); + assertEquals(binaryMode == BinaryMode.FORCE && preferQueryMode != PreferQueryMode.SIMPLE ? 
"true" : "t", rs.getString(1)); + assertFalse(rs.next()); + st.close(); + } + + @Test + public void testNumeric() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "CREATE TEMP TABLE numeric_tab (max_numeric_positive numeric, min_numeric_positive numeric, max_numeric_negative numeric, min_numeric_negative numeric, null_value numeric)"); + pstmt.executeUpdate(); + pstmt.close(); + + char[] wholeDigits = new char[NUMERIC_MAX_DISPLAY_SCALE]; + for (int i = 0; i < NUMERIC_MAX_DISPLAY_SCALE; i++) { + wholeDigits[i] = '9'; + } + + char[] fractionDigits = new char[NUMERIC_MAX_PRECISION]; + for (int i = 0; i < NUMERIC_MAX_PRECISION; i++) { + fractionDigits[i] = '9'; + } + + String maxValueString = new String(wholeDigits); + String minValueString = new String(fractionDigits); + BigDecimal[] values = new BigDecimal[4]; + values[0] = new BigDecimal(maxValueString); + values[1] = new BigDecimal("-" + maxValueString); + values[2] = new BigDecimal(minValueString); + values[3] = new BigDecimal("-" + minValueString); + + pstmt = con.prepareStatement("insert into numeric_tab values (?,?,?,?,?)"); + for (int i = 1; i < 5; i++) { + pstmt.setBigDecimal(i, values[i - 1]); + } + + pstmt.setNull(5, Types.NUMERIC); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from numeric_tab"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + for (int i = 1; i < 5; i++) { + assertTrue(rs.getBigDecimal(i).compareTo(values[i - 1]) == 0); + } + rs.getDouble(5); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + + } + + @Test + public void testDouble() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "CREATE TEMP TABLE double_tab (max_double float, min_double float, null_value float)"); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("insert into double_tab values (?,?,?)"); + pstmt.setDouble(1, 1.0E125); + pstmt.setDouble(2, 1.0E-130); + pstmt.setNull(3, 
Types.DOUBLE); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from double_tab"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + double d = rs.getDouble(1); + assertTrue(rs.getDouble(1) == 1.0E125); + assertTrue(rs.getDouble(2) == 1.0E-130); + rs.getDouble(3); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + + } + + @Test + public void testFloat() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "CREATE TEMP TABLE float_tab (max_float real, min_float real, null_value real)"); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("insert into float_tab values (?,?,?)"); + pstmt.setFloat(1, (float) 1.0E37); + pstmt.setFloat(2, (float) 1.0E-37); + pstmt.setNull(3, Types.FLOAT); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from float_tab"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + float f = rs.getFloat(1); + assertTrue("expected 1.0E37,received " + rs.getFloat(1), rs.getFloat(1) == (float) 1.0E37); + assertTrue("expected 1.0E-37,received " + rs.getFloat(2), rs.getFloat(2) == (float) 1.0E-37); + rs.getDouble(3); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + + } + + @Test + public void testNaNLiteralsSimpleStatement() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("select 'NaN'::numeric, 'NaN'::real, 'NaN'::double precision"); + checkNaNLiterals(stmt, rs); + } + + @Test + public void testNaNLiteralsPreparedStatement() throws SQLException { + PreparedStatement stmt = con.prepareStatement("select 'NaN'::numeric, 'NaN'::real, 'NaN'::double precision"); + checkNaNLiterals(stmt, stmt.executeQuery()); + } + + private void checkNaNLiterals(Statement stmt, ResultSet rs) throws SQLException { + rs.next(); + assertTrue("Double.isNaN((Double) rs.getObject", Double.isNaN((Double) rs.getObject(3))); + assertTrue("Double.isNaN(rs.getDouble", 
Double.isNaN(rs.getDouble(3))); + assertTrue("Float.isNaN((Float) rs.getObject", Float.isNaN((Float) rs.getObject(2))); + assertTrue("Float.isNaN(rs.getFloat", Float.isNaN(rs.getFloat(2))); + assertTrue("Double.isNaN((Double) rs.getObject", Double.isNaN((Double) rs.getObject(1))); + assertTrue("Double.isNaN(rs.getDouble", Double.isNaN(rs.getDouble(1))); + rs.close(); + stmt.close(); + } + + @Test + public void testNaNSetDoubleFloat() throws SQLException { + PreparedStatement ps = con.prepareStatement("select ?, ?"); + ps.setFloat(1, Float.NaN); + ps.setDouble(2, Double.NaN); + + checkNaNParams(ps); + } + + @Test + public void testNaNSetObject() throws SQLException { + PreparedStatement ps = con.prepareStatement("select ?, ?"); + ps.setObject(1, Float.NaN); + ps.setObject(2, Double.NaN); + + checkNaNParams(ps); + } + + private void checkNaNParams(PreparedStatement ps) throws SQLException { + ResultSet rs = ps.executeQuery(); + rs.next(); + + assertTrue("Float.isNaN((Float) rs.getObject", Float.isNaN((Float) rs.getObject(1))); + assertTrue("Float.isNaN(rs.getFloat", Float.isNaN(rs.getFloat(1))); + assertTrue("Double.isNaN(rs.getDouble", Double.isNaN(rs.getDouble(2))); + assertTrue("Double.isNaN(rs.getDouble", Double.isNaN(rs.getDouble(2))); + + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(ps); + } + + @Test + public void testBoolean() throws SQLException { + testBoolean(0); + testBoolean(1); + testBoolean(5); + testBoolean(-1); + } + + public void testBoolean(int prepareThreshold) throws SQLException { + PreparedStatement pstmt = con.prepareStatement("insert into bool_tab values (?,?,?,?,?,?,?,?)"); + ((org.postgresql.PGStatement) pstmt).setPrepareThreshold(prepareThreshold); + + // Test TRUE values + pstmt.setBoolean(1, true); + pstmt.setObject(1, Boolean.TRUE); + pstmt.setNull(2, Types.BIT); + pstmt.setObject(3, 't', Types.BIT); + pstmt.setObject(3, 'T', Types.BIT); + pstmt.setObject(3, "t", Types.BIT); + pstmt.setObject(4, "true", Types.BIT); + 
pstmt.setObject(5, 'y', Types.BIT); + pstmt.setObject(5, 'Y', Types.BIT); + pstmt.setObject(5, "Y", Types.BIT); + pstmt.setObject(6, "YES", Types.BIT); + pstmt.setObject(7, "On", Types.BIT); + pstmt.setObject(8, '1', Types.BIT); + pstmt.setObject(8, "1", Types.BIT); + assertEquals("one row inserted, true values", 1, pstmt.executeUpdate()); + // Test FALSE values + pstmt.setBoolean(1, false); + pstmt.setObject(1, Boolean.FALSE); + pstmt.setNull(2, Types.BOOLEAN); + pstmt.setObject(3, 'f', Types.BOOLEAN); + pstmt.setObject(3, 'F', Types.BOOLEAN); + pstmt.setObject(3, "F", Types.BOOLEAN); + pstmt.setObject(4, "false", Types.BOOLEAN); + pstmt.setObject(5, 'n', Types.BOOLEAN); + pstmt.setObject(5, 'N', Types.BOOLEAN); + pstmt.setObject(5, "N", Types.BOOLEAN); + pstmt.setObject(6, "NO", Types.BOOLEAN); + pstmt.setObject(7, "Off", Types.BOOLEAN); + pstmt.setObject(8, "0", Types.BOOLEAN); + pstmt.setObject(8, '0', Types.BOOLEAN); + assertEquals("one row inserted, false values", 1, pstmt.executeUpdate()); + // Test weird values + pstmt.setObject(1, (byte) 0, Types.BOOLEAN); + pstmt.setObject(2, BigDecimal.ONE, Types.BOOLEAN); + pstmt.setObject(3, 0L, Types.BOOLEAN); + pstmt.setObject(4, 0x1, Types.BOOLEAN); + pstmt.setObject(5, (float) 0, Types.BOOLEAN); + pstmt.setObject(5, 1.0d, Types.BOOLEAN); + pstmt.setObject(5, 0.0f, Types.BOOLEAN); + pstmt.setObject(6, Integer.valueOf("1"), Types.BOOLEAN); + pstmt.setObject(7, new java.math.BigInteger("0"), Types.BOOLEAN); + pstmt.clearParameters(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from bool_tab"); + ((org.postgresql.PGStatement) pstmt).setPrepareThreshold(prepareThreshold); + ResultSet rs = pstmt.executeQuery(); + + assertTrue(rs.next()); + assertTrue("expected true, received " + rs.getBoolean(1), rs.getBoolean(1)); + rs.getFloat(2); + assertTrue(rs.wasNull()); + assertTrue("expected true, received " + rs.getBoolean(3), rs.getBoolean(3)); + assertTrue("expected true, received " + rs.getBoolean(4), 
rs.getBoolean(4)); + assertTrue("expected true, received " + rs.getBoolean(5), rs.getBoolean(5)); + assertTrue("expected true, received " + rs.getBoolean(6), rs.getBoolean(6)); + assertTrue("expected true, received " + rs.getBoolean(7), rs.getBoolean(7)); + assertTrue("expected true, received " + rs.getBoolean(8), rs.getBoolean(8)); + + assertTrue(rs.next()); + assertFalse("expected false, received " + rs.getBoolean(1), rs.getBoolean(1)); + rs.getBoolean(2); + assertTrue(rs.wasNull()); + assertFalse("expected false, received " + rs.getBoolean(3), rs.getBoolean(3)); + assertFalse("expected false, received " + rs.getBoolean(4), rs.getBoolean(4)); + assertFalse("expected false, received " + rs.getBoolean(5), rs.getBoolean(5)); + assertFalse("expected false, received " + rs.getBoolean(6), rs.getBoolean(6)); + assertFalse("expected false, received " + rs.getBoolean(7), rs.getBoolean(7)); + assertFalse("expected false, received " + rs.getBoolean(8), rs.getBoolean(8)); + + rs.close(); + pstmt.close(); + + pstmt = con.prepareStatement("TRUNCATE TABLE bool_tab"); + pstmt.executeUpdate(); + pstmt.close(); + } + + @Test + public void testBadBoolean() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("INSERT INTO bad_bool VALUES (?)"); + try { + pstmt.setObject(1, "this is not boolean", Types.BOOLEAN); + fail(); + } catch (SQLException e) { + assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + assertEquals("Cannot cast to boolean: \"this is not boolean\"", e.getMessage()); + } + try { + pstmt.setObject(1, 'X', Types.BOOLEAN); + fail(); + } catch (SQLException e) { + assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + assertEquals("Cannot cast to boolean: \"X\"", e.getMessage()); + } + try { + java.io.File obj = new java.io.File(""); + pstmt.setObject(1, obj, Types.BOOLEAN); + fail(); + } catch (SQLException e) { + assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), 
e.getSQLState()); + assertEquals("Cannot cast to boolean", e.getMessage()); + } + try { + pstmt.setObject(1, "1.0", Types.BOOLEAN); + fail(); + } catch (SQLException e) { + assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + assertEquals("Cannot cast to boolean: \"1.0\"", e.getMessage()); + } + try { + pstmt.setObject(1, "-1", Types.BOOLEAN); + fail(); + } catch (SQLException e) { + assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + assertEquals("Cannot cast to boolean: \"-1\"", e.getMessage()); + } + try { + pstmt.setObject(1, "ok", Types.BOOLEAN); + fail(); + } catch (SQLException e) { + assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + assertEquals("Cannot cast to boolean: \"ok\"", e.getMessage()); + } + try { + pstmt.setObject(1, 0.99f, Types.BOOLEAN); + fail(); + } catch (SQLException e) { + assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + assertEquals("Cannot cast to boolean: \"0.99\"", e.getMessage()); + } + try { + pstmt.setObject(1, -0.01d, Types.BOOLEAN); + fail(); + } catch (SQLException e) { + assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + assertEquals("Cannot cast to boolean: \"-0.01\"", e.getMessage()); + } + try { + pstmt.setObject(1, new java.sql.Date(0), Types.BOOLEAN); + fail(); + } catch (SQLException e) { + assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + assertEquals("Cannot cast to boolean", e.getMessage()); + } + try { + pstmt.setObject(1, new java.math.BigInteger("1000"), Types.BOOLEAN); + fail(); + } catch (SQLException e) { + assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + assertEquals("Cannot cast to boolean: \"1000\"", e.getMessage()); + } + try { + pstmt.setObject(1, Math.PI, Types.BOOLEAN); + fail(); + } catch (SQLException e) { + 
assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + assertEquals("Cannot cast to boolean: \"3.141592653589793\"", e.getMessage()); + } + pstmt.close(); + } + + @Test + public void testSetFloatInteger() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "CREATE temp TABLE float_tab (max_val float8, min_val float, null_val float8)"); + pstmt.executeUpdate(); + pstmt.close(); + + Integer maxInteger = 2147483647; + Integer minInteger = -2147483648; + + Double maxFloat = 2147483647.0; + Double minFloat = (double) -2147483648; + + pstmt = con.prepareStatement("insert into float_tab values (?,?,?)"); + pstmt.setObject(1, maxInteger, Types.FLOAT); + pstmt.setObject(2, minInteger, Types.FLOAT); + pstmt.setNull(3, Types.FLOAT); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from float_tab"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + + assertTrue("expected " + maxFloat + " ,received " + rs.getObject(1), + rs.getObject(1).equals(maxFloat)); + assertTrue("expected " + minFloat + " ,received " + rs.getObject(2), + rs.getObject(2).equals(minFloat)); + rs.getFloat(3); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + + } + + @Test + public void testSetFloatString() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "CREATE temp TABLE float_tab (max_val float8, min_val float8, null_val float8)"); + pstmt.executeUpdate(); + pstmt.close(); + + String maxStringFloat = "1.0E37"; + String minStringFloat = "1.0E-37"; + Double maxFloat = 1.0E37; + Double minFloat = 1.0E-37; + + pstmt = con.prepareStatement("insert into float_tab values (?,?,?)"); + pstmt.setObject(1, maxStringFloat, Types.FLOAT); + pstmt.setObject(2, minStringFloat, Types.FLOAT); + pstmt.setNull(3, Types.FLOAT); + pstmt.executeUpdate(); + pstmt.setObject(1, "1.0", Types.FLOAT); + pstmt.setObject(2, "0.0", Types.FLOAT); + pstmt.setNull(3, Types.FLOAT); + 
pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from float_tab"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + + assertTrue(((Double) rs.getObject(1)).equals(maxFloat)); + assertTrue(((Double) rs.getObject(2)).equals(minFloat)); + assertTrue(rs.getDouble(1) == maxFloat); + assertTrue(rs.getDouble(2) == minFloat); + rs.getFloat(3); + assertTrue(rs.wasNull()); + + assertTrue(rs.next()); + assertTrue("expected true, received " + rs.getBoolean(1), rs.getBoolean(1)); + assertFalse("expected false,received " + rs.getBoolean(2), rs.getBoolean(2)); + + rs.close(); + pstmt.close(); + + } + + @Test + public void testSetFloatBigDecimal() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "CREATE temp TABLE float_tab (max_val float8, min_val float8, null_val float8)"); + pstmt.executeUpdate(); + pstmt.close(); + + BigDecimal maxBigDecimalFloat = new BigDecimal("1.0E37"); + BigDecimal minBigDecimalFloat = new BigDecimal("1.0E-37"); + Double maxFloat = 1.0E37; + Double minFloat = 1.0E-37; + + pstmt = con.prepareStatement("insert into float_tab values (?,?,?)"); + pstmt.setObject(1, maxBigDecimalFloat, Types.FLOAT); + pstmt.setObject(2, minBigDecimalFloat, Types.FLOAT); + pstmt.setNull(3, Types.FLOAT); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from float_tab"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + + assertTrue("expected " + maxFloat + " ,received " + rs.getObject(1), + ((Double) rs.getObject(1)).equals(maxFloat)); + assertTrue("expected " + minFloat + " ,received " + rs.getObject(2), + ((Double) rs.getObject(2)).equals(minFloat)); + rs.getFloat(3); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + + } + + @Test + public void testSetTinyIntFloat() throws SQLException { + PreparedStatement pstmt = con + .prepareStatement("CREATE temp TABLE tiny_int (max_val int4, min_val int4, null_val int4)"); + pstmt.executeUpdate(); + 
pstmt.close(); + + Integer maxInt = 127; + Integer minInt = -127; + Float maxIntFloat = 127F; + Float minIntFloat = (float) -127; + + pstmt = con.prepareStatement("insert into tiny_int values (?,?,?)"); + pstmt.setObject(1, maxIntFloat, Types.TINYINT); + pstmt.setObject(2, minIntFloat, Types.TINYINT); + pstmt.setNull(3, Types.TINYINT); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from tiny_int"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + + assertEquals("maxInt as rs.getObject", maxInt, rs.getObject(1)); + assertEquals("minInt as rs.getObject", minInt, rs.getObject(2)); + rs.getObject(3); + assertTrue("rs.wasNull after rs.getObject", rs.wasNull()); + assertEquals("maxInt as rs.getInt", maxInt, (Integer) rs.getInt(1)); + assertEquals("minInt as rs.getInt", minInt, (Integer) rs.getInt(2)); + rs.getInt(3); + assertTrue("rs.wasNull after rs.getInt", rs.wasNull()); + assertEquals("maxInt as rs.getLong", Long.valueOf(maxInt), (Long) rs.getLong(1)); + assertEquals("minInt as rs.getLong", Long.valueOf(minInt), (Long) rs.getLong(2)); + rs.getLong(3); + assertTrue("rs.wasNull after rs.getLong", rs.wasNull()); + assertEquals("maxInt as rs.getBigDecimal", BigDecimal.valueOf(maxInt), rs.getBigDecimal(1)); + assertEquals("minInt as rs.getBigDecimal", BigDecimal.valueOf(minInt), rs.getBigDecimal(2)); + assertNull("rs.getBigDecimal", rs.getBigDecimal(3)); + assertTrue("rs.getBigDecimal after rs.getLong", rs.wasNull()); + assertEquals("maxInt as rs.getBigDecimal(scale=0)", BigDecimal.valueOf(maxInt), + rs.getBigDecimal(1, 0)); + assertEquals("minInt as rs.getBigDecimal(scale=0)", BigDecimal.valueOf(minInt), + rs.getBigDecimal(2, 0)); + assertNull("rs.getBigDecimal(scale=0)", rs.getBigDecimal(3, 0)); + assertTrue("rs.getBigDecimal after rs.getLong", rs.wasNull()); + assertEquals("maxInt as rs.getBigDecimal(scale=1)", + BigDecimal.valueOf(maxInt).setScale(1, RoundingMode.HALF_EVEN), rs.getBigDecimal(1, 1)); + 
assertEquals("minInt as rs.getBigDecimal(scale=1)", + BigDecimal.valueOf(minInt).setScale(1, RoundingMode.HALF_EVEN), rs.getBigDecimal(2, 1)); + rs.getFloat(3); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + + } + + @Test + public void testSetSmallIntFloat() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "CREATE temp TABLE small_int (max_val int4, min_val int4, null_val int4)"); + pstmt.executeUpdate(); + pstmt.close(); + + Integer maxInt = 32767; + Integer minInt = -32768; + Float maxIntFloat = 32767F; + Float minIntFloat = (float) -32768; + + pstmt = con.prepareStatement("insert into small_int values (?,?,?)"); + pstmt.setObject(1, maxIntFloat, Types.SMALLINT); + pstmt.setObject(2, minIntFloat, Types.SMALLINT); + pstmt.setNull(3, Types.TINYINT); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from small_int"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + + assertTrue("expected " + maxInt + " ,received " + rs.getObject(1), + rs.getObject(1).equals(maxInt)); + assertTrue("expected " + minInt + " ,received " + rs.getObject(2), + rs.getObject(2).equals(minInt)); + rs.getFloat(3); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + } + + @Test + public void testSetIntFloat() throws SQLException { + PreparedStatement pstmt = con + .prepareStatement("CREATE temp TABLE int_TAB (max_val int4, min_val int4, null_val int4)"); + pstmt.executeUpdate(); + pstmt.close(); + + Integer maxInt = 1000; + Integer minInt = -1000; + Float maxIntFloat = 1000F; + Float minIntFloat = (float) -1000; + + pstmt = con.prepareStatement("insert into int_tab values (?,?,?)"); + pstmt.setObject(1, maxIntFloat, Types.INTEGER); + pstmt.setObject(2, minIntFloat, Types.INTEGER); + pstmt.setNull(3, Types.INTEGER); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from int_tab"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + + 
assertTrue("expected " + maxInt + " ,received " + rs.getObject(1), + ((Integer) rs.getObject(1)).equals(maxInt)); + assertTrue("expected " + minInt + " ,received " + rs.getObject(2), + ((Integer) rs.getObject(2)).equals(minInt)); + rs.getFloat(3); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + + } + + @Test + public void testSetBooleanDouble() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "CREATE temp TABLE double_tab (max_val float, min_val float, null_val float)"); + pstmt.executeUpdate(); + pstmt.close(); + + Double dBooleanTrue = 1.0; + Double dBooleanFalse = (double) 0; + + pstmt = con.prepareStatement("insert into double_tab values (?,?,?)"); + pstmt.setObject(1, Boolean.TRUE, Types.DOUBLE); + pstmt.setObject(2, Boolean.FALSE, Types.DOUBLE); + pstmt.setNull(3, Types.DOUBLE); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from double_tab"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + + assertTrue("expected " + dBooleanTrue + " ,received " + rs.getObject(1), + rs.getObject(1).equals(dBooleanTrue)); + assertTrue("expected " + dBooleanFalse + " ,received " + rs.getObject(2), + rs.getObject(2).equals(dBooleanFalse)); + rs.getFloat(3); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + + } + + @Test + public void testSetBooleanNumeric() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "CREATE temp TABLE numeric_tab (max_val numeric(30,15), min_val numeric(30,15), null_val numeric(30,15))"); + pstmt.executeUpdate(); + pstmt.close(); + + BigDecimal dBooleanTrue = new BigDecimal(1); + BigDecimal dBooleanFalse = new BigDecimal(0); + + pstmt = con.prepareStatement("insert into numeric_tab values (?,?,?)"); + pstmt.setObject(1, Boolean.TRUE, Types.NUMERIC, 2); + pstmt.setObject(2, Boolean.FALSE, Types.NUMERIC, 2); + pstmt.setNull(3, Types.DOUBLE); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from 
numeric_tab"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + + assertTrue("expected " + dBooleanTrue + " ,received " + rs.getObject(1), + ((BigDecimal) rs.getObject(1)).compareTo(dBooleanTrue) == 0); + assertTrue("expected " + dBooleanFalse + " ,received " + rs.getObject(2), + ((BigDecimal) rs.getObject(2)).compareTo(dBooleanFalse) == 0); + rs.getFloat(3); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + + } + + @Test + public void testSetBooleanDecimal() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "CREATE temp TABLE DECIMAL_TAB (max_val numeric(30,15), min_val numeric(30,15), null_val numeric(30,15))"); + pstmt.executeUpdate(); + pstmt.close(); + + BigDecimal dBooleanTrue = new BigDecimal(1); + BigDecimal dBooleanFalse = new BigDecimal(0); + + pstmt = con.prepareStatement("insert into DECIMAL_TAB values (?,?,?)"); + pstmt.setObject(1, Boolean.TRUE, Types.DECIMAL, 2); + pstmt.setObject(2, Boolean.FALSE, Types.DECIMAL, 2); + pstmt.setNull(3, Types.DOUBLE); + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select * from DECIMAL_TAB"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + + assertTrue("expected " + dBooleanTrue + " ,received " + rs.getObject(1), + ((BigDecimal) rs.getObject(1)).compareTo(dBooleanTrue) == 0); + assertTrue("expected " + dBooleanFalse + " ,received " + rs.getObject(2), + ((BigDecimal) rs.getObject(2)).compareTo(dBooleanFalse) == 0); + rs.getFloat(3); + assertTrue(rs.wasNull()); + rs.close(); + pstmt.close(); + + } + + @Test + public void testSetObjectBigDecimalUnscaled() throws SQLException { + TestUtil.createTempTable(con, "decimal_scale", + "n1 numeric, n2 numeric, n3 numeric, n4 numeric"); + PreparedStatement pstmt = con.prepareStatement("insert into decimal_scale values(?,?,?,?)"); + BigDecimal v = new BigDecimal("3.141593"); + pstmt.setObject(1, v, Types.NUMERIC); + + String vs = v.toPlainString(); + pstmt.setObject(2, vs, 
Types.NUMERIC); + + Float vf = Float.valueOf(vs); + pstmt.setObject(3, vf, Types.NUMERIC); + + Double vd = Double.valueOf(vs); + pstmt.setObject(4, vd, Types.NUMERIC); + + pstmt.executeUpdate(); + pstmt.close(); + + pstmt = con.prepareStatement("select n1,n2,n3,n4 from decimal_scale"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertTrue("expected numeric set via BigDecimal " + v + " stored as " + rs.getBigDecimal(1), + v.compareTo(rs.getBigDecimal(1)) == 0); + assertTrue("expected numeric set via String" + vs + " stored as " + rs.getBigDecimal(2), + v.compareTo(rs.getBigDecimal(2)) == 0); + // float is really bad... + assertTrue("expected numeric set via Float" + vf + " stored as " + rs.getBigDecimal(3), + v.compareTo(rs.getBigDecimal(3).setScale(6, RoundingMode.HALF_UP)) == 0); + assertTrue("expected numeric set via Double" + vd + " stored as " + rs.getBigDecimal(4), + v.compareTo(rs.getBigDecimal(4)) == 0); + + rs.close(); + pstmt.close(); + } + + @Test + public void testSetObjectBigDecimalWithScale() throws SQLException { + TestUtil.createTempTable(con, "decimal_scale", + "n1 numeric, n2 numeric, n3 numeric, n4 numeric"); + PreparedStatement psinsert = con.prepareStatement("insert into decimal_scale values(?,?,?,?)"); + PreparedStatement psselect = con.prepareStatement("select n1,n2,n3,n4 from decimal_scale"); + PreparedStatement pstruncate = con.prepareStatement("truncate table decimal_scale"); + + BigDecimal v = new BigDecimal("3.141593"); + String vs = v.toPlainString(); + Float vf = Float.valueOf(vs); + Double vd = Double.valueOf(vs); + + for (int s = 0; s < 6; s++) { + psinsert.setObject(1, v, Types.NUMERIC, s); + psinsert.setObject(2, vs, Types.NUMERIC, s); + psinsert.setObject(3, vf, Types.NUMERIC, s); + psinsert.setObject(4, vd, Types.NUMERIC, s); + + psinsert.executeUpdate(); + + ResultSet rs = psselect.executeQuery(); + assertTrue(rs.next()); + BigDecimal vscaled = v.setScale(s, RoundingMode.HALF_UP); + assertTrue( + "expected 
numeric set via BigDecimal " + v + " with scale " + s + " stored as " + vscaled, + vscaled.compareTo(rs.getBigDecimal(1)) == 0); + assertTrue( + "expected numeric set via String" + vs + " with scale " + s + " stored as " + vscaled, + vscaled.compareTo(rs.getBigDecimal(2)) == 0); + assertTrue( + "expected numeric set via Float" + vf + " with scale " + s + " stored as " + vscaled, + vscaled.compareTo(rs.getBigDecimal(3)) == 0); + assertTrue( + "expected numeric set via Double" + vd + " with scale " + s + " stored as " + vscaled, + vscaled.compareTo(rs.getBigDecimal(4)) == 0); + rs.close(); + pstruncate.executeUpdate(); + } + + psinsert.close(); + psselect.close(); + pstruncate.close(); + } + + @Test + public void testSetObjectWithBigDecimal() throws SQLException { + TestUtil.createTempTable(con, "number_fallback", + "n1 numeric"); + PreparedStatement psinsert = con.prepareStatement("insert into number_fallback values(?)"); + PreparedStatement psselect = con.prepareStatement("select n1 from number_fallback"); + + psinsert.setObject(1, new BigDecimal("733")); + psinsert.execute(); + + ResultSet rs = psselect.executeQuery(); + assertTrue(rs.next()); + assertTrue( + "expected 733, but received " + rs.getBigDecimal(1), + new BigDecimal("733").compareTo(rs.getBigDecimal(1)) == 0); + + psinsert.close(); + psselect.close(); + } + + @Test + public void testSetObjectNumberFallbackWithBigInteger() throws SQLException { + TestUtil.createTempTable(con, "number_fallback", + "n1 numeric"); + PreparedStatement psinsert = con.prepareStatement("insert into number_fallback values(?)"); + PreparedStatement psselect = con.prepareStatement("select n1 from number_fallback"); + + psinsert.setObject(1, new BigInteger("733")); + psinsert.execute(); + + ResultSet rs = psselect.executeQuery(); + assertTrue(rs.next()); + assertTrue( + "expected 733, but received " + rs.getBigDecimal(1), + new BigDecimal("733").compareTo(rs.getBigDecimal(1)) == 0); + + psinsert.close(); + psselect.close(); + } + 
+ @Test + public void testSetObjectNumberFallbackWithAtomicLong() throws SQLException { + TestUtil.createTempTable(con, "number_fallback", + "n1 numeric"); + PreparedStatement psinsert = con.prepareStatement("insert into number_fallback values(?)"); + PreparedStatement psselect = con.prepareStatement("select n1 from number_fallback"); + + psinsert.setObject(1, new AtomicLong(733)); + psinsert.execute(); + + ResultSet rs = psselect.executeQuery(); + assertTrue(rs.next()); + assertTrue( + "expected 733, but received " + rs.getBigDecimal(1), + new BigDecimal("733").compareTo(rs.getBigDecimal(1)) == 0); + + psinsert.close(); + psselect.close(); + } + + @Test + public void testUnknownSetObject() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("INSERT INTO intervaltable(i) VALUES (?)"); + + pstmt.setString(1, "1 week"); + try { + pstmt.executeUpdate(); + assertTrue("When using extended protocol, interval vs character varying type mismatch error is expected", + preferQueryMode == PreferQueryMode.SIMPLE); + } catch (SQLException sqle) { + // ERROR: column "i" is of type interval but expression is of type character varying + } + + pstmt.setObject(1, "1 week", Types.OTHER); + pstmt.executeUpdate(); + pstmt.close(); + } + + /** + * With autoboxing this apparently happens more often now. + */ + @Test + public void testSetObjectCharacter() throws SQLException { + PreparedStatement ps = con.prepareStatement("INSERT INTO texttable(te) VALUES (?)"); + ps.setObject(1, 'z'); + ps.executeUpdate(); + ps.close(); + } + + /** + * When we have parameters of unknown type and it's not using the unnamed statement, we issue a + * protocol level statement describe message for the V3 protocol. This test just makes sure that + * works. 
+ */ + @Test + public void testStatementDescribe() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("SELECT ?::int"); + pstmt.setObject(1, 2, Types.OTHER); + for (int i = 0; i < 10; i++) { + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + } + pstmt.close(); + } + + @Test + public void testBatchWithPrepareThreshold5() throws SQLException { + assumeBinaryModeRegular(); + Assume.assumeTrue("simple protocol only does not support prepared statement requests", + preferQueryMode != PreferQueryMode.SIMPLE); + + PreparedStatement pstmt = con.prepareStatement("CREATE temp TABLE batch_tab_threshold5 (id bigint, val bigint)"); + pstmt.executeUpdate(); + pstmt.close(); + + // When using a prepareThreshold of 5, a batch update should use server-side prepare + pstmt = con.prepareStatement("INSERT INTO batch_tab_threshold5 (id, val) VALUES (?,?)"); + ((PgStatement) pstmt).setPrepareThreshold(5); + for (int p = 0; p < 5; p++) { + for (int i = 0; i <= 5; i++) { + pstmt.setLong(1, i); + pstmt.setLong(2, i); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + pstmt.close(); + assertTrue("prepareThreshold=5, so the statement should be server-prepared", + ((PGStatement) pstmt).isUseServerPrepare()); + assertEquals("prepareThreshold=5, so the statement should be server-prepared", 1, + getNumberOfServerPreparedStatements("INSERT INTO batch_tab_threshold5 (id, val) VALUES ($1,$2)")); + } + + @Test + public void testBatchWithPrepareThreshold0() throws SQLException { + assumeBinaryModeRegular(); + Assume.assumeTrue("simple protocol only does not support prepared statement requests", + preferQueryMode != PreferQueryMode.SIMPLE); + + PreparedStatement pstmt = con.prepareStatement("CREATE temp TABLE batch_tab_threshold0 (id bigint, val bigint)"); + pstmt.executeUpdate(); + pstmt.close(); + + // When using a prepareThreshold of 0, a batch update should not use server-side prepare + pstmt = 
con.prepareStatement("INSERT INTO batch_tab_threshold0 (id, val) VALUES (?,?)"); + ((PgStatement) pstmt).setPrepareThreshold(0); + for (int p = 0; p < 5; p++) { + for (int i = 0; i <= 5; i++) { + pstmt.setLong(1, i); + pstmt.setLong(2, i); + pstmt.addBatch(); + } + pstmt.executeBatch(); + } + pstmt.close(); + + assertFalse("prepareThreshold=0, so the statement should not be server-prepared", + ((PGStatement) pstmt).isUseServerPrepare()); + assertEquals("prepareThreshold=0, so the statement should not be server-prepared", 0, + getNumberOfServerPreparedStatements("INSERT INTO batch_tab_threshold0 (id, val) VALUES ($1,$2)")); + } + + @Test + public void testSelectPrepareThreshold0AutoCommitFalseFetchSizeNonZero() throws SQLException { + assumeBinaryModeRegular(); + Assume.assumeTrue("simple protocol only does not support prepared statement requests", + preferQueryMode != PreferQueryMode.SIMPLE); + + con.setAutoCommit(false); + PreparedStatement pstmt = null; + ResultSet rs = null; + try { + pstmt = con.prepareStatement("SELECT 42"); + ((PgStatement) pstmt).setPrepareThreshold(0); + pstmt.setFetchSize(1); + rs = pstmt.executeQuery(); + rs.next(); + assertEquals(42, rs.getInt(1)); + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(pstmt); + } + + assertFalse("prepareThreshold=0, so the statement should not be server-prepared", + ((PGStatement) pstmt).isUseServerPrepare()); + + assertEquals("prepareThreshold=0, so the statement should not be server-prepared", 0, + getNumberOfServerPreparedStatements("SELECT 42")); + } + + @Test + public void testInappropriateStatementSharing() throws SQLException { + PreparedStatement ps = con.prepareStatement("SELECT ?::timestamp"); + assertFirstParameterTypeName("after prepare ?::timestamp bind type should be timestamp", "timestamp", ps); + try { + Timestamp ts = new Timestamp(1474997614836L); + // Since PreparedStatement isn't cached immediately, we need to some warm up + for (int i = 0; i < 3; i++) { + ResultSet rs; + 
+ // Flip statement to use Oid.DATE + ps.setNull(1, Types.DATE); + assertFirstParameterTypeName("set parameter to DATE", "date", ps); + rs = ps.executeQuery(); + assertFirstParameterTypeName("set parameter to DATE (executeQuery should not affect parameterMetadata)", + "date", ps); + try { + assertTrue(rs.next()); + assertNull("NULL DATE converted to TIMESTAMP should return NULL value on getObject", + rs.getObject(1)); + } finally { + rs.close(); + } + + // Flop statement to use Oid.UNSPECIFIED + ps.setTimestamp(1, ts); + assertFirstParameterTypeName("set parameter to Timestamp", "timestamp", ps); + rs = ps.executeQuery(); + assertFirstParameterTypeName("set parameter to Timestamp (executeQuery should not affect parameterMetadata)", + "timestamp", ps); + try { + assertTrue(rs.next()); + assertEquals( + "Looks like we got a narrowing of the data (TIMESTAMP -> DATE). It might caused by inappropriate caching of the statement.", + ts, rs.getObject(1)); + } finally { + rs.close(); + } + } + } finally { + ps.close(); + } + } + + private void assertFirstParameterTypeName(String msg, String expected, PreparedStatement ps) throws SQLException { + if (preferQueryMode == PreferQueryMode.SIMPLE) { + return; + } + ParameterMetaData pmd = ps.getParameterMetaData(); + assertEquals("getParameterMetaData().getParameterTypeName(1) " + msg, + expected, pmd.getParameterTypeName(1)); + } + + @Test + public void testAlternatingBindType() throws SQLException { + assumeBinaryModeForce(); + PreparedStatement ps = con.prepareStatement("SELECT /*testAlternatingBindType*/ ?"); + ResultSet rs; + Logger log = Logger.getLogger("org.postgresql.core.v3.SimpleQuery"); + Level prevLevel = log.getLevel(); + if (prevLevel == null || prevLevel.intValue() > Level.FINER.intValue()) { + log.setLevel(Level.FINER); + } + final AtomicInteger numOfReParses = new AtomicInteger(); + Handler handler = new Handler() { + @Override + public void publish(LogRecord record) { + if 
(record.getMessage().contains("un-prepare it and parse")) { + numOfReParses.incrementAndGet(); + } + } + + @Override + public void flush() { + } + + @Override + public void close() throws SecurityException { + } + }; + log.addHandler(handler); + try { + ps.setString(1, "42"); + rs = ps.executeQuery(); + rs.next(); + Assert.assertEquals("setString(1, \"42\") -> \"42\" expected", "42", rs.getObject(1)); + rs.close(); + + // The bind type is flipped from VARCHAR to INTEGER, and it causes the driver to prepare statement again + ps.setNull(1, Types.INTEGER); + rs = ps.executeQuery(); + rs.next(); + Assert.assertNull("setNull(1, Types.INTEGER) -> null expected", rs.getObject(1)); + Assert.assertEquals("A re-parse was expected, so the number of parses should be 1", + 1, numOfReParses.get()); + rs.close(); + + // The bind type is flipped from INTEGER to VARCHAR, and it causes the driver to prepare statement again + ps.setString(1, "42"); + rs = ps.executeQuery(); + rs.next(); + Assert.assertEquals("setString(1, \"42\") -> \"42\" expected", "42", rs.getObject(1)); + Assert.assertEquals("One more re-parse is expected, so the number of parses should be 2", + 2, numOfReParses.get()); + rs.close(); + + // Types.OTHER null is sent as UNSPECIFIED, and pgjdbc does not re-parse on UNSPECIFIED nulls + // Note: do not rely on absence of re-parse on using Types.OTHER. 
Try using consistent data types + ps.setNull(1, Types.OTHER); + rs = ps.executeQuery(); + rs.next(); + Assert.assertNull("setNull(1, Types.OTHER) -> null expected", rs.getObject(1)); + Assert.assertEquals("setNull(, Types.OTHER) should not cause re-parse", + 2, numOfReParses.get()); + + // Types.INTEGER null is sent as int4 null, and it leads to re-parse + ps.setNull(1, Types.INTEGER); + rs = ps.executeQuery(); + rs.next(); + Assert.assertNull("setNull(1, Types.INTEGER) -> null expected", rs.getObject(1)); + Assert.assertEquals("setNull(, Types.INTEGER) causes re-parse", + 3, numOfReParses.get()); + rs.close(); + } finally { + TestUtil.closeQuietly(ps); + log.removeHandler(handler); + log.setLevel(prevLevel); + } + } + + @Test + public void testNoParametersNPE() throws SQLException { + try { + PreparedStatement ps = con.prepareStatement("select 1"); + ps.setString(1, "null"); + } catch ( NullPointerException ex ) { + fail("Should throw a SQLException"); + } catch (SQLException ex) { + // ignore + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/QuotationTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/QuotationTest.java new file mode 100644 index 0000000..84637af --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/QuotationTest.java @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.test.SlowTests; +import org.postgresql.test.TestUtil; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; + +@RunWith(Parameterized.class) +public class QuotationTest extends BaseTest4 { + private enum QuoteStyle { + SIMPLE("'"), DOLLAR_NOTAG("$$"), DOLLAR_A("$a$"), DOLLAR_DEF("$DEF$"), + SMILING_FACE("$o‿o$") + ; + + private final String quote; + + QuoteStyle(String quote) { + this.quote = quote; + } + + @Override + public String toString() { + return quote; + } + } + + private final String expr; + private final String expected; + + public QuotationTest(QuoteStyle quoteStyle, String expected, String expr) { + this.expected = expected; + this.expr = expr; + } + + @Parameterized.Parameters(name = "{index}: quotes(style={0}, src={1}, quoted={2})") + public static Iterable data() { + Collection prefix = new ArrayList<>(); + // Too many prefixes make test run long + prefix.add(""); + prefix.add("/*\n$\n*//* ? 
*//*{fn *//* now} */"); + prefix.add("-- $\n"); + prefix.add("--\n/* $ */"); + + Collection ids = new ArrayList<>(); + Collection garbageValues = new ArrayList<>(); + garbageValues.add("{fn now}"); + garbageValues.add("{extract}"); + garbageValues.add("{select}"); + garbageValues.add("?select"); + garbageValues.add("select?"); + garbageValues.add("??select"); + garbageValues.add("}{"); + garbageValues.add("{"); + garbageValues.add("}"); + garbageValues.add("--"); + garbageValues.add("/*"); + garbageValues.add("*/"); + for (QuoteStyle quoteStyle : QuoteStyle.values()) { + garbageValues.add(quoteStyle.toString()); + } + for (char ch = 'a'; ch <= 'z'; ch++) { + garbageValues.add(Character.toString(ch)); + } + + for (QuoteStyle quoteStyle : QuoteStyle.values()) { + for (String garbage : garbageValues) { + String unquoted = garbage; + for (int i = 0; i < 3; i++) { + String quoted = unquoted; + if (quoteStyle == QuoteStyle.SIMPLE) { + quoted = quoted.replaceAll("'", "''"); + } + quoted = quoteStyle.toString() + quoted + quoteStyle.toString(); + if (quoted.endsWith("$$$") && quoteStyle == QuoteStyle.DOLLAR_NOTAG) { + // $$$a$$$ is parsed like $$ $a $$ $ -> thus we skip this test + continue; + } + if (quoteStyle != QuoteStyle.SIMPLE && garbage.equals(quoteStyle.toString())) { + // $a$$a$$a$ is not valid + continue; + } + String expected = unquoted; + for (String p : prefix) { + ids.add(new Object[]{quoteStyle, expected, p + quoted}); + } + if (unquoted.length() == 1) { + char ch = unquoted.charAt(0); + if (ch >= 'a' && ch <= 'z') { + // Will assume if 'a' works, then 'aa', 'aaa' will also work + break; + } + } + unquoted += garbage; + } + } + } + + return ids; + } + + @Test + @Category(SlowTests.class) + public void quotedString() throws SQLException { + PreparedStatement ps = con.prepareStatement("select " + expr); + try { + ResultSet rs = ps.executeQuery(); + rs.next(); + String val = rs.getString(1); + Assert.assertEquals(expected, val); + } catch (SQLException e) { + 
TestUtil.closeQuietly(ps); + } + } + + @Test + @Category(SlowTests.class) + public void bindInTheMiddle() throws SQLException { + PreparedStatement ps = con.prepareStatement("select " + expr + ", ?, " + expr); + try { + ps.setInt(1, 42); + ResultSet rs = ps.executeQuery(); + rs.next(); + String val1 = rs.getString(1); + String val3 = rs.getString(3); + Assert.assertEquals(expected, val1); + Assert.assertEquals(expected, val3); + } catch (SQLException e) { + TestUtil.closeQuietly(ps); + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorFetchTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorFetchTest.java new file mode 100644 index 0000000..a56a310 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorFetchTest.java @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; + +import org.postgresql.PGProperty; +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Properties; + +@RunWith(Parameterized.class) +public class RefCursorFetchTest extends BaseTest4 { + private final int numRows; + private final Integer defaultFetchSize; + private final Integer statementFetchSize; + private final Integer resultSetFetchSize; + private final AutoCommit autoCommit; + private final boolean commitAfterExecute; + + public RefCursorFetchTest(BinaryMode binaryMode, int numRows, + Integer defaultFetchSize, + Integer 
statementFetchSize, + Integer resultSetFetchSize, + AutoCommit autoCommit, boolean commitAfterExecute) { + this.numRows = numRows; + this.defaultFetchSize = defaultFetchSize; + this.statementFetchSize = statementFetchSize; + this.resultSetFetchSize = resultSetFetchSize; + this.autoCommit = autoCommit; + this.commitAfterExecute = commitAfterExecute; + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}, numRows = {1}, defaultFetchSize = {2}, statementFetchSize = {3}, resultSetFetchSize = {4}, autoCommit = {5}, commitAfterExecute = {6}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + for (int numRows : new int[]{0, 10, 101}) { + for (Integer defaultFetchSize : new Integer[]{null, 0, 9, 50}) { + for (AutoCommit autoCommit : AutoCommit.values()) { + for (boolean commitAfterExecute : new boolean[]{true, false}) { + for (Integer resultSetFetchSize : new Integer[]{null, 0, 9, 50}) { + for (Integer statementFetchSize : new Integer[]{null, 0, 9, 50}) { + ids.add(new Object[]{binaryMode, numRows, defaultFetchSize, statementFetchSize, resultSetFetchSize, autoCommit, commitAfterExecute}); + } + } + } + } + } + } + } + return ids; + } + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + if (defaultFetchSize != null) { + PGProperty.DEFAULT_ROW_FETCH_SIZE.set(props, defaultFetchSize); + } + } + + @BeforeClass + public static void beforeClass() throws Exception { + TestUtil.assumeHaveMinimumServerVersion(ServerVersion.v9_0); + try (Connection con = TestUtil.openDB()) { + assumeCallableStatementsSupported(con); + TestUtil.createTable(con, "test_blob", "content bytea"); + TestUtil.execute(con, ""); + TestUtil.execute(con, "--create function to read data\n" + + "CREATE OR REPLACE FUNCTION test_blob(p_cur OUT REFCURSOR, p_limit int4) AS $body$\n" + + "BEGIN\n" + + "OPEN p_cur FOR SELECT content FROM test_blob LIMIT p_limit;\n" 
+ + "END;\n" + + "$body$ LANGUAGE plpgsql STABLE"); + + TestUtil.execute(con, "--generate 101 rows with 4096 bytes:\n" + + "insert into test_blob\n" + + "select(select decode(string_agg(lpad(to_hex(width_bucket(random(), 0, 1, 256) - 1), 2, '0'), ''), 'hex')" + + " FROM generate_series(1, 4096))\n" + + "from generate_series (1, 200)"); + } + } + + @AfterClass + public static void afterClass() throws Exception { + try (Connection con = TestUtil.openDB()) { + TestUtil.dropTable(con, "test_blob"); + TestUtil.dropFunction(con, "test_blob", "REFCURSOR, int4"); + } + } + + @Override + public void setUp() throws Exception { + super.setUp(); + con.setAutoCommit(autoCommit == AutoCommit.YES); + } + + @Test + public void fetchAllRows() throws SQLException { + int cnt = 0; + try (CallableStatement call = con.prepareCall("{? = call test_blob(?)}")) { + con.setAutoCommit(false); // ref cursors only work if auto commit is off + if (statementFetchSize != null) { + call.setFetchSize(statementFetchSize); + } + call.registerOutParameter(1, Types.REF_CURSOR); + call.setInt(2, numRows); + call.execute(); + if (commitAfterExecute) { + if (autoCommit == AutoCommit.NO) { + con.commit(); + } else { + con.setAutoCommit(false); + con.setAutoCommit(true); + } + } + try (ResultSet rs = (ResultSet) call.getObject(1)) { + if (resultSetFetchSize != null) { + rs.setFetchSize(resultSetFetchSize); + } + while (rs.next()) { + cnt++; + } + assertEquals("number of rows from test_blob(...) call", numRows, cnt); + } catch (SQLException e) { + if (commitAfterExecute && "34000".equals(e.getSQLState())) { + // Transaction commit closes refcursor, so the fetch call is expected to fail + // File: postgres.c, Routine: exec_execute_message, Line: 2070 + // Server SQLState: 34000 + // TODO: add statementFetchSize, resultSetFetchSize when implemented + Integer fetchSize = defaultFetchSize; + int expectedRows = + fetchSize != null && fetchSize != 0 ? 
Math.min(fetchSize, numRows) : numRows; + assertEquals( + "The transaction was committed before processing the results," + + " so expecting ResultSet to buffer fetchSize=" + fetchSize + " rows out of " + + numRows, + expectedRows, + cnt + ); + return; + } + throw e; + } + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorTest.java new file mode 100644 index 0000000..2358c57 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorTest.java @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import org.postgresql.test.TestUtil; + +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.Arrays; + +/** + * RefCursor ResultSet tests. This test case is basically the same as the ResultSet test case. + * + *

For backwards compatibility reasons we verify that ref cursors can be + * registered with both {@link Types#OTHER} and {@link Types#REF_CURSOR}.

+ * + * @author Nic Ferrier (nferrier@tapsellferrier.co.uk) + */ +@RunWith(Parameterized.class) +public class RefCursorTest extends BaseTest4 { + + private final int cursorType; + + public RefCursorTest(String typeName, int cursorType) { + this.cursorType = cursorType; + } + + @Parameterized.Parameters(name = "typeName = {0}, cursorType = {1}") + public static Iterable data() { + return Arrays.asList(new Object[][]{ + {"OTHER", Types.OTHER}, + {"REF_CURSOR", Types.REF_CURSOR}, + }); + } + + @BeforeClass + public static void beforeClass() throws Exception { + try (Connection con = TestUtil.openDB()) { + assumeCallableStatementsSupported(con); + } + } + + @Override + public void setUp() throws Exception { + // this is the same as the ResultSet setup. + super.setUp(); + Statement stmt = con.createStatement(); + + TestUtil.createTable(con, "testrs", "id integer primary key"); + + stmt.executeUpdate("INSERT INTO testrs VALUES (1)"); + stmt.executeUpdate("INSERT INTO testrs VALUES (2)"); + stmt.executeUpdate("INSERT INTO testrs VALUES (3)"); + stmt.executeUpdate("INSERT INTO testrs VALUES (4)"); + stmt.executeUpdate("INSERT INTO testrs VALUES (6)"); + stmt.executeUpdate("INSERT INTO testrs VALUES (9)"); + + // Create the functions. 
+ stmt.execute("CREATE OR REPLACE FUNCTION testspg__getRefcursor () RETURNS refcursor AS '" + + "declare v_resset refcursor; begin open v_resset for select id from testrs order by id; " + + "return v_resset; end;' LANGUAGE plpgsql;"); + stmt.execute("CREATE OR REPLACE FUNCTION testspg__getEmptyRefcursor () RETURNS refcursor AS '" + + "declare v_resset refcursor; begin open v_resset for select id from testrs where id < 1 order by id; " + + "return v_resset; end;' LANGUAGE plpgsql;"); + stmt.close(); + con.setAutoCommit(false); + } + + @Override + public void tearDown() throws SQLException { + con.setAutoCommit(true); + Statement stmt = con.createStatement(); + stmt.execute("drop FUNCTION testspg__getRefcursor ();"); + stmt.execute("drop FUNCTION testspg__getEmptyRefcursor ();"); + TestUtil.dropTable(con, "testrs"); + super.tearDown(); + } + + @Test + public void testResult() throws SQLException { + CallableStatement call = con.prepareCall("{ ? = call testspg__getRefcursor () }"); + call.registerOutParameter(1, cursorType); + call.execute(); + ResultSet rs = (ResultSet) call.getObject(1); + + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + + assertTrue(rs.next()); + assertEquals(4, rs.getInt(1)); + + assertTrue(rs.next()); + assertEquals(6, rs.getInt(1)); + + assertTrue(rs.next()); + assertEquals(9, rs.getInt(1)); + + assertFalse(rs.next()); + rs.close(); + + call.close(); + } + + @Test + public void testEmptyResult() throws SQLException { + CallableStatement call = con.prepareCall("{ ? = call testspg__getEmptyRefcursor () }"); + call.registerOutParameter(1, cursorType); + call.execute(); + + ResultSet rs = (ResultSet) call.getObject(1); + assertTrue(!rs.next()); + rs.close(); + + call.close(); + } + + @Test + public void testMetaData() throws SQLException { + CallableStatement call = con.prepareCall("{ ? 
= call testspg__getRefcursor () }"); + call.registerOutParameter(1, cursorType); + call.execute(); + + ResultSet rs = (ResultSet) call.getObject(1); + ResultSetMetaData rsmd = rs.getMetaData(); + assertNotNull(rsmd); + assertEquals(1, rsmd.getColumnCount()); + assertEquals(Types.INTEGER, rsmd.getColumnType(1)); + assertEquals("int4", rsmd.getColumnTypeName(1)); + rs.close(); + + call.close(); + } + + @Test + public void testResultType() throws SQLException { + CallableStatement call = con.prepareCall("{ ? = call testspg__getRefcursor () }", + ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + call.registerOutParameter(1, cursorType); + call.execute(); + ResultSet rs = (ResultSet) call.getObject(1); + + assertEquals(rs.getType(), ResultSet.TYPE_SCROLL_INSENSITIVE); + assertEquals(rs.getConcurrency(), ResultSet.CONCUR_READ_ONLY); + + assertTrue(rs.last()); + assertEquals(6, rs.getRow()); + rs.close(); + call.close(); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ReplaceProcessingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ReplaceProcessingTest.java new file mode 100644 index 0000000..506f0a5 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ReplaceProcessingTest.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.SQLException; +import java.util.Arrays; + +@RunWith(Parameterized.class) +public class ReplaceProcessingTest extends BaseTest4 { + + @Parameterized.Parameter(0) + public String input; + @Parameterized.Parameter(1) + public String expected; + + @Parameterized.Parameters(name = "input={0}, expected={1}") + public static Iterable data() { + return Arrays.asList(new Object[][]{ + {"{fn timestampadd(SQL_TSI_YEAR, ?, {fn now()})}", "(CAST( $1||' year' as interval)+ now())"}, + {"{fn timestampadd(SQL_TSI_MONTH, ?, {fn now()})}", "(CAST( $1||' month' as interval)+ now())"}, + {"{fn timestampadd(SQL_TSI_DAY, ?, {fn now()})}", "(CAST( $1||' day' as interval)+ now())"}, + {"{fn timestampadd(SQL_TSI_WEEK, ?, {fn now()})}", "(CAST( $1||' week' as interval)+ now())"}, + {"{fn timestampadd(SQL_TSI_MINUTE, ?, {fn now()})}", "(CAST( $1||' minute' as interval)+ now())"}, + {"{fn timestampadd(SQL_TSI_SECOND, ?, {fn now()})}", "(CAST( $1||' second' as interval)+ now())"}, + {"{fn user()}", "user"}, + {"{fn ifnull(?,?)}", "coalesce($1,$2)"}, + {"{fn database()}", "current_database()"}, + // Not yet supported + // {"{fn timestampadd(SQL_TSI_QUARTER, ?, {fn now()})}", "(CAST( $1||' quarter' as interval)+ now())"}, + // {"{fn timestampadd(SQL_TSI_FRAC_SECOND, ?, {fn now()})}", "(CAST( $1||' second' as interval)+ now())"}, + }); + } + + @Test + public void run() throws SQLException { + Assert.assertEquals(input, expected, con.nativeSQL(input)); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetMetaDataTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetMetaDataTest.java new file mode 100644 index 0000000..d8aa0a2 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetMetaDataTest.java @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2004, PostgreSQL 
Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import org.postgresql.PGProperty; +import org.postgresql.PGResultSetMetaData; +import org.postgresql.core.ServerVersion; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; + +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Properties; + +@RunWith(Parameterized.class) +public class ResultSetMetaDataTest extends BaseTest4 { + Connection conn; + private final Integer databaseMetadataCacheFields; + private final Integer databaseMetadataCacheFieldsMib; + + public ResultSetMetaDataTest(Integer databaseMetadataCacheFields, Integer databaseMetadataCacheFieldsMib) { + this.databaseMetadataCacheFields = databaseMetadataCacheFields; + this.databaseMetadataCacheFieldsMib = databaseMetadataCacheFieldsMib; + } + + @Parameterized.Parameters(name = "databaseMetadataCacheFields = {0}, databaseMetadataCacheFieldsMib = {1}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (Integer fields : new Integer[]{null, 0}) { + for (Integer fieldsMib : new Integer[]{null, 0}) { + ids.add(new Object[]{fields, fieldsMib}); + } + } + return ids; + } + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + if (databaseMetadataCacheFields != null) { + PGProperty.DATABASE_METADATA_CACHE_FIELDS.set(props, 
databaseMetadataCacheFields); + } + if (databaseMetadataCacheFieldsMib != null) { + PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.set(props, databaseMetadataCacheFieldsMib); + } + } + + @Override + public void setUp() throws Exception { + super.setUp(); + conn = con; + TestUtil.createTable(conn, "rsmd1", "a int primary key, b text, c decimal(10,2)"); + TestUtil.createTable(conn, "rsmd_cache", "a int primary key"); + TestUtil.createTable(conn, "timetest", + "tm time(3), tmtz timetz, ts timestamp without time zone, tstz timestamp(6) with time zone"); + + TestUtil.dropSequence(conn, "serialtest_a_seq"); + TestUtil.dropSequence(conn, "serialtest_b_seq"); + + if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v10)) { + TestUtil.createTable(conn, "identitytest", "id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY"); + } + + TestUtil.createTable(conn, "serialtest", "a serial, b bigserial, c int"); + TestUtil.createTable(conn, "alltypes", + "bool boolean, i2 int2, i4 int4, i8 int8, num numeric(10,2), re real, fl float, ch char(3), vc varchar(3), tx text, d date, t time without time zone, tz time with time zone, ts timestamp without time zone, tsz timestamp with time zone, bt bytea"); + TestUtil.createTable(conn, "sizetest", + "fixedchar char(5), fixedvarchar varchar(5), unfixedvarchar varchar, txt text, bytearr bytea, num64 numeric(6,4), num60 numeric(6,0), num numeric, ip inet"); + TestUtil.createTable(conn, "compositetest", "col rsmd1"); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(conn, "compositetest"); + TestUtil.dropTable(conn, "rsmd1"); + TestUtil.dropTable(conn, "rsmd_cache"); + TestUtil.dropTable(conn, "timetest"); + TestUtil.dropTable(conn, "serialtest"); + if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v10)) { + TestUtil.dropTable(conn, "identitytest"); + } + TestUtil.dropTable(conn, "alltypes"); + TestUtil.dropTable(conn, "sizetest"); + TestUtil.dropSequence(conn, "serialtest_a_seq"); + 
TestUtil.dropSequence(conn, "serialtest_b_seq"); + super.tearDown(); + } + + @Test + public void testStandardResultSet() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT a,b,c,a+c as total, b as d FROM rsmd1"); + runStandardTests(rs.getMetaData()); + rs.close(); + stmt.close(); + } + + @Test + public void testPreparedResultSet() throws SQLException { + assumePreparedStatementMetadataSupported(); + + PreparedStatement pstmt = + conn.prepareStatement("SELECT a,b,c,a+c as total, b as d FROM rsmd1 WHERE b = ?"); + runStandardTests(pstmt.getMetaData()); + pstmt.close(); + } + + private void runStandardTests(ResultSetMetaData rsmd) throws SQLException { + PGResultSetMetaData pgrsmd = (PGResultSetMetaData) rsmd; + + assertEquals(5, rsmd.getColumnCount()); + + assertEquals("a", rsmd.getColumnLabel(1)); + assertEquals("total", rsmd.getColumnLabel(4)); + + assertEquals("a", rsmd.getColumnName(1)); + assertEquals("", pgrsmd.getBaseColumnName(4)); + assertEquals("b", pgrsmd.getBaseColumnName(5)); + + assertEquals(Types.INTEGER, rsmd.getColumnType(1)); + assertEquals(Types.VARCHAR, rsmd.getColumnType(2)); + + assertEquals("int4", rsmd.getColumnTypeName(1)); + assertEquals("text", rsmd.getColumnTypeName(2)); + + assertEquals(10, rsmd.getPrecision(3)); + + assertEquals(2, rsmd.getScale(3)); + + assertEquals("", rsmd.getSchemaName(1)); + assertEquals("", rsmd.getSchemaName(4)); + assertEquals("public", pgrsmd.getBaseSchemaName(1)); + assertEquals("", pgrsmd.getBaseSchemaName(4)); + + assertEquals("rsmd1", rsmd.getTableName(1)); + assertEquals("", rsmd.getTableName(4)); + assertEquals("rsmd1", pgrsmd.getBaseTableName(1)); + assertEquals("", pgrsmd.getBaseTableName(4)); + + assertEquals(ResultSetMetaData.columnNoNulls, rsmd.isNullable(1)); + assertEquals(ResultSetMetaData.columnNullable, rsmd.isNullable(2)); + assertEquals(ResultSetMetaData.columnNullableUnknown, rsmd.isNullable(4)); + } + + // verify that a prepared update 
statement returns no metadata and doesn't execute. + @Test + public void testPreparedUpdate() throws SQLException { + assumePreparedStatementMetadataSupported(); + PreparedStatement pstmt = conn.prepareStatement("INSERT INTO rsmd1(a,b) VALUES(?,?)"); + pstmt.setInt(1, 1); + pstmt.setString(2, "hello"); + ResultSetMetaData rsmd = pstmt.getMetaData(); + assertNull(rsmd); + pstmt.close(); + + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM rsmd1"); + assertTrue(rs.next()); + assertEquals(0, rs.getInt(1)); + rs.close(); + stmt.close(); + } + + @Test + public void testDatabaseMetaDataNames() throws SQLException { + DatabaseMetaData databaseMetaData = conn.getMetaData(); + ResultSet resultSet = databaseMetaData.getTableTypes(); + ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); + assertEquals(1, resultSetMetaData.getColumnCount()); + assertEquals("TABLE_TYPE", resultSetMetaData.getColumnName(1)); + resultSet.close(); + } + + @Test + public void testTimestampInfo() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT tm, tmtz, ts, tstz FROM timetest"); + ResultSetMetaData rsmd = rs.getMetaData(); + + // For reference: + // TestUtil.createTable(con, "timetest", "tm time(3), tmtz timetz, ts timestamp without time + // zone, tstz timestamp(6) with time zone"); + + assertEquals(3, rsmd.getScale(1)); + assertEquals(6, rsmd.getScale(2)); + assertEquals(6, rsmd.getScale(3)); + assertEquals(6, rsmd.getScale(4)); + + assertEquals(12, rsmd.getColumnDisplaySize(1)); + assertEquals(21, rsmd.getColumnDisplaySize(2)); + assertEquals(29, rsmd.getColumnDisplaySize(3)); + assertEquals(35, rsmd.getColumnDisplaySize(4)); + + rs.close(); + stmt.close(); + } + + @Test + public void testColumnDisplaySize() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery( + "SELECT fixedchar, fixedvarchar, unfixedvarchar, txt, bytearr, num64, 
num60, num, ip FROM sizetest"); + ResultSetMetaData rsmd = rs.getMetaData(); + + assertEquals(5, rsmd.getColumnDisplaySize(1)); + assertEquals(5, rsmd.getColumnDisplaySize(2)); + assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(3)); + assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(4)); + assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(5)); + assertEquals(8, rsmd.getColumnDisplaySize(6)); + assertEquals(7, rsmd.getColumnDisplaySize(7)); + assertEquals(131089, rsmd.getColumnDisplaySize(8)); + assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(9)); + } + + @Test + public void testIsAutoIncrement() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT c,b,a FROM serialtest"); + ResultSetMetaData rsmd = rs.getMetaData(); + + assertTrue(!rsmd.isAutoIncrement(1)); + assertTrue(rsmd.isAutoIncrement(2)); + assertTrue(rsmd.isAutoIncrement(3)); + assertEquals("bigserial", rsmd.getColumnTypeName(2)); + assertEquals("serial", rsmd.getColumnTypeName(3)); + + rs.close(); + stmt.close(); + } + + @Test + public void testClassesMatch() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate( + "INSERT INTO alltypes (bool, i2, i4, i8, num, re, fl, ch, vc, tx, d, t, tz, ts, tsz, bt) VALUES ('t', 2, 4, 8, 3.1, 3.14, 3.141, 'c', 'vc', 'tx', '2004-04-09', '09:01:00', '11:11:00-01','2004-04-09 09:01:00','1999-09-19 14:23:12-09', '\\\\123')"); + ResultSet rs = stmt.executeQuery("SELECT * FROM alltypes"); + ResultSetMetaData rsmd = rs.getMetaData(); + assertTrue(rs.next()); + for (int i = 0; i < rsmd.getColumnCount(); i++) { + assertEquals(rs.getObject(i + 1).getClass().getName(), rsmd.getColumnClassName(i + 1)); + } + } + + @Test + public void testComposite() throws Exception { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT col FROM compositetest"); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(Types.STRUCT, 
rsmd.getColumnType(1)); + assertEquals("rsmd1", rsmd.getColumnTypeName(1)); + } + + @Test + public void testUnexecutedStatement() throws Exception { + assumePreparedStatementMetadataSupported(); + PreparedStatement pstmt = conn.prepareStatement("SELECT col FROM compositetest"); + // we have not executed the statement but we can still get the metadata + ResultSetMetaData rsmd = pstmt.getMetaData(); + assertEquals(Types.STRUCT, rsmd.getColumnType(1)); + assertEquals("rsmd1", rsmd.getColumnTypeName(1)); + } + + @Test + public void testClosedResultSet() throws Exception { + assumePreparedStatementMetadataSupported(); + PreparedStatement pstmt = conn.prepareStatement("SELECT col FROM compositetest"); + ResultSet rs = pstmt.executeQuery(); + rs.close(); + // close the statement and make sure we can still get the metadata + ResultSetMetaData rsmd = pstmt.getMetaData(); + assertEquals(Types.STRUCT, rsmd.getColumnType(1)); + assertEquals("rsmd1", rsmd.getColumnTypeName(1)); + } + + @Test + public void testIdentityColumn() throws Exception { + assumeMinimumServerVersion(ServerVersion.v10); + assumePreparedStatementMetadataSupported(); + PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM identitytest"); + ResultSet rs = pstmt.executeQuery(); + ResultSetMetaData rsmd = pstmt.getMetaData(); + Assert.assertTrue(rsmd.isAutoIncrement(1)); + } + + // Verifies that the field metadatacache will cache when enabled and also functions properly + // when disabled. 
+ @Test + public void testCache() throws Exception { + boolean isCacheDisabled = Integer.valueOf(0).equals(databaseMetadataCacheFields) + || Integer.valueOf(0).equals(databaseMetadataCacheFieldsMib); + + { + PreparedStatement pstmt = conn.prepareStatement("SELECT a FROM rsmd_cache"); + ResultSet rs = pstmt.executeQuery(); + PGResultSetMetaData pgrsmd = (PGResultSetMetaData) rs.getMetaData(); + assertEquals("a", pgrsmd.getBaseColumnName(1)); + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(pstmt); + } + + Statement stmt = conn.createStatement(); + stmt.execute("ALTER TABLE rsmd_cache RENAME COLUMN a TO b"); + TestUtil.closeQuietly(stmt); + + { + PreparedStatement pstmt = conn.prepareStatement("SELECT b FROM rsmd_cache"); + ResultSet rs = pstmt.executeQuery(); + PGResultSetMetaData pgrsmd = (PGResultSetMetaData) rs.getMetaData(); + // Unless the cache is disabled, we expect to see stale results. + assertEquals(isCacheDisabled ? "b" : "a", pgrsmd.getBaseColumnName(1)); + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(pstmt); + } + } + + private void assumePreparedStatementMetadataSupported() { + Assume.assumeTrue("prepared statement metadata is not supported for simple protocol", + preferQueryMode.compareTo(PreferQueryMode.EXTENDED_FOR_PREPARED) >= 0); + } + + @Test + public void testSmallSerialColumns() throws SQLException { + org.junit.Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)); + TestUtil.createTable(con, "smallserial_test", "a smallserial"); + + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT a FROM smallserial_test"); + ResultSetMetaData rsmd = rs.getMetaData(); + assertTrue(rsmd.isAutoIncrement(1)); + assertEquals("smallserial_test", rsmd.getTableName(1)); + assertEquals("a", rsmd.getColumnName(1)); + assertEquals(Types.SMALLINT, rsmd.getColumnType(1)); + assertEquals("smallserial", rsmd.getColumnTypeName(1)); + rs.close(); + + TestUtil.dropTable(con, "smallserial_test"); + } + + 
  // A plain smallint column fed by an owned sequence should behave like
  // smallserial on 9.2+ servers, and report int2 on older ones.
  @Test
  public void testSmallSerialSequenceLikeColumns() throws SQLException {
    Statement stmt = con.createStatement();
    // This is the equivalent of the smallserial, not the actual smallserial
    stmt.execute("CREATE SEQUENCE smallserial_test_a_seq;\n"
        + "CREATE TABLE smallserial_test (\n"
        + "    a smallint NOT NULL DEFAULT nextval('smallserial_test_a_seq')\n"
        + ");\n"
        + "ALTER SEQUENCE smallserial_test_a_seq OWNED BY smallserial_test.a;");

    ResultSet rs = stmt.executeQuery("SELECT a FROM smallserial_test");
    ResultSetMetaData rsmd = rs.getMetaData();
    assertTrue(rsmd.isAutoIncrement(1));
    assertEquals("smallserial_test", rsmd.getTableName(1));
    assertEquals("a", rsmd.getColumnName(1));
    assertEquals(Types.SMALLINT, rsmd.getColumnType(1));
    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)) {
      // in Pg 9.2+ it behaves like smallserial
      assertEquals("smallserial", rsmd.getColumnTypeName(1));
    } else {
      assertEquals("int2", rsmd.getColumnTypeName(1));
    }
    rs.close();

    // Dropping the table also drops the owned sequence.
    // NOTE(review): stmt leaks if any assertion above fails.
    stmt.execute("DROP TABLE smallserial_test");
    stmt.close();
  }

}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetRefreshTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetRefreshTest.java
new file mode 100644
index 0000000..023f271
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetRefreshTest.java
@@ -0,0 +1,54 @@
/*
 * Copyright (c) 2022, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertTrue; + +import org.postgresql.test.TestUtil; + +import org.junit.Test; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +public class ResultSetRefreshTest extends BaseTest4 { + @Test + public void testWithDataColumnThatRequiresEscaping() throws Exception { + TestUtil.dropTable(con, "refresh_row_bad_ident"); + TestUtil.execute(con, "CREATE TABLE refresh_row_bad_ident (id int PRIMARY KEY, \"1 FROM refresh_row_bad_ident; SELECT 2; SELECT *\" int)"); + TestUtil.execute(con, "INSERT INTO refresh_row_bad_ident (id) VALUES (1), (2), (3)"); + + Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = stmt.executeQuery("SELECT * FROM refresh_row_bad_ident"); + assertTrue(rs.next()); + try { + rs.refreshRow(); + } catch (SQLException ex) { + throw new RuntimeException("ResultSet.refreshRow() did not handle escaping data column identifiers", ex); + } + rs.close(); + stmt.close(); + } + + @Test + public void testWithKeyColumnThatRequiresEscaping() throws Exception { + TestUtil.dropTable(con, "refresh_row_bad_ident"); + TestUtil.execute(con, "CREATE TABLE refresh_row_bad_ident (\"my key\" int PRIMARY KEY)"); + TestUtil.execute(con, "INSERT INTO refresh_row_bad_ident VALUES (1), (2), (3)"); + + Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = stmt.executeQuery("SELECT * FROM refresh_row_bad_ident"); + assertTrue(rs.next()); + try { + rs.refreshRow(); + } catch (SQLException ex) { + throw new RuntimeException("ResultSet.refreshRow() did not handle escaping key column identifiers", ex); + } + rs.close(); + stmt.close(); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetTest.java new file mode 100644 index 0000000..0968e61 --- /dev/null +++ 
b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetTest.java @@ -0,0 +1,1445 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import org.postgresql.core.ServerVersion; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PGobject; +import org.postgresql.util.PSQLException; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.lang.reflect.Field; +import java.math.BigDecimal; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/* + * ResultSet tests. 
+ */ +@RunWith(Parameterized.class) +public class ResultSetTest extends BaseTest4 { + + public ResultSetTest(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + Statement stmt = con.createStatement(); + + TestUtil.createTable(con, "testrs", "id integer"); + + stmt.executeUpdate("INSERT INTO testrs VALUES (1)"); + stmt.executeUpdate("INSERT INTO testrs VALUES (2)"); + stmt.executeUpdate("INSERT INTO testrs VALUES (3)"); + stmt.executeUpdate("INSERT INTO testrs VALUES (4)"); + stmt.executeUpdate("INSERT INTO testrs VALUES (6)"); + stmt.executeUpdate("INSERT INTO testrs VALUES (9)"); + + TestUtil.createTable(con, "teststring", "a text"); + stmt.executeUpdate("INSERT INTO teststring VALUES ('12345')"); + + TestUtil.createTable(con, "testint", "a int"); + stmt.executeUpdate("INSERT INTO testint VALUES (12345)"); + + // Boolean Tests + TestUtil.createTable(con, "testbool", "a boolean, b int"); + stmt.executeUpdate("INSERT INTO testbool VALUES(true, 1)"); + stmt.executeUpdate("INSERT INTO testbool VALUES(false, 0)"); + + TestUtil.createTable(con, "testboolstring", "a varchar(30), b boolean"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('1 ', true)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('0', false)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES(' t', true)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('f', false)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('True', true)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES(' False ', false)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('yes', true)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES(' no ', 
false)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('y', true)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('n', false)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('oN', true)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('oFf', false)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('OK', null)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('NOT', null)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('not a boolean', null)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('1.0', null)"); + stmt.executeUpdate("INSERT INTO testboolstring VALUES('0.0', null)"); + + TestUtil.createTable(con, "testboolfloat", "i int, a float4, b boolean"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(1, '1.0'::real, true)"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(2, '0.0'::real, false)"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(3, 1.000::real, true)"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(4, 0.000::real, false)"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(5, '1.001'::real, null)"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(6, '-1.001'::real, null)"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(7, 123.4::real, null)"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(8, 1.234e2::real, null)"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(9, 100.00e-2::real, true)"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(10, '9223371487098961921', null)"); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(11, '10223372036850000000', null)"); + String floatVal = Float.toString(StrictMath.nextDown(Long.MAX_VALUE - 1)); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(12, " + floatVal + ", null)"); + floatVal = Float.toString(StrictMath.nextDown(Long.MAX_VALUE + 1)); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(13, " + floatVal + ", null)"); + floatVal = 
Float.toString(StrictMath.nextUp(Long.MIN_VALUE - 1)); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(14, " + floatVal + ", null)"); + floatVal = Float.toString(StrictMath.nextUp(Long.MIN_VALUE + 1)); + stmt.executeUpdate("INSERT INTO testboolfloat VALUES(15, " + floatVal + ", null)"); + + TestUtil.createTable(con, "testboolint", "a bigint, b boolean"); + stmt.executeUpdate("INSERT INTO testboolint VALUES(1, true)"); + stmt.executeUpdate("INSERT INTO testboolint VALUES(0, false)"); + stmt.executeUpdate("INSERT INTO testboolint VALUES(-1, null)"); + stmt.executeUpdate("INSERT INTO testboolint VALUES(9223372036854775807, null)"); + stmt.executeUpdate("INSERT INTO testboolint VALUES(-9223372036854775808, null)"); + + // End Boolean Tests + + // TestUtil.createTable(con, "testbit", "a bit"); + + TestUtil.createTable(con, "testnumeric", "t text, a numeric"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('1.0', '1.0')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('0.0', '0.0')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('-1.0', '-1.0')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('1.2', '1.2')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('-2.5', '-2.5')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('0.000000000000000000000000000990', '0.000000000000000000000000000990')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('10.0000000000099', '10.0000000000099')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('.10000000000000', '.10000000000000')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('.10', '.10')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('1.10000000000000', '1.10000000000000')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('99999.2', '99999.2')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('99999', '99999')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('-99999.2', '-99999.2')"); + stmt.executeUpdate("INSERT INTO testnumeric 
VALUES('-99999', '-99999')"); + + // Integer.MaxValue + stmt.execute("INSERT INTO testnumeric VALUES('2147483647', '2147483647')"); + + // Integer.MinValue + stmt.execute("INSERT INTO testnumeric VALUES( '-2147483648', '-2147483648')"); + + stmt.executeUpdate("INSERT INTO testnumeric VALUES('2147483648', '2147483648')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('-2147483649', '-2147483649')"); + + // Long.MaxValue + stmt.executeUpdate("INSERT INTO testnumeric VALUES('9223372036854775807','9223372036854775807')"); + stmt.executeUpdate("INSERT INTO testnumeric VALUES('9223372036854775807.9', '9223372036854775807.9')"); + + // Long.MinValue + stmt.executeUpdate("INSERT INTO testnumeric VALUES('-9223372036854775808', '-9223372036854775808')"); + + // Long.MaxValue +1 + stmt.executeUpdate("INSERT INTO testnumeric VALUES('9223372036854775808', '9223372036854775808')"); + + // Long.Minvalue -1 + stmt.executeUpdate("INSERT INTO testnumeric VALUES('-9223372036854775809', '-9223372036854775809')"); + + stmt.executeUpdate("INSERT INTO testnumeric VALUES('10223372036850000000', '10223372036850000000')"); + + TestUtil.createTable(con, "testpgobject", "id integer NOT NULL, d date, PRIMARY KEY (id)"); + stmt.execute("INSERT INTO testpgobject VALUES(1, '2010-11-3')"); + + stmt.close(); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "testrs"); + TestUtil.dropTable(con, "teststring"); + TestUtil.dropTable(con, "testint"); + // TestUtil.dropTable(con, "testbit"); + TestUtil.dropTable(con, "testboolstring"); + TestUtil.dropTable(con, "testboolfloat"); + TestUtil.dropTable(con, "testboolint"); + TestUtil.dropTable(con, "testnumeric"); + TestUtil.dropTable(con, "testpgobject"); + super.tearDown(); + } + + @Test + public void testBackward() throws SQLException { + Statement stmt = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + ResultSet rs = stmt.executeQuery("SELECT * FROM testrs"); + 
rs.afterLast();
    assertTrue(rs.previous());
    rs.close();
    stmt.close();
  }

  // absolute(): positive from start, negative from end, out-of-range positions cursor
  // before-first/after-last and returns false.
  @Test
  public void testAbsolute() throws SQLException {
    Statement stmt =
        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");

    assertTrue(!rs.absolute(0));
    assertEquals(0, rs.getRow());

    assertTrue(rs.absolute(-1));
    assertEquals(6, rs.getRow());

    assertTrue(rs.absolute(1));
    assertEquals(1, rs.getRow());

    assertTrue(!rs.absolute(-10));
    assertEquals(0, rs.getRow());
    assertTrue(rs.next());
    assertEquals(1, rs.getRow());

    assertTrue(!rs.absolute(10));
    assertEquals(0, rs.getRow());
    assertTrue(rs.previous());
    assertEquals(6, rs.getRow());

    stmt.close();
  }

  // relative(): movement from the current row, clamped to before-first/after-last.
  @Test
  public void testRelative() throws SQLException {
    Statement stmt =
        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");

    assertTrue(!rs.relative(0));
    assertEquals(0, rs.getRow());
    assertTrue(rs.isBeforeFirst());

    assertTrue(rs.relative(2));
    assertEquals(2, rs.getRow());

    assertTrue(rs.relative(1));
    assertEquals(3, rs.getRow());

    assertTrue(rs.relative(0));
    assertEquals(3, rs.getRow());

    assertTrue(!rs.relative(-3));
    assertEquals(0, rs.getRow());
    assertTrue(rs.isBeforeFirst());

    assertTrue(rs.relative(4));
    assertEquals(4, rs.getRow());

    assertTrue(rs.relative(-1));
    assertEquals(3, rs.getRow());

    assertTrue(!rs.relative(6));
    assertEquals(0, rs.getRow());
    assertTrue(rs.isAfterLast());

    assertTrue(rs.relative(-4));
    assertEquals(3, rs.getRow());

    assertTrue(!rs.relative(-6));
    assertEquals(0, rs.getRow());
    assertTrue(rs.isBeforeFirst());

    stmt.close();
  }

  // Positioning calls on an empty result must all report "no row".
  // NOTE(review): rs and stmt are never closed in this test.
  @Test
  public void testEmptyResult() throws SQLException {
    Statement stmt =
        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = stmt.executeQuery("SELECT * FROM testrs where id=100");
    rs.beforeFirst();
    rs.afterLast();
    assertTrue(!rs.first());
    assertTrue(!rs.last());
    assertTrue(!rs.next());
  }

  // setMaxFieldSize applies to char/varchar/binary columns only, not to int.
  // NOTE(review): rs and stmt are never closed in this test.
  @Test
  public void testMaxFieldSize() throws SQLException {
    Statement stmt = con.createStatement();
    stmt.setMaxFieldSize(2);

    ResultSet rs = stmt.executeQuery("select * from testint");

    // max should not apply to the following since per the spec
    // it should apply only to binary and char/varchar columns
    rs.next();
    assertEquals("12345", rs.getString(1));
    // getBytes returns 5 bytes for txt transfer, 4 for bin transfer
    assertTrue(rs.getBytes(1).length >= 4);

    // max should apply to the following since the column is
    // a varchar column
    rs = stmt.executeQuery("select * from teststring");
    rs.next();
    assertEquals("12", rs.getString(1));
    assertEquals("12", new String(rs.getBytes(1)));
  }

  @Test
  public void testBooleanBool() throws SQLException {
    testBoolean("testbool", 0);
    testBoolean("testbool", 1);
    testBoolean("testbool", 5);
    testBoolean("testbool", -1);
  }

  @Test
  public void testBooleanString() throws SQLException {
    testBoolean("testboolstring", 0);
    testBoolean("testboolstring", 1);
    testBoolean("testboolstring", 5);
    testBoolean("testboolstring", -1);
  }

  @Test
  public void testBooleanFloat() throws SQLException {
    testBoolean("testboolfloat", 0);
    testBoolean("testboolfloat", 1);
    testBoolean("testboolfloat", 5);
    testBoolean("testboolfloat", -1);
  }

  @Test
  public void testBooleanInt() throws SQLException {
    testBoolean("testboolint", 0);
    testBoolean("testboolint", 1);
    testBoolean("testboolint", 5);
    testBoolean("testboolint", -1);
  }

  // Column b carries the expected boolean for column a; a null b marks a value
  // that must fail coercion. Exercised at several prepare thresholds.
  public void testBoolean(String table, int prepareThreshold) throws SQLException {
    PreparedStatement pstmt = con.prepareStatement("select a, b from " + table);
    ((org.postgresql.PGStatement) pstmt).setPrepareThreshold(prepareThreshold);
    ResultSet rs =
pstmt.executeQuery();
    while (rs.next()) {
      rs.getBoolean(2);
      Boolean expected = rs.wasNull() ? null : rs.getBoolean(2); // Hack to get SQL NULL
      if (expected != null) {
        assertEquals(expected, rs.getBoolean(1));
      } else {
        // expected value with null are bad values
        try {
          rs.getBoolean(1);
          fail();
        } catch (SQLException e) {
          assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
        }
      }
    }
    rs.close();
    pstmt.close();
  }

  @Test
  public void testGetBooleanJDBCCompliance() throws SQLException {
    // The JDBC specification in Table B-6 "Use of ResultSet getter Methods to Retrieve JDBC Data Types"
    // the getBoolean have this Supported JDBC Type: TINYINT, SMALLINT, INTEGER, BIGINT, REAL, FLOAT,
    // DOUBLE, DECIMAL, NUMERIC, BIT, BOOLEAN, CHAR, VARCHAR, LONGVARCHAR

    // There is no TINYINT in PostgreSQL
    testgetBoolean("int2"); // SMALLINT
    testgetBoolean("int4"); // INTEGER
    testgetBoolean("int8"); // BIGINT
    testgetBoolean("float4"); // REAL
    testgetBoolean("float8"); // FLOAT, DOUBLE
    testgetBoolean("numeric"); // DECIMAL, NUMERIC
    testgetBoolean("bpchar"); // CHAR
    testgetBoolean("varchar"); // VARCHAR
    testgetBoolean("text"); // LONGVARCHAR?
  }

  // For the given type: 1 -> true, 0 -> false, 2 -> coercion error.
  public void testgetBoolean(String dataType) throws SQLException {
    Statement stmt = con.createStatement();
    ResultSet rs = stmt.executeQuery("select 1::" + dataType + ", 0::" + dataType + ", 2::" + dataType);
    assertTrue(rs.next());
    assertEquals(true, rs.getBoolean(1));
    assertEquals(false, rs.getBoolean(2));

    try {
      // The JDBC ResultSet JavaDoc states that only 1 and 0 are valid values, so 2 should return error.
      rs.getBoolean(3);
      fail();
    } catch (SQLException e) {
      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
      // message can be 2 or 2.0 depending on whether binary or text
      final String message = e.getMessage();
      if (!"Cannot cast to boolean: \"2.0\"".equals(message)) {
        assertEquals("Cannot cast to boolean: \"2\"", message);
      }
    }
    rs.close();
    stmt.close();
  }

  // Types with no defined boolean conversion must fail cleanly.
  @Test
  public void testgetBadBoolean() throws SQLException {
    testBadBoolean("'2017-03-13 14:25:48.130861'::timestamp", "2017-03-13 14:25:48.130861");
    testBadBoolean("'2017-03-13'::date", "2017-03-13");
    testBadBoolean("'2017-03-13 14:25:48.130861'::time", "14:25:48.130861");
    testBadBoolean("ARRAY[[1,0],[0,1]]", "{{1,0},{0,1}}");
    testBadBoolean("29::bit(4)", "1101");
  }

  @Test
  public void testGetBadUuidBoolean() throws SQLException {
    assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
    testBadBoolean("'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'::uuid", "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11");
  }

  // Expects CANNOT_COERCE (text) or DATA_TYPE_MISMATCH (binary transfer).
  public void testBadBoolean(String select, String value) throws SQLException {
    Statement stmt = con.createStatement();
    ResultSet rs = stmt.executeQuery("select " + select);
    assertTrue(rs.next());
    try {
      rs.getBoolean(1);
      fail();
    } catch (SQLException e) {
      //binary transfer gets different error code and message
      if (org.postgresql.util.PSQLState.DATA_TYPE_MISMATCH.getState().equals(e.getSQLState())) {
        final String message = e.getMessage();
        if (!message.startsWith("Cannot convert the column of type ")) {
          fail(message);
        }
        if (!message.endsWith(" to requested type boolean.")) {
          fail(message);
        }
      } else {
        assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
        assertEquals("Cannot cast to boolean: \"" + value + "\"", e.getMessage());
      }
    }
    rs.close();
    stmt.close();
  }

  // getByte over the testnumeric rows; values past byte range raise 22003.
  @Test
  public void testgetByte() throws SQLException {
    ResultSet rs =
con.createStatement().executeQuery("select a from testnumeric");

    // Rows in insertion order: 1.0, 0.0, -1.0, 1.2, -2.5, 0.00...990,
    // 10.0000000000099, .10000000000000, .10, 1.10000000000000 — fractional
    // values are truncated toward zero.
    assertTrue(rs.next());
    assertEquals(1, rs.getByte(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getByte(1));

    assertTrue(rs.next());
    assertEquals(-1, rs.getByte(1));

    assertTrue(rs.next());
    assertEquals(1, rs.getByte(1));

    assertTrue(rs.next());
    assertEquals(-2, rs.getByte(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getByte(1));

    assertTrue(rs.next());
    assertEquals(10, rs.getByte(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getByte(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getByte(1));

    assertTrue(rs.next());
    assertEquals(1, rs.getByte(1));

    // All remaining rows exceed byte range: numeric value out of range (22003).
    while (rs.next()) {
      try {
        rs.getByte(1);
        fail("Exception expected.");
      } catch (SQLException e) {
        assertEquals(e.getSQLState(), "22003");
      }
    }
    rs.close();
  }

  // Same walk for getShort; overflow rows must throw.
  @Test
  public void testgetShort() throws SQLException {
    ResultSet rs = con.createStatement().executeQuery("select a from testnumeric");

    assertTrue(rs.next());
    assertEquals(1, rs.getShort(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getShort(1));

    assertTrue(rs.next());
    assertEquals(-1, rs.getShort(1));

    assertTrue(rs.next());
    assertEquals(1, rs.getShort(1));

    assertTrue(rs.next());
    assertEquals(-2, rs.getShort(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getShort(1));

    assertTrue(rs.next());
    assertEquals(10, rs.getShort(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getShort(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getShort(1));

    assertTrue(rs.next());
    assertEquals(1, rs.getShort(1));

    while (rs.next()) {
      try {
        rs.getShort(1);
        fail("Exception expected.");
      } catch (SQLException e) {
        // NOTE(review): overflow expected; the SQLState is intentionally not asserted here
      }
    }
    rs.close();
  }

  // Same walk for getInt up to Integer.MIN_VALUE; larger magnitudes must throw.
  @Test
  public void testgetInt() throws SQLException {
    ResultSet rs = con.createStatement().executeQuery("select a from testnumeric");

    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(-1, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(-2, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(10, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(0, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(99999, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(99999, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(-99999, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(-99999, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(Integer.MAX_VALUE, rs.getInt(1));

    assertTrue(rs.next());
    assertEquals(Integer.MIN_VALUE, rs.getInt(1));

    while (rs.next()) {
      try {
        rs.getInt(1);
        fail("Exception expected." + rs.getString(1));
      } catch (SQLException e) {
        // NOTE(review): overflow expected; the SQLState is intentionally not asserted here
      }
    }
    rs.close();
    // test for Issue #2748
    rs = con.createStatement().executeQuery("select 2.0 :: double precision");
    assertTrue(rs.next());
    assertEquals(2, rs.getInt(1));
    rs.close();

  }

  // getLong: exact values within long range, truncation of fractions,
  // overflow errors, and float4-to-long precision behavior.
  @Test
  public void testgetLong() throws SQLException {
    ResultSet rs = null;

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.0'");
    assertTrue(rs.next());
    assertEquals(1, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.0'");
    assertTrue(rs.next());
    assertEquals(0, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-1.0'");
    assertTrue(rs.next());
    assertEquals(-1, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.2'");
    assertTrue(rs.next());
    assertEquals(1, rs.getLong(1));
    rs.close();

    rs =
con.createStatement().executeQuery("select a from testnumeric where t = '-2.5'");
    assertTrue(rs.next());
    assertEquals(-2, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.000000000000000000000000000990'");
    assertTrue(rs.next());
    assertEquals(0, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '10.0000000000099'");
    assertTrue(rs.next());
    assertEquals(10, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10000000000000'");
    assertTrue(rs.next());
    assertEquals(0, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10'");
    assertTrue(rs.next());
    assertEquals(0, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.10000000000000'");
    assertTrue(rs.next());
    assertEquals(1, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999.2'");
    assertTrue(rs.next());
    assertEquals(99999, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999'");
    assertTrue(rs.next());
    assertEquals(99999, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999.2'");
    assertTrue(rs.next());
    assertEquals(-99999, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999'");
    assertTrue(rs.next());
    assertEquals(-99999, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483647'");
    assertTrue(rs.next());
    assertEquals((Integer.MAX_VALUE), rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483648'");
    assertTrue(rs.next());
    assertEquals((Integer.MIN_VALUE), rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483648'");
    assertTrue(rs.next());
    assertEquals(((long) Integer.MAX_VALUE) + 1, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483649'");
    assertTrue(rs.next());
    assertEquals(((long) Integer.MIN_VALUE) - 1, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807'");
    assertTrue(rs.next());
    assertEquals(Long.MAX_VALUE, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807.9'");
    assertTrue(rs.next());
    assertEquals(Long.MAX_VALUE, rs.getLong(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775808'");
    assertTrue(rs.next());
    assertEquals(Long.MIN_VALUE, rs.getLong(1));
    rs.close();

    // Values beyond the long range must raise an exception.
    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775808'");
    assertTrue(rs.next());
    try {
      rs.getLong(1);
      fail("Exception expected. " + rs.getString(1));
    } catch (SQLException e) {
      // NOTE(review): overflow expected; the SQLState is intentionally not asserted here
    }
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775809'");
    assertTrue(rs.next());
    try {
      rs.getLong(1);
      fail("Exception expected. " + rs.getString(1));
    } catch (SQLException e) {
      // overflow expected
    }
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '10223372036850000000'");
    assertTrue(rs.next());
    try {
      rs.getLong(1);
      fail("Exception expected. " + rs.getString(1));
    } catch (SQLException e) {
      // overflow expected
    }
    rs.close();

    // float4 column: rows 1-9 convert exactly, row 10 only approximately.
    rs = con.createStatement().executeQuery("select i, a from testboolfloat order by i");

    assertTrue(rs.next());
    assertEquals(1, rs.getLong(2));

    assertTrue(rs.next());
    assertEquals(0, rs.getLong(2));

    assertTrue(rs.next());
    assertEquals(1, rs.getLong(2));

    assertTrue(rs.next());
    assertEquals(0, rs.getLong(2));

    assertTrue(rs.next());
    assertEquals(1, rs.getLong(2));

    assertTrue(rs.next());
    assertEquals(-1, rs.getLong(2));

    assertTrue(rs.next());
    assertEquals(123, rs.getLong(2));

    assertTrue(rs.next());
    assertEquals(123, rs.getLong(2));

    assertTrue(rs.next());
    assertEquals(1, rs.getLong(2));

    assertTrue(rs.next());
    // the string value from database trims the significant digits, leading to larger variance than binary
    // the liberica jdk gets similar variance, even in forced binary mode
    assertEquals(9223371487098961921.0, rs.getLong(2), 1.0e11);

    // Rows 12/15 are just inside the long range and convert approximately;
    // rows 11/13/14 are outside and must throw.
    assertTrue(rs.next());
    do {
      try {
        int row = rs.getInt(1);
        long l = rs.getLong(2);
        if ( row == 12 ) {
          assertEquals(9223371487098961920.0, l, 1.0e11);
        } else if ( row == 15 ) {
          assertEquals(-9223371487098961920.0, l, 1.0e11);
        } else {
          fail("Exception expected."
+ rs.getString(2));
        }
      } catch (SQLException e) {
        // overflow expected for the out-of-range rows
      }
    } while (rs.next());

    rs.close();
  }

  // getBigDecimal must preserve the exact numeric value and scale.
  @Test
  public void testgetBigDecimal() throws SQLException {
    ResultSet rs = null;

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.0'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(1.0), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.0'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(0.0), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-1.0'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(-1.0), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.2'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(1.2), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2.5'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(-2.5), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.000000000000000000000000000990'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("0.000000000000000000000000000990"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '10.0000000000099'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("10.0000000000099"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10000000000000'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("0.10000000000000"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("0.10"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.10000000000000'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("1.10000000000000"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999.2'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(99999.2), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(99999), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999.2'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(-99999.2), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(-99999), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483647'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(2147483647), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483648'");
    assertTrue(rs.next());
    assertEquals(BigDecimal.valueOf(-2147483648), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483648'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("2147483648"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483649'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("-2147483649"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("9223372036854775807"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807.9'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("9223372036854775807.9"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775808'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("-9223372036854775808"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775808'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("9223372036854775808"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775809'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("-9223372036854775809"), rs.getBigDecimal(1));
    rs.close();

    rs = con.createStatement().executeQuery("select a from testnumeric where t = '10223372036850000000'");
    assertTrue(rs.next());
    assertEquals(new BigDecimal("10223372036850000000"), rs.getBigDecimal(1));
    rs.close();
  }

  // Fetch size/direction and type/concurrency must round-trip through both
  // the Statement and the ResultSet it produces.
  @Test
  public void testParameters() throws SQLException {
    Statement stmt =
        con.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
    stmt.setFetchSize(100);
    stmt.setFetchDirection(ResultSet.FETCH_UNKNOWN);

    ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");

    assertEquals(ResultSet.CONCUR_UPDATABLE, stmt.getResultSetConcurrency());
    assertEquals(ResultSet.TYPE_SCROLL_SENSITIVE, stmt.getResultSetType());
    assertEquals(100, stmt.getFetchSize());
    assertEquals(ResultSet.FETCH_UNKNOWN, stmt.getFetchDirection());

    assertEquals(ResultSet.CONCUR_UPDATABLE, rs.getConcurrency());
    assertEquals(ResultSet.TYPE_SCROLL_SENSITIVE, rs.getType());
    assertEquals(100, rs.getFetchSize());
    assertEquals(ResultSet.FETCH_UNKNOWN, rs.getFetchDirection());

rs.close(); + stmt.close(); + } + + @Test + public void testCreateStatementWithInvalidResultSetParams() throws SQLException { + assertThrows(PSQLException.class, () -> con.createStatement(-1, -1,-1)); + } + + @Test + public void testCreateStatementWithInvalidResultSetConcurrency() throws SQLException { + assertThrows(PSQLException.class, () -> con.createStatement( ResultSet.TYPE_SCROLL_INSENSITIVE, -1) ); + } + + @Test + public void testCreateStatementWithInvalidResultSetHoldability() throws SQLException { + assertThrows(PSQLException.class, () -> con.createStatement( ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE, -1) ); + } + + @Test + public void testPrepareStatementWithInvalidResultSetParams() throws SQLException { + assertThrows(PSQLException.class, () -> con.prepareStatement("SELECT id FROM testrs", -1, -1,-1)); + } + + @Test + public void testPrepareStatementWithInvalidResultSetConcurrency() throws SQLException { + assertThrows(PSQLException.class, () -> con.prepareStatement("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, -1) ); + } + + @Test + public void testPrepareStatementWithInvalidResultSetHoldability() throws SQLException { + assertThrows(PSQLException.class, () -> con.prepareStatement("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE, -1) ); + } + + @Test + public void testPrepareCallWithInvalidResultSetParams() throws SQLException { + assertThrows(PSQLException.class, () -> con.prepareCall("SELECT id FROM testrs", -1, -1,-1)); + } + + @Test + public void testPrepareCallWithInvalidResultSetConcurrency() throws SQLException { + assertThrows(PSQLException.class, () -> con.prepareCall("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, -1) ); + } + + @Test + public void testPrepareCallWithInvalidResultSetHoldability() throws SQLException { + assertThrows(PSQLException.class, () -> con.prepareCall("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE, 
-1) ); + } + + @Test + public void testZeroRowResultPositioning() throws SQLException { + Statement stmt = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = + stmt.executeQuery("SELECT * FROM pg_database WHERE datname='nonexistentdatabase'"); + assertTrue(!rs.previous()); + assertTrue(!rs.previous()); + assertTrue(!rs.next()); + assertTrue(!rs.next()); + assertTrue(!rs.next()); + assertTrue(!rs.next()); + assertTrue(!rs.next()); + assertTrue(!rs.previous()); + assertTrue(!rs.first()); + assertTrue(!rs.last()); + assertEquals(0, rs.getRow()); + assertTrue(!rs.absolute(1)); + assertTrue(!rs.relative(1)); + assertTrue(!rs.isBeforeFirst()); + assertTrue(!rs.isAfterLast()); + assertTrue(!rs.isFirst()); + assertTrue(!rs.isLast()); + rs.close(); + stmt.close(); + } + + @Test + public void testRowResultPositioning() throws SQLException { + Statement stmt = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + // Create a one row result set. 
+ ResultSet rs = stmt.executeQuery("SELECT * FROM pg_database WHERE datname='template1'"); + + assertTrue(rs.isBeforeFirst()); + assertTrue(!rs.isAfterLast()); + assertTrue(!rs.isFirst()); + assertTrue(!rs.isLast()); + + assertTrue(rs.next()); + + assertTrue(!rs.isBeforeFirst()); + assertTrue(!rs.isAfterLast()); + assertTrue(rs.isFirst()); + assertTrue(rs.isLast()); + + assertTrue(!rs.next()); + + assertTrue(!rs.isBeforeFirst()); + assertTrue(rs.isAfterLast()); + assertTrue(!rs.isFirst()); + assertTrue(!rs.isLast()); + + assertTrue(rs.previous()); + + assertTrue(!rs.isBeforeFirst()); + assertTrue(!rs.isAfterLast()); + assertTrue(rs.isFirst()); + assertTrue(rs.isLast()); + + assertTrue(rs.absolute(1)); + + assertTrue(!rs.isBeforeFirst()); + assertTrue(!rs.isAfterLast()); + assertTrue(rs.isFirst()); + assertTrue(rs.isLast()); + + assertTrue(!rs.absolute(0)); + + assertTrue(rs.isBeforeFirst()); + assertTrue(!rs.isAfterLast()); + assertTrue(!rs.isFirst()); + assertTrue(!rs.isLast()); + + assertTrue(!rs.absolute(2)); + + assertTrue(!rs.isBeforeFirst()); + assertTrue(rs.isAfterLast()); + assertTrue(!rs.isFirst()); + assertTrue(!rs.isLast()); + + rs.close(); + stmt.close(); + } + + @Test + public void testForwardOnlyExceptions() throws SQLException { + // Test that illegal operations on a TYPE_FORWARD_ONLY resultset + // correctly result in throwing an exception. 
+ Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + ResultSet rs = stmt.executeQuery("SELECT * FROM testnumeric"); + + try { + rs.absolute(1); + fail("absolute() on a TYPE_FORWARD_ONLY resultset did not throw an exception"); + } catch (SQLException e) { + } + try { + rs.afterLast(); + fail( + "afterLast() on a TYPE_FORWARD_ONLY resultset did not throw an exception on a TYPE_FORWARD_ONLY resultset"); + } catch (SQLException e) { + } + try { + rs.beforeFirst(); + fail("beforeFirst() on a TYPE_FORWARD_ONLY resultset did not throw an exception"); + } catch (SQLException e) { + } + try { + rs.first(); + fail("first() on a TYPE_FORWARD_ONLY resultset did not throw an exception"); + } catch (SQLException e) { + } + try { + rs.last(); + fail("last() on a TYPE_FORWARD_ONLY resultset did not throw an exception"); + } catch (SQLException e) { + } + try { + rs.previous(); + fail("previous() on a TYPE_FORWARD_ONLY resultset did not throw an exception"); + } catch (SQLException e) { + } + try { + rs.relative(1); + fail("relative() on a TYPE_FORWARD_ONLY resultset did not throw an exception"); + } catch (SQLException e) { + } + + try { + rs.setFetchDirection(ResultSet.FETCH_REVERSE); + fail( + "setFetchDirection(FETCH_REVERSE) on a TYPE_FORWARD_ONLY resultset did not throw an exception"); + } catch (SQLException e) { + } + + try { + rs.setFetchDirection(ResultSet.FETCH_UNKNOWN); + fail( + "setFetchDirection(FETCH_UNKNOWN) on a TYPE_FORWARD_ONLY resultset did not throw an exception"); + } catch (SQLException e) { + } + + rs.close(); + stmt.close(); + } + + @Test + public void testCaseInsensitiveFindColumn() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT id, id AS \"ID2\" FROM testrs"); + assertEquals(1, rs.findColumn("id")); + assertEquals(1, rs.findColumn("ID")); + assertEquals(1, rs.findColumn("Id")); + assertEquals(2, rs.findColumn("id2")); + assertEquals(2, 
rs.findColumn("ID2")); + assertEquals(2, rs.findColumn("Id2")); + try { + rs.findColumn("id3"); + fail("There isn't an id3 column in the ResultSet."); + } catch (SQLException sqle) { + } + } + + @Test + public void testGetOutOfBounds() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT id FROM testrs"); + assertTrue(rs.next()); + + try { + rs.getInt(-9); + } catch (SQLException sqle) { + } + + try { + rs.getInt(1000); + } catch (SQLException sqle) { + } + } + + @Test + public void testClosedResult() throws SQLException { + Statement stmt = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = stmt.executeQuery("SELECT id FROM testrs"); + rs.close(); + + rs.close(); // Closing twice is allowed. + try { + rs.getInt(1); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.getInt("id"); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.getType(); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.wasNull(); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.absolute(3); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.isBeforeFirst(); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.setFetchSize(10); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.getMetaData(); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.rowUpdated(); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.updateInt(1, 1); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.moveToInsertRow(); + fail("Expected SQLException"); + } catch (SQLException e) { + } + try { + rs.clearWarnings(); + fail("Expected SQLException"); + } catch (SQLException e) { + } + } + + /* + * The JDBC spec says when you have duplicate column 
names, the first one should be returned. + */ + @Test + public void testDuplicateColumnNameOrder() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT 1 AS a, 2 AS a"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt("a")); + } + + @Test + public void testTurkishLocale() throws SQLException { + Locale current = Locale.getDefault(); + try { + Locale.setDefault(new Locale("tr", "TR")); + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT id FROM testrs"); + int sum = 0; + while (rs.next()) { + sum += rs.getInt("ID"); + } + rs.close(); + assertEquals(25, sum); + } finally { + Locale.setDefault(current); + } + } + + @Test + public void testUpdateWithPGobject() throws SQLException { + Statement stmt = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + + ResultSet rs = stmt.executeQuery("select * from testpgobject where id = 1"); + assertTrue(rs.next()); + assertEquals("2010-11-03", rs.getDate("d").toString()); + + PGobject pgobj = new PGobject(); + pgobj.setType("date"); + pgobj.setValue("2014-12-23"); + rs.updateObject("d", pgobj); + rs.updateRow(); + rs.close(); + + ResultSet rs1 = stmt.executeQuery("select * from testpgobject where id = 1"); + assertTrue(rs1.next()); + assertEquals("2014-12-23", rs1.getDate("d").toString()); + rs1.close(); + + stmt.close(); + } + + /** + * Test the behavior of the result set column mapping cache for simple statements. 
+ */ + @Test + public void testStatementResultSetColumnMappingCache() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("select * from testrs"); + Map columnNameIndexMap; + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertEquals(null, columnNameIndexMap); + assertTrue(rs.next()); + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertEquals(null, columnNameIndexMap); + rs.getInt("ID"); + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertNotNull(columnNameIndexMap); + rs.getInt("id"); + assertSame(columnNameIndexMap, getResultSetColumnNameIndexMap(rs)); + rs.close(); + rs = stmt.executeQuery("select * from testrs"); + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertEquals(null, columnNameIndexMap); + assertTrue(rs.next()); + rs.getInt("Id"); + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertNotNull(columnNameIndexMap); + rs.close(); + stmt.close(); + } + + /** + * Test the behavior of the result set column mapping cache for prepared statements. 
+ */ + @Test + public void testPreparedStatementResultSetColumnMappingCache() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("SELECT id FROM testrs"); + ResultSet rs = pstmt.executeQuery(); + Map columnNameIndexMap; + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertEquals(null, columnNameIndexMap); + assertTrue(rs.next()); + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertEquals(null, columnNameIndexMap); + rs.getInt("id"); + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertNotNull(columnNameIndexMap); + rs.close(); + rs = pstmt.executeQuery(); + assertTrue(rs.next()); + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertEquals(null, columnNameIndexMap); + rs.getInt("id"); + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertNotNull(columnNameIndexMap); + rs.close(); + pstmt.close(); + } + + /** + * Test the behavior of the result set column mapping cache for prepared statements once the + * statement is named. + */ + @Test + public void testNamedPreparedStatementResultSetColumnMappingCache() throws SQLException { + assumeTrue("Simple protocol only mode does not support server-prepared statements", + preferQueryMode != PreferQueryMode.SIMPLE); + PreparedStatement pstmt = con.prepareStatement("SELECT id FROM testrs"); + ResultSet rs; + // Make sure the prepared statement is named. + // This ensures column mapping cache is reused across different result sets. 
+ for (int i = 0; i < 5; i++) { + rs = pstmt.executeQuery(); + rs.close(); + } + rs = pstmt.executeQuery(); + assertTrue(rs.next()); + rs.getInt("id"); + Map columnNameIndexMap; + columnNameIndexMap = getResultSetColumnNameIndexMap(rs); + assertNotNull(columnNameIndexMap); + rs.close(); + rs = pstmt.executeQuery(); + assertTrue(rs.next()); + rs.getInt("id"); + assertSame( + "Cached mapping should be same between different result sets of same named prepared statement", + columnNameIndexMap, getResultSetColumnNameIndexMap(rs)); + rs.close(); + pstmt.close(); + } + + @SuppressWarnings("unchecked") + private Map getResultSetColumnNameIndexMap(ResultSet stmt) { + try { + Field columnNameIndexMapField = stmt.getClass().getDeclaredField("columnNameIndexMap"); + columnNameIndexMapField.setAccessible(true); + return (Map) columnNameIndexMapField.get(stmt); + } catch (Exception e) { + } + return null; + } + + private static class SelectTimestampManyTimes implements Callable { + + private final Connection connection; + private final int expectedYear; + + protected SelectTimestampManyTimes(Connection connection, int expectedYear) { + this.connection = connection; + this.expectedYear = expectedYear; + } + + @Override + public Integer call() throws SQLException { + int year = expectedYear; + try (Statement statement = connection.createStatement()) { + for (int i = 0; i < 10; i++) { + try (ResultSet resultSet = statement.executeQuery( + String.format("SELECT unnest(array_fill('8/10/%d'::timestamp, ARRAY[%d]))", + expectedYear, 500))) { + while (resultSet.next()) { + Timestamp d = resultSet.getTimestamp(1); + year = 1900 + d.getYear(); + if (year != expectedYear) { + return year; + } + } + } + } + } + return year; + } + + } + + @Test + public void testTimestamp() throws InterruptedException, ExecutionException, TimeoutException { + ExecutorService e = Executors.newFixedThreadPool(2); + Integer year1 = 7777; + Future future1 = e.submit(new SelectTimestampManyTimes(con, year1)); + 
Integer year2 = 2017; + Future future2 = e.submit(new SelectTimestampManyTimes(con, year2)); + assertEquals("Year was changed in another thread", year1, future1.get(1, TimeUnit.MINUTES)); + assertEquals("Year was changed in another thread", year2, future2.get(1, TimeUnit.MINUTES)); + e.shutdown(); + e.awaitTermination(1, TimeUnit.MINUTES); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SearchPathLookupTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SearchPathLookupTest.java new file mode 100644 index 0000000..e87b803 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SearchPathLookupTest.java @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.core.BaseConnection; +import org.postgresql.core.TypeInfo; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.ResultSet; +import java.sql.Statement; + +/* +* TestCase to test the internal functionality of org.postgresql.jdbc2.DatabaseMetaData +* +*/ +class SearchPathLookupTest { + private BaseConnection con; + + @BeforeEach + void setUp() throws Exception { + con = (BaseConnection) TestUtil.openDB(); + } + + // TODO: make @getMetaData() consider search_path as well + + /** + * This usecase is most common, here the object we are searching for is in the current_schema (the + * first schema in the search_path). 
+ */ + @Test + void searchPathNormalLookup() throws Exception { + Statement stmt = con.createStatement(); + try { + TestUtil.createSchema(con, "first_schema"); + TestUtil.createTable(con, "first_schema.x", "first_schema_field_n int4"); + TestUtil.createSchema(con, "second_schema"); + TestUtil.createTable(con, "second_schema.x", "second_schema_field_n text"); + TestUtil.createSchema(con, "third_schema"); + TestUtil.createTable(con, "third_schema.x", "third_schema_field_n float"); + TestUtil.createSchema(con, "last_schema"); + TestUtil.createTable(con, "last_schema.x", "last_schema_field_n text"); + stmt.execute("SET search_path TO third_schema;"); + TypeInfo typeInfo = con.getTypeInfo(); + int oid = typeInfo.getPGType("x"); + ResultSet rs = stmt.executeQuery("SELECT 'third_schema.x'::regtype::oid"); + assertTrue(rs.next()); + assertEquals(oid, rs.getInt(1)); + assertFalse(rs.next()); + TestUtil.dropSchema(con, "first_schema"); + TestUtil.dropSchema(con, "second_schema"); + TestUtil.dropSchema(con, "third_schema"); + TestUtil.dropSchema(con, "last_schema"); + } finally { + if (stmt != null) { + stmt.close(); + } + TestUtil.closeDB(con); + } + } + + /** + * This usecase is for the situations, when an object is located in a schema, that is in the + * search_path, but not in the current_schema, for example a public schema or some kind of schema, + * that is used for keeping utility objects. 
+ */ + @Test + void searchPathHiddenLookup() throws Exception { + Statement stmt = con.createStatement(); + try { + TestUtil.createSchema(con, "first_schema"); + TestUtil.createTable(con, "first_schema.x", "first_schema_field_n int4"); + TestUtil.createSchema(con, "second_schema"); + TestUtil.createTable(con, "second_schema.y", "second_schema_field_n text"); + TestUtil.createSchema(con, "third_schema"); + TestUtil.createTable(con, "third_schema.x", "third_schema_field_n float"); + TestUtil.createSchema(con, "last_schema"); + TestUtil.createTable(con, "last_schema.y", "last_schema_field_n text"); + stmt.execute("SET search_path TO first_schema, second_schema, last_schema, public;"); + TypeInfo typeInfo = con.getTypeInfo(); + int oid = typeInfo.getPGType("y"); + ResultSet rs = stmt.executeQuery("SELECT 'second_schema.y'::regtype::oid"); + assertTrue(rs.next()); + assertEquals(oid, rs.getInt(1)); + assertFalse(rs.next()); + TestUtil.dropSchema(con, "first_schema"); + TestUtil.dropSchema(con, "second_schema"); + TestUtil.dropSchema(con, "third_schema"); + TestUtil.dropSchema(con, "last_schema"); + } finally { + if (stmt != null) { + stmt.close(); + } + TestUtil.closeDB(con); + } + } + + @Test + void searchPathBackwardsCompatibleLookup() throws Exception { + Statement stmt = con.createStatement(); + try { + TestUtil.createSchema(con, "first_schema"); + TestUtil.createTable(con, "first_schema.x", "first_schema_field int4"); + TestUtil.createSchema(con, "second_schema"); + TestUtil.createTable(con, "second_schema.x", "second_schema_field text"); + TypeInfo typeInfo = con.getTypeInfo(); + int oid = typeInfo.getPGType("x"); + ResultSet rs = stmt + .executeQuery("SELECT oid FROM pg_type WHERE typname = 'x' ORDER BY oid DESC LIMIT 1"); + assertTrue(rs.next()); + assertEquals(oid, rs.getInt(1)); + assertFalse(rs.next()); + TestUtil.dropSchema(con, "first_schema"); + TestUtil.dropSchema(con, "second_schema"); + } finally { + TestUtil.closeDB(con); + } + } +} diff --git 
a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerCursorTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerCursorTest.java new file mode 100644 index 0000000..1c24f17 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerCursorTest.java @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; + +import org.postgresql.test.TestUtil; + +import org.junit.Test; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +/* + * Tests for using non-zero setFetchSize(). + */ +public class ServerCursorTest extends BaseTest4 { + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "test_fetch", "value integer,data bytea"); + con.setAutoCommit(false); + } + + @Override + public void tearDown() throws SQLException { + con.rollback(); + con.setAutoCommit(true); + TestUtil.dropTable(con, "test_fetch"); + super.tearDown(); + } + + protected void createRows(int count) throws Exception { + PreparedStatement stmt = con.prepareStatement("insert into test_fetch(value,data) values(?,?)"); + for (int i = 0; i < count; i++) { + stmt.setInt(1, i + 1); + stmt.setBytes(2, DATA_STRING.getBytes("UTF8")); + stmt.executeUpdate(); + } + con.commit(); + } + + // Test regular cursor fetching + @Test + public void testBasicFetch() throws Exception { + assumeByteaSupported(); + createRows(1); + + PreparedStatement stmt = + con.prepareStatement("declare test_cursor cursor for select * from test_fetch"); + stmt.execute(); + + stmt = con.prepareStatement("fetch forward from test_cursor"); + ResultSet rs = stmt.executeQuery(); + while (rs.next()) { + // there should only be one row returned + assertEquals("query value error", 1, rs.getInt(1)); + byte[] dataBytes = rs.getBytes(2); + assertEquals("binary 
data got munged", DATA_STRING, new String(dataBytes, "UTF8")); + } + + } + + // Test binary cursor fetching + @Test + public void testBinaryFetch() throws Exception { + assumeByteaSupported(); + createRows(1); + + PreparedStatement stmt = + con.prepareStatement("declare test_cursor binary cursor for select * from test_fetch"); + stmt.execute(); + + stmt = con.prepareStatement("fetch forward from test_cursor"); + ResultSet rs = stmt.executeQuery(); + while (rs.next()) { + // there should only be one row returned + byte[] dataBytes = rs.getBytes(2); + assertEquals("binary data got munged", DATA_STRING, new String(dataBytes, "UTF8")); + } + + } + + //CHECKSTYLE: OFF + // This string contains a variety different data: + // three japanese characters representing "japanese" in japanese + // the four characters "\000" + // a null character + // the seven ascii characters "english" + private static final String DATA_STRING = "\u65E5\u672C\u8A9E\\000\u0000english"; + //CHECKSTYLE: ON + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerErrorTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerErrorTest.java new file mode 100644 index 0000000..9f06d46 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerErrorTest.java @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2013, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; +import org.postgresql.util.ServerErrorMessage; + +import org.junit.Test; + +import java.sql.SQLException; +import java.sql.Statement; + +/* + * Test that enhanced error reports return the correct origin for constraint violation errors. + */ +public class ServerErrorTest extends BaseTest4 { + + @Override + public void setUp() throws Exception { + super.setUp(); + assumeMinimumServerVersion(ServerVersion.v9_3); + Statement stmt = con.createStatement(); + + stmt.execute("CREATE DOMAIN testdom AS int4 CHECK (value < 10)"); + TestUtil.createTable(con, "testerr", "id int not null, val testdom not null"); + stmt.execute("ALTER TABLE testerr ADD CONSTRAINT testerr_pk PRIMARY KEY (id)"); + stmt.close(); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "testerr"); + Statement stmt = con.createStatement(); + stmt.execute("DROP DOMAIN IF EXISTS testdom"); + stmt.close(); + super.tearDown(); + } + + @Test + public void testPrimaryKey() throws Exception { + Statement stmt = con.createStatement(); + stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 1)"); + try { + stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 1)"); + fail("Should have thrown a duplicate key exception."); + } catch (SQLException sqle) { + ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage(); + assertEquals("public", err.getSchema()); + assertEquals("testerr", err.getTable()); + assertEquals("testerr_pk", err.getConstraint()); + assertEquals(PSQLState.UNIQUE_VIOLATION.getState(), err.getSQLState()); + assertNull(err.getDatatype()); + assertNull(err.getColumn()); + } + stmt.close(); + } + + @Test + 
public void testColumn() throws Exception { + Statement stmt = con.createStatement(); + try { + stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, NULL)"); + fail("Should have thrown a not null constraint violation."); + } catch (SQLException sqle) { + ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage(); + assertEquals("public", err.getSchema()); + assertEquals("testerr", err.getTable()); + assertEquals("val", err.getColumn()); + assertNull(err.getDatatype()); + assertNull(err.getConstraint()); + } + stmt.close(); + } + + @Test + public void testDatatype() throws Exception { + Statement stmt = con.createStatement(); + try { + stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 20)"); + fail("Should have thrown a constraint violation."); + } catch (SQLException sqle) { + ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage(); + assertEquals("public", err.getSchema()); + assertEquals("testdom", err.getDatatype()); + assertEquals("testdom_check", err.getConstraint()); + } + stmt.close(); + } + + @Test + public void testNotNullConstraint() throws Exception { + Statement stmt = con.createStatement(); + try { + stmt.executeUpdate("INSERT INTO testerr (val) VALUES (1)"); + fail("Should have thrown a not-null exception."); + } catch (SQLException sqle) { + ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage(); + assertEquals("public", err.getSchema()); + assertEquals("testerr", err.getTable()); + assertEquals("id", err.getColumn()); + assertEquals(PSQLState.NOT_NULL_VIOLATION.getState(), err.getSQLState()); + assertNull(err.getDatatype()); + } + stmt.close(); + } + + @Test + public void testForeignKeyConstraint() throws Exception { + TestUtil.createTable(con, "testerr_foreign", "id int not null, testerr_id int," + + "CONSTRAINT testerr FOREIGN KEY (testerr_id) references testerr(id)"); + Statement stmt = con.createStatement(); + stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 1)"); + try 
{ + stmt.executeUpdate("INSERT INTO testerr_foreign (id, testerr_id) VALUES (1, 2)"); + fail("Should have thrown a foreign key exception."); + } catch (SQLException sqle) { + ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage(); + assertEquals("public", err.getSchema()); + assertEquals("testerr_foreign", err.getTable()); + assertEquals(PSQLState.FOREIGN_KEY_VIOLATION.getState(), err.getSQLState()); + assertNull(err.getDatatype()); + assertNull(err.getColumn()); + } + TestUtil.dropTable(con, "testerr_foreign"); + stmt.close(); + } + + @Test + public void testCheckConstraint() throws Exception { + TestUtil.createTable(con, "testerr_check", "id int not null, max10 int CHECK (max10 < 11)"); + Statement stmt = con.createStatement(); + stmt.executeUpdate("INSERT INTO testerr_check (id, max10) VALUES (1, 5)"); + try { + stmt.executeUpdate("INSERT INTO testerr_check (id, max10) VALUES (2, 11)"); + fail("Should have thrown a check exception."); + } catch (SQLException sqle) { + ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage(); + assertEquals("public", err.getSchema()); + assertEquals("testerr_check", err.getTable()); + assertEquals(PSQLState.CHECK_VIOLATION.getState(), err.getSQLState()); + assertNull(err.getDatatype()); + assertNull(err.getColumn()); + } + TestUtil.dropTable(con, "testerr_check"); + stmt.close(); + } + + @Test + public void testExclusionConstraint() throws Exception { + TestUtil.createTable(con, "testerr_exclude", "id int, EXCLUDE (id WITH =)"); + Statement stmt = con.createStatement(); + stmt.executeUpdate("INSERT INTO testerr_exclude (id) VALUES (1108)"); + try { + stmt.executeUpdate("INSERT INTO testerr_exclude (id) VALUES (1108)"); + fail("Should have thrown an exclusion exception."); + } catch (SQLException sqle) { + ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage(); + assertEquals("public", err.getSchema()); + assertEquals("testerr_exclude", err.getTable()); + 
assertEquals(PSQLState.EXCLUSION_VIOLATION.getState(), err.getSQLState()); + assertNull(err.getDatatype()); + assertNull(err.getColumn()); + } + TestUtil.dropTable(con, "testerr_exclude"); + stmt.close(); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerPreparedStmtTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerPreparedStmtTest.java new file mode 100644 index 0000000..372ac66 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerPreparedStmtTest.java @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.postgresql.PGStatement; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; + +import org.junit.Assume; +import org.junit.Test; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +/* + * Tests for using server side prepared statements + */ +public class ServerPreparedStmtTest extends BaseTest4 { + + @Override + public void setUp() throws Exception { + super.setUp(); + + Assume.assumeTrue("Server-prepared statements are not supported in simple protocol, thus ignoring the tests", + preferQueryMode != PreferQueryMode.SIMPLE); + + Statement stmt = con.createStatement(); + + TestUtil.createTable(con, "testsps", "id integer, value boolean"); + + stmt.executeUpdate("INSERT INTO testsps VALUES (1,'t')"); + stmt.executeUpdate("INSERT INTO testsps VALUES (2,'t')"); + stmt.executeUpdate("INSERT INTO testsps VALUES (3,'t')"); + stmt.executeUpdate("INSERT INTO testsps VALUES (4,'t')"); + stmt.executeUpdate("INSERT INTO testsps VALUES (6,'t')"); + stmt.executeUpdate("INSERT INTO testsps VALUES (9,'f')"); + + stmt.close(); + } + + 
@Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "testsps"); + super.tearDown(); + } + + @SuppressWarnings("deprecation") + private static void setUseServerPrepare(PreparedStatement pstmt, boolean flag) throws SQLException { + pstmt.unwrap(PGStatement.class).setUseServerPrepare(flag); + } + + @Test + public void testEmptyResults() throws Exception { + PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ?"); + setUseServerPrepare(pstmt, true); + for (int i = 0; i < 10; i++) { + pstmt.setInt(1, -1); + ResultSet rs = pstmt.executeQuery(); + assertFalse(rs.next()); + rs.close(); + } + pstmt.close(); + } + + @Test + public void testPreparedExecuteCount() throws Exception { + PreparedStatement pstmt = con.prepareStatement("UPDATE testsps SET id = id + 44"); + setUseServerPrepare(pstmt, true); + int count = pstmt.executeUpdate(); + assertEquals(6, count); + pstmt.close(); + } + + @Test + public void testPreparedStatementsNoBinds() throws Exception { + PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = 2"); + setUseServerPrepare(pstmt, true); + assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare()); + + // Test that basic functionality works + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + + // Verify that subsequent calls still work + rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + + // Verify that using the statement still works after turning off prepares + if (Boolean.getBoolean("org.postgresql.forceBinary")) { + return; + } + setUseServerPrepare(pstmt, false); + assertTrue(!pstmt.unwrap(PGStatement.class).isUseServerPrepare()); + + rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + + pstmt.close(); + } + + @Test + public void testPreparedStatementsWithOneBind() throws Exception { + PreparedStatement 
pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ?"); + setUseServerPrepare(pstmt, true); + assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare()); + + // Test that basic functionality works + pstmt.setInt(1, 2); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + + // Verify that subsequent calls still work + rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + + // Verify that using the statement still works after turning off prepares + if (Boolean.getBoolean("org.postgresql.forceBinary")) { + return; + } + + setUseServerPrepare(pstmt, false); + assertTrue(!pstmt.unwrap(PGStatement.class).isUseServerPrepare()); + + pstmt.setInt(1, 9); + rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(9, rs.getInt(1)); + rs.close(); + + pstmt.close(); + } + + // Verify we can bind booleans-as-objects ok. + @Test + public void testBooleanObjectBind() throws Exception { + PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE value = ?"); + setUseServerPrepare(pstmt, true); + assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare()); + + pstmt.setObject(1, Boolean.FALSE, java.sql.Types.BIT); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(9, rs.getInt(1)); + rs.close(); + } + + // Verify we can bind booleans-as-integers ok. + @Test + public void testBooleanIntegerBind() throws Exception { + PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ?"); + setUseServerPrepare(pstmt, true); + assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare()); + + pstmt.setObject(1, Boolean.TRUE, java.sql.Types.INTEGER); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + rs.close(); + } + + // Verify we can bind booleans-as-native-types ok. 
+ @Test + public void testBooleanBind() throws Exception { + PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE value = ?"); + setUseServerPrepare(pstmt, true); + assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare()); + + pstmt.setBoolean(1, false); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(9, rs.getInt(1)); + rs.close(); + } + + @Test + public void testPreparedStatementsWithBinds() throws Exception { + PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ? or id = ?"); + setUseServerPrepare(pstmt, true); + assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare()); + + // Test that basic functionality works + // bind different datatypes + pstmt.setInt(1, 2); + pstmt.setLong(2, 2); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + + // Verify that subsequent calls still work + rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + + pstmt.close(); + } + + @Test + public void testSPSToggle() throws Exception { + // Verify we can toggle UseServerPrepare safely before a query is executed. + PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = 2"); + setUseServerPrepare(pstmt, true); + setUseServerPrepare(pstmt, false); + } + + @Test + public void testBytea() throws Exception { + // Verify we can use setBytes() with a server-prepared update. + try { + TestUtil.createTable(con, "testsps_bytea", "data bytea"); + + PreparedStatement pstmt = con.prepareStatement("INSERT INTO testsps_bytea(data) VALUES (?)"); + setUseServerPrepare(pstmt, true); + pstmt.setBytes(1, new byte[100]); + pstmt.executeUpdate(); + } finally { + TestUtil.dropTable(con, "testsps_bytea"); + } + } + + // Check statements are not transformed when they shouldn't be. 
+ @Test + public void testCreateTable() throws Exception { + // CREATE TABLE isn't supported by PREPARE; the driver should realize this and + // still complete without error. + PreparedStatement pstmt = con.prepareStatement("CREATE TABLE testsps_bad(data int)"); + setUseServerPrepare(pstmt, true); + pstmt.executeUpdate(); + TestUtil.dropTable(con, "testsps_bad"); + } + + @Test + public void testMultistatement() throws Exception { + // Shouldn't try to PREPARE this one, if we do we get: + // PREPARE x(int,int) AS INSERT .... $1 ; INSERT ... $2 -- syntax error + try { + TestUtil.createTable(con, "testsps_multiple", "data int"); + PreparedStatement pstmt = con.prepareStatement( + "INSERT INTO testsps_multiple(data) VALUES (?); INSERT INTO testsps_multiple(data) VALUES (?)"); + setUseServerPrepare(pstmt, true); + pstmt.setInt(1, 1); + pstmt.setInt(2, 2); + pstmt.executeUpdate(); // Two inserts. + + pstmt.setInt(1, 3); + pstmt.setInt(2, 4); + pstmt.executeUpdate(); // Two more inserts. + + ResultSet check = con.createStatement().executeQuery("SELECT COUNT(*) FROM testsps_multiple"); + assertTrue(check.next()); + assertEquals(4, check.getInt(1)); + } finally { + TestUtil.dropTable(con, "testsps_multiple"); + } + } + + @Test + public void testTypeChange() throws Exception { + PreparedStatement pstmt = con.prepareStatement("SELECT CAST (? AS TEXT)"); + setUseServerPrepare(pstmt, true); + + // Prepare with int parameter. + pstmt.setInt(1, 1); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertTrue(!rs.next()); + + // Change to text parameter, check it still works. 
+ pstmt.setString(1, "test string"); + rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals("test string", rs.getString(1)); + assertTrue(!rs.next()); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SocketTimeoutTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SocketTimeoutTest.java new file mode 100644 index 0000000..253aae7 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SocketTimeoutTest.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +class SocketTimeoutTest { + + @Test + void socketTimeoutEnforcement() throws Exception { + Properties properties = new Properties(); + PGProperty.SOCKET_TIMEOUT.set(properties, 1); + + Connection conn = TestUtil.openDB(properties); + Statement stmt = null; + try { + stmt = conn.createStatement(); + stmt.execute("SELECT pg_sleep(2)"); + fail("Connection with socketTimeout did not throw expected exception"); + } catch (SQLException e) { + assertTrue(conn.isClosed()); + } finally { + TestUtil.closeQuietly(stmt); + TestUtil.closeDB(conn); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StatementTest.java new file mode 100644 index 0000000..63c8fef --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StatementTest.java @@ -0,0 +1,1167 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.Driver; +import org.postgresql.PGProperty; +import org.postgresql.core.ServerVersion; +import org.postgresql.jdbc.PgStatement; +import org.postgresql.test.TestUtil; +import org.postgresql.test.util.StrangeProxyServer; +import org.postgresql.util.LazyCleaner; +import org.postgresql.util.PSQLState; +import org.postgresql.util.SharedTimer; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.Statement; +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +/* +* Test for getObject +*/ +class StatementTest { + private Connection con; + + @BeforeEach + void setUp() 
throws Exception { + con = TestUtil.openDB(); + TestUtil.createTempTable(con, "test_statement", "i int"); + TestUtil.createTempTable(con, "escapetest", + "ts timestamp, d date, t time, \")\" varchar(5), \"\"\"){a}'\" text "); + TestUtil.createTempTable(con, "comparisontest", "str1 varchar(5), str2 varchar(15)"); + TestUtil.createTable(con, "test_lock", "name text"); + Statement stmt = con.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("comparisontest", "str1,str2", "'_abcd','_found'")); + stmt.executeUpdate(TestUtil.insertSQL("comparisontest", "str1,str2", "'%abcd','%found'")); + stmt.close(); + } + + @AfterEach + void tearDown() throws Exception { + TestUtil.dropTable(con, "test_statement"); + TestUtil.dropTable(con, "escapetest"); + TestUtil.dropTable(con, "comparisontest"); + TestUtil.dropTable(con, "test_lock"); + TestUtil.execute(con, "DROP FUNCTION IF EXISTS notify_loop()"); + TestUtil.execute(con, "DROP FUNCTION IF EXISTS notify_then_sleep()"); + con.close(); + } + + private void assumeLongTest() { + // Run the test: + // Travis: in PG_VERSION=HEAD + // Other: always + if ("true".equals(System.getenv("TRAVIS"))) { + Assumptions.assumeTrue("HEAD".equals(System.getenv("PG_VERSION"))); + } + } + + @Test + void close() throws SQLException { + Statement stmt = con.createStatement(); + stmt.close(); + + try { + stmt.getResultSet(); + fail("statements should not be re-used after close"); + } catch (SQLException ex) { + } + } + + @Test + void resultSetClosed() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("select 1"); + stmt.close(); + assertTrue(rs.isClosed()); + } + + /** + * Closing a Statement twice is not an error. 
+ */ + @Test + void doubleClose() throws SQLException { + Statement stmt = con.createStatement(); + stmt.close(); + stmt.close(); + } + + @Test + void multiExecute() throws SQLException { + Statement stmt = con.createStatement(); + assertTrue(stmt.execute("SELECT 1 as a; UPDATE test_statement SET i=1; SELECT 2 as b, 3 as c")); + + ResultSet rs = stmt.getResultSet(); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + rs.close(); + + assertFalse(stmt.getMoreResults()); + assertEquals(0, stmt.getUpdateCount()); + + assertTrue(stmt.getMoreResults()); + rs = stmt.getResultSet(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + + assertFalse(stmt.getMoreResults()); + assertEquals(-1, stmt.getUpdateCount()); + stmt.close(); + } + + @Test + void emptyQuery() throws SQLException { + Statement stmt = con.createStatement(); + stmt.execute(""); + assertNull(stmt.getResultSet()); + assertFalse(stmt.getMoreResults()); + } + + @Test + void updateCount() throws SQLException { + Statement stmt = con.createStatement(); + int count; + + count = stmt.executeUpdate("INSERT INTO test_statement VALUES (3)"); + assertEquals(1, count); + count = stmt.executeUpdate("INSERT INTO test_statement VALUES (3)"); + assertEquals(1, count); + + count = stmt.executeUpdate("UPDATE test_statement SET i=4"); + assertEquals(2, count); + + count = stmt.executeUpdate("CREATE TEMP TABLE another_table (a int)"); + assertEquals(0, count); + + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + count = stmt.executeUpdate("CREATE TEMP TABLE yet_another_table AS SELECT x FROM generate_series(1,10) x"); + assertEquals(10, count); + } + } + + @Test + void escapeProcessing() throws SQLException { + Statement stmt = con.createStatement(); + int count; + + count = stmt.executeUpdate("insert into escapetest (ts) values ({ts '1900-01-01 00:00:00'})"); + assertEquals(1, count); + + count = stmt.executeUpdate("insert into escapetest (d) values ({d '1900-01-01'})"); + 
assertEquals(1, count); + + count = stmt.executeUpdate("insert into escapetest (t) values ({t '00:00:00'})"); + assertEquals(1, count); + + ResultSet rs = stmt.executeQuery("select {fn version()} as version"); + assertTrue(rs.next()); + + // check nested and multiple escaped functions + rs = stmt.executeQuery("select {fn version()} as version, {fn log({fn log(3.0)})} as log"); + assertTrue(rs.next()); + assertEquals(Math.log(Math.log(3)), rs.getDouble(2), 0.00001); + + stmt.executeUpdate("UPDATE escapetest SET \")\" = 'a', \"\"\"){a}'\" = 'b'"); + + // check "difficult" values + rs = stmt.executeQuery("select {fn concat(')',escapetest.\")\")} as concat" + + ", {fn concat('{','}')} " + + ", {fn concat('''','\"')} " + + ", {fn concat(\"\"\"){a}'\", '''}''')} " + + " FROM escapetest"); + assertTrue(rs.next()); + assertEquals(")a", rs.getString(1)); + assertEquals("{}", rs.getString(2)); + assertEquals("'\"", rs.getString(3)); + assertEquals("b'}'", rs.getString(4)); + + count = stmt.executeUpdate("create temp table b (i int)"); + assertEquals(0, count); + + rs = stmt.executeQuery("select * from {oj test_statement a left outer join b on (a.i=b.i)} "); + assertFalse(rs.next()); + // test escape character + rs = stmt + .executeQuery("select str2 from comparisontest where str1 like '|_abcd' {escape '|'} "); + assertTrue(rs.next()); + assertEquals("_found", rs.getString(1)); + rs = stmt + .executeQuery("select str2 from comparisontest where str1 like '|%abcd' {escape '|'} "); + assertTrue(rs.next()); + assertEquals("%found", rs.getString(1)); + } + + @Test + void preparedFunction() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("SELECT {fn concat('a', ?)}"); + pstmt.setInt(1, 5); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertEquals("a5", rs.getString(1)); + } + + @Test + void dollarInComment() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("SELECT /* $ */ {fn curdate()}"); + ResultSet rs = 
pstmt.executeQuery(); + assertTrue(rs.next()); + assertNotNull(rs.getString(1), "{fn curdate()} should be not null"); + } + + @Test + void dollarInCommentTwoComments() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("SELECT /* $ *//* $ */ {fn curdate()}"); + ResultSet rs = pstmt.executeQuery(); + assertTrue(rs.next()); + assertNotNull(rs.getString(1), "{fn curdate()} should be not null"); + } + + @Test + void numericFunctions() throws SQLException { + Statement stmt = con.createStatement(); + + ResultSet rs = stmt.executeQuery("select {fn abs(-2.3)} as abs "); + assertTrue(rs.next()); + assertEquals(2.3f, rs.getFloat(1), 0.00001); + + rs = stmt.executeQuery("select {fn acos(-0.6)} as acos "); + assertTrue(rs.next()); + assertEquals(Math.acos(-0.6), rs.getDouble(1), 0.00001); + + rs = stmt.executeQuery("select {fn asin(-0.6)} as asin "); + assertTrue(rs.next()); + assertEquals(Math.asin(-0.6), rs.getDouble(1), 0.00001); + + rs = stmt.executeQuery("select {fn atan(-0.6)} as atan "); + assertTrue(rs.next()); + assertEquals(Math.atan(-0.6), rs.getDouble(1), 0.00001); + + rs = stmt.executeQuery("select {fn atan2(-2.3,7)} as atan2 "); + assertTrue(rs.next()); + assertEquals(Math.atan2(-2.3, 7), rs.getDouble(1), 0.00001); + + rs = stmt.executeQuery("select {fn ceiling(-2.3)} as ceiling "); + assertTrue(rs.next()); + assertEquals(-2, rs.getDouble(1), 0.00001); + + rs = stmt.executeQuery("select {fn cos(-2.3)} as cos, {fn cot(-2.3)} as cot "); + assertTrue(rs.next()); + assertEquals(Math.cos(-2.3), rs.getDouble(1), 0.00001); + assertEquals(1 / Math.tan(-2.3), rs.getDouble(2), 0.00001); + + rs = stmt.executeQuery("select {fn degrees({fn pi()})} as degrees "); + assertTrue(rs.next()); + assertEquals(180, rs.getDouble(1), 0.00001); + + rs = stmt.executeQuery("select {fn exp(-2.3)}, {fn floor(-2.3)}," + + " {fn log(2.3)},{fn log10(2.3)},{fn mod(3,2)}"); + assertTrue(rs.next()); + assertEquals(Math.exp(-2.3), rs.getDouble(1), 0.00001); + assertEquals(-3, 
rs.getDouble(2), 0.00001); + assertEquals(Math.log(2.3), rs.getDouble(3), 0.00001); + assertEquals(Math.log(2.3) / Math.log(10), rs.getDouble(4), 0.00001); + assertEquals(1, rs.getDouble(5), 0.00001); + + rs = stmt.executeQuery("select {fn pi()}, {fn power(7,-2.3)}," + + " {fn radians(-180)},{fn round(3.1294,2)}"); + assertTrue(rs.next()); + assertEquals(Math.PI, rs.getDouble(1), 0.00001); + assertEquals(Math.pow(7, -2.3), rs.getDouble(2), 0.00001); + assertEquals(-Math.PI, rs.getDouble(3), 0.00001); + assertEquals(3.13, rs.getDouble(4), 0.00001); + + rs = stmt.executeQuery("select {fn sign(-2.3)}, {fn sin(-2.3)}," + + " {fn sqrt(2.3)},{fn tan(-2.3)},{fn truncate(3.1294,2)}"); + assertTrue(rs.next()); + assertEquals(-1, rs.getInt(1)); + assertEquals(Math.sin(-2.3), rs.getDouble(2), 0.00001); + assertEquals(Math.sqrt(2.3), rs.getDouble(3), 0.00001); + assertEquals(Math.tan(-2.3), rs.getDouble(4), 0.00001); + assertEquals(3.12, rs.getDouble(5), 0.00001); + } + + @Test + void stringFunctions() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery( + "select {fn ascii(' test')},{fn char(32)}" + + ",{fn concat('ab','cd')}" + + ",{fn lcase('aBcD')},{fn left('1234',2)},{fn length('123 ')}" + + ",{fn locate('bc','abc')},{fn locate('bc','abc',3)}"); + assertTrue(rs.next()); + assertEquals(32, rs.getInt(1)); + assertEquals(" ", rs.getString(2)); + assertEquals("abcd", rs.getString(3)); + assertEquals("abcd", rs.getString(4)); + assertEquals("12", rs.getString(5)); + assertEquals(3, rs.getInt(6)); + assertEquals(2, rs.getInt(7)); + assertEquals(0, rs.getInt(8)); + + rs = stmt.executeQuery( + "SELECT {fn insert('abcdef',3,2,'xxxx')}" + + ",{fn replace('abcdbc','bc','x')}"); + assertTrue(rs.next()); + assertEquals("abxxxxef", rs.getString(1)); + assertEquals("axdx", rs.getString(2)); + + rs = stmt.executeQuery( + "select {fn ltrim(' ab')},{fn repeat('ab',2)}" + + ",{fn right('abcde',2)},{fn rtrim('ab ')}" + + ",{fn space(3)},{fn 
substring('abcd',2,2)}" + + ",{fn ucase('aBcD')}"); + assertTrue(rs.next()); + assertEquals("ab", rs.getString(1)); + assertEquals("abab", rs.getString(2)); + assertEquals("de", rs.getString(3)); + assertEquals("ab", rs.getString(4)); + assertEquals(" ", rs.getString(5)); + assertEquals("bc", rs.getString(6)); + assertEquals("ABCD", rs.getString(7)); + } + + @Test + void dateFuncWithParam() throws SQLException { + // Prior to 8.0 there is not an interval + timestamp operator, + // so timestampadd does not work. + // + + PreparedStatement ps = con.prepareStatement( + "SELECT {fn timestampadd(SQL_TSI_QUARTER, ? ,{fn now()})}, {fn timestampadd(SQL_TSI_MONTH, ?, {fn now()})} "); + ps.setInt(1, 4); + ps.setInt(2, 12); + ResultSet rs = ps.executeQuery(); + assertTrue(rs.next()); + assertEquals(rs.getTimestamp(1), rs.getTimestamp(2)); + } + + @Test + void dateFunctions() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("select {fn curdate()},{fn curtime()}" + + ",{fn dayname({fn now()})}, {fn dayofmonth({fn now()})}" + + ",{fn dayofweek({ts '2005-01-17 12:00:00'})},{fn dayofyear({fn now()})}" + + ",{fn hour({fn now()})},{fn minute({fn now()})}" + + ",{fn month({fn now()})}" + + ",{fn monthname({fn now()})},{fn quarter({fn now()})}" + + ",{fn second({fn now()})},{fn week({fn now()})}" + + ",{fn year({fn now()})} "); + assertTrue(rs.next()); + // ensure sunday =>1 and monday =>2 + assertEquals(2, rs.getInt(5)); + + // Prior to 8.0 there is not an interval + timestamp operator, + // so timestampadd does not work. 
+ // + + // second + rs = stmt.executeQuery( + "select {fn timestampdiff(SQL_TSI_SECOND,{fn now()},{fn timestampadd(SQL_TSI_SECOND,3,{fn now()})})} "); + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + // MINUTE + rs = stmt.executeQuery( + "select {fn timestampdiff(SQL_TSI_MINUTE,{fn now()},{fn timestampadd(SQL_TSI_MINUTE,3,{fn now()})})} "); + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + // HOUR + rs = stmt.executeQuery( + "select {fn timestampdiff(SQL_tsi_HOUR,{fn now()},{fn timestampadd(SQL_TSI_HOUR,3,{fn now()})})} "); + assertTrue(rs.next()); + assertEquals(3, rs.getInt(1)); + // day + rs = stmt.executeQuery( + "select {fn timestampdiff(SQL_TSI_DAY,{fn now()},{fn timestampadd(SQL_TSI_DAY,-3,{fn now()})})} "); + assertTrue(rs.next()); + int res = rs.getInt(1); + if (res != -3 && res != -2) { + // set TimeZone='America/New_York'; + // select CAST(-3 || ' day' as interval); + // interval + //---------- + // -3 days + // + // select CAST(-3 || ' day' as interval)+now(); + // ?column? + //------------------------------- + // 2018-03-08 07:59:13.586895-05 + // + // select CAST(-3 || ' day' as interval)+now()-now(); + // ?column? + //------------------- + // -2 days -23:00:00 + fail("CAST(-3 || ' day' as interval)+now()-now() is expected to return -3 or -2. Actual value is " + res); + } + // WEEK => extract week from interval is not supported by backend + // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_WEEK,{fn now()},{fn + // timestampadd(SQL_TSI_WEEK,3,{fn now()})})} "); + // assertTrue(rs.next()); + // assertEquals(3,rs.getInt(1)); + // MONTH => backend assume there are 0 month in an interval of 92 days... + // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_MONTH,{fn now()},{fn + // timestampadd(SQL_TSI_MONTH,3,{fn now()})})} "); + // assertTrue(rs.next()); + // assertEquals(3,rs.getInt(1)); + // QUARTER => backend assume there are 1 quarter even in 270 days... 
+ // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_QUARTER,{fn now()},{fn + // timestampadd(SQL_TSI_QUARTER,3,{fn now()})})} "); + // assertTrue(rs.next()); + // assertEquals(3,rs.getInt(1)); + // YEAR + // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_YEAR,{fn now()},{fn + // timestampadd(SQL_TSI_YEAR,3,{fn now()})})} "); + // assertTrue(rs.next()); + // assertEquals(3,rs.getInt(1)); + } + + @Test + void systemFunctions() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery( + "select {fn ifnull(null,'2')}" + + ",{fn user()} "); + assertTrue(rs.next()); + assertEquals("2", rs.getString(1)); + assertEquals(TestUtil.getUser(), rs.getString(2)); + + rs = stmt.executeQuery("select {fn database()} "); + assertTrue(rs.next()); + assertEquals(TestUtil.getDatabase(), rs.getString(1)); + } + + @Test + void warningsAreCleared() throws SQLException { + Statement stmt = con.createStatement(); + // Will generate a NOTICE: for primary key index creation + stmt.execute("CREATE TEMP TABLE unused (a int primary key)"); + stmt.executeQuery("SELECT 1"); + // Executing another query should clear the warning from the first one. 
+ assertNull(stmt.getWarnings()); + stmt.close(); + } + + @Test + void warningsAreAvailableAsap() + throws Exception { + try (Connection outerLockCon = TestUtil.openDB()) { + outerLockCon.setAutoCommit(false); + //Acquire an exclusive lock so we can block the notice generating statement + outerLockCon.createStatement().execute("LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE;"); + con.createStatement() + .execute("CREATE OR REPLACE FUNCTION notify_then_sleep() RETURNS VOID AS " + + "$BODY$ " + + "BEGIN " + + "RAISE NOTICE 'Test 1'; " + + "RAISE NOTICE 'Test 2'; " + + "LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE; " + + "END " + + "$BODY$ " + + "LANGUAGE plpgsql;"); + con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'"); + //If we never receive the two warnings the statement will just hang, so set a low timeout + con.createStatement().execute("SET SESSION statement_timeout = 1000"); + final PreparedStatement preparedStatement = con.prepareStatement("SELECT notify_then_sleep()"); + final Callable warningReader = new Callable() { + @Override + public Void call() throws SQLException, InterruptedException { + while (true) { + SQLWarning warning = preparedStatement.getWarnings(); + if (warning != null) { + assertEquals("Test 1", warning.getMessage(), "First warning received not first notice raised"); + SQLWarning next = warning.getNextWarning(); + if (next != null) { + assertEquals("Test 2", next.getMessage(), "Second warning received not second notice raised"); + //Release the lock so that the notice generating statement can end. + outerLockCon.commit(); + return null; + } + } + //Break the loop on InterruptedException + Thread.sleep(0); + } + } + }; + ExecutorService executorService = Executors.newSingleThreadExecutor(); + try { + Future future = executorService.submit(warningReader); + //Statement should only finish executing once we have + //received the two notices and released the outer lock. 
+ preparedStatement.execute(); + + //If test takes longer than 2 seconds its a failure. + future.get(2, TimeUnit.SECONDS); + } finally { + executorService.shutdownNow(); + } + } + } + + /** + *

Demonstrates a safe approach to concurrently reading the latest + * warnings while periodically clearing them.

+ * + *

One drawback of this approach is that it requires the reader to make it to the end of the + * warning chain before clearing it, so long as your warning processing step is not very slow, + * this should happen more or less instantaneously even if you receive a lot of warnings.

+ */ + @Test + void concurrentWarningReadAndClear() + throws SQLException, InterruptedException, ExecutionException, TimeoutException { + final int iterations = 1000; + con.createStatement() + .execute("CREATE OR REPLACE FUNCTION notify_loop() RETURNS VOID AS " + + "$BODY$ " + + "BEGIN " + + "FOR i IN 1.. " + iterations + " LOOP " + + " RAISE NOTICE 'Warning %', i; " + + "END LOOP; " + + "END " + + "$BODY$ " + + "LANGUAGE plpgsql;"); + con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'"); + final PreparedStatement statement = con.prepareStatement("SELECT notify_loop()"); + final Callable warningReader = new Callable() { + @Override + public Void call() throws SQLException, InterruptedException { + SQLWarning lastProcessed = null; + int warnings = 0; + //For production code replace this with some condition that + //ends after the statement finishes execution + while (warnings < iterations) { + SQLWarning warn = statement.getWarnings(); + //if next linked warning has value use that, otherwise keep using latest head + if (lastProcessed != null && lastProcessed.getNextWarning() != null) { + warn = lastProcessed.getNextWarning(); + } + if (warn != null) { + warnings++; + //System.out.println("Processing " + warn.getMessage()); + assertEquals("Warning " + warnings, warn.getMessage(), "Received warning out of expected order"); + lastProcessed = warn; + //If the processed warning was the head of the chain clear + if (warn == statement.getWarnings()) { + //System.out.println("Clearing warnings"); + statement.clearWarnings(); + } + } else { + //Not required for this test, but a good idea adding some delay for production code + //to avoid high cpu usage while the query is running and no warnings are coming in. 
+ //Alternatively use JDK9's Thread.onSpinWait() + Thread.sleep(10); + } + } + assertEquals("Warning " + iterations, lastProcessed.getMessage(), "Didn't receive expected last warning"); + return null; + } + }; + + final ExecutorService executor = Executors.newSingleThreadExecutor(); + try { + final Future warningReaderThread = executor.submit(warningReader); + statement.execute(); + //If the reader doesn't return after 2 seconds, it failed. + warningReaderThread.get(2, TimeUnit.SECONDS); + } finally { + executor.shutdownNow(); + } + } + + /** + * The parser tries to break multiple statements into individual queries as required by the V3 + * extended query protocol. It can be a little overzealous sometimes and this test ensures we keep + * multiple rule actions together in one statement. + */ + @Test + void parsingSemiColons() throws SQLException { + Statement stmt = con.createStatement(); + stmt.execute( + "CREATE RULE r1 AS ON INSERT TO escapetest DO (DELETE FROM test_statement ; INSERT INTO test_statement VALUES (1); INSERT INTO test_statement VALUES (2); );"); + stmt.executeUpdate("INSERT INTO escapetest(ts) VALUES (NULL)"); + ResultSet rs = stmt.executeQuery("SELECT i from test_statement ORDER BY i"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + } + + @Test + void parsingDollarQuotes() throws SQLException { + // dollar-quotes are supported in the backend since version 8.0 + Statement st = con.createStatement(); + ResultSet rs; + + rs = st.executeQuery("SELECT '$a$ ; $a$'"); + assertTrue(rs.next()); + assertEquals("$a$ ; $a$", rs.getObject(1)); + rs.close(); + + rs = st.executeQuery("SELECT $$;$$"); + assertTrue(rs.next()); + assertEquals(";", rs.getObject(1)); + rs.close(); + + rs = st.executeQuery("SELECT $OR$$a$'$b$a$$OR$ WHERE '$a$''$b$a$'=$OR$$a$'$b$a$$OR$OR ';'=''"); + assertTrue(rs.next()); + assertEquals("$a$'$b$a$", rs.getObject(1)); + 
assertFalse(rs.next()); + rs.close(); + + rs = st.executeQuery("SELECT $B$;$b$B$"); + assertTrue(rs.next()); + assertEquals(";$b", rs.getObject(1)); + rs.close(); + + rs = st.executeQuery("SELECT $c$c$;$c$"); + assertTrue(rs.next()); + assertEquals("c$;", rs.getObject(1)); + rs.close(); + + rs = st.executeQuery("SELECT $A0$;$A0$ WHERE ''=$t$t$t$ OR ';$t$'=';$t$'"); + assertTrue(rs.next()); + assertEquals(";", rs.getObject(1)); + assertFalse(rs.next()); + rs.close(); + + st.executeQuery("SELECT /* */$$;$$/**//*;*/").close(); + st.executeQuery("SELECT /* */--;\n$$a$$/**/--\n--;\n").close(); + + st.close(); + } + + @Test + void unbalancedParensParseError() throws SQLException { + Statement stmt = con.createStatement(); + try { + stmt.executeQuery("SELECT i FROM test_statement WHERE (1 > 0)) ORDER BY i"); + fail("Should have thrown a parse error."); + } catch (SQLException sqle) { + } + } + + @Test + void executeUpdateFailsOnSelect() throws SQLException { + Statement stmt = con.createStatement(); + try { + stmt.executeUpdate("SELECT 1"); + fail("Should have thrown an error."); + } catch (SQLException sqle) { + } + } + + @Test + void executeUpdateFailsOnMultiStatementSelect() throws SQLException { + Statement stmt = con.createStatement(); + try { + stmt.executeUpdate("/* */; SELECT 1"); + fail("Should have thrown an error."); + } catch (SQLException sqle) { + } + } + + @Test + void setQueryTimeout() throws SQLException { + Statement stmt = con.createStatement(); + long start = 0; + boolean cancelReceived = false; + try { + stmt.setQueryTimeout(1); + start = System.nanoTime(); + stmt.execute("select pg_sleep(10)"); + } catch (SQLException sqle) { + // state for cancel + if ("57014".equals(sqle.getSQLState())) { + cancelReceived = true; + } + } + long duration = System.nanoTime() - start; + if (!cancelReceived || duration > TimeUnit.SECONDS.toNanos(5)) { + fail("Query should have been cancelled since the timeout was set to 1 sec." 
+ + " Cancel state: " + cancelReceived + ", duration: " + duration); + } + } + + @Test + void longQueryTimeout() throws SQLException { + Statement stmt = con.createStatement(); + stmt.setQueryTimeout(Integer.MAX_VALUE); + assertEquals(Integer.MAX_VALUE, + stmt.getQueryTimeout(), + "setQueryTimeout(Integer.MAX_VALUE)"); + stmt.setQueryTimeout(Integer.MAX_VALUE - 1); + assertEquals(Integer.MAX_VALUE - 1, + stmt.getQueryTimeout(), + "setQueryTimeout(Integer.MAX_VALUE-1)"); + } + + /** + * Test executes two queries one after another. The first one has timeout of 1ms, and the second + * one does not. The timeout of the first query should not impact the second one. + */ + @Test + void shortQueryTimeout() throws SQLException { + assumeLongTest(); + + long deadLine = System.nanoTime() + TimeUnit.SECONDS.toNanos(10); + Statement stmt = con.createStatement(); + ((PgStatement) stmt).setQueryTimeoutMs(1); + Statement stmt2 = con.createStatement(); + while (System.nanoTime() < deadLine) { + try { + // This usually won't time out but scheduler jitter, server load + // etc can cause a timeout. + stmt.executeQuery("select 1;"); + } catch (SQLException e) { + // Expect "57014 query_canceled" (en-msg is "canceling statement due to statement timeout") + // but anything else is fatal. We can't differentiate other causes of statement cancel like + // "canceling statement due to user request" without error message matching though, and we + // don't want to do that. + assertEquals( + PSQLState.QUERY_CANCELED.getState(), + e.getSQLState(), + "Query is expected to be cancelled via st.close(), got " + e.getMessage()); + } + // Must never time out. + stmt2.executeQuery("select 1;"); + } + } + + @Test + void setQueryTimeoutWithSleep() throws SQLException, InterruptedException { + // check that the timeout starts ticking at execute, not at the + // setQueryTimeout call. 
+ Statement stmt = con.createStatement(); + try { + stmt.setQueryTimeout(1); + Thread.sleep(3000); + stmt.execute("select pg_sleep(5)"); + fail("statement should have been canceled by query timeout"); + } catch (SQLException sqle) { + // state for cancel + if (sqle.getSQLState().compareTo("57014") != 0) { + throw sqle; + } + } + } + + @Test + void setQueryTimeoutOnPrepared() throws SQLException, InterruptedException { + // check that a timeout set on a prepared statement works on every + // execution. + PreparedStatement pstmt = con.prepareStatement("select pg_sleep(5)"); + pstmt.setQueryTimeout(1); + for (int i = 1; i <= 3; i++) { + try { + ResultSet rs = pstmt.executeQuery(); + fail("statement should have been canceled by query timeout (execution #" + i + ")"); + } catch (SQLException sqle) { + // state for cancel + if (sqle.getSQLState().compareTo("57014") != 0) { + throw sqle; + } + } + } + } + + @Test + void setQueryTimeoutWithoutExecute() throws SQLException, InterruptedException { + // check that a timeout set on one statement doesn't affect another + Statement stmt1 = con.createStatement(); + stmt1.setQueryTimeout(1); + + Statement stmt2 = con.createStatement(); + ResultSet rs = stmt2.executeQuery("SELECT pg_sleep(2)"); + } + + @Test + void resultSetTwice() throws SQLException { + Statement stmt = con.createStatement(); + + ResultSet rs = stmt.executeQuery("select {fn abs(-2.3)} as abs "); + assertNotNull(rs); + + ResultSet rsOther = stmt.getResultSet(); + assertNotNull(rsOther); + } + + @Test + void multipleCancels() throws Exception { + SharedTimer sharedTimer = Driver.getSharedTimer(); + + Connection connA = null; + Connection connB = null; + Statement stmtA = null; + Statement stmtB = null; + ResultSet rsA = null; + ResultSet rsB = null; + try { + assertEquals(0, sharedTimer.getRefCount()); + connA = TestUtil.openDB(); + connB = TestUtil.openDB(); + stmtA = connA.createStatement(); + stmtB = connB.createStatement(); + stmtA.setQueryTimeout(1); + 
stmtB.setQueryTimeout(1); + try { + rsA = stmtA.executeQuery("SELECT pg_sleep(2)"); + } catch (SQLException e) { + // ignore the expected timeout + } + assertEquals(1, sharedTimer.getRefCount()); + try { + rsB = stmtB.executeQuery("SELECT pg_sleep(2)"); + } catch (SQLException e) { + // ignore the expected timeout + } + } finally { + TestUtil.closeQuietly(rsA); + TestUtil.closeQuietly(rsB); + TestUtil.closeQuietly(stmtA); + TestUtil.closeQuietly(stmtB); + TestUtil.closeQuietly(connA); + TestUtil.closeQuietly(connB); + } + assertEquals(0, sharedTimer.getRefCount()); + } + + @Test + @Timeout(30) + void cancelQueryWithBrokenNetwork() throws SQLException, IOException, InterruptedException { + // check that stmt.cancel() doesn't hang forever if the network is broken + + ExecutorService executor = Executors.newCachedThreadPool(); + + try (StrangeProxyServer proxyServer = new StrangeProxyServer(TestUtil.getServer(), TestUtil.getPort())) { + Properties props = new Properties(); + props.setProperty(TestUtil.SERVER_HOST_PORT_PROP, String.format("%s:%s", "localhost", proxyServer.getServerPort())); + PGProperty.CANCEL_SIGNAL_TIMEOUT.set(props, 1); + + try (Connection conn = TestUtil.openDB(props); Statement stmt = conn.createStatement()) { + executor.submit(() -> stmt.execute("select pg_sleep(60)")); + + Thread.sleep(1000); + proxyServer.stopForwardingAllClients(); + + stmt.cancel(); + // Note: network is still inaccessible, so the statement execution is still in progress. 
+ // So we abort the connection to allow implicit conn.close() + conn.abort(executor); + } + } + + executor.shutdownNow(); + } + + @Test + @Timeout(10) + void closeInProgressStatement() throws Exception { + ExecutorService executor = Executors.newSingleThreadExecutor(); + final Connection outerLockCon = TestUtil.openDB(); + outerLockCon.setAutoCommit(false); + //Acquire an exclusive lock so we can block the notice generating statement + outerLockCon.createStatement().execute("LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE;"); + + try { + con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'"); + con.createStatement() + .execute("CREATE OR REPLACE FUNCTION notify_then_sleep() RETURNS VOID AS " + + "$BODY$ " + + "BEGIN " + + "RAISE NOTICE 'start';" + + "LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE;" + + "END " + + "$BODY$ " + + "LANGUAGE plpgsql;"); + int cancels = 0; + for (int i = 0; i < 100; i++) { + final Statement st = con.createStatement(); + executor.submit(new Callable() { + @Override + public Void call() throws Exception { + long start = System.nanoTime(); + while (st.getWarnings() == null) { + long dt = System.nanoTime() - start; + if (dt > TimeUnit.SECONDS.toNanos(10)) { + throw new IllegalStateException("Expected to receive a notice within 10 seconds"); + } + } + st.close(); + return null; + } + }); + st.setQueryTimeout(120); + try { + st.execute("select notify_then_sleep()"); + } catch (SQLException e) { + assertEquals( + PSQLState.QUERY_CANCELED.getState(), + e.getSQLState(), + "Query is expected to be cancelled via st.close(), got " + e.getMessage() + ); + cancels++; + break; + } finally { + TestUtil.closeQuietly(st); + } + } + assertNotEquals(0, cancels, "At least one QUERY_CANCELED state is expected"); + } finally { + executor.shutdown(); + TestUtil.closeQuietly(outerLockCon); + } + } + + @Test + @Timeout(10) + void concurrentIsValid() throws Throwable { + ExecutorService executor = Executors.newCachedThreadPool(); + try { + List> 
results = new ArrayList<>(); + Random rnd = new Random(); + for (int i = 0; i < 10; i++) { + Future future = executor.submit(() -> { + try { + for (int j = 0; j < 50; j++) { + con.isValid(2); + try (PreparedStatement ps = + con.prepareStatement("select * from generate_series(1,?) as x(id)")) { + int limit = rnd.nextInt(10); + ps.setInt(1, limit); + try (ResultSet r = ps.executeQuery()) { + int cnt = 0; + String callName = "generate_series(1, " + limit + ") in thread " + + Thread.currentThread().getName(); + while (r.next()) { + cnt++; + assertEquals(cnt, r.getInt(1), callName + ", row " + cnt); + } + assertEquals(limit, cnt, callName + " number of rows"); + } + } + } + } catch (SQLException e) { + throw new RuntimeException(e); + } + }); + results.add(future); + } + for (Future result : results) { + // Propagate exception if any + result.get(); + } + } catch (ExecutionException e) { + throw e.getCause(); + } finally { + executor.shutdown(); + executor.awaitTermination(10, TimeUnit.SECONDS); + } + } + + @Test + @Timeout(20) + void fastCloses() throws SQLException { + ExecutorService executor = Executors.newSingleThreadExecutor(); + con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'"); + con.createStatement() + .execute("CREATE OR REPLACE FUNCTION notify_then_sleep() RETURNS VOID AS " + + "$BODY$ " + + "BEGIN " + + "RAISE NOTICE 'start';" + + "EXECUTE pg_sleep(1);" // Note: timeout value does not matter here, we just test if test crashes or locks somehow + + "END " + + "$BODY$ " + + "LANGUAGE plpgsql;"); + Map cnt = new HashMap<>(); + final Random rnd = new Random(); + for (int i = 0; i < 1000; i++) { + final Statement st = con.createStatement(); + executor.submit(new Callable() { + @Override + public Void call() throws Exception { + int s = rnd.nextInt(10); + if (s > 8) { + try { + Thread.sleep(s - 9); + } catch (InterruptedException ex) { + // don't execute the close here as this thread was cancelled below in shutdownNow + return null; + } + 
} + st.close(); + return null; + } + }); + ResultSet rs = null; + String sqlState = "0"; + try { + rs = st.executeQuery("select 1"); + // Acceptable + } catch (SQLException e) { + sqlState = e.getSQLState(); + if (!PSQLState.OBJECT_NOT_IN_STATE.getState().equals(sqlState) + && !PSQLState.QUERY_CANCELED.getState().equals(sqlState)) { + assertEquals( + PSQLState.QUERY_CANCELED.getState(), + e.getSQLState(), + "Query is expected to be cancelled via st.close(), got " + e.getMessage() + ); + } + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(st); + } + Integer val = cnt.get(sqlState); + val = (val == null ? 0 : val) + 1; + cnt.put(sqlState, val); + } + System.out.println("[testFastCloses] total counts for each sql state: " + cnt); + executor.shutdown(); + } + + /** + * Tests that calling {@code java.sql.Statement#close()} from a concurrent thread does not result + * in {@link java.util.ConcurrentModificationException}. + */ + @Test + void sideStatementFinalizers() throws SQLException { + long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(2); + + final AtomicInteger leaks = new AtomicInteger(); + final AtomicReference cleanupFailure = new AtomicReference<>(); + // Create several cleaners, so they can clean leaks concurrently + List cleaners = new ArrayList<>(); + for (int i = 0; i < 16; i++) { + cleaners.add(new LazyCleaner(Duration.ofSeconds(2), "pgjdbc-test-cleaner-" + i)); + } + + for (int q = 0; System.nanoTime() < deadline || leaks.get() < 10000; q++) { + for (int i = 0; i < 100; i++) { + PreparedStatement ps = con.prepareStatement("select " + (i + q)); + ps.close(); + } + final int nextId = q; + int cleanerId = ThreadLocalRandom.current().nextInt(cleaners.size()); + PreparedStatement ps = con.prepareStatement("select /*leak*/ " + nextId); + cleaners.get(cleanerId).register(new Object(), leak -> { + try { + ps.close(); + } catch (Throwable t) { + cleanupFailure.compareAndSet(null, t); + } + leaks.incrementAndGet(); + }); + } + if 
(cleanupFailure.get() != null) { + throw new IllegalStateException("Detected failure in cleanup thread", cleanupFailure.get()); + } + } + + /** + * Test that $JAVASCRIPT$ protects curly braces from JDBC {fn now()} kind of syntax. + * @throws SQLException if something goes wrong + */ + @Test + void javaScriptFunction() throws SQLException { + String str = " var _modules = {};\n" + + " var _current_stack = [];\n" + + "\n" + + " // modules start\n" + + " _modules[\"/root/aidbox/fhirbase/src/core\"] = {\n" + + " init: function(){\n" + + " var exports = {};\n" + + " _current_stack.push({file: \"core\", dir: \"/root/aidbox/fhirbase/src\"})\n" + + " var module = {exports: exports};"; + + PreparedStatement ps = null; + try { + ps = con.prepareStatement("select $JAVASCRIPT$" + str + "$JAVASCRIPT$"); + ResultSet rs = ps.executeQuery(); + rs.next(); + assertEquals(str, rs.getString(1), "JavaScript code has been protected with $JAVASCRIPT$"); + } finally { + TestUtil.closeQuietly(ps); + } + } + + @Test + void unterminatedDollarQuotes() throws SQLException { + ensureSyntaxException("dollar quotes", "CREATE OR REPLACE FUNCTION update_on_change() RETURNS TRIGGER AS $$\n" + + "BEGIN"); + } + + @Test + void unterminatedNamedDollarQuotes() throws SQLException { + ensureSyntaxException("dollar quotes", "CREATE OR REPLACE FUNCTION update_on_change() RETURNS TRIGGER AS $ABC$\n" + + "BEGIN"); + } + + @Test + void unterminatedComment() throws SQLException { + ensureSyntaxException("block comment", "CREATE OR REPLACE FUNCTION update_on_change() RETURNS TRIGGER AS /* $$\n" + + "BEGIN $$"); + } + + @Test + void unterminatedLiteral() throws SQLException { + ensureSyntaxException("string literal", "CREATE OR REPLACE FUNCTION update_on_change() 'RETURNS TRIGGER AS $$\n" + + "BEGIN $$"); + } + + @Test + void unterminatedIdentifier() throws SQLException { + ensureSyntaxException("string literal", "CREATE OR REPLACE FUNCTION \"update_on_change() RETURNS TRIGGER AS $$\n" + + "BEGIN $$"); + } + + 
private void ensureSyntaxException(String errorType, String sql) throws SQLException { + PreparedStatement ps = null; + try { + ps = con.prepareStatement(sql); + ps.executeUpdate(); + fail("Query with unterminated " + errorType + " should fail"); + } catch (SQLException e) { + assertEquals(PSQLState.SYNTAX_ERROR.getState(), e.getSQLState(), "Query should fail with unterminated " + errorType); + } finally { + TestUtil.closeQuietly(ps); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StringTypeUnspecifiedArrayTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StringTypeUnspecifiedArrayTest.java new file mode 100644 index 0000000..18942e6 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StringTypeUnspecifiedArrayTest.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.PGProperty; +import org.postgresql.geometric.PGbox; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.Array; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Properties; + +@RunWith(Parameterized.class) +public class StringTypeUnspecifiedArrayTest extends BaseTest4 { + public StringTypeUnspecifiedArrayTest(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Override + protected void updateProperties(Properties props) { + PGProperty.STRING_TYPE.set(props, "unspecified"); + super.updateProperties(props); + } + + @Test + public void testCreateArrayWithNonCachedType() throws Exception { + PGbox[] in = new PGbox[0]; + Array a = 
con.createArrayOf("box", in); + Assert.assertEquals(1111, a.getBaseType()); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TestACL.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TestACL.java new file mode 100644 index 0000000..ebf9cf3 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TestACL.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.jdbc.PgConnection; +import org.postgresql.jdbc.PgDatabaseMetaData; + +import org.junit.jupiter.api.Test; + +class TestACL { + + @Test + void parseACL() { + PgConnection pgConnection = null; + PgDatabaseMetaData a = new PgDatabaseMetaData(pgConnection) { + }; + a.parseACL("{jurka=arwdRxt/jurka,permuser=rw*/jurka}", "jurka"); + a.parseACL("{jurka=a*r*w*d*R*x*t*/jurka,permuser=rw*/jurka}", "jurka"); + a.parseACL("{=,jurka=arwdRxt,permuser=rw}", "jurka"); + a.parseACL("{jurka=arwdRxt/jurka,permuser=rw*/jurka,grantuser=w/permuser}", "jurka"); + a.parseACL("{jurka=a*r*w*d*R*x*t*/jurka,permuser=rw*/jurka,grantuser=w/permuser}", "jurka"); + a.parseACL( + "{jurka=arwdRxt/jurka,permuser=rw*/jurka,grantuser=w/permuser,\"group permgroup=a/jurka\"}", + "jurka"); + a.parseACL( + "{jurka=a*r*w*d*R*x*t*/jurka,permuser=rw*/jurka,grantuser=w/permuser,\"group permgroup=a/jurka\"}", + "jurka"); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimeTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimeTest.java new file mode 100644 index 0000000..04b900f --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimeTest.java @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.Calendar; +import java.util.TimeZone; + +/* +* Some simple tests based on problems reported by users. Hopefully these will help prevent previous +* problems from re-occurring ;-) +* +*/ +class TimeTest { + private Connection con; + private boolean testSetTime; + + @BeforeEach + void setUp() throws Exception { + con = TestUtil.openDB(); + TestUtil.createTempTable(con, "testtime", "tm time, tz time with time zone"); + } + + @AfterEach + void tearDown() throws Exception { + TestUtil.dropTable(con, "testtime"); + TestUtil.closeDB(con); + } + + private long extractMillis(long time) { + return time >= 0 ? 
(time % 1000) : (time % 1000 + 1000); + } + + /* + * + * Test use of calendar + */ + @Test + void getTimeZone() throws Exception { + final Time midnight = new Time(0, 0, 0); + Statement stmt = con.createStatement(); + Calendar cal = Calendar.getInstance(); + + cal.setTimeZone(TimeZone.getTimeZone("GMT")); + + int localOffset = Calendar.getInstance().getTimeZone().getOffset(midnight.getTime()); + + // set the time to midnight to make this easy + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'00:00:00','00:00:00'"))); + assertEquals(1, + stmt.executeUpdate(TestUtil.insertSQL("testtime", "'00:00:00.1','00:00:00.01'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", + "CAST(CAST(now() AS timestamp without time zone) AS time),now()"))); + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("testtime", "tm,tz")); + assertNotNull(rs); + assertTrue(rs.next()); + + Time time = rs.getTime(1); + Timestamp timestamp = rs.getTimestamp(1); + assertNotNull(timestamp); + + Timestamp timestamptz = rs.getTimestamp(2); + assertNotNull(timestamptz); + + assertEquals(midnight, time); + + time = rs.getTime(1, cal); + assertEquals(midnight.getTime(), time.getTime() - localOffset); + + assertTrue(rs.next()); + + time = rs.getTime(1); + assertNotNull(time); + assertEquals(100, extractMillis(time.getTime())); + timestamp = rs.getTimestamp(1); + assertNotNull(timestamp); + + assertEquals(100, extractMillis(timestamp.getTime())); + + assertEquals(100000000, timestamp.getNanos()); + + Time timetz = rs.getTime(2); + assertNotNull(timetz); + assertEquals(10, extractMillis(timetz.getTime())); + timestamptz = rs.getTimestamp(2); + assertNotNull(timestamptz); + assertEquals(10, extractMillis(timestamptz.getTime())); + + assertEquals(10000000, timestamptz.getNanos()); + + assertTrue(rs.next()); + + time = rs.getTime(1); + assertNotNull(time); + timestamp = rs.getTimestamp(1); + assertNotNull(timestamp); + + timetz = rs.getTime(2); + assertNotNull(timetz); + 
timestamptz = rs.getTimestamp(2); + assertNotNull(timestamptz); + } + + /* + * Tests the time methods in ResultSet + */ + @Test + void getTime() throws SQLException { + Statement stmt = con.createStatement(); + + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'01:02:03'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'23:59:59'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'12:00:00'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'05:15:21'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'16:21:51'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'12:15:12'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'22:12:01'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'08:46:44'"))); + + // Fall through helper + timeTest(); + + assertEquals(8, stmt.executeUpdate("DELETE FROM testtime")); + stmt.close(); + } + + /* + * Tests the time methods in PreparedStatement + */ + @Test + void setTime() throws SQLException { + PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("testtime", "?")); + Statement stmt = con.createStatement(); + + ps.setTime(1, makeTime(1, 2, 3)); + assertEquals(1, ps.executeUpdate()); + + ps.setTime(1, makeTime(23, 59, 59)); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, Time.valueOf("12:00:00"), Types.TIME); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, Time.valueOf("05:15:21"), Types.TIME); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, Time.valueOf("16:21:51"), Types.TIME); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, Time.valueOf("12:15:12"), Types.TIME); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, "22:12:1", Types.TIME); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, "8:46:44", Types.TIME); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, "5:1:2-03", 
Types.TIME); + assertEquals(1, ps.executeUpdate()); + + ps.setObject(1, "23:59:59+11", Types.TIME); + assertEquals(1, ps.executeUpdate()); + + // Need to let the test know this one has extra test cases. + testSetTime = true; + // Fall through helper + timeTest(); + testSetTime = false; + + assertEquals(10, stmt.executeUpdate("DELETE FROM testtime")); + stmt.close(); + ps.close(); + } + + /* + * Helper for the TimeTests. It tests what should be in the db + */ + private void timeTest() throws SQLException { + Statement st = con.createStatement(); + ResultSet rs; + Time t; + + rs = st.executeQuery(TestUtil.selectSQL("testtime", "tm")); + assertNotNull(rs); + + assertTrue(rs.next()); + t = rs.getTime(1); + assertNotNull(t); + assertEquals(makeTime(1, 2, 3), t); + + assertTrue(rs.next()); + t = rs.getTime(1); + assertNotNull(t); + assertEquals(makeTime(23, 59, 59), t); + + assertTrue(rs.next()); + t = rs.getTime(1); + assertNotNull(t); + assertEquals(makeTime(12, 0, 0), t); + + assertTrue(rs.next()); + t = rs.getTime(1); + assertNotNull(t); + assertEquals(makeTime(5, 15, 21), t); + + assertTrue(rs.next()); + t = rs.getTime(1); + assertNotNull(t); + assertEquals(makeTime(16, 21, 51), t); + + assertTrue(rs.next()); + t = rs.getTime(1); + assertNotNull(t); + assertEquals(makeTime(12, 15, 12), t); + + assertTrue(rs.next()); + t = rs.getTime(1); + assertNotNull(t); + assertEquals(makeTime(22, 12, 1), t); + + assertTrue(rs.next()); + t = rs.getTime(1); + assertNotNull(t); + assertEquals(makeTime(8, 46, 44), t); + + // If we're checking for timezones. 
+ if (testSetTime) { + assertTrue(rs.next()); + t = rs.getTime(1); + assertNotNull(t); + Time tmpTime = Time.valueOf("5:1:2"); + int localOffset = Calendar.getInstance().getTimeZone().getOffset(tmpTime.getTime()); + int timeOffset = 3 * 60 * 60 * 1000; + tmpTime.setTime(tmpTime.getTime() + timeOffset + localOffset); + assertEquals(makeTime(tmpTime.getHours(), tmpTime.getMinutes(), tmpTime.getSeconds()), t); + + assertTrue(rs.next()); + t = rs.getTime(1); + assertNotNull(t); + tmpTime = Time.valueOf("23:59:59"); + localOffset = Calendar.getInstance().getTimeZone().getOffset(tmpTime.getTime()); + timeOffset = -11 * 60 * 60 * 1000; + tmpTime.setTime(tmpTime.getTime() + timeOffset + localOffset); + assertEquals(makeTime(tmpTime.getHours(), tmpTime.getMinutes(), tmpTime.getSeconds()), t); + } + + assertFalse(rs.next()); + + rs.close(); + } + + private Time makeTime(int h, int m, int s) { + return Time.valueOf(TestUtil.fix(h, 2) + ":" + TestUtil.fix(m, 2) + ":" + TestUtil.fix(s, 2)); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimestampTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimestampTest.java new file mode 100644 index 0000000..19f97be --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimestampTest.java @@ -0,0 +1,737 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import org.postgresql.PGStatement; +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.jdbc.TimestampUtils; +import org.postgresql.test.TestUtil; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.Date; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Collection; +import java.util.GregorianCalendar; +import java.util.TimeZone; + +/* + * Test get/setTimestamp for both timestamp with time zone and timestamp without time zone datatypes + * TODO: refactor to a property-based testing or parameterized testing somehow so adding new times + * don't require to add constants and setters/getters. JUnit 5 would probably help here. 
+ */ +@RunWith(Parameterized.class) +public class TimestampTest extends BaseTest4 { + + public TimestampTest(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + private TimeZone currentTZ; + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, TSWTZ_TABLE, "ts timestamp with time zone"); + TestUtil.createTable(con, TSWOTZ_TABLE, "ts timestamp without time zone"); + TestUtil.createTable(con, DATE_TABLE, "ts date"); + currentTZ = TimeZone.getDefault(); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, TSWTZ_TABLE); + TestUtil.dropTable(con, TSWOTZ_TABLE); + TestUtil.dropTable(con, DATE_TABLE); + TimeZone.setDefault(currentTZ); + super.tearDown(); + } + + /** + * Ensure the driver doesn't modify a Calendar that is passed in. + */ + @Test + public void testCalendarModification() throws SQLException { + Calendar cal = Calendar.getInstance(); + Calendar origCal = (Calendar) cal.clone(); + PreparedStatement ps = con.prepareStatement("INSERT INTO " + TSWOTZ_TABLE + " VALUES (?)"); + + ps.setDate(1, new Date(0), cal); + ps.executeUpdate(); + assertEquals(origCal, cal); + + ps.setTimestamp(1, new Timestamp(0), cal); + ps.executeUpdate(); + assertEquals(origCal, cal); + + ps.setTime(1, new Time(0), cal); + // Can't actually execute this one because of type mismatch, + // but all we're really concerned about is the set call. 
+ // ps.executeUpdate(); + assertEquals(origCal, cal); + + ps.close(); + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT ts FROM " + TSWOTZ_TABLE); + assertTrue(rs.next()); + + rs.getDate(1, cal); + assertEquals(origCal, cal); + + rs.getTimestamp(1, cal); + assertEquals(origCal, cal); + + rs.getTime(1, cal); + assertEquals(origCal, cal); + + rs.close(); + stmt.close(); + } + + @Test + public void testInfinity() throws SQLException { + runInfinityTests(TSWTZ_TABLE, PGStatement.DATE_POSITIVE_INFINITY); + runInfinityTests(TSWTZ_TABLE, PGStatement.DATE_NEGATIVE_INFINITY); + runInfinityTests(TSWOTZ_TABLE, PGStatement.DATE_POSITIVE_INFINITY); + runInfinityTests(TSWOTZ_TABLE, PGStatement.DATE_NEGATIVE_INFINITY); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) { + runInfinityTests(DATE_TABLE, PGStatement.DATE_POSITIVE_INFINITY); + runInfinityTests(DATE_TABLE, PGStatement.DATE_NEGATIVE_INFINITY); + } + } + + private void runInfinityTests(String table, long value) throws SQLException { + GregorianCalendar cal = new GregorianCalendar(); + // Pick some random timezone that is hopefully different than ours + // and exists in this JVM. 
+ cal.setTimeZone(TimeZone.getTimeZone("Europe/Warsaw")); + + String strValue; + if (value == PGStatement.DATE_POSITIVE_INFINITY) { + strValue = "infinity"; + } else { + strValue = "-infinity"; + } + + Statement stmt = con.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL(table, "'" + strValue + "'")); + stmt.close(); + + PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL(table, "?")); + ps.setTimestamp(1, new Timestamp(value)); + ps.executeUpdate(); + ps.setTimestamp(1, new Timestamp(value), cal); + ps.executeUpdate(); + ps.close(); + + stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("select ts from " + table); + while (rs.next()) { + assertEquals(strValue, rs.getString(1)); + + Timestamp ts = rs.getTimestamp(1); + assertEquals(value, ts.getTime()); + + Date d = rs.getDate(1); + assertEquals(value, d.getTime()); + + Timestamp tscal = rs.getTimestamp(1, cal); + assertEquals(value, tscal.getTime()); + } + rs.close(); + + assertEquals(3, stmt.executeUpdate("DELETE FROM " + table)); + stmt.close(); + } + + /* + * Tests the timestamp methods in ResultSet on timestamp with time zone we insert a known string + * value (don't use setTimestamp) then see that we get back the same value from getTimestamp + */ + @Test + public void testGetTimestampWTZ() throws SQLException { + assumeTrue(TestUtil.haveIntegerDateTimes(con)); + + Statement stmt = con.createStatement(); + TimestampUtils tsu = ((BaseConnection) con).getTimestampUtils(); + + // Insert the three timestamp values in raw pg format + for (int i = 0; i < 3; i++) { + assertEquals(1, + stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS1WTZ_PGFORMAT + "'"))); + assertEquals(1, + stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS2WTZ_PGFORMAT + "'"))); + assertEquals(1, + stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS3WTZ_PGFORMAT + "'"))); + assertEquals(1, + stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS4WTZ_PGFORMAT + "'"))); + } + 
assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, + "'" + tsu.toString(null, new Timestamp(tmpDate1.getTime())) + "'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, + "'" + tsu.toString(null, new Timestamp(tmpDate2.getTime())) + "'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, + "'" + tsu.toString(null, new Timestamp(tmpDate3.getTime())) + "'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, + "'" + tsu.toString(null, new Timestamp(tmpDate4.getTime())) + "'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, + "'" + tsu.toString(null, new Timestamp(tmpTime1.getTime())) + "'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, + "'" + tsu.toString(null, new Timestamp(tmpTime2.getTime())) + "'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, + "'" + tsu.toString(null, new Timestamp(tmpTime3.getTime())) + "'"))); + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, + "'" + tsu.toString(null, new Timestamp(tmpTime4.getTime())) + "'"))); + + // Fall through helper + timestampTestWTZ(); + + assertEquals(20, stmt.executeUpdate("DELETE FROM " + TSWTZ_TABLE)); + + stmt.close(); + } + + /* + * Tests the timestamp methods in PreparedStatement on timestamp with time zone we insert a value + * using setTimestamp then see that we get back the same value from getTimestamp (which we know + * works as it was tested independently of setTimestamp + */ + @Test + public void testSetTimestampWTZ() throws SQLException { + assumeTrue(TestUtil.haveIntegerDateTimes(con)); + + Statement stmt = con.createStatement(); + PreparedStatement pstmt = con.prepareStatement(TestUtil.insertSQL(TSWTZ_TABLE, "?")); + + pstmt.setTimestamp(1, TS1WTZ); + assertEquals(1, pstmt.executeUpdate()); + + pstmt.setTimestamp(1, TS2WTZ); + assertEquals(1, pstmt.executeUpdate()); + + pstmt.setTimestamp(1, TS3WTZ); + assertEquals(1, 
pstmt.executeUpdate()); + + pstmt.setTimestamp(1, TS4WTZ); + assertEquals(1, pstmt.executeUpdate()); + + // With java.sql.Timestamp + pstmt.setObject(1, TS1WTZ, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, TS2WTZ, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, TS3WTZ, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, TS4WTZ, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + + // With Strings + pstmt.setObject(1, TS1WTZ_PGFORMAT, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, TS2WTZ_PGFORMAT, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, TS3WTZ_PGFORMAT, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, TS4WTZ_PGFORMAT, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + + // With java.sql.Date + pstmt.setObject(1, tmpDate1, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, tmpDate2, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, tmpDate3, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, tmpDate4, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + + // With java.sql.Time + pstmt.setObject(1, tmpTime1, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, tmpTime2, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, tmpTime3, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + pstmt.setObject(1, tmpTime4, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + // Fall through helper + timestampTestWTZ(); + + assertEquals(20, stmt.executeUpdate("DELETE FROM " + TSWTZ_TABLE)); + + pstmt.close(); + stmt.close(); + } + + /* + * Tests the timestamp methods in ResultSet on timestamp without time zone we insert a known + * string value (don't use setTimestamp) then see that we 
get back the same value from + * getTimestamp + */ + @Test + public void testGetTimestampWOTZ() throws SQLException { + assumeTrue(TestUtil.haveIntegerDateTimes(con)); + //Refer to #896 + assumeMinimumServerVersion(ServerVersion.v8_4); + + Statement stmt = con.createStatement(); + TimestampUtils tsu = ((BaseConnection) con).getTimestampUtils(); + + // Insert the three timestamp values in raw pg format + for (int i = 0; i < 3; i++) { + for (String value : TS__WOTZ_PGFORMAT) { + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWOTZ_TABLE, "'" + value + "'"))); + } + } + + for (java.util.Date date : TEST_DATE_TIMES) { + String stringValue = "'" + tsu.toString(null, new Timestamp(date.getTime())) + "'"; + assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWOTZ_TABLE, stringValue))); + } + + // Fall through helper + timestampTestWOTZ(); + + assertEquals(50, stmt.executeUpdate("DELETE FROM " + TSWOTZ_TABLE)); + + stmt.close(); + } + + /* + * Tests the timestamp methods in PreparedStatement on timestamp without time zone we insert a + * value using setTimestamp then see that we get back the same value from getTimestamp (which we + * know works as it was tested independently of setTimestamp + */ + @Test + public void testSetTimestampWOTZ() throws SQLException { + assumeTrue(TestUtil.haveIntegerDateTimes(con)); + //Refer to #896 + assumeMinimumServerVersion(ServerVersion.v8_4); + + Statement stmt = con.createStatement(); + PreparedStatement pstmt = con.prepareStatement(TestUtil.insertSQL(TSWOTZ_TABLE, "?")); + + for (Timestamp timestamp : TS__WOTZ) { + pstmt.setTimestamp(1, timestamp); + assertEquals(1, pstmt.executeUpdate()); + } + + // With java.sql.Timestamp + for (Timestamp timestamp : TS__WOTZ) { + pstmt.setObject(1, timestamp, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + } + + // With Strings + for (String value : TS__WOTZ_PGFORMAT) { + pstmt.setObject(1, value, Types.TIMESTAMP); + assertEquals(1, pstmt.executeUpdate()); + } + + // With 
java.sql.Date, java.sql.Time + for (java.util.Date date : TEST_DATE_TIMES) { + pstmt.setObject(1, date, Types.TIMESTAMP); + assertEquals("insert into TSWOTZ_TABLE via setObject(1, " + date + + ", Types.TIMESTAMP) -> expecting one row inserted", 1, pstmt.executeUpdate()); + } + + // Fall through helper + timestampTestWOTZ(); + + assertEquals(50, stmt.executeUpdate("DELETE FROM " + TSWOTZ_TABLE)); + + pstmt.close(); + stmt.close(); + } + + /* + * Helper for the TimestampTests. It tests what should be in the db + */ + private void timestampTestWTZ() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet rs; + Timestamp t; + + rs = stmt.executeQuery("select ts from " + TSWTZ_TABLE); // removed the order by ts + assertNotNull(rs); + + for (int i = 0; i < 3; i++) { + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS1WTZ, t); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS2WTZ, t); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS3WTZ, t); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS4WTZ, t); + } + + // Testing for Date + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(tmpDate1.getTime(), t.getTime()); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(tmpDate2.getTime(), t.getTime()); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(tmpDate3.getTime(), t.getTime()); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(tmpDate4.getTime(), t.getTime()); + + // Testing for Time + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(tmpTime1.getTime(), t.getTime()); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(tmpTime2.getTime(), t.getTime()); + + assertTrue(rs.next()); + t 
= rs.getTimestamp(1); + assertNotNull(t); + assertEquals(tmpTime3.getTime(), t.getTime()); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(tmpTime4.getTime(), t.getTime()); + + assertTrue(!rs.next()); // end of table. Fail if more entries exist. + + rs.close(); + stmt.close(); + } + + /* + * Helper for the TimestampTests. It tests what should be in the db + */ + private void timestampTestWOTZ() throws SQLException { + Statement stmt = con.createStatement(); + Timestamp t; + String tString; + + ResultSet rs = stmt.executeQuery("select ts from " + TSWOTZ_TABLE); // removed the order by ts + assertNotNull(rs); + + for (int i = 0; i < 3; i++) { + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS1WOTZ, t); + + tString = rs.getString(1); + assertNotNull(tString); + assertEquals(TS1WOTZ_PGFORMAT, tString); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS2WOTZ, t); + + tString = rs.getString(1); + assertNotNull(tString); + assertEquals(TS2WOTZ_PGFORMAT, tString); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS3WOTZ, t); + + tString = rs.getString(1); + assertNotNull(tString); + assertEquals(TS3WOTZ_PGFORMAT, tString); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS4WOTZ, t); + + tString = rs.getString(1); + assertNotNull(tString); + assertEquals(TS4WOTZ_PGFORMAT, tString); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS5WOTZ, t); + + tString = rs.getString(1); + assertNotNull(tString); + assertEquals(TS5WOTZ_PGFORMAT, tString); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS6WOTZ, t); + + tString = rs.getString(1); + assertNotNull(tString); + assertEquals(TS6WOTZ_PGFORMAT, tString); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS7WOTZ, t); + + 
tString = rs.getString(1); + assertNotNull(tString); + assertEquals(TS7WOTZ_PGFORMAT, tString); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS8WOTZ, t); + + tString = rs.getString(1); + assertNotNull(tString); + assertEquals(TS8WOTZ_PGFORMAT, tString); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS9WOTZ_ROUNDED, t); + + tString = rs.getString(1); + assertNotNull(tString); + assertEquals(TS9WOTZ_ROUNDED_PGFORMAT, tString); + + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals(TS10WOTZ_ROUNDED, t); + + tString = rs.getString(1); + assertNotNull(tString); + assertEquals(TS10WOTZ_ROUNDED_PGFORMAT, tString); + } + + // Testing for Date + for (java.util.Date expected : TEST_DATE_TIMES) { + assertTrue(rs.next()); + t = rs.getTimestamp(1); + assertNotNull(t); + assertEquals("rs.getTimestamp(1).getTime()", expected.getTime(), t.getTime()); + } + + assertTrue(!rs.next()); // end of table. Fail if more entries exist. 
+ + rs.close(); + stmt.close(); + } + + @Test + public void testJavaTimestampFromSQLTime() throws SQLException { + Statement st = con.createStatement(); + ResultSet rs = st.executeQuery("SELECT '00:00:05.123456'::time as t, '1970-01-01 00:00:05.123456'::timestamp as ts, " + + "'00:00:05.123456 +0300'::time with time zone as tz, '1970-01-01 00:00:05.123456 +0300'::timestamp with time zone as tstz "); + rs.next(); + Timestamp t = rs.getTimestamp("t"); + Timestamp ts = rs.getTimestamp("ts"); + Timestamp tz = rs.getTimestamp("tz"); + + Timestamp tstz = rs.getTimestamp("tstz"); + + Integer desiredNanos = 123456000; + Integer tNanos = t.getNanos(); + Integer tzNanos = tz.getNanos(); + + assertEquals("Time should be microsecond-accurate", desiredNanos, tNanos); + assertEquals("Time with time zone should be microsecond-accurate", desiredNanos, tzNanos); + assertEquals("Unix epoch timestamp and Time should match", ts, t); + assertEquals("Unix epoch timestamp with time zone and time with time zone should match", tstz, tz); + } + + private static Timestamp getTimestamp(int y, int m, int d, int h, int mn, int se, int f, + String tz) { + Timestamp result = null; + java.text.DateFormat dateFormat; + try { + String ts; + ts = TestUtil.fix(y, 4) + "-" + + TestUtil.fix(m, 2) + "-" + + TestUtil.fix(d, 2) + " " + + TestUtil.fix(h, 2) + ":" + + TestUtil.fix(mn, 2) + ":" + + TestUtil.fix(se, 2) + " "; + + if (tz == null) { + dateFormat = new SimpleDateFormat("y-M-d H:m:s"); + } else { + ts = ts + tz; + dateFormat = new SimpleDateFormat("y-M-d H:m:s z"); + } + java.util.Date date = dateFormat.parse(ts); + result = new Timestamp(date.getTime()); + result.setNanos(f); + } catch (Exception ex) { + fail(ex.getMessage()); + } + return result; + } + + private static final Timestamp TS1WTZ = + getTimestamp(1950, 2, 7, 15, 0, 0, 100000000, "PST"); + private static final String TS1WTZ_PGFORMAT = "1950-02-07 15:00:00.1-08"; + + private static final Timestamp TS2WTZ = + getTimestamp(2000, 2, 7, 
15, 0, 0, 120000000, "GMT"); + private static final String TS2WTZ_PGFORMAT = "2000-02-07 15:00:00.12+00"; + + private static final Timestamp TS3WTZ = + getTimestamp(2000, 7, 7, 15, 0, 0, 123000000, "GMT"); + private static final String TS3WTZ_PGFORMAT = "2000-07-07 15:00:00.123+00"; + + private static final Timestamp TS4WTZ = + getTimestamp(2000, 7, 7, 15, 0, 0, 123456000, "GMT"); + private static final String TS4WTZ_PGFORMAT = "2000-07-07 15:00:00.123456+00"; + + private static final Timestamp TS1WOTZ = + getTimestamp(1950, 2, 7, 15, 0, 0, 100000000, null); + private static final String TS1WOTZ_PGFORMAT = "1950-02-07 15:00:00.1"; + + private static final Timestamp TS2WOTZ = + getTimestamp(2000, 2, 7, 15, 0, 0, 120000000, null); + private static final String TS2WOTZ_PGFORMAT = "2000-02-07 15:00:00.12"; + + private static final Timestamp TS3WOTZ = + getTimestamp(2000, 7, 7, 15, 0, 0, 123000000, null); + private static final String TS3WOTZ_PGFORMAT = "2000-07-07 15:00:00.123"; + + private static final Timestamp TS4WOTZ = + getTimestamp(2000, 7, 7, 15, 0, 0, 123456000, null); + private static final String TS4WOTZ_PGFORMAT = "2000-07-07 15:00:00.123456"; + + private static final Timestamp TS5WOTZ = + new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY); + private static final String TS5WOTZ_PGFORMAT = "-infinity"; + + private static final Timestamp TS6WOTZ = + new Timestamp(PGStatement.DATE_POSITIVE_INFINITY); + private static final String TS6WOTZ_PGFORMAT = "infinity"; + + private static final Timestamp TS7WOTZ = + getTimestamp(2000, 7, 7, 15, 0, 0, 0, null); + private static final String TS7WOTZ_PGFORMAT = "2000-07-07 15:00:00"; + + private static final Timestamp TS8WOTZ = + getTimestamp(2000, 7, 7, 15, 0, 0, 20400000, null); + private static final String TS8WOTZ_PGFORMAT = "2000-07-07 15:00:00.0204"; + + private static final Timestamp TS9WOTZ = + getTimestamp(2000, 2, 7, 15, 0, 0, 789, null); + private static final String TS9WOTZ_PGFORMAT = "2000-02-07 
15:00:00.000000789"; + private static final Timestamp TS9WOTZ_ROUNDED = + getTimestamp(2000, 2, 7, 15, 0, 0, 1000, null); + private static final String TS9WOTZ_ROUNDED_PGFORMAT = "2000-02-07 15:00:00.000001"; + + private static final Timestamp TS10WOTZ = + getTimestamp(2018, 12, 31, 23, 59, 59, 999999500, null); + private static final String TS10WOTZ_PGFORMAT = "2018-12-31 23:59:59.999999500"; + private static final Timestamp TS10WOTZ_ROUNDED = + getTimestamp(2019, 1, 1, 0, 0, 0, 0, null); + private static final String TS10WOTZ_ROUNDED_PGFORMAT = "2019-01-01 00:00:00"; + + private static final Timestamp[] TS__WOTZ = { + TS1WOTZ, TS2WOTZ, TS3WOTZ, TS4WOTZ, TS5WOTZ, + TS6WOTZ, TS7WOTZ, TS8WOTZ, TS9WOTZ, TS10WOTZ, + }; + + private static final String[] TS__WOTZ_PGFORMAT = { + TS1WOTZ_PGFORMAT, TS2WOTZ_PGFORMAT, TS3WOTZ_PGFORMAT, TS4WOTZ_PGFORMAT, TS5WOTZ_PGFORMAT, + TS6WOTZ_PGFORMAT, TS7WOTZ_PGFORMAT, TS8WOTZ_PGFORMAT, TS9WOTZ_PGFORMAT, TS10WOTZ_PGFORMAT, + }; + + private static final String TSWTZ_TABLE = "testtimestampwtz"; + private static final String TSWOTZ_TABLE = "testtimestampwotz"; + private static final String DATE_TABLE = "testtimestampdate"; + + private static final java.sql.Date tmpDate1 = new java.sql.Date(TS1WTZ.getTime()); + private static final java.sql.Time tmpTime1 = new java.sql.Time(TS1WTZ.getTime()); + private static final java.sql.Date tmpDate2 = new java.sql.Date(TS2WTZ.getTime()); + private static final java.sql.Time tmpTime2 = new java.sql.Time(TS2WTZ.getTime()); + private static final java.sql.Date tmpDate3 = new java.sql.Date(TS3WTZ.getTime()); + private static final java.sql.Time tmpTime3 = new java.sql.Time(TS3WTZ.getTime()); + private static final java.sql.Date tmpDate4 = new java.sql.Date(TS4WTZ.getTime()); + private static final java.sql.Time tmpTime4 = new java.sql.Time(TS4WTZ.getTime()); + + private static final java.sql.Date tmpDate1WOTZ = new java.sql.Date(TS1WOTZ.getTime()); + private static final java.sql.Time tmpTime1WOTZ = new 
java.sql.Time(TS1WOTZ.getTime()); + private static final java.sql.Date tmpDate2WOTZ = new java.sql.Date(TS2WOTZ.getTime()); + private static final java.sql.Time tmpTime2WOTZ = new java.sql.Time(TS2WOTZ.getTime()); + private static final java.sql.Date tmpDate3WOTZ = new java.sql.Date(TS3WOTZ.getTime()); + private static final java.sql.Time tmpTime3WOTZ = new java.sql.Time(TS3WOTZ.getTime()); + private static final java.sql.Date tmpDate4WOTZ = new java.sql.Date(TS4WOTZ.getTime()); + private static final java.sql.Time tmpTime4WOTZ = new java.sql.Time(TS4WOTZ.getTime()); + private static final java.sql.Date tmpDate5WOTZ = new java.sql.Date(TS5WOTZ.getTime()); + private static final java.sql.Date tmpTime5WOTZ = new java.sql.Date(TS5WOTZ.getTime()); + private static final java.sql.Date tmpDate6WOTZ = new java.sql.Date(TS6WOTZ.getTime()); + private static final java.sql.Date tmpTime6WOTZ = new java.sql.Date(TS6WOTZ.getTime()); + private static final java.sql.Date tmpDate7WOTZ = new java.sql.Date(TS7WOTZ.getTime()); + private static final java.sql.Time tmpTime7WOTZ = new java.sql.Time(TS7WOTZ.getTime()); + private static final java.sql.Date tmpDate8WOTZ = new java.sql.Date(TS8WOTZ.getTime()); + private static final java.sql.Time tmpTime8WOTZ = new java.sql.Time(TS8WOTZ.getTime()); + private static final java.sql.Date tmpDate9WOTZ = new java.sql.Date(TS9WOTZ.getTime()); + private static final java.sql.Time tmpTime9WOTZ = new java.sql.Time(TS9WOTZ.getTime()); + private static final java.sql.Date tmpDate10WOTZ = new java.sql.Date(TS10WOTZ.getTime()); + private static final java.sql.Time tmpTime10WOTZ = new java.sql.Time(TS10WOTZ.getTime()); + + private static final java.util.Date[] TEST_DATE_TIMES = { + tmpDate1WOTZ, tmpDate2WOTZ, tmpDate3WOTZ, tmpDate4WOTZ, tmpDate5WOTZ, + tmpDate6WOTZ, tmpDate7WOTZ, tmpDate8WOTZ, tmpDate9WOTZ, tmpDate10WOTZ, + tmpTime1WOTZ, tmpTime2WOTZ, tmpTime3WOTZ, tmpTime4WOTZ, tmpTime5WOTZ, + tmpTime6WOTZ, tmpTime7WOTZ, tmpTime8WOTZ, tmpTime9WOTZ, 
tmpTime10WOTZ, + }; +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneCachingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneCachingTest.java new file mode 100644 index 0000000..97430e5 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneCachingTest.java @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2003, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertEquals; + +import org.postgresql.core.BaseConnection; +import org.postgresql.jdbc.TimestampUtils; +import org.postgresql.test.TestUtil; + +import org.junit.Assume; +import org.junit.Test; + +import java.lang.reflect.Field; +import java.sql.BatchUpdateException; +import java.sql.Date; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.GregorianCalendar; +import java.util.TimeZone; + +public class TimezoneCachingTest extends BaseTest4 { + + /** + * Test to check the internal cached timezone of a prepared statement is set/cleared as expected. 
+ */ + @Test + public void testPreparedStatementCachedTimezoneInstance() throws SQLException { + Timestamp ts = new Timestamp(2016 - 1900, 0, 31, 0, 0, 0, 0); + Date date = new Date(2016 - 1900, 0, 31); + Time time = new Time(System.currentTimeMillis()); + TimeZone tz = TimeZone.getDefault(); + PreparedStatement pstmt = null; + try { + pstmt = con.prepareStatement("INSERT INTO testtz VALUES (?,?)"); + assertEquals( + "Cache never initialized: must be null", + null, getTimeZoneCache(pstmt)); + pstmt.setInt(1, 1); + assertEquals( + "Cache never initialized: must be null", + null, getTimeZoneCache(pstmt)); + pstmt.setTimestamp(2, ts); + assertEquals( + "Cache initialized by setTimestamp(xx): must not be null", + tz, getTimeZoneCache(pstmt)); + pstmt.addBatch(); + assertEquals( + "Cache was initialized, addBatch does not change that: must not be null", + tz, getTimeZoneCache(pstmt)); + pstmt.setInt(1, 2); + pstmt.setNull(2, java.sql.Types.DATE); + assertEquals( + "Cache was initialized, setNull does not change that: must not be null", + tz, getTimeZoneCache(pstmt)); + pstmt.addBatch(); + assertEquals( + "Cache was initialized, addBatch does not change that: must not be null", + tz, getTimeZoneCache(pstmt)); + pstmt.executeBatch(); + assertEquals( + "Cache reset by executeBatch(): must be null", + null, getTimeZoneCache(pstmt)); + pstmt.setInt(1, 3); + assertEquals( + "Cache not initialized: must be null", + null, getTimeZoneCache(pstmt)); + pstmt.setInt(1, 4); + pstmt.setNull(2, java.sql.Types.DATE); + assertEquals( + "Cache was not initialized, setNull does not change that: must be null", + null, getTimeZoneCache(pstmt)); + pstmt.setTimestamp(2, ts); + assertEquals( + "Cache initialized by setTimestamp(xx): must not be null", + tz, getTimeZoneCache(pstmt)); + pstmt.clearParameters(); + assertEquals( + "Cache was initialized, clearParameters does not change that: must not be null", + tz, getTimeZoneCache(pstmt)); + pstmt.setInt(1, 5); + pstmt.setTimestamp(2, ts); + 
pstmt.addBatch(); + pstmt.executeBatch(); + pstmt.close(); + pstmt = con.prepareStatement("UPDATE testtz SET col2 = ? WHERE col1 = 1"); + assertEquals( + "Cache not initialized: must be null", + null, getTimeZoneCache(pstmt)); + pstmt.setDate(1, date); + assertEquals( + "Cache initialized by setDate(xx): must not be null", + tz, getTimeZoneCache(pstmt)); + pstmt.execute(); + assertEquals( + "Cache reset by execute(): must be null", + null, getTimeZoneCache(pstmt)); + pstmt.setDate(1, date); + assertEquals( + "Cache initialized by setDate(xx): must not be null", + tz, getTimeZoneCache(pstmt)); + pstmt.executeUpdate(); + assertEquals( + "Cache reset by executeUpdate(): must be null", + null, getTimeZoneCache(pstmt)); + pstmt.setTime(1, time); + assertEquals( + "Cache initialized by setTime(xx): must not be null", + tz, getTimeZoneCache(pstmt)); + pstmt.close(); + pstmt = con.prepareStatement("SELECT * FROM testtz WHERE col2 = ?"); + pstmt.setDate(1, date); + assertEquals( + "Cache initialized by setDate(xx): must not be null", + tz, getTimeZoneCache(pstmt)); + pstmt.executeQuery(); + assertEquals( + "Cache reset by executeQuery(): must be null", + null, getTimeZoneCache(pstmt)); + } finally { + TestUtil.closeQuietly(pstmt); + } + } + + /** + * Test to check the internal cached timezone of a prepared statement is used as expected. 
+ */ + @Test + public void testPreparedStatementCachedTimezoneUsage() throws SQLException { + Timestamp ts = new Timestamp(2016 - 1900, 0, 31, 0, 0, 0, 0); + Statement stmt = null; + PreparedStatement pstmt = null; + TimeZone tz1 = TimeZone.getTimeZone("GMT+8:00"); + TimeZone tz2 = TimeZone.getTimeZone("GMT-2:00"); + TimeZone tz3 = TimeZone.getTimeZone("UTC+2"); + TimeZone tz4 = TimeZone.getTimeZone("UTC+3"); + Calendar c3 = new GregorianCalendar(tz3); + Calendar c4 = new GregorianCalendar(tz4); + try { + stmt = con.createStatement(); + TimeZone.setDefault(tz1); + pstmt = con.prepareStatement("INSERT INTO testtz VALUES(1, ?)"); + pstmt.setTimestamp(1, ts); + pstmt.executeUpdate(); + checkTimestamp("Default is tz2, was saved as tz1, expecting tz1", stmt, ts, tz1); + pstmt.close(); + pstmt = con.prepareStatement("UPDATE testtz SET col2 = ? WHERE col1 = ?"); + pstmt.setTimestamp(1, ts); + TimeZone.setDefault(tz2); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.executeBatch(); + checkTimestamp("Default is tz2, but was saved as tz1, expecting tz1", stmt, ts, tz1); + pstmt.setTimestamp(1, ts); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.executeBatch(); + checkTimestamp("Default is tz2, was saved as tz2, expecting tz2", stmt, ts, tz2); + pstmt.setTimestamp(1, ts); + pstmt.setInt(2, 1); + pstmt.clearParameters(); + TimeZone.setDefault(tz1); + pstmt.setTimestamp(1, ts); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.executeBatch(); + checkTimestamp( + "Default is tz1, but was first saved as tz2, next save used tz2 cache, expecting tz2", + stmt, ts, tz2); + pstmt.setTimestamp(1, ts, c3); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.executeBatch(); + checkTimestamp("Explicit use of tz3, expecting tz3", stmt, ts, tz3); + pstmt.setTimestamp(1, ts, c3); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.setTimestamp(1, ts, c4); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.executeBatch(); + checkTimestamp("Last set explicitly used tz4, expecting tz4", stmt, 
ts, tz4); + pstmt.setTimestamp(1, ts, c3); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.setTimestamp(1, ts); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.setTimestamp(1, ts, c4); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.executeBatch(); + checkTimestamp("Last set explicitly used tz4, expecting tz4", stmt, ts, tz4); + pstmt.setTimestamp(1, ts, c3); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.setTimestamp(1, ts); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.executeBatch(); + checkTimestamp( + "Default is tz1, was first saved as tz1, last save used tz1 cache, expecting tz1", stmt, + ts, tz1); + pstmt.setTimestamp(1, ts); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.setTimestamp(1, ts, c4); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.setTimestamp(1, ts); + pstmt.setInt(2, 1); + pstmt.addBatch(); + pstmt.executeBatch(); + checkTimestamp( + "Default is tz1, was first saved as tz1, last save used tz1 cache, expecting tz1", stmt, + ts, tz1); + } catch (BatchUpdateException ex) { + SQLException nextException = ex.getNextException(); + nextException.printStackTrace(); + } finally { + TimeZone.setDefault(null); + TestUtil.closeQuietly(pstmt); + TestUtil.closeQuietly(stmt); + } + } + + /** + * Test to check the internal cached timezone of a result set is set/cleared as expected. 
+ */ + @Test + public void testResultSetCachedTimezoneInstance() throws SQLException { + Timestamp ts = new Timestamp(2016 - 1900, 0, 31, 0, 0, 0, 0); + TimeZone tz = TimeZone.getDefault(); + Statement stmt = null; + PreparedStatement pstmt = null; + ResultSet rs = null; + try { + pstmt = con.prepareStatement("INSERT INTO testtz VALUES (?,?)"); + pstmt.setInt(1, 1); + pstmt.setTimestamp(2, ts); + pstmt.addBatch(); + pstmt.executeBatch(); + stmt = con.createStatement(); + rs = stmt.executeQuery("SELECT col1, col2 FROM testtz"); + rs.next(); + assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs)); + rs.getInt(1); + assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs)); + rs.getTimestamp(2); + assertEquals("Cache initialized by getTimestamp(x): must not be null", + tz, getTimeZoneCache(rs)); + rs.close(); + rs = stmt.executeQuery("SELECT col1, col2 FROM testtz"); + rs.next(); + rs.getInt(1); + assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs)); + rs.getObject(2); + assertEquals("Cache initialized by getObject(x) on a DATE column: must not be null", + tz, getTimeZoneCache(rs)); + rs.close(); + rs = stmt.executeQuery("SELECT col1, col2 FROM testtz"); + rs.next(); + assertEquals("Cache should NOT be set", null, getTimeZoneCache(rs)); + rs.getInt(1); + assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs)); + rs.getDate(2); + assertEquals("Cache initialized by getDate(x): must not be null", tz, getTimeZoneCache(rs)); + rs.close(); + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(pstmt); + TestUtil.closeQuietly(stmt); + } + } + + /** + * Test to check the internal cached timezone of a result set is used as expected. 
+ */ + @Test + public void testResultSetCachedTimezoneUsage() throws SQLException { + Statement stmt = null; + PreparedStatement pstmt = null; + ResultSet rs = null; + TimeZone tz1 = TimeZone.getTimeZone("GMT+8:00"); + TimeZone tz2 = TimeZone.getTimeZone("GMT-2:00"); // 10 hour difference + Timestamp ts1 = new Timestamp(2016 - 1900, 0, 31, 3, 0, 0, 0); + Timestamp ts2 = new Timestamp(2016 - 1900, 0, 31, 13, 0, 0, 0); // 10 hour difference + Calendar c1 = new GregorianCalendar(tz1); + Calendar c2 = new GregorianCalendar(tz2); + try { + TimeZone.setDefault(tz1); + pstmt = con.prepareStatement("INSERT INTO testtz VALUES (?,?)"); + pstmt.setInt(1, 1); + // We are in tz1, so timestamp added as tz1. + pstmt.setTimestamp(2, ts1); + pstmt.addBatch(); + pstmt.executeBatch(); + stmt = con.createStatement(); + rs = stmt.executeQuery("SELECT col1, col2 FROM testtz"); + rs.next(); + rs.getInt(1); + assertEquals( + "Current TZ is tz1, empty cache to be initialized to tz1 => retrieve in tz1, timestamps must be equal", + ts1, rs.getTimestamp(2)); + rs.close(); + rs = stmt.executeQuery("SELECT col1, col2 FROM testtz"); + rs.next(); + rs.getInt(1); + TimeZone.setDefault(tz2); + assertEquals( + "Current TZ is tz2, empty cache to be initialized to tz2 => retrieve in tz2, timestamps cannot be equal", + ts2, rs.getTimestamp(2)); + assertEquals( + "Explicit tz1 calendar, so timestamps must be equal", + ts1, rs.getTimestamp(2, c1)); + assertEquals( + "Cache was initialized to tz2, so timestamps cannot be equal", + ts2, rs.getTimestamp(2)); + TimeZone.setDefault(tz1); + assertEquals( + "Cache was initialized to tz2, so timestamps cannot be equal", + ts2, rs.getTimestamp(2)); + rs.close(); + rs = stmt.executeQuery("SELECT col1, col2 FROM testtz"); + rs.next(); + rs.getInt(1); + assertEquals( + "Explicit tz2 calendar, so timestamps cannot be equal", + ts2, rs.getTimestamp(2, c2)); + assertEquals( + "Current TZ is tz1, empty cache to be initialized to tz1 => retrieve in tz1, timestamps must 
be equal", + ts1, rs.getTimestamp(2)); + assertEquals( + "Explicit tz2 calendar, so timestamps cannot be equal", + ts2, rs.getTimestamp(2, c2)); + assertEquals( + "Explicit tz2 calendar, so timestamps must be equal", + ts1, rs.getTimestamp(2, c1)); + rs.close(); + } finally { + TimeZone.setDefault(null); + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(pstmt); + TestUtil.closeQuietly(stmt); + } + } + + private void checkTimestamp(String checkText, Statement stmt, Timestamp ts, TimeZone tz) + throws SQLException { + TimeZone prevTz = TimeZone.getDefault(); + TimeZone.setDefault(tz); + ResultSet rs = stmt.executeQuery("SELECT col2 FROM testtz"); + rs.next(); + Timestamp dbTs = rs.getTimestamp(1); + rs.close(); + TimeZone.setDefault(prevTz); + assertEquals(checkText, ts, dbTs); + } + + private TimeZone getTimeZoneCache(Object stmt) { + try { + Field defaultTimeZoneField = stmt.getClass().getDeclaredField("defaultTimeZone"); + defaultTimeZoneField.setAccessible(true); + return (TimeZone) defaultTimeZoneField.get(stmt); + } catch (Exception e) { + } + return null; + } + + /* Set up the fixture for this test case: a connection to a database with + a table for this test. */ + public void setUp() throws Exception { + super.setUp(); + TimestampUtils timestampUtils = ((BaseConnection) con).getTimestampUtils(); + Assume.assumeFalse("If connection has fast access to TimeZone.getDefault," + + " then no cache is needed", timestampUtils.hasFastDefaultTimeZone()); + /* Drop the test table if it already exists for some reason. It is + not an error if it doesn't exist. */ + TestUtil.createTable(con, "testtz", "col1 INTEGER, col2 TIMESTAMP"); + } + + // Tear down the fixture for this test case. 
+ public void tearDown() throws SQLException { + TestUtil.dropTable(con, "testtz"); + super.tearDown(); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneTest.java new file mode 100644 index 0000000..42ca9f7 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneTest.java @@ -0,0 +1,974 @@ +/* + * Copyright (c) 2005, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.Date; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Calendar; +import java.util.List; +import java.util.Properties; +import java.util.TimeZone; + +/** + *
<p>Tests for time and date types with calendars involved. TimestampTest was melting my brain, so I + * started afresh. -O</p> + * + * <p>Conversions that this code tests:</p> + * + * <p>setTimestamp -> timestamp, timestamptz, date, time, timetz</p> + * + * <p>setDate -> timestamp, timestamptz, date</p> + * + * <p>setTime -> time, timetz</p> + * + * <p>getTimestamp <- timestamp, timestamptz, date, time, timetz</p> + * + * <p>getDate <- timestamp, timestamptz, date</p> + * + * <p>getTime <- timestamp, timestamptz, time, timetz</p> + * + * <p>(this matches what we must support per JDBC 3.0, tables B-5 and B-6)</p>
+ */ +public class TimezoneTest { + private static final int DAY = 24 * 3600 * 1000; + private static final TimeZone saveTZ = TimeZone.getDefault(); + private static final int PREPARE_THRESHOLD = 2; + + private Connection con; + + // + // We set up everything in different timezones to try to exercise many cases: + // + // default JVM timezone: GMT+0100 + // server timezone: GMT+0300 + // test timezones: GMT+0000 GMT+0100 GMT+0300 GMT+1300 GMT-0500 + + private final Calendar cUTC; + private final Calendar cGMT03; + private final Calendar cGMT05; + private final Calendar cGMT13; + + public TimezoneTest() { + TimeZone tzUTC = TimeZone.getTimeZone("UTC"); // +0000 always + TimeZone tzGMT03 = TimeZone.getTimeZone("GMT+03"); // +0300 always + TimeZone tzGMT05 = TimeZone.getTimeZone("GMT-05"); // -0500 always + TimeZone tzGMT13 = TimeZone.getTimeZone("GMT+13"); // +1300 always + + cUTC = Calendar.getInstance(tzUTC); + cGMT03 = Calendar.getInstance(tzGMT03); + cGMT05 = Calendar.getInstance(tzGMT05); + cGMT13 = Calendar.getInstance(tzGMT13); + } + + @BeforeEach + void setUp() throws Exception { + // We must change the default TZ before establishing the connection. 
+ // Arbitrary timezone that doesn't match our test timezones + TimeZone.setDefault(TimeZone.getTimeZone("GMT+01")); + + connect(); + TestUtil.createTable(con, "testtimezone", + "seq int4, tstz timestamp with time zone, ts timestamp without time zone, t time without time zone, tz time with time zone, d date"); + + // This is not obvious, but the "gmt-3" timezone is actually 3 hours *ahead* of GMT + // so will produce +03 timestamptz output + con.createStatement().executeUpdate("set timezone = 'gmt-3'"); + + // System.err.println("++++++ TESTS START (" + getName() + ") ++++++"); + } + + private void connect() throws Exception { + Properties p = new Properties(); + PGProperty.PREPARE_THRESHOLD.set(p, 1); + con = TestUtil.openDB(p); + } + + @AfterEach + void tearDown() throws Exception { + // System.err.println("++++++ TESTS END (" + getName() + ") ++++++"); + TimeZone.setDefault(saveTZ); + + TestUtil.dropTable(con, "testtimezone"); + TestUtil.closeDB(con); + } + + @Test + void getTimestamp() throws Exception { + con.createStatement().executeUpdate( + "INSERT INTO testtimezone(tstz,ts,t,tz,d) VALUES('2005-01-01 15:00:00 +0300', '2005-01-01 15:00:00', '15:00:00', '15:00:00 +0300', '2005-01-01')"); + + for (int i = 0; i < PREPARE_THRESHOLD; i++) { + String format = i == 0 ? ", text" : ", binary"; + PreparedStatement ps = con.prepareStatement("SELECT tstz,ts,t,tz,d from testtimezone"); + ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + checkDatabaseContents("SELECT tstz::text,ts::text,t::text,tz::text,d::text from testtimezone", + new String[]{"2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "15:00:00", "15:00:00+03", + "2005-01-01"}); + + Timestamp ts; + String str; + + // timestamptz: 2005-01-01 15:00:00+03 + ts = rs.getTimestamp(1); // Represents an instant in time, timezone is irrelevant. + assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC + ts = rs.getTimestamp(1, cUTC); // Represents an instant in time, timezone is irrelevant. 
+ assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC + ts = rs.getTimestamp(1, cGMT03); // Represents an instant in time, timezone is irrelevant. + assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC + ts = rs.getTimestamp(1, cGMT05); // Represents an instant in time, timezone is irrelevant. + assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC + ts = rs.getTimestamp(1, cGMT13); // Represents an instant in time, timezone is irrelevant. + assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC + str = rs.getString(1); + assertEquals("2005-01-01 15:00:00+03", str, "tstz -> getString" + format); + + // timestamp: 2005-01-01 15:00:00 + ts = rs.getTimestamp(2); // Convert timestamp to +0100 + assertEquals(1104588000000L, ts.getTime()); // 2005-01-01 15:00:00 +0100 + ts = rs.getTimestamp(2, cUTC); // Convert timestamp to UTC + assertEquals(1104591600000L, ts.getTime()); // 2005-01-01 15:00:00 +0000 + ts = rs.getTimestamp(2, cGMT03); // Convert timestamp to +0300 + assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 15:00:00 +0300 + ts = rs.getTimestamp(2, cGMT05); // Convert timestamp to -0500 + assertEquals(1104609600000L, ts.getTime()); // 2005-01-01 15:00:00 -0500 + ts = rs.getTimestamp(2, cGMT13); // Convert timestamp to +1300 + assertEquals(1104544800000L, ts.getTime()); // 2005-01-01 15:00:00 +1300 + str = rs.getString(2); + assertEquals("2005-01-01 15:00:00", str, "ts -> getString" + format); + + // time: 15:00:00 + ts = rs.getTimestamp(3); + assertEquals(50400000L, ts.getTime()); // 1970-01-01 15:00:00 +0100 + ts = rs.getTimestamp(3, cUTC); + assertEquals(54000000L, ts.getTime()); // 1970-01-01 15:00:00 +0000 + ts = rs.getTimestamp(3, cGMT03); + assertEquals(43200000L, ts.getTime()); // 1970-01-01 15:00:00 +0300 + ts = rs.getTimestamp(3, cGMT05); + assertEquals(72000000L, ts.getTime()); // 1970-01-01 15:00:00 -0500 + ts = rs.getTimestamp(3, cGMT13); + assertEquals(7200000L, 
ts.getTime()); // 1970-01-01 15:00:00 +1300 + str = rs.getString(3); + assertEquals("15:00:00", str, "time -> getString" + format); + + // timetz: 15:00:00+03 + ts = rs.getTimestamp(4); + // 1970-01-01 15:00:00 +0300 -> 1970-01-01 13:00:00 +0100 + assertEquals(43200000L, ts.getTime()); + ts = rs.getTimestamp(4, cUTC); + // 1970-01-01 15:00:00 +0300 -> 1970-01-01 12:00:00 +0000 + assertEquals(43200000L, ts.getTime()); + ts = rs.getTimestamp(4, cGMT03); + // 1970-01-01 15:00:00 +0300 -> 1970-01-01 15:00:00 +0300 + assertEquals(43200000L, ts.getTime()); + ts = rs.getTimestamp(4, cGMT05); + // 1970-01-01 15:00:00 +0300 -> 1970-01-01 07:00:00 -0500 + assertEquals(43200000L, ts.getTime()); + ts = rs.getTimestamp(4, cGMT13); + // 1970-01-01 15:00:00 +0300 -> 1970-01-02 01:00:00 +1300 + assertEquals(43200000L, ts.getTime()); + str = rs.getString(4); + assertEquals("15:00:00+03", str, "timetz -> getString" + format); + + // date: 2005-01-01 + ts = rs.getTimestamp(5); + assertEquals(1104534000000L, ts.getTime()); // 2005-01-01 00:00:00 +0100 + ts = rs.getTimestamp(5, cUTC); + assertEquals(1104537600000L, ts.getTime()); // 2005-01-01 00:00:00 +0000 + ts = rs.getTimestamp(5, cGMT03); + assertEquals(1104526800000L, ts.getTime()); // 2005-01-01 00:00:00 +0300 + ts = rs.getTimestamp(5, cGMT05); + assertEquals(1104555600000L, ts.getTime()); // 2005-01-01 00:00:00 -0500 + ts = rs.getTimestamp(5, cGMT13); + assertEquals(1104490800000L, ts.getTime()); // 2005-01-01 00:00:00 +1300 + str = rs.getString(5); + assertEquals("2005-01-01", str, "date -> getString" + format); + + assertFalse(rs.next()); + ps.close(); + } + } + + @Test + void getDate() throws Exception { + con.createStatement().executeUpdate( + "INSERT INTO testtimezone(tstz,ts,d) VALUES('2005-01-01 15:00:00 +0300', '2005-01-01 15:00:00', '2005-01-01')"); + + PreparedStatement ps = con.prepareStatement("SELECT tstz,ts,d from testtimezone"); + for (int i = 0; i < PREPARE_THRESHOLD; i++) { + ResultSet rs = ps.executeQuery(); + 
+ assertTrue(rs.next()); + checkDatabaseContents("SELECT tstz::text,ts::text,d::text from testtimezone", + new String[]{"2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "2005-01-01"}); + + Date d; + + // timestamptz: 2005-01-01 15:00:00+03 + d = rs.getDate(1); // 2005-01-01 13:00:00 +0100 -> 2005-01-01 00:00:00 +0100 + assertEquals(1104534000000L, d.getTime()); + d = rs.getDate(1, cUTC); // 2005-01-01 12:00:00 +0000 -> 2005-01-01 00:00:00 +0000 + assertEquals(1104537600000L, d.getTime()); + d = rs.getDate(1, cGMT03); // 2005-01-01 15:00:00 +0300 -> 2005-01-01 00:00:00 +0300 + assertEquals(1104526800000L, d.getTime()); + d = rs.getDate(1, cGMT05); // 2005-01-01 07:00:00 -0500 -> 2005-01-01 00:00:00 -0500 + assertEquals(1104555600000L, d.getTime()); + d = rs.getDate(1, cGMT13); // 2005-01-02 01:00:00 +1300 -> 2005-01-02 00:00:00 +1300 + assertEquals(1104577200000L, d.getTime()); + + // timestamp: 2005-01-01 15:00:00 + d = rs.getDate(2); // 2005-01-01 00:00:00 +0100 + assertEquals(1104534000000L, d.getTime()); + d = rs.getDate(2, cUTC); // 2005-01-01 00:00:00 +0000 + assertEquals(1104537600000L, d.getTime()); + d = rs.getDate(2, cGMT03); // 2005-01-01 00:00:00 +0300 + assertEquals(1104526800000L, d.getTime()); + d = rs.getDate(2, cGMT05); // 2005-01-01 00:00:00 -0500 + assertEquals(1104555600000L, d.getTime()); + d = rs.getDate(2, cGMT13); // 2005-01-01 00:00:00 +1300 + assertEquals(1104490800000L, d.getTime()); + + // date: 2005-01-01 + d = rs.getDate(3); // 2005-01-01 00:00:00 +0100 + assertEquals(1104534000000L, d.getTime()); + d = rs.getDate(3, cUTC); // 2005-01-01 00:00:00 +0000 + assertEquals(1104537600000L, d.getTime()); + d = rs.getDate(3, cGMT03); // 2005-01-01 00:00:00 +0300 + assertEquals(1104526800000L, d.getTime()); + d = rs.getDate(3, cGMT05); // 2005-01-01 00:00:00 -0500 + assertEquals(1104555600000L, d.getTime()); + d = rs.getDate(3, cGMT13); // 2005-01-01 00:00:00 +1300 + assertEquals(1104490800000L, d.getTime()); + + assertFalse(rs.next()); + 
rs.close(); + } + } + + @Test + void getTime() throws Exception { + con.createStatement().executeUpdate( + "INSERT INTO testtimezone(tstz,ts,t,tz) VALUES('2005-01-01 15:00:00 +0300', '2005-01-01 15:00:00', '15:00:00', '15:00:00 +0300')"); + + PreparedStatement ps = con.prepareStatement("SELECT tstz,ts,t,tz from testtimezone"); + for (int i = 0; i < PREPARE_THRESHOLD; i++) { + ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + checkDatabaseContents("SELECT tstz::text,ts::text,t::text,tz::text,d::text from testtimezone", + new String[]{"2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "15:00:00", "15:00:00+03"}); + + Time t; + + // timestamptz: 2005-01-01 15:00:00+03 + t = rs.getTime(1); + // 2005-01-01 13:00:00 +0100 -> 1970-01-01 13:00:00 +0100 + assertEquals(43200000L, t.getTime()); + t = rs.getTime(1, cUTC); + // 2005-01-01 12:00:00 +0000 -> 1970-01-01 12:00:00 +0000 + assertEquals(43200000L, t.getTime()); + t = rs.getTime(1, cGMT03); + // 2005-01-01 15:00:00 +0300 -> 1970-01-01 15:00:00 +0300 + assertEquals(43200000L, t.getTime()); + t = rs.getTime(1, cGMT05); + // 2005-01-01 07:00:00 -0500 -> 1970-01-01 07:00:00 -0500 + assertEquals(43200000L, t.getTime()); + t = rs.getTime(1, cGMT13); + // 2005-01-02 01:00:00 +1300 -> 1970-01-01 01:00:00 +1300 + assertEquals(43200000L, t.getTime()); + + // timestamp: 2005-01-01 15:00:00 + t = rs.getTime(2); + assertEquals(50400000L, t.getTime()); // 1970-01-01 15:00:00 +0100 + t = rs.getTime(2, cUTC); + assertEquals(54000000L, t.getTime()); // 1970-01-01 15:00:00 +0000 + t = rs.getTime(2, cGMT03); + assertEquals(43200000L, t.getTime()); // 1970-01-01 15:00:00 +0300 + t = rs.getTime(2, cGMT05); + assertEquals(72000000L, t.getTime()); // 1970-01-01 15:00:00 -0500 + t = rs.getTime(2, cGMT13); + assertEquals(7200000L, t.getTime()); // 1970-01-01 15:00:00 +1300 + + // time: 15:00:00 + t = rs.getTime(3); + assertEquals(50400000L, t.getTime()); // 1970-01-01 15:00:00 +0100 + t = rs.getTime(3, cUTC); + 
assertEquals(54000000L, t.getTime()); // 1970-01-01 15:00:00 +0000 + t = rs.getTime(3, cGMT03); + assertEquals(43200000L, t.getTime()); // 1970-01-01 15:00:00 +0300 + t = rs.getTime(3, cGMT05); + assertEquals(72000000L, t.getTime()); // 1970-01-01 15:00:00 -0500 + t = rs.getTime(3, cGMT13); + assertEquals(7200000L, t.getTime()); // 1970-01-01 15:00:00 +1300 + + // timetz: 15:00:00+03 + t = rs.getTime(4); + assertEquals(43200000L, t.getTime()); // 1970-01-01 13:00:00 +0100 + t = rs.getTime(4, cUTC); + assertEquals(43200000L, t.getTime()); // 1970-01-01 12:00:00 +0000 + t = rs.getTime(4, cGMT03); + assertEquals(43200000L, t.getTime()); // 1970-01-01 15:00:00 +0300 + t = rs.getTime(4, cGMT05); + assertEquals(43200000L, t.getTime()); // 1970-01-01 07:00:00 -0500 + t = rs.getTime(4, cGMT13); + assertEquals(43200000L, t.getTime()); // 1970-01-01 01:00:00 +1300 + rs.close(); + } + } + + /** + * This test is broken off from testSetTimestamp because it does not work for pre-7.4 servers and + * putting tons of conditionals in that test makes it largely unreadable. The time data type does + * not accept timestamp with time zone style input on these servers. + */ + @Test + void setTimestampOnTime() throws Exception { + // Pre-7.4 servers cannot convert timestamps with timezones to times. 
+ for (int i = 0; i < PREPARE_THRESHOLD; i++) { + con.createStatement().execute("delete from testtimezone"); + PreparedStatement insertTimestamp = + con.prepareStatement("INSERT INTO testtimezone(seq,t) VALUES (?,?)"); + int seq = 1; + + Timestamp instant = new Timestamp(1104580800000L); // 2005-01-01 12:00:00 UTC + Timestamp instantTime = new Timestamp(instant.getTime() % DAY); + + // +0100 (JVM default) + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTimestamp(2, instant); // 13:00:00 + insertTimestamp.executeUpdate(); + + // UTC + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTimestamp(2, instant, cUTC); // 12:00:00 + insertTimestamp.executeUpdate(); + + // +0300 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTimestamp(2, instant, cGMT03); // 15:00:00 + insertTimestamp.executeUpdate(); + + // -0500 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTimestamp(2, instant, cGMT05); // 07:00:00 + insertTimestamp.executeUpdate(); + + // +1300 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTimestamp(2, instant, cGMT13); // 01:00:00 + insertTimestamp.executeUpdate(); + + insertTimestamp.close(); + + checkDatabaseContents("SELECT seq::text,t::text from testtimezone ORDER BY seq", + new String[][]{new String[]{"1", "13:00:00"}, new String[]{"2", "12:00:00"}, + new String[]{"3", "15:00:00"}, new String[]{"4", "07:00:00"}, + new String[]{"5", "01:00:00"}}); + + seq = 1; + PreparedStatement ps = con.prepareStatement("SELECT seq,t FROM testtimezone ORDER BY seq"); + ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(instantTime, rs.getTimestamp(2)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(instantTime, rs.getTimestamp(2, cUTC)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(instantTime, rs.getTimestamp(2, cGMT03)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + 
assertEquals(instantTime, rs.getTimestamp(2, cGMT05)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(normalizeTimeOfDayPart(instantTime, cGMT13), rs.getTimestamp(2, cGMT13)); + + assertFalse(rs.next()); + ps.close(); + } + } + + @Test + void setTimestamp() throws Exception { + for (int i = 0; i < PREPARE_THRESHOLD; i++) { + con.createStatement().execute("delete from testtimezone"); + PreparedStatement insertTimestamp = + con.prepareStatement("INSERT INTO testtimezone(seq,tstz,ts,tz,d) VALUES (?,?,?,?,?)"); + int seq = 1; + + Timestamp instant = new Timestamp(1104580800000L); // 2005-01-01 12:00:00 UTC + Timestamp instantTime = new Timestamp(instant.getTime() % DAY); + Timestamp instantDateJVM = new Timestamp( + instant.getTime() - (instant.getTime() % DAY) - TimeZone.getDefault().getRawOffset()); + Timestamp instantDateUTC = new Timestamp( + instant.getTime() - (instant.getTime() % DAY) - cUTC.getTimeZone().getRawOffset()); + Timestamp instantDateGMT03 = new Timestamp( + instant.getTime() - (instant.getTime() % DAY) - cGMT03.getTimeZone().getRawOffset()); + Timestamp instantDateGMT05 = new Timestamp( + instant.getTime() - (instant.getTime() % DAY) - cGMT05.getTimeZone().getRawOffset()); + Timestamp instantDateGMT13 = new Timestamp(instant.getTime() - (instant.getTime() % DAY) + - cGMT13.getTimeZone().getRawOffset() + DAY); + + // +0100 (JVM default) + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTimestamp(2, instant); // 2005-01-01 13:00:00 +0100 + insertTimestamp.setTimestamp(3, instant); // 2005-01-01 13:00:00 + insertTimestamp.setTimestamp(4, instant); // 13:00:00 +0100 + insertTimestamp.setTimestamp(5, instant); // 2005-01-01 + insertTimestamp.executeUpdate(); + + // UTC + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTimestamp(2, instant, cUTC); // 2005-01-01 12:00:00 +0000 + insertTimestamp.setTimestamp(3, instant, cUTC); // 2005-01-01 12:00:00 + insertTimestamp.setTimestamp(4, instant, cUTC); // 12:00:00 
+0000 + insertTimestamp.setTimestamp(5, instant, cUTC); // 2005-01-01 + insertTimestamp.executeUpdate(); + + // +0300 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTimestamp(2, instant, cGMT03); // 2005-01-01 15:00:00 +0300 + insertTimestamp.setTimestamp(3, instant, cGMT03); // 2005-01-01 15:00:00 + insertTimestamp.setTimestamp(4, instant, cGMT03); // 15:00:00 +0300 + insertTimestamp.setTimestamp(5, instant, cGMT03); // 2005-01-01 + insertTimestamp.executeUpdate(); + + // -0500 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTimestamp(2, instant, cGMT05); // 2005-01-01 07:00:00 -0500 + insertTimestamp.setTimestamp(3, instant, cGMT05); // 2005-01-01 07:00:00 + insertTimestamp.setTimestamp(4, instant, cGMT05); // 07:00:00 -0500 + insertTimestamp.setTimestamp(5, instant, cGMT05); // 2005-01-01 + insertTimestamp.executeUpdate(); + + // +1300 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTimestamp(2, instant, cGMT13); // 2005-01-02 01:00:00 +1300 + insertTimestamp.setTimestamp(3, instant, cGMT13); // 2005-01-02 01:00:00 + insertTimestamp.setTimestamp(4, instant, cGMT13); // 01:00:00 +1300 + insertTimestamp.setTimestamp(5, instant, cGMT13); // 2005-01-02 + insertTimestamp.executeUpdate(); + + insertTimestamp.close(); + + // check that insert went correctly by parsing the raw contents in UTC + checkDatabaseContents( + "SELECT seq::text,tstz::text,ts::text,tz::text,d::text from testtimezone ORDER BY seq", + new String[][]{ + new String[]{"1", "2005-01-01 12:00:00+00", "2005-01-01 13:00:00", "13:00:00+01", + "2005-01-01"}, + new String[]{"2", "2005-01-01 12:00:00+00", "2005-01-01 12:00:00", "12:00:00+00", + "2005-01-01"}, + new String[]{"3", "2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "15:00:00+03", + "2005-01-01"}, + new String[]{"4", "2005-01-01 12:00:00+00", "2005-01-01 07:00:00", "07:00:00-05", + "2005-01-01"}, + new String[]{"5", "2005-01-01 12:00:00+00", "2005-01-02 01:00:00", "01:00:00+13", + "2005-01-02"}}); + + // + // check 
results + // + + seq = 1; + PreparedStatement ps = + con.prepareStatement("SELECT seq,tstz,ts,tz,d FROM testtimezone ORDER BY seq"); + ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(instant, rs.getTimestamp(2)); + assertEquals(instant, rs.getTimestamp(3)); + assertEquals(instantTime, rs.getTimestamp(4)); + assertEquals(instantDateJVM, rs.getTimestamp(5)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(instant, rs.getTimestamp(2, cUTC)); + assertEquals(instant, rs.getTimestamp(3, cUTC)); + assertEquals(instantTime, rs.getTimestamp(4, cUTC)); + assertEquals(instantDateUTC, rs.getTimestamp(5, cUTC)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(instant, rs.getTimestamp(2, cGMT03)); + assertEquals(instant, rs.getTimestamp(3, cGMT03)); + assertEquals(instantTime, rs.getTimestamp(4, cGMT03)); + assertEquals(instantDateGMT03, rs.getTimestamp(5, cGMT03)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(instant, rs.getTimestamp(2, cGMT05)); + assertEquals(instant, rs.getTimestamp(3, cGMT05)); + assertEquals(instantTime, rs.getTimestamp(4, cGMT05)); + assertEquals(instantDateGMT05, rs.getTimestamp(5, cGMT05)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(instant, rs.getTimestamp(2, cGMT13)); + assertEquals(instant, rs.getTimestamp(3, cGMT13)); + assertEquals(normalizeTimeOfDayPart(instantTime, cGMT13), rs.getTimestamp(4, cGMT13)); + assertEquals(instantDateGMT13, rs.getTimestamp(5, cGMT13)); + + assertFalse(rs.next()); + ps.close(); + } + } + + @Test + void setDate() throws Exception { + for (int i = 0; i < PREPARE_THRESHOLD; i++) { + con.createStatement().execute("delete from testtimezone"); + PreparedStatement insertTimestamp = + con.prepareStatement("INSERT INTO testtimezone(seq,tstz,ts,d) VALUES (?,?,?,?)"); + + int seq = 1; + + Date dJVM; + Date dUTC; + Date dGMT03; + Date 
dGMT05; + Date dGMT13 = null; + + // +0100 (JVM default) + dJVM = new Date(1104534000000L); // 2005-01-01 00:00:00 +0100 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setDate(2, dJVM); // 2005-01-01 00:00:00 +0100 + insertTimestamp.setDate(3, dJVM); // 2005-01-01 00:00:00 + insertTimestamp.setDate(4, dJVM); // 2005-01-01 + insertTimestamp.executeUpdate(); + + // UTC + dUTC = new Date(1104537600000L); // 2005-01-01 00:00:00 +0000 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setDate(2, dUTC, cUTC); // 2005-01-01 00:00:00 +0000 + insertTimestamp.setDate(3, dUTC, cUTC); // 2005-01-01 00:00:00 + insertTimestamp.setDate(4, dUTC, cUTC); // 2005-01-01 + insertTimestamp.executeUpdate(); + + // +0300 + dGMT03 = new Date(1104526800000L); // 2005-01-01 00:00:00 +0300 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setDate(2, dGMT03, cGMT03); // 2005-01-01 00:00:00 +0300 + insertTimestamp.setDate(3, dGMT03, cGMT03); // 2005-01-01 00:00:00 + insertTimestamp.setDate(4, dGMT03, cGMT03); // 2005-01-01 + insertTimestamp.executeUpdate(); + + // -0500 + dGMT05 = new Date(1104555600000L); // 2005-01-01 00:00:00 -0500 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setDate(2, dGMT05, cGMT05); // 2005-01-01 00:00:00 -0500 + insertTimestamp.setDate(3, dGMT05, cGMT05); // 2005-01-01 00:00:00 + insertTimestamp.setDate(4, dGMT05, cGMT05); // 2005-01-01 + insertTimestamp.executeUpdate(); + + // +1300 + dGMT13 = new Date(1104490800000L); // 2005-01-01 00:00:00 +1300 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setDate(2, dGMT13, cGMT13); // 2005-01-01 00:00:00 +1300 + insertTimestamp.setDate(3, dGMT13, cGMT13); // 2005-01-01 00:00:00 + insertTimestamp.setDate(4, dGMT13, cGMT13); // 2005-01-01 + insertTimestamp.executeUpdate(); + + insertTimestamp.close(); + + // check that insert went correctly by parsing the raw contents in UTC + checkDatabaseContents( + "SELECT seq::text,tstz::text,ts::text,d::text from testtimezone ORDER BY seq", + new String[][]{ + new 
String[]{"1", "2004-12-31 23:00:00+00", "2005-01-01 00:00:00", "2005-01-01"}, + new String[]{"2", "2005-01-01 00:00:00+00", "2005-01-01 00:00:00", "2005-01-01"}, + new String[]{"3", "2004-12-31 21:00:00+00", "2005-01-01 00:00:00", "2005-01-01"}, + new String[]{"4", "2005-01-01 05:00:00+00", "2005-01-01 00:00:00", "2005-01-01"}, + new String[]{"5", "2004-12-31 11:00:00+00", "2005-01-01 00:00:00", "2005-01-01"}}); + // + // check results + // + + seq = 1; + PreparedStatement ps = + con.prepareStatement("SELECT seq,tstz,ts,d FROM testtimezone ORDER BY seq"); + ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(dJVM, rs.getDate(2)); + assertEquals(dJVM, rs.getDate(3)); + assertEquals(dJVM, rs.getDate(4)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(dUTC, rs.getDate(2, cUTC)); + assertEquals(dUTC, rs.getDate(3, cUTC)); + assertEquals(dUTC, rs.getDate(4, cUTC)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(dGMT03, rs.getDate(2, cGMT03)); + assertEquals(dGMT03, rs.getDate(3, cGMT03)); + assertEquals(dGMT03, rs.getDate(4, cGMT03)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(dGMT05, rs.getDate(2, cGMT05)); + assertEquals(dGMT05, rs.getDate(3, cGMT05)); + assertEquals(dGMT05, rs.getDate(4, cGMT05)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(dGMT13, rs.getDate(2, cGMT13)); + assertEquals(dGMT13, rs.getDate(3, cGMT13)); + assertEquals(dGMT13, rs.getDate(4, cGMT13)); + + assertFalse(rs.next()); + ps.close(); + } + } + + @Test + void setTime() throws Exception { + for (int i = 0; i < PREPARE_THRESHOLD; i++) { + con.createStatement().execute("delete from testtimezone"); + PreparedStatement insertTimestamp = + con.prepareStatement("INSERT INTO testtimezone(seq,t,tz) VALUES (?,?,?)"); + + int seq = 1; + + Time tJVM; + Time tUTC; + Time tGMT03; + Time tGMT05; + Time tGMT13; + + // 
+0100 (JVM default) + tJVM = new Time(50400000L); // 1970-01-01 15:00:00 +0100 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTime(2, tJVM); // 15:00:00 + insertTimestamp.setTime(3, tJVM); // 15:00:00+03 + insertTimestamp.executeUpdate(); + + // UTC + tUTC = new Time(54000000L); // 1970-01-01 15:00:00 +0000 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTime(2, tUTC, cUTC); // 15:00:00 + insertTimestamp.setTime(3, tUTC, cUTC); // 15:00:00+00 + insertTimestamp.executeUpdate(); + + // +0300 + tGMT03 = new Time(43200000L); // 1970-01-01 15:00:00 +0300 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTime(2, tGMT03, cGMT03); // 15:00:00 + insertTimestamp.setTime(3, tGMT03, cGMT03); // 15:00:00+03 + insertTimestamp.executeUpdate(); + + // -0500 + tGMT05 = new Time(72000000L); // 1970-01-01 15:00:00 -0500 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTime(2, tGMT05, cGMT05); // 15:00:00 + insertTimestamp.setTime(3, tGMT05, cGMT05); // 15:00:00-05 + insertTimestamp.executeUpdate(); + + // +1300 + tGMT13 = new Time(7200000L); // 1970-01-01 15:00:00 +1300 + insertTimestamp.setInt(1, seq++); + insertTimestamp.setTime(2, tGMT13, cGMT13); // 15:00:00 + insertTimestamp.setTime(3, tGMT13, cGMT13); // 15:00:00+13 + insertTimestamp.executeUpdate(); + + insertTimestamp.close(); + + // check that insert went correctly by parsing the raw contents in UTC + checkDatabaseContents("SELECT seq::text,t::text,tz::text from testtimezone ORDER BY seq", + new String[][]{new String[]{"1", "15:00:00", "15:00:00+01",}, + new String[]{"2", "15:00:00", "15:00:00+00",}, + new String[]{"3", "15:00:00", "15:00:00+03",}, + new String[]{"4", "15:00:00", "15:00:00-05",}, + new String[]{"5", "15:00:00", "15:00:00+13",}}); + + // + // check results + // + + seq = 1; + PreparedStatement ps = con.prepareStatement("SELECT seq,t,tz FROM testtimezone ORDER BY seq"); + ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + 
assertEquals(tJVM, rs.getTime(2)); + assertEquals(tJVM, rs.getTime(3)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(tUTC, rs.getTime(2, cUTC)); + assertEquals(tUTC, rs.getTime(3, cUTC)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(tGMT03, rs.getTime(2, cGMT03)); + assertEquals(tGMT03, rs.getTime(3, cGMT03)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(tGMT05, rs.getTime(2, cGMT05)); + assertEquals(tGMT05, rs.getTime(3, cGMT05)); + + assertTrue(rs.next()); + assertEquals(seq++, rs.getInt(1)); + assertEquals(tGMT13, rs.getTime(2, cGMT13)); + assertEquals(tGMT13, rs.getTime(3, cGMT13)); + + assertFalse(rs.next()); + ps.close(); + } + } + + @Test + void halfHourTimezone() throws Exception { + Statement stmt = con.createStatement(); + stmt.execute("SET TimeZone = 'GMT+3:30'"); + for (int i = 0; i < PREPARE_THRESHOLD; i++) { + PreparedStatement ps = con.prepareStatement("SELECT '1969-12-31 20:30:00'::timestamptz"); + ResultSet rs = ps.executeQuery(); + assertTrue(rs.next()); + assertEquals(0L, rs.getTimestamp(1).getTime()); + ps.close(); + } + } + + @Test + void timezoneWithSeconds() throws SQLException { + Statement stmt = con.createStatement(); + stmt.execute("SET TimeZone = 'Europe/Paris'"); + for (int i = 0; i < PREPARE_THRESHOLD; i++) { + PreparedStatement ps = con.prepareStatement("SELECT '1920-01-01'::timestamptz"); + ResultSet rs = ps.executeQuery(); + rs.next(); + // select extract(epoch from '1920-01-01'::timestamptz - 'epoch'::timestamptz) * 1000; + + assertEquals(-1577923200000L, rs.getTimestamp(1).getTime()); + ps.close(); + } + } + + @Test + void localTimestampsInNonDSTZones() throws Exception { + for (int i = -12; i <= 13; i++) { + localTimestamps(String.format("GMT%02d", i)); + } + } + + @Test + void localTimestampsInAfricaCasablanca() throws Exception { + localTimestamps("Africa/Casablanca"); // It is something like GMT+0..GMT+1 + } + + @Test + void 
localTimestampsInAtlanticAzores() throws Exception { + localTimestamps("Atlantic/Azores"); // It is something like GMT-1..GMT+0 + } + + @Test + void localTimestampsInEuropeMoscow() throws Exception { + localTimestamps("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s + } + + @Test + void localTimestampsInPacificApia() throws Exception { + localTimestamps("Pacific/Apia"); // It is something like GMT+13..GMT+14 + } + + @Test + void localTimestampsInPacificNiue() throws Exception { + localTimestamps("Pacific/Niue"); // It is something like GMT-11..GMT-11 + } + + @Test + void localTimestampsInAmericaAdak() throws Exception { + localTimestamps("America/Adak"); // It is something like GMT-10..GMT-9 + } + + private String setTimeTo00_00_00(String timestamp) { + return timestamp.substring(0, 11) + "00:00:00"; + } + + public void localTimestamps(String timeZone) throws Exception { + TimeZone.setDefault(TimeZone.getTimeZone(timeZone)); + + final String testDateFormat = "yyyy-MM-dd HH:mm:ss"; + final List datesToTest = Arrays.asList("2015-09-03 12:00:00", "2015-06-30 23:59:58", + "1997-06-30 23:59:59", "1997-07-01 00:00:00", "2012-06-30 23:59:59", "2012-07-01 00:00:00", + "2015-06-30 23:59:59", "2015-07-01 00:00:00", "2005-12-31 23:59:59", "2006-01-01 00:00:00", + "2008-12-31 23:59:59", "2009-01-01 00:00:00", "2015-06-30 23:59:60", "2015-07-31 00:00:00", + "2015-07-31 00:00:01", + + // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00 + "2000-03-26 01:59:59", "2000-03-26 02:00:00", "2000-03-26 02:00:01", "2000-03-26 02:59:59", + "2000-03-26 03:00:00", "2000-03-26 03:00:01", "2000-03-26 03:59:59", "2000-03-26 04:00:00", + "2000-03-26 04:00:01", + + // This is a pre-1970 date, so check if it is rounded properly + "1950-07-20 02:00:00", + + // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00 + "2000-10-29 01:59:59", "2000-10-29 02:00:00", "2000-10-29 02:00:01", "2000-10-29 02:59:59", + "2000-10-29 
03:00:00", "2000-10-29 03:00:01", "2000-10-29 03:59:59", "2000-10-29 04:00:00", + "2000-10-29 04:00:01"); + + con.createStatement().execute("delete from testtimezone"); + Statement stmt = con.createStatement(); + + for (int i = 0; i < datesToTest.size(); i++) { + stmt.execute( + "insert into testtimezone (ts, d, seq) values (" + + "'" + datesToTest.get(i) + "'" + + ", '" + setTimeTo00_00_00(datesToTest.get(i)) + "'" + + ", " + i + ")"); + } + + // Different timezone test should have different sql text, so we test both text and binary modes + PreparedStatement pstmt = + con.prepareStatement("SELECT ts, d FROM testtimezone order by seq /*" + timeZone + "*/"); + + Calendar expectedTimestamp = Calendar.getInstance(); + + SimpleDateFormat sdf = new SimpleDateFormat(testDateFormat); + + for (int i = 0; i < PREPARE_THRESHOLD; i++) { + ResultSet rs = pstmt.executeQuery(); + for (int j = 0; rs.next(); j++) { + String testDate = datesToTest.get(j); + Date getDate = rs.getDate(1); + Date getDateFromDateColumn = rs.getDate(2); + Timestamp getTimestamp = rs.getTimestamp(1); + String getString = rs.getString(1); + Time getTime = rs.getTime(1); + expectedTimestamp.setTime(sdf.parse(testDate)); + + assertEquals( + sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(getTimestamp), "getTimestamp: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary") + + ", timeZone: " + timeZone); + + assertEquals( + sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(sdf.parse(getString)), "getString: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary") + + ", timeZone: " + timeZone); + + expectedTimestamp.set(Calendar.HOUR_OF_DAY, 0); + expectedTimestamp.set(Calendar.MINUTE, 0); + expectedTimestamp.set(Calendar.SECOND, 0); + + assertEquals( + sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(getDate), "TIMESTAMP -> getDate: " + testDate + ", transfer format: " + (i == 0 ? 
"text" : "binary") + + ", timeZone: " + timeZone); + + String expectedDateFromDateColumn = setTimeTo00_00_00(testDate); + if ("Atlantic/Azores".equals(timeZone) && testDate.startsWith("2000-03-26")) { + // Atlantic/Azores does not have 2000-03-26 00:00:00 + // They go right to 2000-03-26 01:00:00 due to DST. + // Vladimir Sitnikov: I have no idea how do they represent 2000-03-26 00:00:00 :( + // So the assumption is 2000-03-26 01:00:00 is the expected for that time zone + expectedDateFromDateColumn = "2000-03-26 01:00:00"; + } + + assertEquals( + expectedDateFromDateColumn, sdf.format(getDateFromDateColumn), "DATE -> getDate: " + expectedDateFromDateColumn + ", transfer format: " + (i == 0 ? "text" : "binary") + + ", timeZone: " + timeZone); + + expectedTimestamp.setTime(sdf.parse(testDate)); + expectedTimestamp.set(Calendar.YEAR, 1970); + expectedTimestamp.set(Calendar.MONTH, 0); + expectedTimestamp.set(Calendar.DAY_OF_MONTH, 1); + + assertEquals( + sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(getTime), "getTime: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary") + + ", timeZone: " + timeZone); + + } + rs.close(); + } + } + + /** + * Does a query in UTC time zone to database to check that the inserted values are correct. + * + * @param query The query to run. + * @param correct The correct answers in UTC time zone as formatted by backend. 
+ */ + private void checkDatabaseContents(String query, String[] correct) throws Exception { + checkDatabaseContents(query, new String[][]{correct}); + } + + private void checkDatabaseContents(String query, String[][] correct) throws Exception { + Connection con2 = TestUtil.openDB(); + Statement s = con2.createStatement(); + assertFalse(s.execute("set time zone 'UTC'")); + assertTrue(s.execute(query)); + ResultSet rs = s.getResultSet(); + for (int j = 0; j < correct.length; j++) { + assertTrue(rs.next()); + for (int i = 0; i < correct[j].length; i++) { + assertEquals(correct[j][i], rs.getString(i + 1), "On row " + (j + 1)); + } + } + assertFalse(rs.next()); + rs.close(); + s.close(); + con2.close(); + } + + /** + * Converts the given time. + * + * @param t The time of day. Must be within -24 and + 24 hours of epoc. + * @param tz The timezone to normalize to. + * @return the Time normalized to 0 to 24 hours of epoc adjusted with given timezone. + */ + private Timestamp normalizeTimeOfDayPart(Timestamp t, Calendar tz) { + return new Timestamp(normalizeTimeOfDayPart(t.getTime(), tz.getTimeZone())); + } + + private long normalizeTimeOfDayPart(long t, TimeZone tz) { + long millis = t; + long low = -tz.getOffset(millis); + long high = low + DAY; + if (millis < low) { + do { + millis += DAY; + } while (millis < low); + } else if (millis >= high) { + do { + millis -= DAY; + } while (millis > high); + } + return millis; + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TypeCacheDLLStressTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TypeCacheDLLStressTest.java new file mode 100644 index 0000000..1402f8a --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TypeCacheDLLStressTest.java @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import org.postgresql.test.TestUtil; + +import org.junit.Test; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class TypeCacheDLLStressTest extends BaseTest4 { + private static final int DURATION = Integer.getInteger("TypeCacheDLLStressTest.DURATION", 5); + + private Connection con2; + + @Override + protected void updateProperties(Properties props) { + try { + con2 = TestUtil.openDB(props); + } catch (Exception e) { + throw new IllegalStateException("Unable to open second DB connection", e); + } + } + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "create_and_drop_table", "user_id serial PRIMARY KEY"); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.closeDB(con2); + } + + @Test + public void createDropTableAndGetTypeInfo() throws Throwable { + ExecutorService executor = Executors.newFixedThreadPool(2); + + Future typeInfoCache = executor.submit(new Callable() { + public Void call() throws Exception { + while (!Thread.currentThread().isInterrupted()) { + ResultSet rs = con.getMetaData().getTypeInfo(); + rs.close(); + } + return null; + } + }); + + Future createAndDrop = executor.submit(new Callable() { + public Void call() throws Exception { + Statement stmt = con2.createStatement(); + + while (!Thread.currentThread().isInterrupted()) { + stmt.execute("drop TABLE create_and_drop_table"); + stmt.execute("CREATE TABLE create_and_drop_table" + + "( user_id serial PRIMARY KEY, username VARCHAR 
(50) UNIQUE NOT NULL" + + ", password VARCHAR (50) NOT NULL, email VARCHAR (355) UNIQUE NOT NULL" + + ", created_on TIMESTAMP NOT NULL, last_login TIMESTAMP)"); + } + return null; + } + }); + + try { + typeInfoCache.get(DURATION, TimeUnit.SECONDS); + } catch (ExecutionException e) { + createAndDrop.cancel(true); + throw e.getCause(); + } catch (TimeoutException e) { + // Test is expected to run as long as it can + } + + typeInfoCache.cancel(true); + createAndDrop.cancel(true); + + try { + createAndDrop.get(DURATION, TimeUnit.SECONDS); + } catch (ExecutionException e) { + throw e.getCause(); + } catch (TimeoutException e) { + // Test is expected to run as long as it can + } catch (CancellationException e) { + // Ignore + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpdateableResultTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpdateableResultTest.java new file mode 100644 index 0000000..c878b24 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpdateableResultTest.java @@ -0,0 +1,903 @@ +/* + * Copyright (c) 2001, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.postgresql.PGConnection; +import org.postgresql.test.TestUtil; + +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.StringReader; +import java.io.UnsupportedEncodingException; +import java.sql.Array; +import java.sql.Connection; +import java.sql.Date; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.TimeZone; + +public class UpdateableResultTest extends BaseTest4 { + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "updateable", + "id int primary key, name text, notselected text, ts timestamp with time zone, intarr int[]"); + TestUtil.createTable(con, "hasdate", "id int primary key, dt date unique, name text"); + TestUtil.createTable(con, "unique_null_constraint", "u1 int unique, name1 text"); + TestUtil.createTable(con, "uniquekeys", "id int unique not null, id2 int unique, dt date"); + TestUtil.createTable(con, "partialunique", "subject text, target text, success boolean"); + TestUtil.execute(con, "CREATE UNIQUE INDEX tests_success_constraint ON partialunique (subject, target) WHERE success"); + TestUtil.createTable(con, "second", "id1 int primary key, name1 text"); + TestUtil.createTable(con, "primaryunique", "id int primary key, name text unique not null, dt date"); + TestUtil.createTable(con, "serialtable", "gen_id serial primary key, name text"); + TestUtil.createTable(con, "compositepktable", "gen_id serial, name text, 
dec_id serial"); + TestUtil.execute(con, "alter sequence compositepktable_dec_id_seq increment by 10; alter sequence compositepktable_dec_id_seq restart with 10"); + TestUtil.execute(con, "alter table compositepktable add primary key ( gen_id, dec_id )"); + TestUtil.createTable(con, "stream", "id int primary key, asi text, chr text, bin bytea"); + TestUtil.createTable(con, "multicol", "id1 int not null, id2 int not null, val text"); + TestUtil.createTable(con, "nopkmulticol", "id1 int not null, id2 int not null, val text"); + TestUtil.createTable(con, "booltable", "id int not null primary key, b boolean default false"); + TestUtil.execute(con, "insert into booltable (id) values (1)"); + TestUtil.execute(con, "insert into uniquekeys(id, id2, dt) values (1, 2, now())"); + + Statement st2 = con.createStatement(); + // create pk for multicol table + st2.execute("ALTER TABLE multicol ADD CONSTRAINT multicol_pk PRIMARY KEY (id1, id2)"); + // put some dummy data into second + st2.execute("insert into second values (1,'anyvalue' )"); + st2.close(); + TestUtil.execute(con, "insert into unique_null_constraint values (1, 'dave')"); + TestUtil.execute(con, "insert into unique_null_constraint values (null, 'unknown')"); + TestUtil.execute(con, "insert into primaryunique values (1, 'dave', now())"); + + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "updateable"); + TestUtil.dropTable(con, "second"); + TestUtil.dropTable(con, "serialtable"); + TestUtil.dropTable(con, "compositepktable"); + TestUtil.dropTable(con, "stream"); + TestUtil.dropTable(con, "nopkmulticol"); + TestUtil.dropTable(con, "booltable"); + TestUtil.dropTable(con, "unique_null_constraint"); + TestUtil.dropTable(con, "hasdate"); + TestUtil.dropTable(con, "uniquekeys"); + TestUtil.dropTable(con, "partialunique"); + TestUtil.dropTable(con, "primaryunique"); + super.tearDown(); + } + + @Test + public void testDeleteRows() throws SQLException { + Statement st = 
con.createStatement(); + st.executeUpdate("INSERT INTO second values (2,'two')"); + st.executeUpdate("INSERT INTO second values (3,'three')"); + st.executeUpdate("INSERT INTO second values (4,'four')"); + st.close(); + + st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("select id1,name1 from second order by id1"); + + assertTrue(rs.next()); + assertEquals(1, rs.getInt("id1")); + rs.deleteRow(); + assertTrue(rs.isBeforeFirst()); + + assertTrue(rs.next()); + assertTrue(rs.next()); + assertEquals(3, rs.getInt("id1")); + rs.deleteRow(); + assertEquals(2, rs.getInt("id1")); + + rs.close(); + st.close(); + } + + @Test + public void testCancelRowUpdates() throws Exception { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("select * from second"); + + // make sure we're dealing with the correct row. + rs.first(); + assertEquals(1, rs.getInt(1)); + assertEquals("anyvalue", rs.getString(2)); + + // update, cancel and make sure nothings changed. + rs.updateInt(1, 99); + rs.cancelRowUpdates(); + assertEquals(1, rs.getInt(1)); + assertEquals("anyvalue", rs.getString(2)); + + // real update + rs.updateInt(1, 999); + rs.updateRow(); + assertEquals(999, rs.getInt(1)); + assertEquals("anyvalue", rs.getString(2)); + + // scroll some and make sure the update is still there + rs.beforeFirst(); + rs.next(); + assertEquals(999, rs.getInt(1)); + assertEquals("anyvalue", rs.getString(2)); + + // make sure the update got to the db and the driver isn't lying to us. 
+ rs.close(); + rs = st.executeQuery("select * from second"); + rs.first(); + assertEquals(999, rs.getInt(1)); + assertEquals("anyvalue", rs.getString(2)); + + rs.close(); + st.close(); + } + + private void checkPositioning(ResultSet rs) throws SQLException { + try { + rs.getInt(1); + fail("Can't use an incorrectly positioned result set."); + } catch (SQLException sqle) { + } + + try { + rs.updateInt(1, 2); + fail("Can't use an incorrectly positioned result set."); + } catch (SQLException sqle) { + } + + try { + rs.updateRow(); + fail("Can't use an incorrectly positioned result set."); + } catch (SQLException sqle) { + } + + try { + rs.deleteRow(); + fail("Can't use an incorrectly positioned result set."); + } catch (SQLException sqle) { + } + } + + @Test + public void testPositioning() throws SQLException { + Statement stmt = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = stmt.executeQuery("SELECT id1,name1 FROM second"); + + checkPositioning(rs); + + assertTrue(rs.next()); + rs.beforeFirst(); + checkPositioning(rs); + + rs.afterLast(); + checkPositioning(rs); + + rs.beforeFirst(); + assertTrue(rs.next()); + assertTrue(!rs.next()); + checkPositioning(rs); + + rs.afterLast(); + assertTrue(rs.previous()); + assertTrue(!rs.previous()); + checkPositioning(rs); + + rs.close(); + stmt.close(); + } + + @Test + public void testReturnSerial() throws Exception { + final String ole = "Ole"; + + Statement st = null; + ResultSet rs = null; + try { + st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + rs = st.executeQuery("SELECT * FROM serialtable"); + + rs.moveToInsertRow(); + rs.updateString("name", ole); + rs.insertRow(); + + assertTrue(rs.first()); + assertEquals(1, rs.getInt("gen_id")); + assertEquals(ole, rs.getString("name")); + + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(st); + } + + final String ole2 = "OleOle"; + try { + st = 
con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + rs = st.executeQuery("SELECT name, gen_id FROM serialtable"); + + rs.moveToInsertRow(); + rs.updateString("name", ole2); + rs.insertRow(); + + assertTrue(rs.first()); + assertEquals(1, rs.getInt("gen_id")); + assertEquals(ole, rs.getString("name")); + + assertTrue(rs.last()); + assertEquals(2, rs.getInt("gen_id")); + assertEquals(ole2, rs.getString("name")); + + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(st); + } + + final String dec = "Dec"; + try { + st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + rs = st.executeQuery("SELECT * FROM compositepktable"); + + rs.moveToInsertRow(); + rs.updateString("name", dec); + rs.insertRow(); + + assertTrue(rs.first()); + assertEquals(1, rs.getInt("gen_id")); + assertEquals(dec, rs.getString("name")); + assertEquals(10, rs.getInt("dec_id")); + + rs.moveToInsertRow(); + rs.updateString("name", dec); + rs.insertRow(); + + assertTrue(rs.last()); + assertEquals(2, rs.getInt("gen_id")); + assertEquals(dec, rs.getString("name")); + assertEquals(20, rs.getInt("dec_id")); + + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(st); + } + } + + @Test + public void testUpdateTimestamp() throws SQLException { + TimeZone origTZ = TimeZone.getDefault(); + try { + // We choose a timezone which has a partial hour portion + // Asia/Tehran is +3:30 + TimeZone.setDefault(TimeZone.getTimeZone("Asia/Tehran")); + Timestamp ts = Timestamp.valueOf("2006-11-20 16:17:18"); + + Statement stmt = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = stmt.executeQuery("SELECT id, ts FROM updateable"); + rs.moveToInsertRow(); + rs.updateInt(1, 1); + rs.updateTimestamp(2, ts); + rs.insertRow(); + rs.first(); + assertEquals(ts, rs.getTimestamp(2)); + } finally { + TimeZone.setDefault(origTZ); + } + } + + @Test + public void testUpdateStreams() throws 
SQLException, UnsupportedEncodingException { + assumeByteaSupported(); + String string = "Hello"; + byte[] bytes = new byte[]{0, '\\', (byte) 128, (byte) 255}; + + Statement stmt = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = stmt.executeQuery("SELECT id, asi, chr, bin FROM stream"); + + rs.moveToInsertRow(); + rs.updateInt(1, 1); + rs.updateAsciiStream("asi", null, 17); + rs.updateCharacterStream("chr", null, 81); + rs.updateBinaryStream("bin", null, 0); + rs.insertRow(); + + rs.moveToInsertRow(); + rs.updateInt(1, 3); + rs.updateAsciiStream("asi", new ByteArrayInputStream(string.getBytes("US-ASCII")), 5); + rs.updateCharacterStream("chr", new StringReader(string), 5); + rs.updateBinaryStream("bin", new ByteArrayInputStream(bytes), bytes.length); + rs.insertRow(); + + rs.beforeFirst(); + rs.next(); + + assertEquals(1, rs.getInt(1)); + assertNull(rs.getString(2)); + assertNull(rs.getString(3)); + assertNull(rs.getBytes(4)); + + rs.updateInt("id", 2); + rs.updateAsciiStream("asi", new ByteArrayInputStream(string.getBytes("US-ASCII")), 5); + rs.updateCharacterStream("chr", new StringReader(string), 5); + rs.updateBinaryStream("bin", new ByteArrayInputStream(bytes), bytes.length); + rs.updateRow(); + + assertEquals(2, rs.getInt(1)); + assertEquals(string, rs.getString(2)); + assertEquals(string, rs.getString(3)); + assertArrayEquals(bytes, rs.getBytes(4)); + + rs.refreshRow(); + + assertEquals(2, rs.getInt(1)); + assertEquals(string, rs.getString(2)); + assertEquals(string, rs.getString(3)); + assertArrayEquals(bytes, rs.getBytes(4)); + + rs.next(); + + assertEquals(3, rs.getInt(1)); + assertEquals(string, rs.getString(2)); + assertEquals(string, rs.getString(3)); + assertArrayEquals(bytes, rs.getBytes(4)); + + rs.close(); + stmt.close(); + } + + @Test + public void testZeroRowResult() throws SQLException { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + 
ResultSet rs = st.executeQuery("select * from updateable WHERE 0 > 1"); + assertTrue(!rs.next()); + rs.moveToInsertRow(); + rs.moveToCurrentRow(); + } + + @Test + public void testUpdateable() throws SQLException { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("select * from updateable"); + assertNotNull(rs); + rs.moveToInsertRow(); + rs.updateInt(1, 1); + rs.updateString(2, "jake"); + rs.updateString(3, "avalue"); + rs.insertRow(); + rs.first(); + + rs.updateInt("id", 2); + rs.updateString("name", "dave"); + rs.updateRow(); + + assertEquals(2, rs.getInt("id")); + assertEquals("dave", rs.getString("name")); + assertEquals("avalue", rs.getString("notselected")); + + rs.deleteRow(); + rs.moveToInsertRow(); + rs.updateInt("id", 3); + rs.updateString("name", "paul"); + + rs.insertRow(); + + try { + rs.refreshRow(); + fail("Can't refresh when on the insert row."); + } catch (SQLException sqle) { + } + + assertEquals(3, rs.getInt("id")); + assertEquals("paul", rs.getString("name")); + assertNull(rs.getString("notselected")); + + rs.close(); + + rs = st.executeQuery("select id1, id, name, name1 from updateable, second"); + try { + while (rs.next()) { + rs.updateInt("id", 2); + rs.updateString("name", "dave"); + rs.updateRow(); + } + fail("should not get here, update should fail"); + } catch (SQLException ex) { + } + + rs = st.executeQuery("select * from updateable"); + assertTrue(rs.first()); + rs.updateInt("id", 3); + rs.updateString("name", "dave3"); + rs.updateRow(); + assertEquals(3, rs.getInt("id")); + assertEquals("dave3", rs.getString("name")); + + rs.moveToInsertRow(); + rs.updateInt("id", 4); + rs.updateString("name", "dave4"); + + rs.insertRow(); + rs.updateInt("id", 5); + rs.updateString("name", "dave5"); + rs.insertRow(); + + rs.moveToCurrentRow(); + assertEquals(3, rs.getInt("id")); + assertEquals("dave3", rs.getString("name")); + + assertTrue(rs.next()); + 
assertEquals(4, rs.getInt("id")); + assertEquals("dave4", rs.getString("name")); + + assertTrue(rs.next()); + assertEquals(5, rs.getInt("id")); + assertEquals("dave5", rs.getString("name")); + + rs.close(); + st.close(); + } + + @Test + public void testUpdateDate() throws Exception { + Date testDate = Date.valueOf("2021-01-01"); + TestUtil.execute(con, "insert into hasdate values (1,'2021-01-01'::date)"); + con.setAutoCommit(false); + String sql = "SELECT * FROM hasdate where id=1"; + ResultSet rs = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.CONCUR_UPDATABLE).executeQuery(sql); + assertTrue(rs.next()); + assertEquals(testDate, rs.getDate("dt")); + rs.updateDate("dt", Date.valueOf("2020-01-01")); + rs.updateRow(); + assertEquals(Date.valueOf("2020-01-01"), rs.getDate("dt")); + con.commit(); + rs = con.createStatement().executeQuery("select dt from hasdate where id=1"); + assertTrue(rs.next()); + assertEquals(Date.valueOf("2020-01-01"), rs.getDate("dt")); + rs.close(); + } + + @Test + public void test2193() throws Exception { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("select * from updateable"); + assertNotNull(rs); + rs.moveToInsertRow(); + rs.updateInt(1, 1); + rs.updateString(2, "jake"); + rs.updateString(3, "avalue"); + rs.insertRow(); + rs.first(); + + rs.updateString(2, "bob"); + rs.updateRow(); + rs.refreshRow(); + rs.updateString(2, "jake"); + rs.updateRow(); + } + + @Test + public void testInsertRowIllegalMethods() throws Exception { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("select * from updateable"); + assertNotNull(rs); + rs.moveToInsertRow(); + + try { + rs.cancelRowUpdates(); + fail("expected an exception when calling cancelRowUpdates() on the insert row"); + } catch (SQLException e) { + } + + try { + rs.updateRow(); + fail("expected an 
exception when calling updateRow() on the insert row"); + } catch (SQLException e) { + } + + try { + rs.deleteRow(); + fail("expected an exception when calling deleteRow() on the insert row"); + } catch (SQLException e) { + } + + try { + rs.refreshRow(); + fail("expected an exception when calling refreshRow() on the insert row"); + } catch (SQLException e) { + } + + rs.close(); + st.close(); + } + + @Test + public void testUpdateablePreparedStatement() throws Exception { + // No args. + PreparedStatement st = con.prepareStatement("select * from updateable", + ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery(); + rs.moveToInsertRow(); + rs.close(); + st.close(); + + // With args. + st = con.prepareStatement("select * from updateable where id = ?", + ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + st.setInt(1, 1); + rs = st.executeQuery(); + rs.moveToInsertRow(); + rs.close(); + st.close(); + } + + @Test + public void testUpdateSelectOnly() throws Exception { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + + ResultSet rs = st.executeQuery("select * from only second"); + assertTrue(rs.next()); + rs.updateInt(1, 2); + rs.updateRow(); + } + + @Test + public void testUpdateReadOnlyResultSet() throws Exception { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); + ResultSet rs = st.executeQuery("select * from updateable"); + try { + rs.moveToInsertRow(); + fail("expected an exception when calling moveToInsertRow() on a read-only resultset"); + } catch (SQLException e) { + } + } + + @Test + public void testBadColumnIndexes() throws Exception { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("select * from updateable"); + rs.moveToInsertRow(); + try { + rs.updateInt(0, 1); + fail("Should have thrown an exception on 
bad column index."); + } catch (SQLException sqle) { + } + try { + rs.updateString(1000, "hi"); + fail("Should have thrown an exception on bad column index."); + } catch (SQLException sqle) { + } + try { + rs.updateNull(1000); + fail("Should have thrown an exception on bad column index."); + } catch (SQLException sqle) { + } + } + + @Test + public void testArray() throws SQLException { + Statement stmt = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + stmt.executeUpdate("INSERT INTO updateable (id, intarr) VALUES (1, '{1,2,3}'::int4[])"); + ResultSet rs = stmt.executeQuery("SELECT id, intarr FROM updateable"); + assertTrue(rs.next()); + rs.updateObject(2, rs.getArray(2)); + rs.updateRow(); + + Array arr = rs.getArray(2); + assertEquals(Types.INTEGER, arr.getBaseType()); + Integer[] intarr = (Integer[]) arr.getArray(); + assertEquals(3, intarr.length); + assertEquals(1, intarr[0].intValue()); + assertEquals(2, intarr[1].intValue()); + assertEquals(3, intarr[2].intValue()); + rs.close(); + + rs = stmt.executeQuery("SELECT id,intarr FROM updateable"); + assertTrue(rs.next()); + arr = rs.getArray(2); + assertEquals(Types.INTEGER, arr.getBaseType()); + intarr = (Integer[]) arr.getArray(); + assertEquals(3, intarr.length); + assertEquals(1, intarr[0].intValue()); + assertEquals(2, intarr[1].intValue()); + assertEquals(3, intarr[2].intValue()); + + rs.close(); + stmt.close(); + } + + @Test + public void testMultiColumnUpdateWithoutAllColumns() throws Exception { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("select id1,val from multicol"); + try { + rs.moveToInsertRow(); + fail("Move to insert row succeeded. It should not"); + } catch (SQLException sqle) { + // Ensure we're reporting that the RS is not updatable. 
+ assertEquals("24000", sqle.getSQLState()); + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(st); + } + } + + @Test + public void testMultiColumnUpdateWithoutPrimaryKey() throws Exception { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("select * from nopkmulticol"); + try { + rs.moveToInsertRow(); + fail("Move to insert row succeeded. It should not"); + } catch (SQLException sqle) { + // Ensure we're reporting that the RS is not updatable. + assertEquals("24000", sqle.getSQLState()); + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(st); + } + } + + @Test + public void testMultiColumnUpdate() throws Exception { + Statement st = + con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE); + st.executeUpdate("INSERT INTO multicol (id1,id2,val) VALUES (1,2,'val')"); + + ResultSet rs = st.executeQuery("SELECT id1, id2, val FROM multicol"); + assertTrue(rs.next()); + assertEquals("val", rs.getString("val")); + rs.updateString("val", "newval"); + rs.updateRow(); + rs.close(); + + rs = st.executeQuery("SELECT id1, id2, val FROM multicol"); + assertTrue(rs.next()); + assertEquals("newval", rs.getString("val")); + rs.close(); + st.close(); + } + + @Test + public void simpleAndUpdateableSameQuery() throws Exception { + PGConnection unwrap = con.unwrap(PGConnection.class); + Assume.assumeNotNull(unwrap); + int prepareThreshold = unwrap.getPrepareThreshold(); + String sql = "select * from second where id1=?"; + for (int i = 0; i <= prepareThreshold; i++) { + PreparedStatement ps = null; + ResultSet rs = null; + try { + ps = con.prepareStatement(sql); + ps.setInt(1, 1); + rs = ps.executeQuery(); + rs.next(); + String name1 = rs.getString("name1"); + Assert.assertEquals("anyvalue", name1); + int id1 = rs.getInt("id1"); + Assert.assertEquals(1, id1); + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(ps); + } + } + 
// The same SQL, and use updateable ResultSet + { + PreparedStatement ps = null; + ResultSet rs = null; + try { + ps = con.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); + ps.setInt(1, 1); + rs = ps.executeQuery(); + rs.next(); + String name1 = rs.getString("name1"); + Assert.assertEquals("anyvalue", name1); + int id1 = rs.getInt("id1"); + Assert.assertEquals(1, id1); + rs.updateString("name1", "updatedValue"); + rs.updateRow(); + } finally { + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(ps); + } + } + } + + @Test + public void testUpdateBoolean() throws Exception { + + Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("SELECT * FROM booltable WHERE id=1"); + assertTrue(rs.next()); + assertFalse(rs.getBoolean("b")); + rs.updateBoolean("b", true); + rs.updateRow(); + //rs.refreshRow(); //fetches the value stored + assertTrue(rs.getBoolean("b")); + } + + @Test + public void testOidUpdatable() throws Exception { + Connection privilegedCon = TestUtil.openPrivilegedDB(); + try { + Statement st = privilegedCon.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("SELECT oid,* FROM pg_class WHERE relname = 'pg_class'"); + assertTrue(rs.next()); + assertTrue(rs.first()); + rs.updateString("relname", "pg_class"); + rs.updateRow(); + rs.close(); + st.close(); + } finally { + privilegedCon.close(); + } + } + + @Test + public void testUniqueWithNullableColumnsNotUpdatable() throws Exception { + Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("SELECT u1, name1 from unique_null_constraint"); + assertTrue(rs.next()); + assertTrue(rs.first()); + try { + rs.updateString("name1", "bob"); + fail("Should have failed since unique column u1 is nullable"); + } catch (SQLException ex) { + assertEquals("No eligible primary 
or unique key found for table unique_null_constraint.", + ex.getMessage()); + } + rs.close(); + st.close(); + } + + @Test + public void testPrimaryAndUniqueUpdateableByPrimary() throws Exception { + Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("SELECT id, dt from primaryunique"); + assertTrue(rs.next()); + assertTrue(rs.first()); + int id = rs.getInt("id"); + rs.updateDate("dt", Date.valueOf("1999-01-01")); + rs.updateRow(); + assertFalse(rs.next()); + rs.close(); + rs = st.executeQuery("select dt from primaryunique where id = " + id); + assertTrue(rs.next()); + assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt")); + rs.close(); + st.close(); + } + + @Test + public void testPrimaryAndUniqueUpdateableByUnique() throws Exception { + Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("SELECT name, dt from primaryunique"); + assertTrue(rs.next()); + assertTrue(rs.first()); + String name = rs.getString("name"); + rs.updateDate("dt", Date.valueOf("1999-01-01")); + rs.updateRow(); + assertFalse(rs.next()); + rs.close(); + rs = st.executeQuery("select dt from primaryunique where name = '" + name + "'"); + assertTrue(rs.next()); + assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt")); + rs.close(); + st.close(); + } + + @Test + public void testUniqueWithNullAndNotNullableColumnUpdateable() throws Exception { + Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.CONCUR_UPDATABLE); + int id = 0; + int id2 = 0; + ResultSet rs = st.executeQuery("SELECT id, id2, dt from uniquekeys"); + assertTrue(rs.next()); + assertTrue(rs.first()); + id = rs.getInt("id"); + id2 = rs.getInt("id2"); + rs.updateDate("dt", Date.valueOf("1999-01-01")); + rs.updateRow(); + rs.close(); + rs = st.executeQuery("select dt from uniquekeys where id = " + id + " and id2 = " + id2); + 
assertNotNull(rs); + assertTrue(rs.next()); + assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt")); + rs.close(); + st.close(); + } + + @Test + public void testUniqueWithNotNullableColumnUpdateable() throws Exception { + Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.CONCUR_UPDATABLE); + int id = 0; + ResultSet rs = st.executeQuery("SELECT id, dt from uniquekeys"); + assertTrue(rs.next()); + assertTrue(rs.first()); + id = rs.getInt("id"); + rs.updateDate("dt", Date.valueOf("1999-01-01")); + rs.updateRow(); + rs.close(); + rs = st.executeQuery("select id, dt from uniquekeys where id = " + id); + assertNotNull(rs); + assertTrue(rs.next()); + assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt")); + rs.close(); + st.close(); + } + + @Test + public void testUniqueWithNullableColumnNotUpdateable() throws Exception { + Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("SELECT id2, dt from uniquekeys"); + assertTrue(rs.next()); + assertTrue(rs.first()); + try { + rs.updateDate("dt", Date.valueOf("1999-01-01")); + fail("Should have failed since id2 is nullable column"); + } catch (SQLException ex) { + assertEquals("No eligible primary or unique key found for table uniquekeys.", + ex.getMessage()); + } + rs.close(); + st.close(); + } + + @Test + public void testNoUniqueNotUpdateable() throws SQLException { + Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.CONCUR_UPDATABLE); + ResultSet rs = st.executeQuery("SELECT dt from uniquekeys"); + assertTrue(rs.next()); + assertTrue(rs.first()); + try { + rs.updateDate("dt", Date.valueOf("1999-01-01")); + fail("Should have failed since no UK/PK are in the select statement"); + } catch (SQLException ex) { + assertEquals("No eligible primary or unique key found for table uniquekeys.", + ex.getMessage()); + } + rs.close(); + st.close(); + } +} diff --git 
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc2;

import static org.junit.Assert.assertEquals;

import org.postgresql.core.ServerVersion;
import org.postgresql.test.TestUtil;

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collection;

/**
 * Tests {@code INSERT .. ON CONFLICT} introduced in PostgreSQL 9.5.
 *
 * <p>Every test starts with a fresh {@code test_statement} temp table that already contains the
 * row {@code (42, '42')}, so inserting key 42 conflicts while key 43 does not.</p>
 */
@RunWith(Parameterized.class)
public class UpsertTest extends BaseTest4 {
  public UpsertTest(BinaryMode binaryMode, ReWriteBatchedInserts rewrite) {
    setBinaryMode(binaryMode);
    setReWriteBatchedInserts(rewrite);
  }

  /**
   * Runs each test in every combination of binary transfer mode and batched-insert rewriting.
   */
  @Parameterized.Parameters(name = "binary = {0}, reWriteBatchedInserts = {1}")
  public static Iterable<Object[]> data() {
    // Parameterized raw types replaced with Iterable<Object[]>/Collection<Object[]>.
    Collection<Object[]> ids = new ArrayList<>();
    for (BinaryMode binaryMode : BinaryMode.values()) {
      for (ReWriteBatchedInserts rewrite : ReWriteBatchedInserts.values()) {
        ids.add(new Object[]{binaryMode, rewrite});
      }
    }
    return ids;
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    // INSERT .. ON CONFLICT requires PostgreSQL 9.5 or later.
    assumeMinimumServerVersion(ServerVersion.v9_5);

    TestUtil.createTempTable(con, "test_statement", "i int primary key, t varchar(5)");
    Statement stmt = con.createStatement();
    stmt.executeUpdate("INSERT INTO test_statement(i, t) VALUES (42, '42')");
    TestUtil.closeQuietly(stmt);
  }

  @Override
  public void tearDown() throws SQLException {
    TestUtil.dropTable(con, "test_statement");
    super.tearDown();
  }

  /**
   * Executes the given SQL through a one-shot PreparedStatement.
   *
   * @param sql statement to execute
   * @return the update count reported by the server
   * @throws SQLException if preparation or execution fails
   */
  protected int executeUpdate(String sql) throws SQLException {
    PreparedStatement ps = con.prepareStatement(sql);
    try {
      return ps.executeUpdate();
    } finally {
      // Close even when executeUpdate throws; the original leaked the statement on failure.
      TestUtil.closeQuietly(ps);
    }
  }

  @Test
  public void testUpsertDoNothingConflict() throws SQLException {
    int count = executeUpdate(
        "INSERT INTO test_statement(i, t) VALUES (42, '42') ON CONFLICT DO NOTHING");
    assertEquals("insert on CONFLICT DO NOTHING should report 0 modified rows on CONFLICT",
        0, count);
  }

  @Test
  public void testUpsertDoNothingNoConflict() throws SQLException {
    int count = executeUpdate(
        "INSERT INTO test_statement(i, t) VALUES (43, '43') ON CONFLICT DO NOTHING");
    assertEquals("insert on conflict DO NOTHING should report 1 modified row on plain insert",
        1, count);
  }

  @Test
  public void testUpsertDoUpdateConflict() throws SQLException {
    int count = executeUpdate(
        "INSERT INTO test_statement(i, t) VALUES (42, '42') ON CONFLICT(i) DO UPDATE SET t='43'");
    assertEquals("insert ON CONFLICT DO UPDATE should report 1 modified row on CONFLICT",
        1, count);
  }

  @Test
  public void testUpsertDoUpdateNoConflict() throws SQLException {
    int count = executeUpdate(
        "INSERT INTO test_statement(i, t) VALUES (43, '43') ON CONFLICT(i) DO UPDATE SET t='43'");
    assertEquals("insert on conflict do update should report 1 modified row on plain insert",
        1, count);
  }

  @Test
  public void testSingleValuedUpsertBatch() throws SQLException {
    PreparedStatement ps = null;
    try {
      ps = con.prepareStatement(
          "insert into test_statement(i, t) values (?,?) ON CONFLICT (i) DO NOTHING");
      ps.setInt(1, 50);
      ps.setString(2, "50");
      ps.addBatch();
      ps.setInt(1, 53);
      ps.setString(2, "53");
      ps.addBatch();
      int[] actual = ps.executeBatch();
      BatchExecuteTest.assertSimpleInsertBatch(2, actual);
    } finally {
      TestUtil.closeQuietly(ps);
    }
  }

  @Test
  public void testMultiValuedUpsertBatch() throws SQLException {
    PreparedStatement ps = null;
    Statement st = null;
    ResultSet rs = null;
    try {
      ps = con.prepareStatement(
          "insert into test_statement(i, t) values (?,?),(?,?) ON CONFLICT (i) DO NOTHING");
      ps.setInt(1, 50);
      ps.setString(2, "50");
      ps.setInt(3, 51);
      ps.setString(4, "51");
      ps.addBatch();
      ps.setInt(1, 52);
      ps.setString(2, "52");
      ps.setInt(3, 53);
      ps.setString(4, "53");
      ps.addBatch();
      int[] actual = ps.executeBatch();

      BatchExecuteTest.assertBatchResult("2 batched rows, 2-values each", new int[]{2, 2}, actual);

      // Verify all four rows landed, independently of the reported batch counts.
      st = con.createStatement();
      rs = st.executeQuery("select count(*) from test_statement where i between 50 and 53");
      rs.next();
      Assert.assertEquals("test_statement should have 4 rows with 'i' of 50..53", 4, rs.getInt(1));
    } finally {
      // Fix: the verification Statement/ResultSet previously leaked (only ps was closed).
      TestUtil.closeQuietly(rs);
      TestUtil.closeQuietly(st);
      TestUtil.closeQuietly(ps);
    }
  }

  @Test
  public void testSingleValuedUpsertUpdateBatch() throws SQLException {
    PreparedStatement ps = null;
    try {
      ps = con.prepareStatement(
          "insert into test_statement(i, t) values (?,?) ON CONFLICT (i) DO update set t=?");
      ps.setInt(1, 50);
      ps.setString(2, "50U");
      ps.setString(3, "50U");
      ps.addBatch();
      ps.setInt(1, 53);
      ps.setString(2, "53U");
      ps.setString(3, "53U");
      ps.addBatch();
      int[] actual = ps.executeBatch();
      BatchExecuteTest.assertSimpleInsertBatch(2, actual);
    } finally {
      TestUtil.closeQuietly(ps);
    }
  }

  @Test
  public void testSingleValuedUpsertUpdateConstantBatch() throws SQLException {
    PreparedStatement ps = null;
    try {
      // For reWriteBatchedInserts=YES the following is expected
      // FE=> Parse(stmt=null,query="insert into test_statement(i, t) values ($1,$2),($3,$4) ON CONFLICT (i) DO update set t='DEF'",oids={23,1043,23,1043})
      ps = con.prepareStatement(
          "insert into test_statement(i, t) values (?,?) ON CONFLICT (i) DO update set t='DEF'");
      ps.setInt(1, 50);
      ps.setString(2, "50");
      ps.addBatch();
      ps.setInt(1, 53);
      ps.setString(2, "53");
      ps.addBatch();
      int[] actual = ps.executeBatch();
      BatchExecuteTest.assertSimpleInsertBatch(2, actual);
    } finally {
      TestUtil.closeQuietly(ps);
    }
  }
}
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc2.optional;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.postgresql.ds.common.BaseDataSource;

import org.junit.jupiter.api.Test;

import java.io.IOException;

import javax.naming.NamingException;

/**
 * Tests that failover URLs (multiple comma-separated hosts) survive the parse/rebuild round trip
 * with and without explicit per-host ports.
 */
class BaseDataSourceFailoverUrlsTest {

  // Port the driver fills in when a host in the URL carries no explicit port.
  private static final String DEFAULT_PORT = "5432";

  @Test
  void fullDefault() throws ClassNotFoundException, NamingException, IOException {
    // Single host, no port: rebuilt URL gains the default port.
    roundTripFromUrl("jdbc:postgresql://server/database", "jdbc:postgresql://server:" + DEFAULT_PORT + "/database");
  }

  @Test
  void twoNoPorts() throws ClassNotFoundException, NamingException, IOException {
    // Two hosts, neither with a port: both gain the default port.
    roundTripFromUrl("jdbc:postgresql://server1,server2/database", "jdbc:postgresql://server1:" + DEFAULT_PORT + ",server2:" + DEFAULT_PORT + "/database");
  }

  @Test
  void twoWithPorts() throws ClassNotFoundException, NamingException, IOException {
    // Explicit ports on both hosts are preserved verbatim.
    roundTripFromUrl("jdbc:postgresql://server1:1234,server2:2345/database", "jdbc:postgresql://server1:1234,server2:2345/database");
  }

  @Test
  void twoFirstPort() throws ClassNotFoundException, NamingException, IOException {
    // Only the second host has a port: the first gets the default.
    roundTripFromUrl("jdbc:postgresql://server1,server2:2345/database", "jdbc:postgresql://server1:" + DEFAULT_PORT + ",server2:2345/database");
  }

  @Test
  void twoLastPort() throws ClassNotFoundException, NamingException, IOException {
    // Only the first host has a port: the second gets the default.
    roundTripFromUrl("jdbc:postgresql://server1:2345,server2/database", "jdbc:postgresql://server1:2345,server2:" + DEFAULT_PORT + "/database");
  }

  @Test
  void nullPorts() {
    // A null port array is treated like "no ports configured": URL omits the port and
    // getPortNumber()/getPortNumbers() report 0.
    BaseDataSource bds = newDS();
    bds.setDatabaseName("database");
    bds.setPortNumbers(null);
    assertUrlWithoutParamsEquals("jdbc:postgresql://localhost/database", bds.getURL());
    assertEquals(0, bds.getPortNumber());
    assertEquals(0, bds.getPortNumbers()[0]);
  }

  @Test
  void emptyPorts() {
    // An empty port array behaves the same as a null one.
    BaseDataSource bds = newDS();
    bds.setDatabaseName("database");
    bds.setPortNumbers(new int[0]);
    assertUrlWithoutParamsEquals("jdbc:postgresql://localhost/database", bds.getURL());
    assertEquals(0, bds.getPortNumber());
    assertEquals(0, bds.getPortNumbers()[0]);
  }

  @Test
  void wrongNumberOfPorts() {
    // Two servers but only one port: building the URL must fail fast.
    BaseDataSource bds = newDS();
    bds.setDatabaseName("database");
    bds.setServerNames(new String[]{"localhost", "localhost1"});
    bds.setPortNumbers(new int[]{6432});
    assertThrows(IllegalArgumentException.class, bds::getUrl, "Number of ports not equal to the number of servers should throw an exception");
  }

  /**
   * Creates a minimal concrete BaseDataSource (the class is abstract only in its description).
   */
  private BaseDataSource newDS() {
    return new BaseDataSource() {
      @Override
      public String getDescription() {
        return "BaseDataSourceFailoverUrlsTest-DS";
      }
    };
  }

  /**
   * Asserts that {@code in} rebuilds to {@code expected} three ways: directly after setUrl,
   * after a JNDI Reference round trip, and after initializeFrom.
   */
  private void roundTripFromUrl(String in, String expected) throws NamingException, ClassNotFoundException, IOException {
    BaseDataSource bds = newDS();

    bds.setUrl(in);
    assertUrlWithoutParamsEquals(expected, bds.getURL());

    bds.setFromReference(bds.getReference());
    assertUrlWithoutParamsEquals(expected, bds.getURL());

    bds.initializeFrom(bds);
    assertUrlWithoutParamsEquals(expected, bds.getURL());
  }

  // Strips the "?param=..." query suffix so comparisons ignore connection properties.
  private static String jdbcUrlStripParams(String in) {
    return in.replaceAll("\\?.*$", "");
  }

  private static void assertUrlWithoutParamsEquals(String expected, String url) {
    assertEquals(expected, jdbcUrlStripParams(url));
  }
}
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc2.optional;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.fail;

import org.postgresql.PGConnection;
import org.postgresql.ds.common.BaseDataSource;
import org.postgresql.test.TestUtil;
import org.postgresql.test.util.MiniJndiContextFactory;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Hashtable;

import javax.naming.Context;
import javax.naming.InitialContext;
import javax.naming.NamingException;

/**
 * Common tests for all the BaseDataSource implementations. This is a small variety to make sure
 * that a connection can be opened and some basic queries run. The different BaseDataSource
 * subclasses have different subclasses of this which add additional custom tests.
 *
 * @author Aaron Mulder (ammulder@chariotsolutions.com)
 */
public abstract class BaseDataSourceTest {
  // JNDI name the DataSource is bound under in testJndi().
  public static final String DATA_SOURCE_JNDI = "BaseDataSource";

  // Connection under test; opened lazily by the individual tests, closed in tearDown().
  protected Connection con;
  // DataSource under test; created lazily by initializeDataSource().
  protected BaseDataSource bds;

  /**
   * Creates a test table using a standard connection (not from a DataSource).
   */
  @Before
  public void setUp() throws Exception {
    con = TestUtil.openDB();
    TestUtil.createTable(con, "poolingtest", "id int4 not null primary key, name varchar(50)");
    Statement stmt = con.createStatement();
    stmt.executeUpdate("INSERT INTO poolingtest VALUES (1, 'Test Row 1')");
    stmt.executeUpdate("INSERT INTO poolingtest VALUES (2, 'Test Row 2')");
    TestUtil.closeDB(con);
  }

  /**
   * Removes the test table using a standard connection (not from a DataSource).
   */
  @After
  public void tearDown() throws Exception {
    // Close whatever connection the test left open before dropping the table.
    TestUtil.closeDB(con);
    con = TestUtil.openDB();
    TestUtil.dropTable(con, "poolingtest");
    TestUtil.closeDB(con);
  }

  /**
   * Gets a connection from the current BaseDataSource.
   */
  protected Connection getDataSourceConnection() throws SQLException {
    if (bds == null) {
      initializeDataSource();
    }
    return bds.getConnection();
  }

  /**
   * Creates an instance of the current BaseDataSource for testing. Must be customized by each
   * subclass.
   */
  protected abstract void initializeDataSource();

  /**
   * Copies the standard test-harness connection settings onto the given DataSource.
   */
  public static void setupDataSource(BaseDataSource bds) {
    bds.setServerName(TestUtil.getServer());
    bds.setPortNumber(TestUtil.getPort());
    bds.setDatabaseName(TestUtil.getDatabase());
    bds.setUser(TestUtil.getUser());
    bds.setPassword(TestUtil.getPassword());
    bds.setPrepareThreshold(TestUtil.getPrepareThreshold());
    bds.setProtocolVersion(TestUtil.getProtocolVersion());
  }

  /**
   * Test to make sure you can instantiate and configure the appropriate DataSource.
   */
  @Test
  public void testCreateDataSource() {
    initializeDataSource();
  }

  /**
   * Test to make sure you can get a connection from the DataSource, which in turn means the
   * DataSource was able to open it.
   */
  @Test
  public void testGetConnection() {
    try {
      con = getDataSourceConnection();
      con.close();
    } catch (SQLException e) {
      fail(e.getMessage());
    }
  }

  /**
   * A simple test to make sure you can execute SQL using the Connection from the DataSource.
   */
  @Test
  public void testUseConnection() {
    try {
      con = getDataSourceConnection();
      Statement st = con.createStatement();
      ResultSet rs = st.executeQuery("SELECT COUNT(*) FROM poolingtest");
      if (rs.next()) {
        int count = rs.getInt(1);
        if (rs.next()) {
          fail("Should only have one row in SELECT COUNT result set");
        }
        if (count != 2) {
          fail("Count returned " + count + " expecting 2");
        }
      } else {
        fail("Should have one row in SELECT COUNT result set");
      }
      rs.close();
      st.close();
      con.close();
    } catch (SQLException e) {
      fail(e.getMessage());
    }
  }

  /**
   * A test to make sure you can execute DDL SQL using the Connection from the DataSource.
   */
  @Test
  public void testDdlOverConnection() {
    try {
      con = getDataSourceConnection();
      // createTable drops and recreates, so this exercises DDL over the DataSource connection.
      TestUtil.createTable(con, "poolingtest", "id int4 not null primary key, name varchar(50)");
      con.close();
    } catch (SQLException e) {
      fail(e.getMessage());
    }
  }

  /**
   * A test to make sure the connections are not being pooled by the current DataSource. Obviously
   * need to be overridden in the case of a pooling Datasource.
   */
  @Test
  public void testNotPooledConnection() throws SQLException {
    Connection con1 = getDataSourceConnection();
    con1.close();
    Connection con2 = getDataSourceConnection();
    con2.close();
    assertNotSame(con1, con2);
  }

  /**
   * Test to make sure that PGConnection methods can be called on the pooled Connection.
   */
  @Test
  public void testPGConnection() {
    try {
      con = getDataSourceConnection();
      ((PGConnection) con).getNotifications();
      con.close();
    } catch (Exception e) {
      fail("Unable to call PGConnection method on pooled connection due to "
          + e.getClass().getName() + " (" + e.getMessage() + ")");
    }
  }

  /**
   * Eventually, we must test stuffing the DataSource in JNDI and then getting it back out and make
   * sure it's still usable. This should ideally test both Serializable and Referenceable
   * mechanisms. Will probably be multiple tests when implemented.
   */
  @Test
  public void testJndi() {
    initializeDataSource();
    BaseDataSource oldbds = bds;
    String oldurl = bds.getURL();
    InitialContext ic = getInitialContext();
    try {
      // Bind, look back up, and verify the looked-up DS relates to the original as expected.
      ic.rebind(DATA_SOURCE_JNDI, bds);
      bds = (BaseDataSource) ic.lookup(DATA_SOURCE_JNDI);
      assertNotNull("Got null looking up DataSource from JNDI!", bds);
      compareJndiDataSource(oldbds, bds);
    } catch (NamingException e) {
      fail(e.getMessage());
    }
    // Re-baseline on the looked-up DS, then confirm running a query does not swap it out.
    oldbds = bds;
    String url = bds.getURL();
    testUseConnection();
    assertSame("Test should not have changed DataSource (" + bds + " != " + oldbds + ")!",
        oldbds, bds);
    assertEquals("Test should not have changed DataSource URL",
        oldurl, url);
  }

  /**
   * Uses the mini-JNDI implementation for testing purposes.
   */
  protected InitialContext getInitialContext() {
    Hashtable env = new Hashtable<>();
    env.put(Context.INITIAL_CONTEXT_FACTORY, MiniJndiContextFactory.class.getName());
    try {
      return new InitialContext(env);
    } catch (NamingException e) {
      // fail() throws, so the return below is unreachable in practice; it satisfies the compiler.
      fail("Unable to create InitialContext: " + e.getMessage());
      return null;
    }
  }

  /**
   * Check whether a DS was dereferenced from JNDI or recreated.
   */
  protected void compareJndiDataSource(BaseDataSource oldbds, BaseDataSource bds) {
    assertNotSame("DataSource was dereferenced, should have been serialized or recreated", oldbds, bds);
  }
}
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc2.optional;

import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;

import org.postgresql.core.BaseConnection;
import org.postgresql.ds.common.BaseDataSource;
import org.postgresql.jdbc2.optional.SimpleDataSource;
import org.postgresql.test.TestUtil;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

/**
 * DataSource test to ensure the BaseConnection is configured with column sanitiser disabled.
 */
public class CaseOptimiserDataSourceTest {
  // DataSource under test; created lazily with disableColumnSanitiser=true.
  private BaseDataSource bds;
  // NOTE(review): this field is shadowed by the local 'conn' in every method and appears unused —
  // confirm whether it can be removed.
  protected Connection conn;

  @BeforeEach
  void setUp() throws SQLException {
    // Verify the sanitiser really is disabled on the underlying BaseConnection, then seed the
    // mixed-case test table.
    Connection conn = getDataSourceConnection();
    assertTrue(conn instanceof BaseConnection);
    BaseConnection bc = (BaseConnection) conn;
    assertTrue(bc.isColumnSanitiserDisabled(),
        "Expected state [TRUE] of base connection configuration failed test.");
    Statement insert = conn.createStatement();
    TestUtil.createTable(conn, "allmixedup",
        "id int primary key, \"DESCRIPTION\" varchar(40), \"fOo\" varchar(3)");
    insert.execute(TestUtil.insertSQL("allmixedup", "1,'mixed case test', 'bar'"));
    insert.close();
    conn.close();
  }

  @AfterEach
  void tearDown() throws SQLException {
    Connection conn = getDataSourceConnection();
    Statement drop = conn.createStatement();
    drop.execute("drop table allmixedup");
    drop.close();
    conn.close();
    // Restore the default so the shared DataSource does not leak state into other tests.
    bds.setDisableColumnSanitiser(false);
  }

  /*
   * Test to ensure a datasource can be configured with the column sanitiser optimisation. This test
   * checks for a side effect of the sanitiser being disabled. The column is not expected to be
   * found.
   */
  @Test
  void dataSourceDisabledSanitiserPropertySucceeds() throws SQLException {
    // With the sanitiser disabled, label lookup is case-sensitive, so upper-case "FOO" must not
    // match the quoted column "fOo".
    String label = "FOO";
    Connection conn = getDataSourceConnection();
    PreparedStatement query =
        conn.prepareStatement("select * from allmixedup");
    if (0 < TestUtil.findColumn(query, label)) {
      fail(String.format("Did not expect to find the column with the label [%1$s].", label));
    }
    query.close();
    conn.close();
  }

  /**
   * Gets a connection from the current BaseDataSource.
   */
  protected Connection getDataSourceConnection() throws SQLException {
    if (bds == null) {
      initializeDataSource();
    }
    return bds.getConnection();
  }

  /**
   * Lazily creates a SimpleDataSource with the column sanitiser disabled.
   */
  protected void initializeDataSource() {
    if (bds == null) {
      bds = new SimpleDataSource();
      setupDataSource(bds);
      bds.setDisableColumnSanitiser(true);
    }
  }

  /**
   * Copies the standard test-harness connection settings onto the given DataSource.
   */
  public static void setupDataSource(BaseDataSource bds) {
    bds.setServerName(TestUtil.getServer());
    bds.setPortNumber(TestUtil.getPort());
    bds.setDatabaseName(TestUtil.getDatabase());
    bds.setUser(TestUtil.getUser());
    bds.setPassword(TestUtil.getPassword());
    bds.setPrepareThreshold(TestUtil.getPrepareThreshold());
    bds.setProtocolVersion(TestUtil.getProtocolVersion());
  }
}
+ */ + +package org.postgresql.test.jdbc2.optional; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.postgresql.core.ServerVersion; +import org.postgresql.ds.PGConnectionPoolDataSource; +import org.postgresql.jdbc2.optional.ConnectionPool; +import org.postgresql.test.TestUtil; + +import org.junit.Assume; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; + +import javax.sql.ConnectionEvent; +import javax.sql.ConnectionEventListener; +import javax.sql.PooledConnection; + +/** + * Tests for the ConnectionPoolDataSource and PooledConnection implementations. They are tested + * together because the only client interface to the PooledConnection is through the CPDS. + * + * @author Aaron Mulder (ammulder@chariotsolutions.com) + */ +public class ConnectionPoolTest extends BaseDataSourceTest { + private final ArrayList connections = new ArrayList<>(); + + /** + * Creates and configures a ConnectionPool. + */ + @Override + protected void initializeDataSource() { + if (bds == null) { + bds = new ConnectionPool(); + setupDataSource(bds); + } + } + + @Override + public void tearDown() throws Exception { + for (PooledConnection c : connections) { + try { + c.close(); + } catch (Exception ex) { + // close throws nullptr or other evil things if the connection + // is already closed + } + } + } + + /** + * Instead of just fetching a Connection from the ConnectionPool, get a PooledConnection, add a + * listener to close it when the Connection is closed, and then get the Connection. 
Without the + * listener the PooledConnection (and thus the physical connection) would never by closed. + * Probably not a disaster during testing, but you never know. + */ + @Override + protected Connection getDataSourceConnection() throws SQLException { + initializeDataSource(); + final PooledConnection pc = getPooledConnection(); + // Since the pooled connection won't be reused in these basic tests, close it when the + // connection is closed + pc.addConnectionEventListener(new ConnectionEventListener() { + public void connectionClosed(ConnectionEvent event) { + try { + pc.close(); + } catch (SQLException e) { + fail("Unable to close PooledConnection: " + e); + } + } + + public void connectionErrorOccurred(ConnectionEvent event) { + } + }); + return pc.getConnection(); + } + + /** + * Though the normal client interface is to grab a Connection, in order to test the + * middleware/server interface, we need to deal with PooledConnections. Some tests use each. + */ + protected PooledConnection getPooledConnection() throws SQLException { + initializeDataSource(); + // we need to recast to PGConnectionPool rather than + // jdbc.optional.ConnectionPool because our ObjectFactory + // returns only the top level class, not the specific + // jdbc2/jdbc3 implementations. + PooledConnection c = ((PGConnectionPoolDataSource) bds).getPooledConnection(); + connections.add(c); + return c; + } + + /** + * Makes sure that if you get a connection from a PooledConnection, close it, and then get another + * one, you're really using the same physical connection. Depends on the implementation of + * toString for the connection handle. 
+ */ + @Test + public void testPoolReuse() { + try { + PooledConnection pc = getPooledConnection(); + con = pc.getConnection(); + String name = con.toString(); + con.close(); + con = pc.getConnection(); + String name2 = con.toString(); + con.close(); + pc.close(); + assertTrue("Physical connection doesn't appear to be reused across PooledConnection wrappers", + name.equals(name2)); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Makes sure that when you request a connection from the PooledConnection, and previous + * connection it might have given out is closed. See JDBC 2.0 Optional Package spec section 6.2.3 + */ + @Test + public void testPoolCloseOldWrapper() { + try { + PooledConnection pc = getPooledConnection(); + con = pc.getConnection(); + Connection con2 = pc.getConnection(); + try { + con.createStatement(); + fail( + "Original connection wrapper should be closed when new connection wrapper is generated"); + } catch (SQLException e) { + } + con2.close(); + pc.close(); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Makes sure that if you get two connection wrappers from the same PooledConnection, they are + * different, even though the represent the same physical connection. See JDBC 2.0 Optional + * Package spec section 6.2.2 + */ + @Test + public void testPoolNewWrapper() { + try { + PooledConnection pc = getPooledConnection(); + con = pc.getConnection(); + Connection con2 = pc.getConnection(); + con2.close(); + pc.close(); + assertTrue( + "Two calls to PooledConnection.getConnection should not return the same connection wrapper", + con != con2); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Makes sure that exactly one close event is fired for each time a connection handle is closed. + * Also checks that events are not fired after a given handle has been closed once. 
+ */ + @Test + public void testCloseEvent() { + try { + PooledConnection pc = getPooledConnection(); + CountClose cc = new CountClose(); + pc.addConnectionEventListener(cc); + con = pc.getConnection(); + assertEquals(0, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + con.close(); + assertEquals(1, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + con = pc.getConnection(); + assertEquals(1, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + con.close(); + assertEquals(2, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + // a double close shouldn't fire additional events + con.close(); + assertEquals(2, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + pc.close(); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Makes sure that close events are not fired after a listener has been removed. + */ + @Test + public void testNoCloseEvent() { + try { + PooledConnection pc = getPooledConnection(); + CountClose cc = new CountClose(); + pc.addConnectionEventListener(cc); + con = pc.getConnection(); + assertEquals(0, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + con.close(); + assertEquals(1, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + pc.removeConnectionEventListener(cc); + con = pc.getConnection(); + assertEquals(1, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + con.close(); + assertEquals(1, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Makes sure that a listener can be removed while dispatching events. Sometimes this causes a + * ConcurrentModificationException or something. 
+ */ + @Test + public void testInlineCloseEvent() { + try { + PooledConnection pc = getPooledConnection(); + RemoveClose rc1 = new RemoveClose(); + RemoveClose rc2 = new RemoveClose(); + RemoveClose rc3 = new RemoveClose(); + pc.addConnectionEventListener(rc1); + pc.addConnectionEventListener(rc2); + pc.addConnectionEventListener(rc3); + con = pc.getConnection(); + con.close(); + con = pc.getConnection(); + con.close(); + } catch (Exception e) { + fail(e.getMessage()); + } + } + + /** + * Tests that a close event is not generated when a connection handle is closed automatically due + * to a new connection handle being opened for the same PooledConnection. See JDBC 2.0 Optional + * Package spec section 6.3 + */ + @Test + public void testAutomaticCloseEvent() { + try { + PooledConnection pc = getPooledConnection(); + CountClose cc = new CountClose(); + pc.addConnectionEventListener(cc); + con = pc.getConnection(); + assertEquals(0, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + con.close(); + assertEquals(1, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + con = pc.getConnection(); + assertEquals(1, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + // Open a 2nd connection, causing the first to be closed. No even should be generated. + Connection con2 = pc.getConnection(); + assertTrue("Connection handle was not closed when new handle was opened", con.isClosed()); + assertEquals(1, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + con2.close(); + assertEquals(2, cc.getCount()); + assertEquals(0, cc.getErrorCount()); + pc.close(); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Makes sure the isClosed method on a connection wrapper does what you'd expect. Checks the usual + * case, as well as automatic closure when a new handle is opened on the same physical connection. 
+ */ + @Test + public void testIsClosed() { + try { + PooledConnection pc = getPooledConnection(); + con = pc.getConnection(); + assertTrue(!con.isClosed()); + con.close(); + assertTrue(con.isClosed()); + con = pc.getConnection(); + Connection con2 = pc.getConnection(); + assertTrue(con.isClosed()); + assertTrue(!con2.isClosed()); + con2.close(); + assertTrue(con.isClosed()); + pc.close(); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Make sure that close status of pooled connection reflect the one of the underlying physical + * connection. + */ + @Test + public void testBackendIsClosed() throws Exception { + try { + PooledConnection pc = getPooledConnection(); + con = pc.getConnection(); + assertTrue(!con.isClosed()); + + Assume.assumeTrue("pg_terminate_backend requires PostgreSQL 8.4+", + TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)); + + TestUtil.terminateBackend(con); + try { + TestUtil.executeQuery(con, "SELECT 1"); + fail("The connection should not be opened anymore. An exception was expected"); + } catch (SQLException e) { + // this is expected as the connection has been forcibly closed from backend + } + assertTrue(con.isClosed()); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Ensures that a statement generated by a proxied connection returns the proxied connection from + * getConnection() [not the physical connection]. + */ + @Test + public void testStatementConnection() { + try { + PooledConnection pc = getPooledConnection(); + con = pc.getConnection(); + Statement s = con.createStatement(); + Connection conRetrieved = s.getConnection(); + + assertEquals(con.getClass(), conRetrieved.getClass()); + assertEquals(con, conRetrieved); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Ensures that the Statement proxy generated by the Connection handle throws the correct kind of + * exception. 
+ */ + @Test + public void testStatementProxy() { + Statement s = null; + try { + PooledConnection pc = getPooledConnection(); + con = pc.getConnection(); + s = con.createStatement(); + } catch (SQLException e) { + fail(e.getMessage()); + } + try { + s.executeQuery("SELECT * FROM THIS_TABLE_SHOULD_NOT_EXIST"); + fail("An SQL exception was not thrown that should have been"); + } catch (SQLException e) { + // This is the expected and correct path + } catch (Exception e) { + fail("bad exception; was expecting SQLException, not" + e.getClass().getName()); + } + } + + /** + * Ensures that a prepared statement generated by a proxied connection returns the proxied + * connection from getConnection() [not the physical connection]. + */ + @Test + public void testPreparedStatementConnection() { + try { + PooledConnection pc = getPooledConnection(); + con = pc.getConnection(); + PreparedStatement s = con.prepareStatement("select 'x'"); + Connection conRetrieved = s.getConnection(); + + assertEquals(con.getClass(), conRetrieved.getClass()); + assertEquals(con, conRetrieved); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Ensures that a callable statement generated by a proxied connection returns the proxied + * connection from getConnection() [not the physical connection]. + */ + @Test + public void testCallableStatementConnection() { + try { + PooledConnection pc = getPooledConnection(); + con = pc.getConnection(); + CallableStatement s = con.prepareCall("select 'x'"); + Connection conRetrieved = s.getConnection(); + + assertEquals(con.getClass(), conRetrieved.getClass()); + assertEquals(con, conRetrieved); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Ensure that a statement created from a pool can be used like any other statement in regard to + * pg extensions. 
+ */ + @Test + public void testStatementsProxyPGStatement() { + try { + PooledConnection pc = getPooledConnection(); + con = pc.getConnection(); + + Statement s = con.createStatement(); + boolean b = ((org.postgresql.PGStatement) s).isUseServerPrepare(); + + PreparedStatement ps = con.prepareStatement("select 'x'"); + b = ((org.postgresql.PGStatement) ps).isUseServerPrepare(); + + CallableStatement cs = con.prepareCall("select 'x'"); + b = ((org.postgresql.PGStatement) cs).isUseServerPrepare(); + + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * Helper class to remove a listener during event dispatching. + */ + private class RemoveClose implements ConnectionEventListener { + @Override + public void connectionClosed(ConnectionEvent event) { + ((PooledConnection) event.getSource()).removeConnectionEventListener(this); + } + + @Override + public void connectionErrorOccurred(ConnectionEvent event) { + ((PooledConnection) event.getSource()).removeConnectionEventListener(this); + } + } + + /** + * Helper class that implements the event listener interface, and counts the number of events it + * sees. 
+ */ + private class CountClose implements ConnectionEventListener { + private int count; + private int errorCount; + + @Override + public void connectionClosed(ConnectionEvent event) { + count++; + } + + @Override + public void connectionErrorOccurred(ConnectionEvent event) { + errorCount++; + } + + public int getCount() { + return count; + } + + public int getErrorCount() { + return errorCount; + } + + public void clear() { + count = errorCount = 0; + } + } + + @Test + public void testSerializable() throws IOException, ClassNotFoundException { + ConnectionPool pool = new ConnectionPool(); + pool.setDefaultAutoCommit(false); + pool.setServerName("db.myhost.com"); + pool.setDatabaseName("mydb"); + pool.setUser("user"); + pool.setPassword("pass"); + pool.setPortNumber(1111); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(pool); + + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + ObjectInputStream ois = new ObjectInputStream(bais); + ConnectionPool pool2 = (ConnectionPool) ois.readObject(); + + assertEquals(pool.isDefaultAutoCommit(), pool2.isDefaultAutoCommit()); + assertEquals(pool.getServerName(), pool2.getServerName()); + assertEquals(pool.getDatabaseName(), pool2.getDatabaseName()); + assertEquals(pool.getUser(), pool2.getUser()); + assertEquals(pool.getPassword(), pool2.getPassword()); + assertEquals(pool.getPortNumber(), pool2.getPortNumber()); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/OptionalTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/OptionalTestSuite.java new file mode 100644 index 0000000..703b310 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/OptionalTestSuite.java @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc2.optional; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +/** + * Test suite for the JDBC 2.0 Optional Package implementation. This includes the DataSource, + * ConnectionPoolDataSource, and PooledConnection implementations. + * + * @author Aaron Mulder (ammulder@chariotsolutions.com) + */ +@RunWith(Suite.class) +@Suite.SuiteClasses({ + BaseDataSourceFailoverUrlsTest.class, + CaseOptimiserDataSourceTest.class, + ConnectionPoolTest.class, + PoolingDataSourceTest.class, + SimpleDataSourceTest.class, + SimpleDataSourceWithSetURLTest.class, + SimpleDataSourceWithUrlTest.class, +}) +public class OptionalTestSuite { + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/PoolingDataSourceTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/PoolingDataSourceTest.java new file mode 100644 index 0000000..73a9824 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/PoolingDataSourceTest.java @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2.optional; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.fail; + +import org.postgresql.ds.common.BaseDataSource; +import org.postgresql.jdbc2.optional.PoolingDataSource; + +import org.junit.Test; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +/** + * Minimal tests for pooling DataSource. Needs many more. 
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc2.optional;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.fail;

import org.postgresql.ds.common.BaseDataSource;
import org.postgresql.jdbc2.optional.PoolingDataSource;

import org.junit.Test;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

/**
 * Minimal tests for pooling DataSource. Needs many more.
 *
 * @author Aaron Mulder (ammulder@chariotsolutions.com)
 */
public class PoolingDataSourceTest extends BaseDataSourceTest {
  private static final String DS_NAME = "JDBC 2 SE Test DataSource";

  /** Closes the pool before the shared teardown runs. */
  @Override
  public void tearDown() throws Exception {
    if (bds instanceof PoolingDataSource) {
      ((PoolingDataSource) bds).close();
    }
    super.tearDown();
  }

  /**
   * Creates and configures a new pooling DataSource with a small pool.
   */
  @Override
  protected void initializeDataSource() {
    if (bds == null) {
      bds = new PoolingDataSource();
      setupDataSource(bds);
      ((PoolingDataSource) bds).setDataSourceName(DS_NAME);
      ((PoolingDataSource) bds).setInitialConnections(2);
      ((PoolingDataSource) bds).setMaxConnections(10);
    }
  }

  /**
   * In this case, we *do* want it to be pooled.
   */
  @Override
  public void testNotPooledConnection() throws SQLException {
    con = getDataSourceConnection();
    String name = con.toString();
    con.close();
    con = getDataSourceConnection();
    String name2 = con.toString();
    con.close();
    assertEquals("Pooled DS doesn't appear to be pooling connections!", name, name2);
  }

  /**
   * In this case, the desired behavior is dereferencing.
   */
  @Override
  protected void compareJndiDataSource(BaseDataSource oldbds, BaseDataSource bds) {
    assertSame("DataSource was serialized or recreated, should have been dereferenced",
        bds, oldbds);
  }

  /**
   * Check that 2 DS instances can't use the same name.
   */
  @Test
  public void testCantReuseName() {
    initializeDataSource();
    PoolingDataSource second = new PoolingDataSource();
    try {
      second.setDataSourceName(DS_NAME);
      fail("Should have denied 2nd DataSource with same name");
    } catch (IllegalArgumentException e) {
      // expected: the name is already taken
    }
  }

  /**
   * Closing a Connection twice is not an error.
   */
  @Test
  public void testDoubleConnectionClose() throws SQLException {
    con = getDataSourceConnection();
    con.close();
    con.close();
  }

  /**
   * Closing a Statement twice is not an error.
   */
  @Test
  public void testDoubleStatementClose() throws SQLException {
    con = getDataSourceConnection();
    Statement stmt = con.createStatement();
    stmt.close();
    stmt.close();
    con.close();
  }

  /** equals/hashCode on a connection handle must be stable across close(). */
  @Test
  public void testConnectionObjectMethods() throws SQLException {
    con = getDataSourceConnection();

    Connection conRef = con;
    assertEquals(con, conRef);

    int hc1 = con.hashCode();
    con.close();
    int hc2 = con.hashCode();

    assertEquals(con, conRef);
    assertEquals(hc1, hc2);
  }

  /** equals/hashCode on a statement proxy must be stable across close(). */
  @Test
  public void testStatementObjectMethods() throws SQLException {
    con = getDataSourceConnection();

    Statement stmt = con.createStatement();
    ResultSet rs = stmt.executeQuery("SELECT 1");
    Statement stmtRef = stmt;

    assertEquals(stmt, stmtRef);
    // Currently we aren't proxying ResultSet, so this doesn't
    // work, see Bug #1010542.
    // assertEquals(stmt, rs.getStatement());

    int hc1 = stmt.hashCode();
    stmt.close();
    int hc2 = stmt.hashCode();

    assertEquals(stmt, stmtRef);
    assertEquals(hc1, hc2);
  }

}
+ */ + @Override + protected void initializeDataSource() { + if (bds == null) { + bds = new SimpleDataSource(); + setupDataSource(bds); + } + } + + @Test(expected = IllegalArgumentException.class) + public void testTypoPostgresUrl() { + PGSimpleDataSource ds = new PGSimpleDataSource(); + // this should fail because the protocol is wrong. + ds.setUrl("jdbc:postgres://localhost:5432/test"); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithSetURLTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithSetURLTest.java new file mode 100644 index 0000000..17ed36b --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithSetURLTest.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc2.optional; + +import static org.junit.Assert.assertEquals; +import static org.postgresql.Driver.parseURL; + +import org.postgresql.PGProperty; +import org.postgresql.jdbc2.optional.SimpleDataSource; +import org.postgresql.test.TestUtil; + +import org.junit.Test; + +import java.util.Properties; + +/** + * Performs the basic tests defined in the superclass. Just adds the configuration logic. + */ +public class SimpleDataSourceWithSetURLTest extends BaseDataSourceTest { + /** + * Creates and configures a new SimpleDataSource using setURL method. 
/*
 * Copyright (c) 2017, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc2.optional;

import static org.junit.Assert.assertEquals;
import static org.postgresql.Driver.parseURL;

import org.postgresql.PGProperty;
import org.postgresql.jdbc2.optional.SimpleDataSource;
import org.postgresql.test.TestUtil;

import org.junit.Test;

import java.util.Properties;

/**
 * Performs the basic tests defined in the superclass. Just adds the configuration logic.
 */
public class SimpleDataSourceWithSetURLTest extends BaseDataSourceTest {
  /**
   * Creates and configures a new SimpleDataSource using setURL method.
   */
  @Override
  protected void initializeDataSource() {
    if (bds == null) {
      bds = new SimpleDataSource();
      bds.setURL(String.format("jdbc:postgresql://%s:%d/%s?prepareThreshold=%d", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getPrepareThreshold()));
      bds.setUser(TestUtil.getUser());
      bds.setPassword(TestUtil.getPassword());
      bds.setProtocolVersion(TestUtil.getProtocolVersion());
    }
  }

  /** Round-trips the configured URL through the driver's URL parser. */
  @Test
  public void testGetURL() throws Exception {
    con = getDataSourceConnection();

    String url = bds.getURL();
    Properties parsed = parseURL(url, null);

    assertEquals(TestUtil.getServer(), parsed.getProperty(PGProperty.PG_HOST.getName()));
    assertEquals(Integer.toString(TestUtil.getPort()), parsed.getProperty(PGProperty.PG_PORT.getName()));
    assertEquals(TestUtil.getDatabase(), parsed.getProperty(PGProperty.PG_DBNAME.getName()));
    assertEquals(Integer.toString(TestUtil.getPrepareThreshold()), parsed.getProperty(PGProperty.PREPARE_THRESHOLD.getName()));
  }

  /** Verifies setURL decomposed the URL into the individual bean properties. */
  @Test
  public void testSetURL() throws Exception {
    initializeDataSource();

    assertEquals(TestUtil.getServer(), bds.getServerName());
    assertEquals(TestUtil.getPort(), bds.getPortNumber());
    assertEquals(TestUtil.getDatabase(), bds.getDatabaseName());
    assertEquals(TestUtil.getPrepareThreshold(), bds.getPrepareThreshold());
  }
}
+ */ + +package org.postgresql.test.jdbc2.optional; + +import org.postgresql.jdbc2.optional.SimpleDataSource; +import org.postgresql.test.TestUtil; + +/** + * Performs the basic tests defined in the superclass. Just adds the configuration logic. + * + * @author Aaron Mulder (ammulder@chariotsolutions.com) + */ +public class SimpleDataSourceWithUrlTest extends BaseDataSourceTest { + /** + * Creates and configures a new SimpleDataSource. + */ + @Override + protected void initializeDataSource() { + if (bds == null) { + bds = new SimpleDataSource(); + bds.setUrl("jdbc:postgresql://" + TestUtil.getServer() + ":" + TestUtil.getPort() + "/" + + TestUtil.getDatabase() + "?prepareThreshold=" + TestUtil.getPrepareThreshold()); + bds.setUser(TestUtil.getUser()); + bds.setPassword(TestUtil.getPassword()); + bds.setProtocolVersion(TestUtil.getProtocolVersion()); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeQueryParseTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeQueryParseTest.java new file mode 100644 index 0000000..58c7f8c --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeQueryParseTest.java @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
/*
 * Copyright (c) 2007, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc3;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.postgresql.core.NativeQuery;
import org.postgresql.core.Parser;
import org.postgresql.core.SqlCommandType;

import org.junit.jupiter.api.Test;

import java.sql.SQLException;
import java.util.List;

/**
 * Exercises {@link Parser#parseJdbcSql} over composite (multi-statement) queries, bind-parameter
 * rewriting ({@code ?} to {@code $n}) and command-type detection.
 *
 * <p>Note: raw {@code List} usages have been restored to {@code List<NativeQuery>}.</p>
 */
class CompositeQueryParseTest {

  @Test
  void emptyQuery() {
    assertEquals("", reparse("", true, false, true));
  }

  @Test
  void whitespaceQuery() {
    assertEquals("", reparse("    ", true, false, true));
  }

  @Test
  void onlyEmptyQueries() {
    assertEquals("", reparse(";;;;  ;  \n;\n", true, false, true));
  }

  @Test
  void simpleQuery() {
    assertEquals("select 1", reparse("select 1", true, false, true));
  }

  @Test
  void simpleBind() {
    assertEquals("select $1", reparse("select ?", true, true, true));
  }

  @Test
  void unquotedQuestionmark() {
    assertEquals("select '{\"key\": \"val\"}'::jsonb ? 'key'",
        reparse("select '{\"key\": \"val\"}'::jsonb ? 'key'", true, false, true));
  }

  @Test
  void repeatedQuestionmark() {
    // ?? is the escape for a literal ? operator
    assertEquals("select '{\"key\": \"val\"}'::jsonb ? 'key'",
        reparse("select '{\"key\": \"val\"}'::jsonb ?? 'key'", true, false, true));
  }

  @Test
  void quotedQuestionmark() {
    assertEquals("select '?'", reparse("select '?'", true, false, true));
  }

  @Test
  void doubleQuestionmark() {
    assertEquals("select '?', $1 ?=> $2", reparse("select '?', ? ??=> ?", true, true, true));
  }

  @Test
  void compositeBasic() {
    assertEquals("select 1;/*cut*/\n select 2", reparse("select 1; select 2", true, false, true));
  }

  @Test
  void compositeWithBinds() {
    assertEquals("select $1;/*cut*/\n select $1", reparse("select ?; select ?", true, true, true));
  }

  @Test
  void trailingSemicolon() {
    assertEquals("select 1", reparse("select 1;", true, false, true));
  }

  @Test
  void trailingSemicolonAndSpace() {
    assertEquals("select 1", reparse("select 1; ", true, false, true));
  }

  @Test
  void multipleTrailingSemicolons() {
    assertEquals("select 1", reparse("select 1;;;", true, false, true));
  }

  @Test
  void hasReturning() throws SQLException {
    List<NativeQuery> queries = Parser.parseJdbcSql("insert into foo (a,b,c) values (?,?,?) RetuRning a", true, true, false,
        true, true);
    NativeQuery query = queries.get(0);
    assertTrue(query.command.isReturningKeywordPresent(), "The parser should find the word returning");

    queries = Parser.parseJdbcSql("insert into foo (a,b,c) values (?,?,?)", true, true, false, true, true);
    query = queries.get(0);
    assertFalse(query.command.isReturningKeywordPresent(), "The parser should not find the word returning");

    queries = Parser.parseJdbcSql("insert into foo (a,b,c) values ('returning',?,?)", true, true, false,
        true, true);
    query = queries.get(0);
    assertFalse(query.command.isReturningKeywordPresent(), "The parser should not find the word returning as it is in quotes ");
  }

  @Test
  void select() throws SQLException {
    List<NativeQuery> queries;
    queries = Parser.parseJdbcSql("select 1 as returning from (update table)", true, true, false, true, true);
    NativeQuery query = queries.get(0);
    assertEquals(SqlCommandType.SELECT, query.command.getType(), "This is a select ");
    assertTrue(query.command.isReturningKeywordPresent(), "Returning is OK here as it is not an insert command ");
  }

  @Test
  void delete() throws SQLException {
    List<NativeQuery> queries = Parser.parseJdbcSql("DeLeTe from foo where a=1", true, true, false,
        true, true);
    NativeQuery query = queries.get(0);
    assertEquals(SqlCommandType.DELETE, query.command.getType(), "This is a delete command");
  }

  @Test
  void multiQueryWithBind() throws SQLException {
    // braces around (42) are required to puzzle the parser
    String sql = "INSERT INTO inttable(a) VALUES (?);SELECT (42)";
    List<NativeQuery> queries = Parser.parseJdbcSql(sql, true, true, true, true, true);
    NativeQuery query = queries.get(0);
    assertEquals("INSERT: INSERT INTO inttable(a) VALUES ($1)",
        query.command.getType() + ": " + query.nativeSql,
        "query(0) of " + sql);
    query = queries.get(1);
    assertEquals("SELECT: SELECT (42)",
        query.command.getType() + ": " + query.nativeSql,
        "query(1) of " + sql);
  }

  @Test
  void move() throws SQLException {
    List<NativeQuery> queries = Parser.parseJdbcSql("MoVe NEXT FROM FOO", true, true, false, true, true);
    NativeQuery query = queries.get(0);
    assertEquals(SqlCommandType.MOVE, query.command.getType(), "This is a move command");
  }

  @Test
  void update() throws SQLException {
    List<NativeQuery> queries;
    NativeQuery query;
    queries = Parser.parseJdbcSql("update foo set (a=?,b=?,c=?)", true, true, false, true, true);
    query = queries.get(0);
    assertEquals(SqlCommandType.UPDATE, query.command.getType(), "This is an UPDATE command");
  }

  @Test
  void insert() throws SQLException {
    List<NativeQuery> queries = Parser.parseJdbcSql("InSeRt into foo (a,b,c) values (?,?,?) returning a", true, true, false,
        true, true);
    NativeQuery query = queries.get(0);
    assertEquals(SqlCommandType.INSERT, query.command.getType(), "This is an INSERT command");

    queries = Parser.parseJdbcSql("select 1 as insert", true, true, false, true, true);
    query = queries.get(0);
    assertEquals(SqlCommandType.SELECT, query.command.getType(), "This is a SELECT command");
  }

  @Test
  void withSelect() throws SQLException {
    List<NativeQuery> queries;
    queries = Parser.parseJdbcSql("with update as (update foo set (a=?,b=?,c=?)) select * from update", true, true, false, true, true);
    NativeQuery query = queries.get(0);
    assertEquals(SqlCommandType.SELECT, query.command.getType(), "with ... () select");
  }

  @Test
  void withInsert() throws SQLException {
    List<NativeQuery> queries;
    queries = Parser.parseJdbcSql("with update as (update foo set (a=?,b=?,c=?)) insert into table(select) values(1)", true, true, false, true, true);
    NativeQuery query = queries.get(0);
    assertEquals(SqlCommandType.INSERT, query.command.getType(), "with ... () insert");
  }

  @Test
  void multipleEmptyQueries() {
    assertEquals("select 1;/*cut*/\n" + "select 2",
        reparse("select 1; ;\t;select 2", true, false, true));
  }

  @Test
  void compositeWithComments() {
    assertEquals("select 1;/*cut*/\n" + "/* noop */;/*cut*/\n" + "select 2",
        reparse("select 1;/* noop */;select 2", true, false, true));
  }

  /**
   * Parses {@code query} and re-joins the resulting native queries for comparison.
   */
  private String reparse(String query, boolean standardConformingStrings, boolean withParameters,
      boolean splitStatements) {
    try {
      return toString(
          Parser.parseJdbcSql(query, standardConformingStrings, withParameters, splitStatements, false, true));
    } catch (SQLException e) {
      throw new IllegalStateException("Parser.parseJdbcSql: " + e.getMessage(), e);
    }
  }

  /** Joins native SQL strings with a visible separator marking statement cuts. */
  private String toString(List<NativeQuery> queries) {
    StringBuilder sb = new StringBuilder();
    for (NativeQuery query : queries) {
      if (sb.length() != 0) {
        sb.append(";/*cut*/\n");
      }
      sb.append(query.nativeSql);
    }
    return sb.toString();
  }
}
/*
 * Copyright (c) 2007, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc3;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.postgresql.PGConnection;
import org.postgresql.core.ServerVersion;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.test.TestUtil;
import org.postgresql.util.PGobject;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.sql.Array;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

/**
 * Tests reading and writing PostgreSQL composite types (plain, nested, schema-qualified) through
 * {@link PGobject} and SQL arrays.
 */
class CompositeTest {

  private Connection conn;

  @BeforeAll
  static void beforeClass() throws Exception {
    Connection conn = TestUtil.openDB();
    try {
      Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3), "uuid requires PostgreSQL 8.3+");
    } finally {
      conn.close();
    }
  }

  @BeforeEach
  void setUp() throws Exception {
    conn = TestUtil.openDB();
    TestUtil.createSchema(conn, "\"Composites\"");
    TestUtil.createCompositeType(conn, "simplecompositetest", "i int, d decimal, u uuid");
    TestUtil.createCompositeType(conn, "nestedcompositetest", "t text, s simplecompositetest");
    TestUtil.createCompositeType(conn, "\"Composites\".\"ComplexCompositeTest\"",
        "l bigint[], n nestedcompositetest[], s simplecompositetest");
    TestUtil.createTable(conn, "compositetabletest",
        "s simplecompositetest, cc \"Composites\".\"ComplexCompositeTest\"[]");
    TestUtil.createTable(conn, "\"Composites\".\"Table\"",
        "s simplecompositetest, cc \"Composites\".\"ComplexCompositeTest\"[]");
  }

  @AfterEach
  void tearDown() throws SQLException {
    // Drop in reverse dependency order: tables first, then types, then the schema.
    TestUtil.dropTable(conn, "\"Composites\".\"Table\"");
    TestUtil.dropTable(conn, "compositetabletest");
    TestUtil.dropType(conn, "\"Composites\".\"ComplexCompositeTest\"");
    TestUtil.dropType(conn, "nestedcompositetest");
    TestUtil.dropType(conn, "simplecompositetest");
    TestUtil.dropSchema(conn, "\"Composites\"");
    TestUtil.closeDB(conn);
  }

  @Test
  void simpleSelect() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("SELECT '(1,2.2,)'::simplecompositetest");
    ResultSet rs = pstmt.executeQuery();
    assertTrue(rs.next());
    PGobject pgo = (PGobject) rs.getObject(1);
    assertEquals("simplecompositetest", pgo.getType());
    assertEquals("(1,2.2,)", pgo.getValue());
  }

  @Test
  void complexSelect() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement(
        "SELECT '(\"{1,2}\",{},\"(1,2.2,)\")'::\"Composites\".\"ComplexCompositeTest\"");
    ResultSet rs = pstmt.executeQuery();
    assertTrue(rs.next());
    PGobject pgo = (PGobject) rs.getObject(1);
    assertEquals("\"Composites\".\"ComplexCompositeTest\"", pgo.getType());
    assertEquals("(\"{1,2}\",{},\"(1,2.2,)\")", pgo.getValue());
  }

  @Test
  void simpleArgumentSelect() throws SQLException {
    Assumptions.assumeTrue(conn.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE, "Skip if running in simple query mode");
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?");
    PGobject sent = new PGobject();
    sent.setType("simplecompositetest");
    sent.setValue("(1,2.2,)");
    pstmt.setObject(1, sent);
    ResultSet rs = pstmt.executeQuery();
    assertTrue(rs.next());
    PGobject received = (PGobject) rs.getObject(1);
    assertEquals(sent, received);
  }

  @Test
  void complexArgumentSelect() throws SQLException {
    Assumptions.assumeTrue(conn.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE, "Skip if running in simple query mode");
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?");
    PGobject sent = new PGobject();
    sent.setType("\"Composites\".\"ComplexCompositeTest\"");
    sent.setValue("(\"{1,2}\",{},\"(1,2.2,)\")");
    pstmt.setObject(1, sent);
    ResultSet rs = pstmt.executeQuery();
    assertTrue(rs.next());
    PGobject received = (PGobject) rs.getObject(1);
    assertEquals(sent, received);
  }

  @Test
  void compositeFromTable() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO compositetabletest VALUES(?, ?)");
    PGobject pgo1 = new PGobject();
    pgo1.setType("public.simplecompositetest");
    pgo1.setValue("(1,2.2,)");
    pstmt.setObject(1, pgo1);
    String[] ctArr = new String[1];
    ctArr[0] = "(\"{1,2}\",{},\"(1,2.2,)\")";
    Array pgarr1 = conn.createArrayOf("\"Composites\".\"ComplexCompositeTest\"", ctArr);
    pstmt.setArray(2, pgarr1);
    int res = pstmt.executeUpdate();
    assertEquals(1, res);
    pstmt = conn.prepareStatement("SELECT * FROM compositetabletest");
    ResultSet rs = pstmt.executeQuery();
    assertTrue(rs.next());
    PGobject pgo2 = (PGobject) rs.getObject(1);
    Array pgarr2 = (Array) rs.getObject(2);
    assertEquals("simplecompositetest", pgo2.getType());
    assertEquals("\"Composites\".\"ComplexCompositeTest\"", pgarr2.getBaseTypeName());
    Object[] pgobjarr2 = (Object[]) pgarr2.getArray();
    assertEquals(1, pgobjarr2.length);
    PGobject arr2Elem = (PGobject) pgobjarr2[0];
    assertEquals("\"Composites\".\"ComplexCompositeTest\"", arr2Elem.getType());
    assertEquals("(\"{1,2}\",{},\"(1,2.2,)\")", arr2Elem.getValue());
    rs.close();
    // Selecting the row itself yields the table's own composite type.
    pstmt = conn.prepareStatement("SELECT c FROM compositetabletest c");
    rs = pstmt.executeQuery();
    assertTrue(rs.next());
    PGobject pgo3 = (PGobject) rs.getObject(1);
    assertEquals("compositetabletest", pgo3.getType());
    assertEquals("(\"(1,2.2,)\",\"{\"\"(\\\\\"\"{1,2}\\\\\"\",{},\\\\\"\"(1,2.2,)\\\\\"\")\"\"}\")",
        pgo3.getValue());
  }

  @Test
  void nullArrayElement() throws SQLException {
    PreparedStatement pstmt =
        conn.prepareStatement("SELECT array[NULL, NULL]::compositetabletest[]");
    ResultSet rs = pstmt.executeQuery();
    assertTrue(rs.next());
    Array arr = rs.getArray(1);
    assertEquals("compositetabletest", arr.getBaseTypeName());
    Object[] items = (Object[]) arr.getArray();
    assertEquals(2, items.length);
    assertNull(items[0]);
    assertNull(items[1]);
  }

  @Test
  void tableMetadata() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO compositetabletest VALUES(?, ?)");
    PGobject pgo1 = new PGobject();
    pgo1.setType("public.simplecompositetest");
    pgo1.setValue("(1,2.2,)");
    pstmt.setObject(1, pgo1);
    String[] ctArr = new String[1];
    ctArr[0] = "(\"{1,2}\",{},\"(1,2.2,)\")";
    Array pgarr1 = conn.createArrayOf("\"Composites\".\"ComplexCompositeTest\"", ctArr);
    pstmt.setArray(2, pgarr1);
    int res = pstmt.executeUpdate();
    assertEquals(1, res);
    pstmt = conn.prepareStatement("SELECT t FROM compositetabletest t");
    ResultSet rs = pstmt.executeQuery();
    assertTrue(rs.next());
    String name = rs.getMetaData().getColumnTypeName(1);
    assertEquals("compositetabletest", name);
  }

  @Test
  void complexTableNameMetadata() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO \"Composites\".\"Table\" VALUES(?, ?)");
    PGobject pgo1 = new PGobject();
    pgo1.setType("public.simplecompositetest");
    pgo1.setValue("(1,2.2,)");
    pstmt.setObject(1, pgo1);
    String[] ctArr = new String[1];
    ctArr[0] = "(\"{1,2}\",{},\"(1,2.2,)\")";
    Array pgarr1 = conn.createArrayOf("\"Composites\".\"ComplexCompositeTest\"", ctArr);
    pstmt.setArray(2, pgarr1);
    int res = pstmt.executeUpdate();
    assertEquals(1, res);
    pstmt = conn.prepareStatement("SELECT t FROM \"Composites\".\"Table\" t");
    ResultSet rs = pstmt.executeQuery();
    assertTrue(rs.next());
    String name = rs.getMetaData().getColumnTypeName(1);
    assertEquals("\"Composites\".\"Table\"", name);
  }
}
/*
 * Copyright (c) 2008, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc3;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.postgresql.test.TestUtil;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.Types;

/**
 * Verifies that {@link DatabaseMetaData#getColumns} reports a domain-typed
 * column as {@link Types#DISTINCT} with the domain name and the underlying
 * base type.
 *
 * <p>Fix over the previous revision: the {@link Statement}s used in
 * setup/teardown and the {@link ResultSet} in the test are now closed, and
 * {@code closeDB} runs even if a DROP fails.</p>
 */
class DatabaseMetaDataTest {

  /** Connection opened fresh for every test. */
  private Connection conn;

  @BeforeEach
  void setUp() throws Exception {
    conn = TestUtil.openDB();
    try (Statement stmt = conn.createStatement()) {
      stmt.execute("CREATE DOMAIN mydom AS int");
      stmt.execute("CREATE TABLE domtab (a mydom)");
    }
  }

  @AfterEach
  void tearDown() throws Exception {
    try (Statement stmt = conn.createStatement()) {
      // Table depends on the domain, so drop the table first.
      stmt.execute("DROP TABLE domtab");
      stmt.execute("DROP DOMAIN mydom");
    } finally {
      TestUtil.closeDB(conn);
    }
  }

  @Test
  void getColumnsForDomain() throws Exception {
    DatabaseMetaData dbmd = conn.getMetaData();

    try (ResultSet rs = dbmd.getColumns("%", "%", "domtab", "%")) {
      assertTrue(rs.next());
      assertEquals("a", rs.getString("COLUMN_NAME"));
      // Domains are surfaced as DISTINCT, named after the domain, with the
      // base type reported via SOURCE_DATA_TYPE.
      assertEquals(Types.DISTINCT, rs.getInt("DATA_TYPE"));
      assertEquals("mydom", rs.getString("TYPE_NAME"));
      assertEquals(Types.INTEGER, rs.getInt("SOURCE_DATA_TYPE"));
      assertFalse(rs.next());
    }
  }

}
/*
 * Copyright (c) 2019, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc3;

import org.postgresql.core.ServerVersion;
import org.postgresql.test.TestUtil;
import org.postgresql.test.jdbc2.BaseTest4;

import java.sql.SQLException;
import java.sql.Statement;

/**
 * Shared fixture for the escapeSyntaxCallMode tests: creates two plpgsql
 * functions ({@code myiofunc}, {@code mysumfunc}) and, on servers that support
 * procedures (11+), a procedure ({@code myioproc}); drops them in teardown.
 *
 * <p>Fix over the previous revision: the {@link Statement} created in
 * {@link #setUp()} is now closed instead of being leaked.</p>
 */
public class EscapeSyntaxCallModeBaseTest extends BaseTest4 {

  @Override
  public void setUp() throws Exception {
    super.setUp();
    Statement stmt = con.createStatement();
    try {
      stmt.execute(
          "CREATE OR REPLACE FUNCTION myiofunc(a INOUT int, b OUT int) AS 'BEGIN b := a; a := 1; END;' LANGUAGE plpgsql");
      stmt.execute(
          "CREATE OR REPLACE FUNCTION mysumfunc(a int, b int) returns int AS 'BEGIN return a + b; END;' LANGUAGE plpgsql");
      // CREATE PROCEDURE only exists on PostgreSQL 11+.
      if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
        stmt.execute(
            "CREATE OR REPLACE PROCEDURE myioproc(a INOUT int, b INOUT int) AS 'BEGIN b := a; a := 1; END;' LANGUAGE plpgsql");
      }
    } finally {
      stmt.close();
    }
  }

  @Override
  public void tearDown() throws SQLException {
    Statement stmt = con.createStatement();
    try {
      stmt.execute("drop function myiofunc(a INOUT int, b OUT int) ");
      stmt.execute("drop function mysumfunc(a int, b int) ");
      if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
        stmt.execute("drop procedure myioproc(a INOUT int, b INOUT int) ");
      }
    } finally {
      stmt.close();
    }
    super.tearDown();
  }

}
package org.postgresql.test.jdbc3;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.postgresql.PGProperty;
import org.postgresql.core.ServerVersion;
import org.postgresql.jdbc.EscapeSyntaxCallMode;
import org.postgresql.util.PSQLState;

import org.junit.Test;

import java.sql.CallableStatement;
import java.sql.SQLException;
import java.sql.Types;
import java.util.Properties;

/**
 * Tests escapeSyntaxCallMode=callIfNoReturn: the driver emits CALL when the
 * JDBC escape has no return parameter and SELECT when it does.
 *
 * <p>Fix over the previous revision: the failure message in
 * {@link #testInvokeFunctionHavingReturnParameter()} referred to a
 * non-existent {@code mysumproc}; the function under test is
 * {@code mysumfunc}.</p>
 */
public class EscapeSyntaxCallModeCallIfNoReturnTest extends EscapeSyntaxCallModeBaseTest {

  @Override
  protected void updateProperties(Properties props) {
    super.updateProperties(props);
    PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.CALL_IF_NO_RETURN.value());
  }

  @Test
  public void testInvokeFunction() throws Throwable {
    // escapeSyntaxCallMode=callIfNoReturn will cause a CALL statement to be used for the JDBC escape call
    // syntax used below (since no return parameter is specified). "myiofunc" is a function, so the
    // attempted invocation should fail.
    PSQLState expected = PSQLState.WRONG_OBJECT_TYPE;
    assumeCallableStatementsSupported();
    assumeMinimumServerVersion(ServerVersion.v11);

    CallableStatement cs = con.prepareCall("{ call myiofunc(?,?) }");
    cs.registerOutParameter(1, Types.INTEGER);
    cs.registerOutParameter(2, Types.INTEGER);
    cs.setInt(1, 10);
    try {
      cs.execute();
      fail("Should throw an exception");
    } catch (SQLException ex) {
      assertEquals(expected.getState(), ex.getSQLState());
    }
  }

  @Test
  public void testInvokeFunctionHavingReturnParameter() throws Throwable {
    // escapeSyntaxCallMode=callIfNoReturn will cause a SELECT statement to be used for the JDBC escape call
    // syntax used below (since a return parameter is specified). "mysumfunc" is a function, so the
    // invocation should succeed.
    assumeCallableStatementsSupported();
    CallableStatement cs = con.prepareCall("{ ? = call mysumfunc(?,?) }");
    cs.registerOutParameter(1, Types.INTEGER);
    cs.setInt(2, 10);
    cs.setInt(3, 20);
    cs.execute();
    int ret = cs.getInt(1);
    assertTrue("Expected mysumfunc(10,20) to return 30 but returned " + ret, ret == 30);
  }

  @Test
  public void testInvokeProcedure() throws Throwable {
    // escapeSyntaxCallMode=callIfNoReturn will cause a CALL statement to be used for the JDBC escape call
    // syntax used below (since there is no return parameter specified). "myioproc" is a procedure, so the
    // invocation should succeed.
    assumeCallableStatementsSupported();
    assumeMinimumServerVersion(ServerVersion.v11);
    CallableStatement cs = con.prepareCall("{call myioproc(?,?)}");
    cs.registerOutParameter(1, Types.INTEGER);
    cs.registerOutParameter(2, Types.INTEGER);
    cs.setInt(1, 10);
    cs.setInt(2, 20);
    cs.execute();
    // Expected output: a==1 (param 1), b==10 (param 2)
    int a = cs.getInt(1);
    int b = cs.getInt(2);
    assertTrue("Expected myioproc() to return output parameter values 1,10 but returned " + a + "," + b, (a == 1 && b == 10));
  }

}
package org.postgresql.test.jdbc3;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.postgresql.PGProperty;
import org.postgresql.core.ServerVersion;
import org.postgresql.jdbc.EscapeSyntaxCallMode;
import org.postgresql.test.TestUtil;
import org.postgresql.util.PSQLState;

import org.junit.Test;

import java.sql.CallableStatement;
import java.sql.SQLException;
import java.sql.Types;
import java.util.Properties;

/**
 * Tests escapeSyntaxCallMode=call: the driver always emits a CALL statement
 * for the JDBC escape syntax, so invoking functions fails while invoking
 * procedures succeeds.
 */
public class EscapeSyntaxCallModeCallTest extends EscapeSyntaxCallModeBaseTest {

  @Override
  protected void updateProperties(Properties props) {
    super.updateProperties(props);
    PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.CALL.value());
  }

  /** CALL against a plain function must be rejected by the server. */
  @Test
  public void testInvokeFunction() throws Throwable {
    // "myiofunc" is a function; under mode=call the driver sends CALL, which
    // the server rejects with WRONG_OBJECT_TYPE.
    PSQLState expectedState = PSQLState.WRONG_OBJECT_TYPE;
    assumeCallableStatementsSupported();
    assumeMinimumServerVersion(ServerVersion.v11);

    CallableStatement call = con.prepareCall("{ call myiofunc(?,?) }");
    call.registerOutParameter(1, Types.INTEGER);
    call.registerOutParameter(2, Types.INTEGER);
    call.setInt(1, 10);
    try {
      call.execute();
      fail("Should throw an exception");
    } catch (SQLException ex) {
      assertEquals(expectedState.getState(), ex.getSQLState());
    }
  }

  /** CALL with a return parameter against a function must also fail. */
  @Test
  public void testInvokeFunctionHavingReturnParameter() throws Throwable {
    // "mysumfunc" is a function; CALL is still emitted, so this fails.
    // PostgreSQL 14 changed the reported error to "undefined function".
    PSQLState expectedState =
        TestUtil.haveMinimumServerVersion(con, ServerVersion.v14)
            ? PSQLState.UNDEFINED_FUNCTION
            : PSQLState.WRONG_OBJECT_TYPE;

    assumeCallableStatementsSupported();
    assumeMinimumServerVersion(ServerVersion.v11);
    CallableStatement call = con.prepareCall("{ ? = call mysumfunc(?,?) }");
    call.registerOutParameter(1, Types.INTEGER);
    call.setInt(2, 10);
    call.setInt(3, 20);
    try {
      call.execute();
      fail("Should throw an exception");
    } catch (SQLException ex) {
      assertEquals(expectedState.getState(), ex.getSQLState());
    }
  }

  /** CALL against a real procedure succeeds and returns its INOUT values. */
  @Test
  public void testInvokeProcedure() throws Throwable {
    // "myioproc" is a procedure, so the CALL statement is exactly right.
    assumeCallableStatementsSupported();
    assumeMinimumServerVersion(ServerVersion.v11);
    CallableStatement call = con.prepareCall("{call myioproc(?,?)}");
    call.registerOutParameter(1, Types.INTEGER);
    call.registerOutParameter(2, Types.INTEGER);
    call.setInt(1, 10);
    call.setInt(2, 20);
    call.execute();
    // The procedure swaps: b := a; a := 1  =>  (1, 10).
    int a = call.getInt(1);
    int b = call.getInt(2);
    assertTrue("Expected myioproc() to return output parameter values 1,10 but returned " + a + "," + b, (a == 1 && b == 10));
  }

}
package org.postgresql.test.jdbc3;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.postgresql.PGProperty;
import org.postgresql.core.ServerVersion;
import org.postgresql.jdbc.EscapeSyntaxCallMode;
import org.postgresql.util.PSQLState;

import org.junit.Test;

import java.sql.CallableStatement;
import java.sql.SQLException;
import java.sql.Types;
import java.util.Properties;

/**
 * Tests escapeSyntaxCallMode=select: the driver always emits a SELECT
 * statement for the JDBC escape syntax, so invoking functions succeeds while
 * invoking procedures fails.
 */
public class EscapeSyntaxCallModeSelectTest extends EscapeSyntaxCallModeBaseTest {

  @Override
  protected void updateProperties(Properties props) {
    super.updateProperties(props);
    PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.SELECT.value());
  }

  /** SELECT against a function with INOUT/OUT parameters succeeds. */
  @Test
  public void testInvokeFunction() throws Throwable {
    // "myiofunc" is a function; mode=select emits SELECT, which is correct.
    assumeCallableStatementsSupported();
    CallableStatement call = con.prepareCall("{ call myiofunc(?,?) }");
    call.registerOutParameter(1, Types.INTEGER);
    call.registerOutParameter(2, Types.INTEGER);
    call.setInt(1, 10);
    call.execute();
    // The function swaps: b := a; a := 1  =>  (1, 10).
    int a = call.getInt(1);
    int b = call.getInt(2);
    assertTrue("Expected myiofunc() to return output parameter values 1,10 but returned " + a + "," + b, (a == 1 && b == 10));
  }

  /** SELECT with an explicit return parameter against a function succeeds. */
  @Test
  public void testInvokeFunctionHavingReturnParameter() throws Throwable {
    // "mysumfunc" is a function, so the SELECT invocation works.
    assumeCallableStatementsSupported();
    CallableStatement call = con.prepareCall("{ ? = call mysumfunc(?,?) }");
    call.registerOutParameter(1, Types.INTEGER);
    call.setInt(2, 10);
    call.setInt(3, 20);
    call.execute();
    int sum = call.getInt(1);
    assertTrue("Expected mysumfunc(10,20) to return 30 but returned " + sum, sum == 30);
  }

  /** SELECT against a procedure must be rejected by the server. */
  @Test
  public void testInvokeProcedure() throws Throwable {
    // "myioproc" is a procedure; SELECT-ing it yields WRONG_OBJECT_TYPE.
    assumeCallableStatementsSupported();
    assumeMinimumServerVersion(ServerVersion.v11);
    CallableStatement call = con.prepareCall("{call myioproc(?,?)}");
    call.registerOutParameter(1, Types.INTEGER);
    call.registerOutParameter(2, Types.INTEGER);
    call.setInt(1, 10);
    call.setInt(2, 20);
    try {
      call.execute();
      fail("Should throw an exception");
    } catch (SQLException ex) {
      assertEquals(PSQLState.WRONG_OBJECT_TYPE.getState(), ex.getSQLState());
    }
  }

}
package org.postgresql.test.jdbc3;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.postgresql.PGStatement;
import org.postgresql.core.ServerVersion;
import org.postgresql.test.TestUtil;
import org.postgresql.test.jdbc2.BaseTest4;
import org.postgresql.util.PSQLState;

import org.junit.Assume;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collection;

/**
 * Parameterized tests for {@link Statement#getGeneratedKeys()}: every test runs
 * with each combination of an explicit RETURNING clause in the SQL (none, "a",
 * "a, b", "*") and binary/text transfer mode.
 */
@RunWith(Parameterized.class)
public class GeneratedKeysTest extends BaseTest4 {
  /** Which RETURNING clause, if any, is appended to the statements under test. */
  public enum ReturningInQuery {
    A("a"),
    AB("a", "b"),
    STAR("*"),
    NO();
    final String[] columns;

    ReturningInQuery(String... columns) {
      this.columns = columns;
    }

    /** Number of columns the clause yields; "*" means "all of them". */
    public int columnsReturned() {
      if (columns.length == 1 && columns[0].charAt(0) == '*') {
        return 100500; // does not matter much, the meaning is "every possible column"
      }
      return columns.length;
    }

    /** The literal " returning ..." suffix, or "" when no clause applies. */
    public String getClause() {
      if (columnsReturned() == 0) {
        return "";
      }
      return " returning " + String.join(", ", columns);
    }
  }

  private final ReturningInQuery returningInQuery;
  private final String returningClause;

  public GeneratedKeysTest(ReturningInQuery returningInQuery, BinaryMode binaryMode) throws Exception {
    this.returningInQuery = returningInQuery;
    this.returningClause = returningInQuery.getClause();
    setBinaryMode(binaryMode);
  }

  /** Cross product of RETURNING variants and binary modes. */
  @Parameterized.Parameters(name = "returningInQuery = {0}, binary = {1}")
  public static Iterable data() {
    Collection combos = new ArrayList<>();
    for (ReturningInQuery returning : ReturningInQuery.values()) {
      for (BinaryMode mode : BinaryMode.values()) {
        combos.add(new Object[]{returning, mode});
      }
    }
    return combos;
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    // "a" is a serial column so the server generates keys for it.
    TestUtil.createTempTable(con, "genkeys", "a serial, b varchar(5), c int");
  }

  @Override
  public void tearDown() throws SQLException {
    TestUtil.dropTable(con, "genkeys");
    super.tearDown();
  }

  @Test
  public void testGeneratedKeys() throws SQLException {
    testGeneratedKeysWithSuffix("");
  }

  private void testGeneratedKeysWithSuffix(String suffix) throws SQLException {
    Statement stmt = con.createStatement();
    int count = stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause + suffix,
        Statement.RETURN_GENERATED_KEYS);
    assertEquals(1, count);
    assert1a2(stmt.getGeneratedKeys());
  }

  /** Asserts the generated-keys row for the (1, 'a', 2) insert. */
  private void assert1a2(ResultSet rs) throws SQLException {
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    assertEquals(1, rs.getInt("a"));
    if (returningInQuery.columnsReturned() >= 2) {
      assertEquals("a", rs.getString(2));
      assertEquals("a", rs.getString("b"));
    }
    if (returningInQuery.columnsReturned() >= 3) {
      assertEquals("2", rs.getString(3));
      assertEquals(2, rs.getInt("c"));
    }
    assertFalse(rs.next());
  }

  @Test
  public void testStatementUpdateCount() throws SQLException {
    Statement stmt = con.createStatement();
    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause,
        Statement.RETURN_GENERATED_KEYS);
    // The generated keys must not disturb the regular update-count bookkeeping.
    assertEquals(1, stmt.getUpdateCount());
    assertNull(stmt.getResultSet());
    assertFalse(stmt.getMoreResults());
  }

  @Test
  public void testCloseStatementClosesRS() throws SQLException {
    Statement stmt = con.createStatement();
    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause,
        Statement.RETURN_GENERATED_KEYS);
    ResultSet rs = stmt.getGeneratedKeys();
    stmt.close();
    assertTrue("statement was closed, thus the resultset should be closed as well", rs.isClosed());
    try {
      rs.next();
      fail("Can't operate on a closed result set.");
    } catch (SQLException sqle) {
      // expected: the result set is closed
    }
  }

  @Test
  public void testReturningWithTrailingSemicolon() throws SQLException {
    testGeneratedKeysWithSuffix("; ");
  }

  @Test
  public void testEmptyRSWithoutReturning() throws SQLException {
    Statement stmt = con.createStatement();
    try {
      int count =
          stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause + "; ",
              Statement.NO_GENERATED_KEYS);
      assertEquals(1, count);
      if (returningInQuery.columnsReturned() > 0) {
        fail(
            "A result was returned when none was expected error should happen when executing executeUpdate('... returning ...')");
      }
    } catch (SQLException e) {
      if (returningInQuery.columnsReturned() > 0 && "0100E".equals(e.getSQLState())) {
        // A result was returned when none was expected
        return; // just as expected
      }
      throw e;
    }
    ResultSet rs = stmt.getGeneratedKeys();
    assertFalse("Statement.NO_GENERATED_KEYS => stmt.getGeneratedKeys() should be empty", rs.next());
  }

  @Test
  public void testMultipleRows() throws SQLException {
    Statement stmt = con.createStatement();
    int count = stmt.executeUpdate(
        "INSERT INTO genkeys VALUES (1, 'a', 2), (2, 'b', 4)" + returningClause + "; ",
        new String[]{"c", "b"});
    assertEquals(2, count);
    ResultSet rs = stmt.getGeneratedKeys();
    assertTrue(rs.next());
    assertCB1(rs);
    assertTrue(rs.next());
    assertCB2(rs);
    assertFalse(rs.next());
  }

  @Test
  public void testSerialWorks() throws SQLException {
    Statement stmt = con.createStatement();
    int count = stmt.executeUpdate(
        "INSERT/*fool parser*/ INTO genkeys (b,c) VALUES ('a', 2), ('b', 4)" + returningClause + "; ",
        new String[]{"a"});
    assertEquals(2, count);
    // The serial column must produce consecutive generated values.
    ResultSet rs = stmt.getGeneratedKeys();
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    assertTrue(rs.next());
    assertEquals(2, rs.getInt(1));
    assertFalse(rs.next());
  }

  @Test
  public void testUpdate() throws SQLException {
    Statement stmt = con.createStatement();
    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 3)");
    stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
    stmt.executeUpdate("UPDATE genkeys SET c=2 WHERE a = 1" + returningClause,
        new String[]{"c", "b"});
    ResultSet rs = stmt.getGeneratedKeys();
    assertTrue(rs.next());
    assertCB1(rs);
    assertFalse(rs.next());
  }

  @Test
  public void testWithInsertInsert() throws SQLException {
    assumeMinimumServerVersion(ServerVersion.v9_1);
    Statement stmt = con.createStatement();
    // Only the outer INSERT's RETURNING feeds getGeneratedKeys.
    int count = stmt.executeUpdate(
        "WITH x as (INSERT INTO genkeys (b,c) VALUES ('a', 2) returning c) insert into genkeys(a,b,c) VALUES (1, 'a', 2)" + returningClause + "",
        new String[]{"c", "b"});
    assertEquals(1, count);
    ResultSet rs = stmt.getGeneratedKeys();
    assertTrue(rs.next());
    assertCB1(rs);
    assertFalse(rs.next());
  }

  @Test
  public void testWithInsertSelect() throws SQLException {
    assumeMinimumServerVersion(ServerVersion.v9_1);
    Assume.assumeTrue(returningInQuery != ReturningInQuery.NO);
    Statement stmt = con.createStatement();
    int count = stmt.executeUpdate(
        "WITH x as (INSERT INTO genkeys(a,b,c) VALUES (1, 'a', 2) " + returningClause
            + ") select * from x",
        new String[]{"c", "b"});
    assertEquals("rowcount", -1, count);
    // TODO: should SELECT produce rows through getResultSet or getGeneratedKeys?
    ResultSet rs = stmt.getResultSet();
    assertTrue(rs.next());
    assertCB1(rs);
    assertFalse(rs.next());
  }

  @Test
  public void testDelete() throws SQLException {
    Statement stmt = con.createStatement();
    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)");
    stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
    stmt.executeUpdate("DELETE FROM genkeys WHERE a = 1" + returningClause,
        new String[]{"c", "b"});
    ResultSet rs = stmt.getGeneratedKeys();
    assertTrue(rs.next());
    assertCB1(rs);
    assertFalse(rs.next());
  }

  @Test
  public void testPSUpdate() throws SQLException {
    Statement stmt = con.createStatement();
    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', -3)");
    stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
    stmt.close();

    PreparedStatement ps =
        con.prepareStatement("UPDATE genkeys SET c=? WHERE a = ?" + returningClause, new String[]{"c", "b"});
    ps.setInt(1, 2);
    ps.setInt(2, 1);
    assertEquals(1, ps.executeUpdate());
    ResultSet rs = ps.getGeneratedKeys();
    assertTrue(rs.next());
    assertCB1(rs);
    assertFalse(rs.next());
  }

  @Test
  public void testPSDelete() throws SQLException {
    Statement stmt = con.createStatement();
    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)");
    stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
    stmt.close();

    PreparedStatement ps =
        con.prepareStatement("DELETE FROM genkeys WHERE a = ?" + returningClause, new String[]{"c", "b"});

    // Re-executing the same prepared statement must refresh the keys each time.
    ps.setInt(1, 1);
    assertEquals(1, ps.executeUpdate());
    ResultSet rs = ps.getGeneratedKeys();
    assertTrue(rs.next());
    assertCB1(rs);
    assertFalse(rs.next());

    ps.setInt(1, 2);
    assertEquals(1, ps.executeUpdate());
    rs = ps.getGeneratedKeys();
    assertTrue(rs.next());
    assertCB2(rs);
    assertFalse(rs.next());
  }

  /** Asserts the key row produced by the first genkeys row (1, 'a', 2). */
  private void assertCB1(ResultSet rs) throws SQLException {
    ResultSetMetaData rsmd = rs.getMetaData();
    StringBuilder joined = new StringBuilder();
    for (int col = 1; col <= rsmd.getColumnCount(); col++) {
      joined.append(col > 1 ? ", " : "").append(rsmd.getColumnName(col));
    }
    String columnNames = joined.toString();
    switch (returningInQuery) {
      case NO:
        assertEquals("Two columns should be returned since returning clause was empty and {c, b} was requested via API",
            "c, b", columnNames);
        assertEquals(2, rs.getInt(1));
        assertEquals("a", rs.getString(2));
        assertEquals(2, rs.getInt("c"));
        assertEquals("a", rs.getString("b"));
        break;
      case A:
        assertEquals("Just one column should be returned since returning clause was " + returningClause,
            "a", columnNames);
        assertEquals(1, rs.getInt(1));
        assertEquals(1, rs.getInt("a"));
        break;
      case AB:
        assertEquals("Two columns should be returned since returning clause was " + returningClause,
            "a, b", columnNames);
        assertEquals(1, rs.getInt(1));
        assertEquals("a", rs.getString(2));
        assertEquals(1, rs.getInt("a"));
        assertEquals("a", rs.getString("b"));
        break;
      case STAR:
        assertEquals("Three columns should be returned since returning clause was " + returningClause,
            "a, b, c", columnNames);
        assertEquals(1, rs.getInt(1));
        assertEquals("a", rs.getString(2));
        assertEquals(2, rs.getInt(3));
        assertEquals(1, rs.getInt("a"));
        assertEquals("a", rs.getString("b"));
        assertEquals(2, rs.getInt("c"));
        break;
      default:
        fail("Unexpected test kind: " + returningInQuery);
    }
  }

  /** Asserts the key row produced by the second genkeys row (2, 'b', 4). */
  private void assertCB2(ResultSet rs) throws SQLException {
    switch (returningInQuery) {
      case NO:
        assertEquals("Two columns should be returned since returning clause was empty and {c, b} was requested via API",
            2, rs.getMetaData().getColumnCount());
        assertEquals(4, rs.getInt(1));
        assertEquals("b", rs.getString(2));
        break;
      case A:
        assertEquals("Just one column should be returned since returning clause was " + returningClause,
            1, rs.getMetaData().getColumnCount());
        assertEquals(2, rs.getInt(1));
        break;
      case AB:
        assertEquals("Two columns should be returned since returning clause was " + returningClause,
            2, rs.getMetaData().getColumnCount());
        assertEquals(2, rs.getInt(1));
        assertEquals("b", rs.getString(2));
        break;
      case STAR:
        assertEquals("Three columns should be returned since returning clause was " + returningClause,
            3, rs.getMetaData().getColumnCount());
        assertEquals(2, rs.getInt(1));
        assertEquals("b", rs.getString(2));
        assertEquals(4, rs.getInt(3));
        break;
      default:
        fail("Unexpected test kind: " + returningInQuery);
    }
  }

  @Test
  public void testGeneratedKeysCleared() throws SQLException {
    Statement stmt = con.createStatement();
    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause + "; ", Statement.RETURN_GENERATED_KEYS);
    ResultSet rs = stmt.getGeneratedKeys();
    assertTrue(rs.next());
    try {
      stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 3)" + returningClause);
      if (returningInQuery.columnsReturned() > 0) {
        fail("A result was returned when none was expected error should happen when executing executeUpdate('... returning ...')");
      }
    } catch (SQLException e) {
      if (returningInQuery.columnsReturned() > 0 && "0100E".equals(e.getSQLState())) {
        // A result was returned when none was expected
        return; // just as expected
      }
      throw e;
    }
    // The second execution did not request keys, so the old ones are gone.
    rs = stmt.getGeneratedKeys();
    assertFalse(rs.next());
  }

  @Test
  public void testBatchGeneratedKeys() throws SQLException {
    PreparedStatement ps = con.prepareStatement("INSERT INTO genkeys(c) VALUES (?)" + returningClause + "",
        Statement.RETURN_GENERATED_KEYS);
    ps.setInt(1, 4);
    ps.addBatch();
    ps.setInt(1, 7);
    ps.addBatch();
    ps.executeBatch();
    ResultSet rs = ps.getGeneratedKeys();
    assertTrue("getGeneratedKeys.next() should be non-empty", rs.next());
    assertEquals(1, rs.getInt("a"));
    assertTrue(rs.next());
    assertEquals(2, rs.getInt("a"));
    assertFalse(rs.next());
  }

  /** Prepares "select c from genkeys" with the key-request flavor under test. */
  private PreparedStatement prepareSelect() throws SQLException {
    String sql = "select c from genkeys";
    switch (returningInQuery) {
      case NO:
        return con.prepareStatement(sql);
      case STAR:
        return con.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS);
      default:
        return con.prepareStatement(sql, returningInQuery.columns);
    }
  }

  @Test
  public void selectWithGeneratedKeysViaPreparedExecuteQuery() throws SQLException {
    PreparedStatement ps = prepareSelect();
    ResultSet rs = ps.executeQuery();
    assertFalse("genkeys table is empty, thus rs.next() should return false", rs.next());
    ps.close();
  }

  @Test
  public void selectWithGeneratedKeysViaPreparedExecute() throws SQLException {
    PreparedStatement ps = prepareSelect();
    ps.execute();
    ResultSet rs = ps.getResultSet();
    assertFalse("genkeys table is empty, thus rs.next() should return false", rs.next());
    ps.close();
  }

  @Test
  public void selectWithGeneratedKeysViaNonPrepared() throws SQLException {
    Statement s = con.createStatement();
    String sql = "select c from genkeys";
    ResultSet rs;
    switch (returningInQuery) {
      case NO:
        s.execute(sql);
        rs = s.getResultSet();
        break;
      case STAR:
        s.execute(sql, Statement.RETURN_GENERATED_KEYS);
        rs = s.getResultSet();
        break;
      default:
        s.execute(sql, returningInQuery.columns);
        rs = s.getResultSet();
    }
    assertNotNull("SELECT statement should return results via getResultSet, not getGeneratedKeys", rs);
    assertFalse("genkeys table is empty, thus rs.next() should return false", rs.next());
    s.close();
  }

  @Test
  public void breakDescribeOnFirstServerPreparedExecution() throws SQLException {
    // Test code is adapted from https://github.com/pgjdbc/pgjdbc/issues/811#issuecomment-352468388

    PreparedStatement ps =
        con.prepareStatement("insert into genkeys(b) values(?)" + returningClause,
            Statement.RETURN_GENERATED_KEYS);
    ps.setString(1, "TEST");

    // The below "prepareThreshold - 1" executions ensure that bind failure would happen
    // exactly on prepareThreshold execution (the first one when server flips to server-prepared)
    int prepareThreshold = ps.unwrap(PGStatement.class).getPrepareThreshold();
    for (int i = 0; i < prepareThreshold - 1; i++) {
      ps.executeUpdate();
    }
    try {
      // Send a value that's too long on the 5th request
      ps.setString(1, "TESTTESTTEST");
      ps.executeUpdate();
    } catch (SQLException e) {
      // Expected error: org.postgresql.util.PSQLException: ERROR: value
      // too long for type character varying(10)
      if (!PSQLState.STRING_DATA_RIGHT_TRUNCATION.getState().equals(e.getSQLState())) {
        throw e;
      }
    }
    // Send a valid value on the next request
    ps.setString(1, "TEST");
    ps.executeUpdate();
  }

}
--- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3BlobTest.java @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2005, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc3; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.sql.Blob; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +public class Jdbc3BlobTest { + private static final String TABLE = "blobtest"; + private static final String INSERT = "INSERT INTO " + TABLE + " VALUES (1, lo_creat(-1))"; + private static final String SELECT = "SELECT ID, DATA FROM " + TABLE + " WHERE ID = 1"; + + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + TestUtil.createTable(conn, TABLE, "ID INT PRIMARY KEY, DATA OID"); + conn.setAutoCommit(false); + } + + @AfterEach + void tearDown() throws SQLException { + conn.setAutoCommit(true); + try { + Statement stmt = conn.createStatement(); + try { + stmt.execute("SELECT lo_unlink(DATA) FROM " + TABLE); + } finally { + try { + stmt.close(); + } catch (Exception e) { + } + } + } finally { + TestUtil.dropTable(conn, TABLE); + TestUtil.closeDB(conn); + } + } + + /** + * Test the writing and reading of a single byte. + */ + @Test + void test1Byte() throws SQLException { + byte[] data = {(byte) 'a'}; + readWrite(data); + } + + /** + * Test the writing and reading of a few bytes. 
+ */ + @Test + void manyBytes() throws SQLException { + byte[] data = "aaaaaaaaaa".getBytes(); + readWrite(data); + } + + /** + * Test writing a single byte with an offset. + */ + @Test + void test1ByteOffset() throws SQLException { + byte[] data = {(byte) 'a'}; + readWrite(10, data); + } + + /** + * Test the writing and reading of a few bytes with an offset. + */ + @Test + void manyBytesOffset() throws SQLException { + byte[] data = "aaaaaaaaaa".getBytes(); + readWrite(10, data); + } + + /** + * Tests all of the byte values from 0 - 255. + */ + @Test + void allBytes() throws SQLException { + byte[] data = new byte[256]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) i; + } + readWrite(data); + } + + @Test + void truncate() throws SQLException { + if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3)) { + return; + } + + byte[] data = new byte[100]; + for (byte i = 0; i < data.length; i++) { + data[i] = i; + } + readWrite(data); + + PreparedStatement ps = conn.prepareStatement(SELECT); + ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + Blob blob = rs.getBlob("DATA"); + + assertEquals(100, blob.length()); + + blob.truncate(50); + assertEquals(50, blob.length()); + + blob.truncate(150); + assertEquals(150, blob.length()); + + data = blob.getBytes(1, 200); + assertEquals(150, data.length); + for (byte i = 0; i < 50; i++) { + assertEquals(i, data[i]); + } + + for (int i = 50; i < 150; i++) { + assertEquals(0, data[i]); + } + } + + /** + * + * @param data data to write + * @throws SQLException if something goes wrong + */ + public void readWrite(byte[] data) throws SQLException { + readWrite(1, data); + } + + /** + * + * @param offset data offset + * @param data data to write + * @throws SQLException if something goes wrong + */ + public void readWrite(int offset, byte[] data) throws SQLException { + PreparedStatement ps = conn.prepareStatement(INSERT); + ps.executeUpdate(); + ps.close(); + + ps = conn.prepareStatement(SELECT); + 
ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + Blob b = rs.getBlob("DATA"); + b.setBytes(offset, data); + + rs.close(); + ps.close(); + + ps = conn.prepareStatement(SELECT); + rs = ps.executeQuery(); + + assertTrue(rs.next()); + b = rs.getBlob("DATA"); + byte[] rspData = b.getBytes(offset, data.length); + assertArrayEquals(data, rspData, "Request should be the same as the response"); + + rs.close(); + ps.close(); + } + + /** + * Test the writing and reading of a single byte. + */ + @Test + void test1ByteStream() throws SQLException, IOException { + byte[] data = {(byte) 'a'}; + readWriteStream(data); + } + + /** + * Test the writing and reading of a few bytes. + */ + @Test + void manyBytesStream() throws SQLException, IOException { + byte[] data = "aaaaaaaaaa".getBytes(); + readWriteStream(data); + } + + /** + * Test writing a single byte with an offset. + */ + @Test + void test1ByteOffsetStream() throws SQLException, IOException { + byte[] data = {(byte) 'a'}; + readWriteStream(10, data); + } + + /** + * Test the writing and reading of a few bytes with an offset. + */ + @Test + void manyBytesOffsetStream() throws SQLException, IOException { + byte[] data = "aaaaaaaaaa".getBytes(); + readWriteStream(10, data); + } + + /** + * Tests all of the byte values from 0 - 255. + */ + @Test + void allBytesStream() throws SQLException, IOException { + byte[] data = new byte[256]; + for (int i = 0; i < data.length; i++) { + data[i] = (byte) i; + } + readWriteStream(data); + } + + public void readWriteStream(byte[] data) throws SQLException, IOException { + readWriteStream(1, data); + } + + /** + * Reads then writes data to the blob via a stream. 
+ */ + public void readWriteStream(int offset, byte[] data) throws SQLException, IOException { + PreparedStatement ps = conn.prepareStatement(INSERT); + ps.executeUpdate(); + ps.close(); + + ps = conn.prepareStatement(SELECT); + ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + Blob b = rs.getBlob("DATA"); + OutputStream out = b.setBinaryStream(offset); + out.write(data); + out.flush(); + out.close(); + + rs.close(); + ps.close(); + + ps = conn.prepareStatement(SELECT); + rs = ps.executeQuery(); + + assertTrue(rs.next()); + b = rs.getBlob("DATA"); + InputStream in = b.getBinaryStream(); + byte[] rspData = new byte[data.length]; + in.skip(offset - 1); + in.read(rspData); + in.close(); + + assertArrayEquals(data, rspData, "Request should be the same as the response"); + + rs.close(); + ps.close(); + } + + @Test + void pattern() throws SQLException { + byte[] data = "abcdefghijklmnopqrstuvwxyz0123456789".getBytes(); + byte[] pattern = "def".getBytes(); + + PreparedStatement ps = conn.prepareStatement(INSERT); + ps.executeUpdate(); + ps.close(); + + ps = conn.prepareStatement(SELECT); + ResultSet rs = ps.executeQuery(); + + assertTrue(rs.next()); + Blob b = rs.getBlob("DATA"); + b.setBytes(1, data); + + rs.close(); + ps.close(); + + ps = conn.prepareStatement(SELECT); + rs = ps.executeQuery(); + + assertTrue(rs.next()); + b = rs.getBlob("DATA"); + long position = b.position(pattern, 1); + byte[] rspData = b.getBytes(position, pattern.length); + assertArrayEquals(pattern, rspData, "Request should be the same as the response"); + + rs.close(); + ps.close(); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3CallableStatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3CallableStatementTest.java new file mode 100644 index 0000000..f0f4b82 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3CallableStatementTest.java @@ -0,0 +1,1135 @@ +/* + * Copyright (c) 2005, PostgreSQL Global Development Group + 
* See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc3; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; +import org.postgresql.util.PSQLState; + +import org.junit.BeforeClass; +import org.junit.Test; + +import java.math.BigDecimal; +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.time.LocalDate; + +/** + * @author davec + */ +public class Jdbc3CallableStatementTest extends BaseTest4 { + @BeforeClass + public static void beforeClass() throws Exception { + try (Connection con = TestUtil.openDB()) { + assumeCallableStatementsSupported(con); + } + } + + @Override + public void setUp() throws Exception { + super.setUp(); + Statement stmt = con.createStatement(); + stmt.execute( + "create temp table numeric_tab (MAX_VAL NUMERIC(30,15), MIN_VAL NUMERIC(30,15), NULL_VAL NUMERIC(30,15) NULL)"); + stmt.execute("insert into numeric_tab values ( 999999999999999,0.000000000000001, null)"); + stmt.execute( + "CREATE OR REPLACE FUNCTION mysum(a int, b int) returns int AS 'BEGIN return a + b; END;' LANGUAGE plpgsql"); + stmt.execute( + "CREATE OR REPLACE FUNCTION myiofunc(a INOUT int, b OUT int) AS 'BEGIN b := a; a := 1; END;' LANGUAGE plpgsql"); + stmt.execute( + "CREATE OR REPLACE FUNCTION myif(a INOUT int, b IN int) AS 'BEGIN a := b; END;' LANGUAGE plpgsql"); + stmt.execute( + "CREATE OR REPLACE FUNCTION mynoparams() returns int AS 'BEGIN return 733; END;' LANGUAGE plpgsql"); + stmt.execute( + "CREATE OR REPLACE FUNCTION mynoparamsproc() returns void AS 'BEGIN 
NULL; END;' LANGUAGE plpgsql"); + + stmt.execute("create or replace function " + + "Numeric_Proc( OUT IMAX NUMERIC(30,15), OUT IMIN NUMERIC(30,15), OUT INUL NUMERIC(30,15)) as " + + "'begin " + + "select max_val into imax from numeric_tab;" + + "select min_val into imin from numeric_tab;" + + "select null_val into inul from numeric_tab;" + + + " end;' " + + "language plpgsql;"); + + stmt.execute("CREATE OR REPLACE FUNCTION test_somein_someout(" + + "pa IN int4," + + "pb OUT varchar," + + "pc OUT int8)" + + " AS " + + + "'begin " + + "pb := ''out'';" + + "pc := pa + 1;" + + "end;'" + + + "LANGUAGE plpgsql VOLATILE;" + + ); + stmt.execute("CREATE OR REPLACE FUNCTION test_allinout(" + + "pa INOUT int4," + + "pb INOUT varchar," + + "pc INOUT int8)" + + " AS " + + "'begin " + + "pa := pa + 1;" + + "pb := ''foo out'';" + + "pc := pa + 1;" + + "end;'" + + "LANGUAGE plpgsql VOLATILE;" + ); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getBooleanWithoutArg() " + + "RETURNS boolean AS ' " + + "begin return true; end; ' LANGUAGE plpgsql;"); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getBit1WithoutArg() " + + "RETURNS bit(1) AS ' " + + "begin return B''1''; end; ' LANGUAGE plpgsql;"); + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getBit2WithoutArg() " + + "RETURNS bit(2) AS ' " + + "begin return B''10''; end; ' LANGUAGE plpgsql;"); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) { + stmt.execute( + "CREATE OR REPLACE PROCEDURE inonlyprocedure(a IN int) AS 'BEGIN NULL; END;' LANGUAGE plpgsql"); + stmt.execute( + "CREATE OR REPLACE PROCEDURE inoutprocedure(a INOUT int) AS 'BEGIN a := a + a; END;' LANGUAGE plpgsql"); + + } + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v14)) { + stmt.execute("create or replace PROCEDURE testspg_refcursor(bar date, out cur1 refcursor) " + + " as $$ declare begin " + + "OPEN cur1 FOR " + + "SELECT now() as now; end $$ language plpgsql"); + } + } + + @Override + public void tearDown() throws 
SQLException { + Statement stmt = con.createStatement(); + stmt.execute("drop function Numeric_Proc(out decimal, out decimal, out decimal)"); + stmt.execute("drop function test_somein_someout(int4)"); + stmt.execute("drop function test_allinout( inout int4, inout varchar, inout int8)"); + stmt.execute("drop function mysum(a int, b int)"); + stmt.execute("drop function myiofunc(a INOUT int, b OUT int) "); + stmt.execute("drop function myif(a INOUT int, b IN int)"); + stmt.execute("drop function mynoparams()"); + stmt.execute("drop function mynoparamsproc()"); + stmt.execute("drop function testspg__getBooleanWithoutArg ();"); + stmt.execute("drop function testspg__getBit1WithoutArg ();"); + stmt.execute("drop function testspg__getBit2WithoutArg ();"); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) { + stmt.execute("drop procedure inonlyprocedure(a IN int)"); + stmt.execute("drop procedure inoutprocedure(a INOUT int)"); + } + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v14)) { + stmt.execute("DROP PROCEDURE testspg_refcursor(date);"); + } + stmt.close(); + super.tearDown(); + } + + @Test + public void testSomeInOut() throws Throwable { + CallableStatement call = con.prepareCall("{ call test_somein_someout(?,?,?) 
}"); + + call.registerOutParameter(2, Types.VARCHAR); + call.registerOutParameter(3, Types.BIGINT); + call.setInt(1, 20); + call.execute(); + + } + + @Test + public void testNotEnoughParameters() throws Throwable { + CallableStatement cs = con.prepareCall("{call myiofunc(?,?)}"); + cs.setInt(1, 2); + cs.registerOutParameter(2, Types.INTEGER); + try { + cs.execute(); + fail("Should throw an exception "); + } catch (SQLException ex) { + assertTrue(ex.getSQLState().equalsIgnoreCase(PSQLState.SYNTAX_ERROR.getState())); + } + + } + + @Test + public void testTooManyParameters() throws Throwable { + CallableStatement cs = con.prepareCall("{call myif(?,?)}"); + try { + cs.setInt(1, 1); + cs.setInt(2, 2); + cs.registerOutParameter(1, Types.INTEGER); + cs.registerOutParameter(2, Types.INTEGER); + cs.execute(); + fail("should throw an exception"); + } catch (SQLException ex) { + assertTrue(ex.getSQLState().equalsIgnoreCase(PSQLState.SYNTAX_ERROR.getState())); + } + + } + + @Test + public void testAllInOut() throws Throwable { + CallableStatement call = con.prepareCall("{ call test_allinout(?,?,?) }"); + + call.registerOutParameter(1, Types.INTEGER); + call.registerOutParameter(2, Types.VARCHAR); + call.registerOutParameter(3, Types.BIGINT); + call.setInt(1, 20); + call.setString(2, "hi"); + call.setInt(3, 123); + call.execute(); + call.getInt(1); + call.getString(2); + call.getLong(3); + + } + + @Test + public void testNumeric() throws Throwable { + CallableStatement call = con.prepareCall("{ call Numeric_Proc(?,?,?) 
}"); + + call.registerOutParameter(1, Types.NUMERIC, 15); + call.registerOutParameter(2, Types.NUMERIC, 15); + call.registerOutParameter(3, Types.NUMERIC, 15); + + call.executeUpdate(); + BigDecimal ret = call.getBigDecimal(1); + assertTrue( + "correct return from getNumeric () should be 999999999999999.000000000000000 but returned " + + ret.toString(), + ret.equals(new BigDecimal("999999999999999.000000000000000"))); + + ret = call.getBigDecimal(2); + assertTrue("correct return from getNumeric ()", + ret.equals(new BigDecimal("0.000000000000001"))); + try { + ret = call.getBigDecimal(3); + } catch (NullPointerException ex) { + assertTrue("This should be null", call.wasNull()); + } + } + + @Test + public void testGetObjectDecimal() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute( + "create temp table decimal_tab ( max_val numeric(30,15), min_val numeric(30,15), nul_val numeric(30,15) )"); + stmt.execute( + "insert into decimal_tab values (999999999999999.000000000000000,0.000000000000001,null)"); + + boolean ret = stmt.execute("create or replace function " + + "decimal_proc( OUT pmax numeric, OUT pmin numeric, OUT nval numeric) as " + + "'begin " + + "select max_val into pmax from decimal_tab;" + + "select min_val into pmin from decimal_tab;" + + "select nul_val into nval from decimal_tab;" + + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call decimal_proc(?,?,?) 
}"); + cstmt.registerOutParameter(1, Types.DECIMAL); + cstmt.registerOutParameter(2, Types.DECIMAL); + cstmt.registerOutParameter(3, Types.DECIMAL); + cstmt.executeUpdate(); + BigDecimal val = (BigDecimal) cstmt.getObject(1); + assertEquals(0, val.compareTo(new BigDecimal("999999999999999.000000000000000"))); + val = (BigDecimal) cstmt.getObject(2); + assertEquals(0, val.compareTo(new BigDecimal("0.000000000000001"))); + val = (BigDecimal) cstmt.getObject(3); + assertNull(val); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function decimal_proc()"); + } catch (Exception ex) { + } + } + } + + @Test + public void testVarcharBool() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute("create temp table vartab( max_val text, min_val text)"); + stmt.execute("insert into vartab values ('a','b')"); + boolean ret = stmt.execute("create or replace function " + + "updatevarchar( in imax text, in imin text) returns int as " + + "'begin " + + "update vartab set max_val = imax;" + + "update vartab set min_val = imin;" + + "return 0;" + + " end;' " + + "language plpgsql;"); + stmt.close(); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call updatevarchar(?,?) 
}"); + cstmt.setObject(1, Boolean.TRUE, Types.VARCHAR); + cstmt.setObject(2, Boolean.FALSE, Types.VARCHAR); + + cstmt.executeUpdate(); + cstmt.close(); + ResultSet rs = con.createStatement().executeQuery("select * from vartab"); + assertTrue(rs.next()); + assertTrue(rs.getString(1).equals(Boolean.TRUE.toString())); + + assertTrue(rs.getString(2).equals(Boolean.FALSE.toString())); + rs.close(); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function updatevarchar(text,text)"); + } catch (Exception ex) { + } + } + } + + @Test + public void testInOut() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute(createBitTab); + stmt.execute(insertBitTab); + boolean ret = stmt.execute("create or replace function " + + "insert_bit( inout IMAX boolean, inout IMIN boolean, inout INUL boolean) as " + + "'begin " + + "insert into bit_tab values( imax, imin, inul);" + + "select max_val into imax from bit_tab;" + + "select min_val into imin from bit_tab;" + + "select null_val into inul from bit_tab;" + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call insert_bit(?,?,?) 
}"); + cstmt.setObject(1, "true", Types.BIT); + cstmt.setObject(2, "false", Types.BIT); + cstmt.setNull(3, Types.BIT); + cstmt.registerOutParameter(1, Types.BIT); + cstmt.registerOutParameter(2, Types.BIT); + cstmt.registerOutParameter(3, Types.BIT); + cstmt.executeUpdate(); + + assertTrue(cstmt.getBoolean(1)); + assertFalse(cstmt.getBoolean(2)); + cstmt.getBoolean(3); + assertTrue(cstmt.wasNull()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function insert_bit(boolean, boolean, boolean)"); + } catch (Exception ex) { + } + } + } + + private final String createBitTab = + "create temp table bit_tab ( max_val boolean, min_val boolean, null_val boolean )"; + private final String insertBitTab = "insert into bit_tab values (true,false,null)"; + + @Test + public void testSetObjectBit() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute(createBitTab); + stmt.execute(insertBitTab); + boolean ret = stmt.execute("create or replace function " + + "update_bit( in IMAX boolean, in IMIN boolean, in INUL boolean) returns int as " + + "'begin " + + "update bit_tab set max_val = imax;" + + "update bit_tab set min_val = imin;" + + "update bit_tab set min_val = inul;" + + " return 0;" + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call update_bit(?,?,?) 
}"); + cstmt.setObject(1, "true", Types.BIT); + cstmt.setObject(2, "false", Types.BIT); + cstmt.setNull(3, Types.BIT); + cstmt.executeUpdate(); + cstmt.close(); + ResultSet rs = con.createStatement().executeQuery("select * from bit_tab"); + + assertTrue(rs.next()); + assertTrue(rs.getBoolean(1)); + assertFalse(rs.getBoolean(2)); + rs.getBoolean(3); + assertTrue(rs.wasNull()); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function update_bit(boolean, boolean, boolean)"); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetBit1WithoutArg() throws SQLException { + assumeNotSimpleQueryMode(); + try (CallableStatement call = con.prepareCall("{ ? = call testspg__getBit1WithoutArg () }")) { + call.registerOutParameter(1, Types.BOOLEAN); + call.execute(); + assertTrue(call.getBoolean(1)); + } + } + + @Test + public void testGetBit2WithoutArg() throws SQLException { + assumeNotSimpleQueryMode(); + try (CallableStatement call = con.prepareCall("{ ? 
= call testspg__getBit2WithoutArg () }")) { + call.registerOutParameter(1, Types.BOOLEAN); + try { + call.execute(); + assertTrue(call.getBoolean(1)); + fail("#getBoolean(int) on bit(2) should throw"); + } catch (SQLException e) { + assertEquals(PSQLState.CANNOT_COERCE.getState(), e.getSQLState()); + } + } + } + + @Test + public void testGetObjectLongVarchar() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute("create temp table longvarchar_tab ( t text, null_val text )"); + stmt.execute("insert into longvarchar_tab values ('testdata',null)"); + boolean ret = stmt.execute("create or replace function " + + "longvarchar_proc( OUT pcn text, OUT nval text) as " + + "'begin " + + "select t into pcn from longvarchar_tab;" + + "select null_val into nval from longvarchar_tab;" + + + " end;' " + + "language plpgsql;"); + + ret = stmt.execute("create or replace function " + + "lvarchar_in_name( IN pcn text) returns int as " + + "'begin " + + "update longvarchar_tab set t=pcn;" + + "return 0;" + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call longvarchar_proc(?,?) }"); + cstmt.registerOutParameter(1, Types.LONGVARCHAR); + cstmt.registerOutParameter(2, Types.LONGVARCHAR); + cstmt.executeUpdate(); + String val = (String) cstmt.getObject(1); + assertEquals("testdata", val); + val = (String) cstmt.getObject(2); + assertNull(val); + cstmt.close(); + cstmt = con.prepareCall("{ call lvarchar_in_name(?) 
}"); + String maxFloat = "3.4E38"; + cstmt.setObject(1, Float.valueOf(maxFloat), Types.LONGVARCHAR); + cstmt.executeUpdate(); + cstmt.close(); + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("select * from longvarchar_tab"); + assertTrue(rs.next()); + String rval = (String) rs.getObject(1); + assertEquals(rval.trim(), maxFloat.trim()); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function longvarchar_proc()"); + dstmt.execute("drop function lvarchar_in_name(text)"); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetBytes01() throws Throwable { + assumeByteaSupported(); + byte[] testdata = "TestData".getBytes(); + try { + Statement stmt = con.createStatement(); + stmt.execute("create temp table varbinary_tab ( vbinary bytea, null_val bytea )"); + boolean ret = stmt.execute("create or replace function " + + "varbinary_proc( OUT pcn bytea, OUT nval bytea) as " + + "'begin " + + "select vbinary into pcn from varbinary_tab;" + + "select null_val into nval from varbinary_tab;" + + + " end;' " + + "language plpgsql;"); + stmt.close(); + PreparedStatement pstmt = con.prepareStatement("insert into varbinary_tab values (?,?)"); + pstmt.setBytes(1, testdata); + pstmt.setBytes(2, null); + + pstmt.executeUpdate(); + pstmt.close(); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call varbinary_proc(?,?) 
}"); + cstmt.registerOutParameter(1, Types.VARBINARY); + cstmt.registerOutParameter(2, Types.VARBINARY); + cstmt.executeUpdate(); + byte[] retval = cstmt.getBytes(1); + for (int i = 0; i < testdata.length; i++) { + assertEquals(testdata[i], retval[i]); + } + + retval = cstmt.getBytes(2); + assertNull(retval); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function varbinary_proc()"); + } catch (Exception ex) { + } + } + } + + private final String createDecimalTab = + "create temp table decimal_tab ( max_val float, min_val float, null_val float )"; + private final String insertDecimalTab = "insert into decimal_tab values (1.0E125,1.0E-130,null)"; + private final String createFloatProc = "create or replace function " + + "float_proc( OUT IMAX float, OUT IMIN float, OUT INUL float) as " + + "'begin " + + "select max_val into imax from decimal_tab;" + + "select min_val into imin from decimal_tab;" + + "select null_val into inul from decimal_tab;" + + " end;' " + + "language plpgsql;"; + + private final String createUpdateFloat = "create or replace function " + + "updatefloat_proc ( IN maxparm float, IN minparm float ) returns int as " + + "'begin " + + "update decimal_tab set max_val=maxparm;" + + "update decimal_tab set min_val=minparm;" + + "return 0;" + + " end;' " + + "language plpgsql;"; + + private final String createRealTab = + "create temp table real_tab ( max_val float(25), min_val float(25), null_val float(25) )"; + private final String insertRealTab = "insert into real_tab values (1.0E37,1.0E-37, null)"; + + private final String dropFloatProc = "drop function float_proc()"; + private final String createUpdateReal = "create or replace function " + + "update_real_proc ( IN maxparm float(25), IN minparm float(25) ) returns int as " + + "'begin " + + "update real_tab set max_val=maxparm;" + + "update real_tab set min_val=minparm;" + + "return 0;" + + " end;' " + + 
"language plpgsql;"; + private final String dropUpdateReal = "drop function update_real_proc(float, float)"; + private final double[] doubleValues = {1.0E125, 1.0E-130}; + private final float[] realValues = {(float) 1.0E37, (float) 1.0E-37}; + private final int[] intValues = {2147483647, -2147483648}; + + @Test + public void testUpdateReal() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute(createRealTab); + boolean ret = stmt.execute(createUpdateReal); + + stmt.execute(insertRealTab); + stmt.close(); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call update_real_proc(?,?) }"); + BigDecimal val = new BigDecimal(intValues[0]); + float x = val.floatValue(); + cstmt.setObject(1, val, Types.REAL); + val = new BigDecimal(intValues[1]); + cstmt.setObject(2, val, Types.REAL); + cstmt.executeUpdate(); + cstmt.close(); + ResultSet rs = con.createStatement().executeQuery("select * from real_tab"); + assertTrue(rs.next()); + Float oVal = (float) intValues[0]; + Float rVal = Float.valueOf(rs.getObject(1).toString()); + assertTrue(oVal.equals(rVal)); + oVal = (float) intValues[1]; + rVal = Float.valueOf(rs.getObject(2).toString()); + assertTrue(oVal.equals(rVal)); + rs.close(); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute(dropUpdateReal); + dstmt.close(); + } catch (Exception ex) { + } + } + } + + @Test + public void testUpdateDecimal() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute(createDecimalTab); + boolean ret = stmt.execute(createUpdateFloat); + stmt.close(); + PreparedStatement pstmt = con.prepareStatement("insert into decimal_tab values (?,?)"); + // note these are reversed on purpose + pstmt.setDouble(1, doubleValues[1]); + pstmt.setDouble(2, doubleValues[0]); + + pstmt.executeUpdate(); + pstmt.close(); + } catch (Exception ex) { + 
fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call updatefloat_proc(?,?) }"); + cstmt.setDouble(1, doubleValues[0]); + cstmt.setDouble(2, doubleValues[1]); + cstmt.executeUpdate(); + cstmt.close(); + ResultSet rs = con.createStatement().executeQuery("select * from decimal_tab"); + assertTrue(rs.next()); + assertTrue(rs.getDouble(1) == doubleValues[0]); + assertTrue(rs.getDouble(2) == doubleValues[1]); + rs.close(); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function updatefloat_proc(float, float)"); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetBytes02() throws Throwable { + assumeByteaSupported(); + byte[] testdata = "TestData".getBytes(); + try { + Statement stmt = con.createStatement(); + stmt.execute("create temp table longvarbinary_tab ( vbinary bytea, null_val bytea )"); + boolean ret = stmt.execute("create or replace function " + + "longvarbinary_proc( OUT pcn bytea, OUT nval bytea) as " + + "'begin " + + "select vbinary into pcn from longvarbinary_tab;" + + "select null_val into nval from longvarbinary_tab;" + + + " end;' " + + "language plpgsql;"); + stmt.close(); + PreparedStatement pstmt = con.prepareStatement("insert into longvarbinary_tab values (?,?)"); + pstmt.setBytes(1, testdata); + pstmt.setBytes(2, null); + + pstmt.executeUpdate(); + pstmt.close(); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call longvarbinary_proc(?,?) 
}"); + cstmt.registerOutParameter(1, Types.LONGVARBINARY); + cstmt.registerOutParameter(2, Types.LONGVARBINARY); + cstmt.executeUpdate(); + byte[] retval = cstmt.getBytes(1); + for (int i = 0; i < testdata.length; i++) { + assertEquals(testdata[i], retval[i]); + } + + retval = cstmt.getBytes(2); + assertNull(retval); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function longvarbinary_proc()"); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetObjectFloat() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute(createDecimalTab); + stmt.execute(insertDecimalTab); + boolean ret = stmt.execute(createFloatProc); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call float_proc(?,?,?) }"); + cstmt.registerOutParameter(1, java.sql.Types.FLOAT); + cstmt.registerOutParameter(2, java.sql.Types.FLOAT); + cstmt.registerOutParameter(3, java.sql.Types.FLOAT); + cstmt.executeUpdate(); + Double val = (Double) cstmt.getObject(1); + assertTrue(val.doubleValue() == doubleValues[0]); + + val = (Double) cstmt.getObject(2); + assertTrue(val.doubleValue() == doubleValues[1]); + + val = (Double) cstmt.getObject(3); + assertTrue(cstmt.wasNull()); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute(dropFloatProc); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetDouble01() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute("create temp table d_tab ( max_val float, min_val float, null_val float )"); + stmt.execute("insert into d_tab values (1.0E125,1.0E-130,null)"); + boolean ret = stmt.execute("create or replace function " + + "double_proc( OUT IMAX float, OUT IMIN float, OUT INUL float) as " + + "'begin " + + "select max_val into imax from 
d_tab;" + + "select min_val into imin from d_tab;" + + "select null_val into inul from d_tab;" + + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call double_proc(?,?,?) }"); + cstmt.registerOutParameter(1, java.sql.Types.DOUBLE); + cstmt.registerOutParameter(2, java.sql.Types.DOUBLE); + cstmt.registerOutParameter(3, java.sql.Types.DOUBLE); + cstmt.executeUpdate(); + assertTrue(cstmt.getDouble(1) == 1.0E125); + assertTrue(cstmt.getDouble(2) == 1.0E-130); + cstmt.getDouble(3); + assertTrue(cstmt.wasNull()); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function double_proc()"); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetDoubleAsReal() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute("create temp table d_tab ( max_val float, min_val float, null_val float )"); + stmt.execute("insert into d_tab values (3.4E38,1.4E-45,null)"); + boolean ret = stmt.execute("create or replace function " + + "double_proc( OUT IMAX float, OUT IMIN float, OUT INUL float) as " + + "'begin " + + "select max_val into imax from d_tab;" + + "select min_val into imin from d_tab;" + + "select null_val into inul from d_tab;" + + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call double_proc(?,?,?) 
}"); + cstmt.registerOutParameter(1, java.sql.Types.REAL); + cstmt.registerOutParameter(2, java.sql.Types.REAL); + cstmt.registerOutParameter(3, java.sql.Types.REAL); + cstmt.executeUpdate(); + assertTrue(cstmt.getFloat(1) == 3.4E38f); + assertTrue(cstmt.getFloat(2) == 1.4E-45f); + cstmt.getFloat(3); + assertTrue(cstmt.wasNull()); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function double_proc()"); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetShort01() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute("create temp table short_tab ( max_val int2, min_val int2, null_val int2 )"); + stmt.execute("insert into short_tab values (32767,-32768,null)"); + boolean ret = stmt.execute("create or replace function " + + "short_proc( OUT IMAX int2, OUT IMIN int2, OUT INUL int2) as " + + "'begin " + + "select max_val into imax from short_tab;" + + "select min_val into imin from short_tab;" + + "select null_val into inul from short_tab;" + + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call short_proc(?,?,?) 
}"); + cstmt.registerOutParameter(1, java.sql.Types.SMALLINT); + cstmt.registerOutParameter(2, java.sql.Types.SMALLINT); + cstmt.registerOutParameter(3, java.sql.Types.SMALLINT); + cstmt.executeUpdate(); + assertEquals(32767, cstmt.getShort(1)); + assertEquals(-32768, cstmt.getShort(2)); + cstmt.getShort(3); + assertTrue(cstmt.wasNull()); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function short_proc()"); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetInt01() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute("create temp table i_tab ( max_val int, min_val int, null_val int )"); + stmt.execute("insert into i_tab values (2147483647,-2147483648,null)"); + boolean ret = stmt.execute("create or replace function " + + "int_proc( OUT IMAX int, OUT IMIN int, OUT INUL int) as " + + "'begin " + + "select max_val into imax from i_tab;" + + "select min_val into imin from i_tab;" + + "select null_val into inul from i_tab;" + + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call int_proc(?,?,?) 
}"); + cstmt.registerOutParameter(1, java.sql.Types.INTEGER); + cstmt.registerOutParameter(2, java.sql.Types.INTEGER); + cstmt.registerOutParameter(3, java.sql.Types.INTEGER); + cstmt.executeUpdate(); + assertEquals(2147483647, cstmt.getInt(1)); + assertEquals(-2147483648, cstmt.getInt(2)); + cstmt.getInt(3); + assertTrue(cstmt.wasNull()); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function int_proc()"); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetLong01() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute("create temp table l_tab ( max_val int8, min_val int8, null_val int8 )"); + stmt.execute("insert into l_tab values (9223372036854775807,-9223372036854775808,null)"); + boolean ret = stmt.execute("create or replace function " + + "bigint_proc( OUT IMAX int8, OUT IMIN int8, OUT INUL int8) as " + + "'begin " + + "select max_val into imax from l_tab;" + + "select min_val into imin from l_tab;" + + "select null_val into inul from l_tab;" + + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call bigint_proc(?,?,?) 
}"); + cstmt.registerOutParameter(1, java.sql.Types.BIGINT); + cstmt.registerOutParameter(2, java.sql.Types.BIGINT); + cstmt.registerOutParameter(3, java.sql.Types.BIGINT); + cstmt.executeUpdate(); + assertEquals(9223372036854775807L, cstmt.getLong(1)); + assertEquals(-9223372036854775808L, cstmt.getLong(2)); + cstmt.getLong(3); + assertTrue(cstmt.wasNull()); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function bigint_proc()"); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetBoolean01() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute(createBitTab); + stmt.execute(insertBitTab); + boolean ret = stmt.execute("create or replace function " + + "bit_proc( OUT IMAX boolean, OUT IMIN boolean, OUT INUL boolean) as " + + "'begin " + + "select max_val into imax from bit_tab;" + + "select min_val into imin from bit_tab;" + + "select null_val into inul from bit_tab;" + + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call bit_proc(?,?,?) }"); + cstmt.registerOutParameter(1, java.sql.Types.BIT); + cstmt.registerOutParameter(2, java.sql.Types.BIT); + cstmt.registerOutParameter(3, java.sql.Types.BIT); + cstmt.executeUpdate(); + assertTrue(cstmt.getBoolean(1)); + assertFalse(cstmt.getBoolean(2)); + cstmt.getBoolean(3); + assertTrue(cstmt.wasNull()); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function bit_proc()"); + } catch (Exception ex) { + } + } + } + + @Test + public void testGetBooleanWithoutArg() throws SQLException { + assumeNotSimpleQueryMode(); + try (CallableStatement call = con.prepareCall("{ ? 
= call testspg__getBooleanWithoutArg () }")) { + call.registerOutParameter(1, Types.BOOLEAN); + call.execute(); + assertTrue(call.getBoolean(1)); + } + } + + @Test + public void testGetByte01() throws Throwable { + try { + Statement stmt = con.createStatement(); + stmt.execute("create temp table byte_tab ( max_val int2, min_val int2, null_val int2 )"); + stmt.execute("insert into byte_tab values (127,-128,null)"); + boolean ret = stmt.execute("create or replace function " + + "byte_proc( OUT IMAX int2, OUT IMIN int2, OUT INUL int2) as " + + "'begin " + + "select max_val into imax from byte_tab;" + + "select min_val into imin from byte_tab;" + + "select null_val into inul from byte_tab;" + + + " end;' " + + "language plpgsql;"); + } catch (Exception ex) { + fail(ex.getMessage()); + throw ex; + } + try { + CallableStatement cstmt = con.prepareCall("{ call byte_proc(?,?,?) }"); + cstmt.registerOutParameter(1, java.sql.Types.TINYINT); + cstmt.registerOutParameter(2, java.sql.Types.TINYINT); + cstmt.registerOutParameter(3, java.sql.Types.TINYINT); + cstmt.executeUpdate(); + assertEquals(127, cstmt.getByte(1)); + assertEquals(-128, cstmt.getByte(2)); + cstmt.getByte(3); + assertTrue(cstmt.wasNull()); + } catch (Exception ex) { + fail(ex.getMessage()); + } finally { + try { + Statement dstmt = con.createStatement(); + dstmt.execute("drop function byte_proc()"); + } catch (Exception ex) { + } + } + } + + @Test + public void testMultipleOutExecutions() throws SQLException { + CallableStatement cs = con.prepareCall("{call myiofunc(?, ?)}"); + for (int i = 0; i < 10; i++) { + cs.registerOutParameter(1, Types.INTEGER); + cs.registerOutParameter(2, Types.INTEGER); + cs.setInt(1, i); + cs.execute(); + assertEquals(1, cs.getInt(1)); + assertEquals(i, cs.getInt(2)); + cs.clearParameters(); + } + } + + @Test + public void testSum() throws SQLException { + CallableStatement cs = con.prepareCall("{?= call mysum(?, ?)}"); + cs.registerOutParameter(1, Types.INTEGER); + cs.setInt(2, 2); 
+ cs.setInt(3, 3); + cs.execute(); + assertEquals("2+3 should be 5 when executed via {?= call mysum(?, ?)}", 5, cs.getInt(1)); + } + + @Test + public void testFunctionNoParametersWithParentheses() throws SQLException { + CallableStatement cs = con.prepareCall("{?= call mynoparams()}"); + cs.registerOutParameter(1, Types.INTEGER); + cs.execute(); + assertEquals("{?= call mynoparam()} should return 733, but did not.", 733, cs.getInt(1)); + TestUtil.closeQuietly(cs); + } + + @Test + public void testFunctionNoParametersWithoutParentheses() throws SQLException { + CallableStatement cs = con.prepareCall("{?= call mynoparams}"); + cs.registerOutParameter(1, Types.INTEGER); + cs.execute(); + assertEquals("{?= call mynoparam()} should return 733, but did not.", 733, cs.getInt(1)); + TestUtil.closeQuietly(cs); + } + + @Test + public void testProcedureNoParametersWithParentheses() throws SQLException { + CallableStatement cs = con.prepareCall("{ call mynoparamsproc()}"); + cs.execute(); + TestUtil.closeQuietly(cs); + } + + @Test + public void testProcedureNoParametersWithoutParentheses() throws SQLException { + CallableStatement cs = con.prepareCall("{ call mynoparamsproc}"); + cs.execute(); + TestUtil.closeQuietly(cs); + } + + @Test + public void testProcedureInOnlyNativeCall() throws SQLException { + assumeMinimumServerVersion(ServerVersion.v11); + CallableStatement cs = con.prepareCall("call inonlyprocedure(?)"); + cs.setInt(1, 5); + cs.execute(); + TestUtil.closeQuietly(cs); + } + + @Test + public void testProcedureInOutNativeCall() throws SQLException { + assumeMinimumServerVersion(ServerVersion.v11); + // inoutprocedure(a INOUT int) returns a*2 via the INOUT parameter + CallableStatement cs = con.prepareCall("call inoutprocedure(?)"); + cs.setInt(1, 5); + cs.registerOutParameter(1, Types.INTEGER); + cs.execute(); + assertEquals("call inoutprocedure(?) 
should return 10 (when input param = 5) via the INOUT parameter, but did not.", 10, cs.getInt(1)); + TestUtil.closeQuietly(cs); + } + + @Test + public void testCall5Times() throws SQLException { + assumeMinimumServerVersion(ServerVersion.v14); + // call this enough times to change to binary mode + for (int i = 0; i < 6; i++) { + con.setAutoCommit(false); + try (CallableStatement proc = con.prepareCall("call testspg_refcursor( ? , ? )")) { + proc.setDate(1, java.sql.Date.valueOf(LocalDate.now())); + proc.registerOutParameter(2, Types.REF_CURSOR); + proc.execute(); + try (ResultSet results = (ResultSet) proc.getObject(2)) { + while (results.next()) { + System.out.println(" " + i + " " + results.getTimestamp("now").toLocalDateTime()); + } + } + } + con.commit(); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3SavepointTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3SavepointTest.java new file mode 100644 index 0000000..6e82a3d --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3SavepointTest.java @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc3; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Savepoint; +import java.sql.Statement; + +class Jdbc3SavepointTest { + + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + TestUtil.createTable(conn, "savepointtable", "id int primary key"); + conn.setAutoCommit(false); + } + + @AfterEach + void tearDown() throws SQLException { + conn.setAutoCommit(true); + TestUtil.dropTable(conn, "savepointtable"); + TestUtil.closeDB(conn); + } + + private boolean hasSavepoints() throws SQLException { + return true; + } + + private void addRow(int id) throws SQLException { + PreparedStatement pstmt = conn.prepareStatement("INSERT INTO savepointtable VALUES (?)"); + pstmt.setInt(1, id); + pstmt.executeUpdate(); + pstmt.close(); + } + + private int countRows() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM savepointtable"); + rs.next(); + int count = rs.getInt(1); + rs.close(); + return count; + } + + @Test + void autoCommitFails() throws SQLException { + if (!hasSavepoints()) { + return; + } + + conn.setAutoCommit(true); + + try { + conn.setSavepoint(); + fail("Can't create a savepoint with autocommit."); + } catch (SQLException sqle) { + } + + try { + conn.setSavepoint("spname"); + fail("Can't create a savepoint with autocommit."); + } catch (SQLException sqle) { + } + } + + @Test + void cantMixSavepointTypes() throws SQLException { + if (!hasSavepoints()) { + return; + } + + Savepoint namedSavepoint = conn.setSavepoint("named"); + Savepoint 
unNamedSavepoint = conn.setSavepoint(); + + try { + namedSavepoint.getSavepointId(); + fail("Can't get id from named savepoint."); + } catch (SQLException sqle) { + } + + try { + unNamedSavepoint.getSavepointName(); + fail("Can't get name from unnamed savepoint."); + } catch (SQLException sqle) { + } + + } + + @Test + void rollingBackToSavepoints() throws SQLException { + if (!hasSavepoints()) { + return; + } + + Savepoint empty = conn.setSavepoint(); + addRow(1); + Savepoint onerow = conn.setSavepoint("onerow"); + addRow(2); + + assertEquals(2, countRows()); + conn.rollback(onerow); + assertEquals(1, countRows()); + conn.rollback(empty); + assertEquals(0, countRows()); + } + + @Test + void globalRollbackWorks() throws SQLException { + if (!hasSavepoints()) { + return; + } + + conn.setSavepoint(); + addRow(1); + conn.setSavepoint("onerow"); + addRow(2); + + assertEquals(2, countRows()); + conn.rollback(); + assertEquals(0, countRows()); + } + + @Test + void continueAfterError() throws SQLException { + if (!hasSavepoints()) { + return; + } + + addRow(1); + Savepoint savepoint = conn.setSavepoint(); + try { + addRow(1); + fail("Should have thrown duplicate key exception"); + } catch (SQLException sqle) { + conn.rollback(savepoint); + } + + assertEquals(1, countRows()); + addRow(2); + assertEquals(2, countRows()); + } + + @Test + void releaseSavepoint() throws SQLException { + if (!hasSavepoints()) { + return; + } + + Savepoint savepoint = conn.setSavepoint("mysavepoint"); + conn.releaseSavepoint(savepoint); + try { + savepoint.getSavepointName(); + fail("Can't use savepoint after release."); + } catch (SQLException sqle) { + } + + savepoint = conn.setSavepoint(); + conn.releaseSavepoint(savepoint); + try { + savepoint.getSavepointId(); + fail("Can't use savepoint after release."); + } catch (SQLException sqle) { + } + } + + @Test + void complicatedSavepointName() throws SQLException { + if (!hasSavepoints()) { + return; + } + + Savepoint savepoint = 
conn.setSavepoint("name with spaces + \"quotes\""); + conn.rollback(savepoint); + conn.releaseSavepoint(savepoint); + } + + @Test + void rollingBackToInvalidSavepointFails() throws SQLException { + if (!hasSavepoints()) { + return; + } + + Savepoint sp1 = conn.setSavepoint(); + Savepoint sp2 = conn.setSavepoint(); + + conn.rollback(sp1); + try { + conn.rollback(sp2); + fail("Can't rollback to a savepoint that's invalid."); + } catch (SQLException sqle) { + } + } + + @Test + void rollbackMultipleTimes() throws SQLException { + if (!hasSavepoints()) { + return; + } + + addRow(1); + Savepoint savepoint = conn.setSavepoint(); + + addRow(2); + conn.rollback(savepoint); + assertEquals(1, countRows()); + + conn.rollback(savepoint); + assertEquals(1, countRows()); + + addRow(2); + conn.rollback(savepoint); + assertEquals(1, countRows()); + + conn.releaseSavepoint(savepoint); + assertEquals(1, countRows()); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3TestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3TestSuite.java new file mode 100644 index 0000000..a059d5f --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3TestSuite.java @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc3; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +/* + * Executes all known tests for JDBC3 + */ +@RunWith(Suite.class) +@Suite.SuiteClasses({ + CompositeQueryParseTest.class, + CompositeTest.class, + DatabaseMetaDataTest.class, + EscapeSyntaxCallModeCallTest.class, + EscapeSyntaxCallModeCallIfNoReturnTest.class, + EscapeSyntaxCallModeSelectTest.class, + GeneratedKeysTest.class, + Jdbc3BlobTest.class, + Jdbc3CallableStatementTest.class, + Jdbc3SavepointTest.class, + ParameterMetaDataTest.class, + ProcedureTransactionTest.class, + ResultSetTest.class, + SendRecvBufferSizeTest.class, + SqlCommandParseTest.class, + StringTypeParameterTest.class, + TypesTest.class, +}) +public class Jdbc3TestSuite { + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ParameterMetaDataTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ParameterMetaDataTest.java new file mode 100644 index 0000000..b3b86d3 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ParameterMetaDataTest.java @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2005, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc3; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Assume; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collection; + +@RunWith(Parameterized.class) +public class ParameterMetaDataTest extends BaseTest4 { + public ParameterMetaDataTest(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + Assume.assumeTrue("simple protocol only does not support describe statement requests", + preferQueryMode != PreferQueryMode.SIMPLE); + TestUtil.createTable(con, "parametertest", + "a int4, b float8, c text, d point, e timestamp with time zone"); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "parametertest"); + super.tearDown(); + } + + @Test + public void testParameterMD() throws SQLException { + PreparedStatement pstmt = + con.prepareStatement("SELECT a FROM parametertest WHERE b = ? AND c = ? AND d >^ ? 
"); + ParameterMetaData pmd = pstmt.getParameterMetaData(); + + assertEquals(3, pmd.getParameterCount()); + assertEquals(Types.DOUBLE, pmd.getParameterType(1)); + assertEquals("float8", pmd.getParameterTypeName(1)); + assertEquals("java.lang.Double", pmd.getParameterClassName(1)); + assertEquals(Types.VARCHAR, pmd.getParameterType(2)); + assertEquals("text", pmd.getParameterTypeName(2)); + assertEquals("java.lang.String", pmd.getParameterClassName(2)); + assertEquals(Types.OTHER, pmd.getParameterType(3)); + assertEquals("point", pmd.getParameterTypeName(3)); + assertEquals("org.postgresql.geometric.PGpoint", pmd.getParameterClassName(3)); + + pstmt.close(); + } + + @Test + public void testFailsOnBadIndex() throws SQLException { + PreparedStatement pstmt = + con.prepareStatement("SELECT a FROM parametertest WHERE b = ? AND c = ?"); + ParameterMetaData pmd = pstmt.getParameterMetaData(); + try { + pmd.getParameterType(0); + fail("Can't get parameter for index < 1."); + } catch (SQLException sqle) { + } + try { + pmd.getParameterType(3); + fail("Can't get parameter for index 3 with only two parameters."); + } catch (SQLException sqle) { + } + } + + // Make sure we work when mashing two queries into a single statement. + @Test + public void testMultiStatement() throws SQLException { + PreparedStatement pstmt = con.prepareStatement( + "SELECT a FROM parametertest WHERE b = ? AND c = ? 
; SELECT b FROM parametertest WHERE a = ?"); + ParameterMetaData pmd = pstmt.getParameterMetaData(); + + assertEquals(3, pmd.getParameterCount()); + assertEquals(Types.DOUBLE, pmd.getParameterType(1)); + assertEquals("float8", pmd.getParameterTypeName(1)); + assertEquals(Types.VARCHAR, pmd.getParameterType(2)); + assertEquals("text", pmd.getParameterTypeName(2)); + assertEquals(Types.INTEGER, pmd.getParameterType(3)); + assertEquals("int4", pmd.getParameterTypeName(3)); + + pstmt.close(); + + } + + // Here we test that we can legally change the resolved type + // from text to varchar with the complicating factor that there + // is also an unknown parameter. + // + @Test + public void testTypeChangeWithUnknown() throws SQLException { + PreparedStatement pstmt = + con.prepareStatement("SELECT a FROM parametertest WHERE c = ? AND e = ?"); + ParameterMetaData pmd = pstmt.getParameterMetaData(); + + pstmt.setString(1, "Hi"); + pstmt.setTimestamp(2, new Timestamp(0L)); + + ResultSet rs = pstmt.executeQuery(); + rs.close(); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ProcedureTransactionTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ProcedureTransactionTest.java new file mode 100644 index 0000000..07c85c3 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ProcedureTransactionTest.java @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2019, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc3; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.postgresql.PGProperty; +import org.postgresql.core.ServerVersion; +import org.postgresql.jdbc.EscapeSyntaxCallMode; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; +import org.postgresql.util.PSQLState; + +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +public class ProcedureTransactionTest extends BaseTest4 { + @BeforeClass + public static void beforeClass() throws Exception { + try (Connection con = TestUtil.openDB()) { + assumeCallableStatementsSupported(con); + } + } + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.CALL_IF_NO_RETURN.value()); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + Statement stmt = con.createStatement(); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) { + stmt.execute("create temp table proc_test ( some_val bigint )"); + stmt.execute( + "CREATE OR REPLACE PROCEDURE mycommitproc(a INOUT bigint) AS 'BEGIN INSERT INTO proc_test values(a); commit; END;' LANGUAGE plpgsql"); + stmt.execute( + "CREATE OR REPLACE PROCEDURE myrollbackproc(a INOUT bigint) AS 'BEGIN INSERT INTO proc_test values(a); rollback; END;' LANGUAGE plpgsql"); + stmt.execute( + "CREATE OR REPLACE PROCEDURE mynotxnproc(a INOUT bigint) AS 'BEGIN INSERT INTO proc_test values(a); END;' LANGUAGE plpgsql"); + } + } + + @Override + public void tearDown() throws SQLException { + Statement stmt = con.createStatement(); + if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) { + stmt.execute("drop 
procedure mycommitproc(a INOUT bigint) "); + stmt.execute("drop procedure myrollbackproc(a INOUT bigint) "); + stmt.execute("drop procedure mynotxnproc(a INOUT bigint) "); + stmt.execute("drop table proc_test "); + } + stmt.close(); + super.tearDown(); + } + + @Test + public void testProcWithNoTxnControl() throws SQLException { + assumeMinimumServerVersion(ServerVersion.v11); + CallableStatement cs = con.prepareCall("call mynotxnproc(?)"); + int val = 1; + cs.setInt(1, val); + cs.execute(); + TestUtil.closeQuietly(cs); + + cs = con.prepareCall("select some_val from proc_test where some_val = ?"); + cs.setInt(1, val); + ResultSet rs = cs.executeQuery(); + + assertTrue(rs.next()); + assertTrue(rs.getInt(1) == val); + + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(cs); + } + + @Test + public void testProcWithCommitInside() throws SQLException { + assumeMinimumServerVersion(ServerVersion.v11); + CallableStatement cs = con.prepareCall("call mycommitproc(?)"); + int val = 2; + cs.setInt(1, val); + cs.execute(); + TestUtil.closeQuietly(cs); + + cs = con.prepareCall("select some_val from proc_test where some_val = ?"); + cs.setInt(1, val); + ResultSet rs = cs.executeQuery(); + + assertTrue(rs.next()); + assertTrue(rs.getInt(1) == val); + + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(cs); + } + + @Test + public void testProcWithRollbackInside() throws SQLException { + assumeMinimumServerVersion(ServerVersion.v11); + CallableStatement cs = con.prepareCall("call myrollbackproc(?)"); + int val = 3; + cs.setInt(1, val); + cs.execute(); + TestUtil.closeQuietly(cs); + + cs = con.prepareCall("select some_val from proc_test where some_val = ?"); + cs.setInt(1, val); + ResultSet rs = cs.executeQuery(); + + assertFalse(rs.next()); + + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(cs); + } + + @Test + public void testProcAutoCommitTrue() throws SQLException { + con.setAutoCommit(true); + testProcAutoCommit(); + } + + @Test + public void testProcAutoCommitFalse() 
throws SQLException { + // setting autocommit false enables application transaction control, meaning JDBC driver issues a BEGIN + // as of PostgreSQL 11, Stored Procedures with transaction control inside the procedure cannot be + // invoked inside a transaction, the procedure must start the top level transaction + // see: https://www.postgresql.org/docs/current/plpgsql-transactions.html + con.setAutoCommit(false); + try { + testProcAutoCommit(); + fail("Should throw an exception"); + } catch (SQLException ex) { + //2D000 invalid_transaction_termination + assertTrue(ex.getSQLState().equalsIgnoreCase(PSQLState.INVALID_TRANSACTION_TERMINATION.getState())); + con.rollback(); + } + + } + + private void testProcAutoCommit() throws SQLException { + assumeMinimumServerVersion(ServerVersion.v11); + CallableStatement cs = con.prepareCall("call mycommitproc(?)"); + int val = 4; + cs.setInt(1, val); + cs.execute(); + TestUtil.closeQuietly(cs); + + cs = con.prepareCall("select some_val from proc_test where some_val = ?"); + cs.setInt(1, val); + ResultSet rs = cs.executeQuery(); + + assertTrue(rs.next()); + assertTrue(rs.getInt(1) == val); + + TestUtil.closeQuietly(rs); + TestUtil.closeQuietly(cs); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ResultSetTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ResultSetTest.java new file mode 100644 index 0000000..d2286d3 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ResultSetTest.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc3; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +class ResultSetTest { + + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + Statement stmt = conn.createStatement(); + stmt.execute("CREATE TEMP TABLE hold(a int)"); + stmt.execute("INSERT INTO hold VALUES (1)"); + stmt.execute("INSERT INTO hold VALUES (2)"); + stmt.close(); + } + + @AfterEach + void tearDown() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.execute("DROP TABLE hold"); + stmt.close(); + TestUtil.closeDB(conn); + } + + @Test + void holdableResultSet() throws SQLException { + Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, + ResultSet.HOLD_CURSORS_OVER_COMMIT); + + conn.setAutoCommit(false); + stmt.setFetchSize(1); + + ResultSet rs = stmt.executeQuery("SELECT a FROM hold ORDER BY a"); + + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + + conn.commit(); + + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + assertFalse(rs.next()); + + rs.close(); + stmt.close(); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SendRecvBufferSizeTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SendRecvBufferSizeTest.java new file mode 100644 index 0000000..96034c9 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SendRecvBufferSizeTest.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more 
information. + */ + +package org.postgresql.test.jdbc3; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +public class SendRecvBufferSizeTest extends BaseTest4 { + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + PGProperty.SEND_BUFFER_SIZE.set(props, "1024"); + PGProperty.RECEIVE_BUFFER_SIZE.set(props, "1024"); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "hold", "a int"); + Statement stmt = con.createStatement(); + stmt.execute("INSERT INTO hold VALUES (1)"); + stmt.execute("INSERT INTO hold VALUES (2)"); + stmt.close(); + } + + @After + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "hold"); + super.tearDown(); + } + + // dummy test + @Test + public void testSelect() throws SQLException { + Statement stmt = con.createStatement(); + stmt.execute("select * from hold"); + stmt.close(); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SqlCommandParseTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SqlCommandParseTest.java new file mode 100644 index 0000000..cb2c64e --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SqlCommandParseTest.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
/*
 * Copyright (c) 2018, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc3;

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.postgresql.core.NativeQuery;
import org.postgresql.core.Parser;
import org.postgresql.core.SqlCommandType;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;

/**
 * Checks that {@link Parser#parseJdbcSql} classifies the leading SQL command
 * correctly in the presence of comments, CTEs, and keywords used as
 * identifiers.
 */
public class SqlCommandParseTest {

  /** Pairs of {expected command type, SQL text} fed to the parameterized test. */
  public static Iterable<Object[]> data() {
    return Arrays.asList(new Object[][]{
        {SqlCommandType.INSERT, "insert/**/ into table(select) values(1)"},
        {SqlCommandType.SELECT, "select'abc'/**/ as insert"},
        {SqlCommandType.INSERT, "INSERT/*fool /*nest comments -- parser*/*/ INTO genkeys (b,c) VALUES ('a', 2), ('b', 4) SELECT"},
        {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert into table(select) values(1)"},
        {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert into table(select) select * from update"},
        {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert/**/ into table(select) values(1)"},
        {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert /**/ into table(select) values(1)"},
        {SqlCommandType.SELECT, "with update as (update foo set (a=?,b=?,c=?)) insert --\nas () select 1"},
        {SqlCommandType.SELECT, "with update as (update foo set (a=?,b=?,c=?)) insert --\n/* dfhg \n*/\nas () select 1"},
        {SqlCommandType.SELECT, "WITH x as (INSERT INTO genkeys(a,b,c) VALUES (1, 'a', 2) returning returning a, b) select * from x"},
        // No idea if it works, but it should be parsed as WITH
        {SqlCommandType.WITH, "with update as (update foo set (a=?,b=?,c=?)) copy from stdin"},
    });
  }

  @MethodSource("data")
  @ParameterizedTest(name = "expected={0}, sql={1}")
  void run(SqlCommandType type, String sql) throws SQLException {
    // Fix: use the parameterized List<NativeQuery> instead of a raw List,
    // and declare-and-assign in one statement.
    List<NativeQuery> queries = Parser.parseJdbcSql(sql, true, true, false, true, true);
    NativeQuery query = queries.get(0);
    assertEquals(type, query.command.getType(), sql);
  }
}
/*
 * Copyright (c) 2005, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc3;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.postgresql.core.ServerVersion;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.test.TestUtil;
import org.postgresql.test.jdbc2.BaseTest4;
import org.postgresql.util.PSQLState;

import org.junit.Assume;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Properties;

/**
 * Exercises the {@code stringtype} connection property against an enum column.
 * With {@code stringtype=varchar} (or unset), string parameters are sent as
 * {@code varchar} and the server rejects them for enum targets; with
 * {@code stringtype=unspecified} the server infers the type and they succeed.
 */
@RunWith(Parameterized.class)
public class StringTypeParameterTest extends BaseTest4 {
  private static final String UNSPECIFIED_STRING_TYPE = "unspecified";

  // The stringtype property value under test; null means "driver default".
  private final String stringType;

  public StringTypeParameterTest(String stringType) {
    this.stringType = stringType;
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    // Assume enum supported
    Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
    TestUtil.createEnumType(con, "mood", "'happy', 'sad'");
    TestUtil.createTable(con, "stringtypetest", "m mood");
  }

  @Override
  protected void updateProperties(Properties props) {
    super.updateProperties(props);
    if (stringType != null) {
      props.put("stringtype", stringType);
    }
  }

  @Override
  public void tearDown() throws SQLException {
    TestUtil.dropTable(con, "stringtypetest");
    TestUtil.dropType(con, "mood");
    super.tearDown();
  }

  // Runs every test with the default, "varchar", and "unspecified" settings.
  @Parameterized.Parameters(name = "stringType = {0}")
  public static Iterable data() {
    Collection ids = new ArrayList<>();
    for (String stringType : new String[]{null, "varchar", UNSPECIFIED_STRING_TYPE}) {
      ids.add(new Object[]{stringType});
    }
    return ids;
  }

  /**
   * With stringtype != unspecified, binding a String into an enum column must
   * fail with a datatype mismatch (both via setString and setObject/VARCHAR).
   */
  @Test
  public void testVarcharAsEnum() throws Exception {
    Assume.assumeFalse(UNSPECIFIED_STRING_TYPE.equals(stringType));
    Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE);

    PreparedStatement update = con.prepareStatement("insert into stringtypetest (m) values (?)");
    for (int i = 0; i < 2; i++) {
      update.clearParameters();
      if (i == 0) {
        update.setString(1, "sad");
      } else {
        update.setObject(1, "sad", Types.VARCHAR);
      }
      try {
        update.executeUpdate();
        fail("Expected 'column \"m\" is of type mood but expression is of type character varying', "
            + (i == 0 ? "setString(1, \"sad\")" : "setObject(1, \"sad\", Types.VARCHAR)"));
      } catch (SQLException e) {
        // Exception exception is
        // ERROR: column "m" is of type mood but expression is of type character varying
        if (!PSQLState.DATATYPE_MISMATCH.getState().equals(e.getSQLState())) {
          throw e;
        }
      }
    }
    TestUtil.closeQuietly(update);
  }

  /**
   * Binding with Types.OTHER always lets the server resolve the enum type,
   * regardless of the stringtype setting.
   */
  @Test
  public void testOtherAsEnum() throws Exception {
    PreparedStatement update = con.prepareStatement("insert into stringtypetest (m) values (?)");
    update.setObject(1, "happy", Types.OTHER);
    update.executeUpdate();
    // all good
    TestUtil.closeQuietly(update);
  }

  /**
   * Comparing an enum column to varchar-typed binds must fail with
   * "operator does not exist" when stringtype != unspecified.
   */
  @Test
  public void testMultipleEnumBinds() throws Exception {
    Assume.assumeFalse(UNSPECIFIED_STRING_TYPE.equals(stringType));
    Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE);

    PreparedStatement query =
        con.prepareStatement("select * from stringtypetest where m = ? or m = ?");
    query.setString(1, "sad");
    query.setObject(2, "sad", Types.VARCHAR);
    try {
      query.executeQuery();
      fail("Expected 'operator does not exist: mood = character varying'");
    } catch (SQLException e) {
      // Exception exception is
      // ERROR: operator does not exist: mood = character varying
      if (!PSQLState.UNDEFINED_FUNCTION.getState().equals(e.getSQLState())) {
        throw e;
      }
    }
    TestUtil.closeQuietly(query);
  }

  /**
   * With stringtype=unspecified, string binds into an enum column succeed for
   * both inserts and comparisons.
   */
  @Test
  public void testParameterUnspecified() throws Exception {
    Assume.assumeTrue(UNSPECIFIED_STRING_TYPE.equals(stringType));

    PreparedStatement update = con.prepareStatement("insert into stringtypetest (m) values (?)");
    update.setString(1, "happy");
    update.executeUpdate();
    // all good

    update.clearParameters();
    update.setObject(1, "happy", Types.VARCHAR);
    update.executeUpdate();
    // all good
    update.close();

    PreparedStatement query = con.prepareStatement("select * from stringtypetest where m = ?");
    query.setString(1, "happy");
    ResultSet rs = query.executeQuery();
    assertTrue(rs.next());
    assertEquals("happy", rs.getObject("m"));
    rs.close();

    query.clearParameters();
    query.setObject(1, "happy", Types.VARCHAR);
    rs = query.executeQuery();
    assertTrue(rs.next());
    assertEquals("happy", rs.getObject("m"));

    // all good
    rs.close();
    query.close();
  }
}
/*
 * Copyright (c) 2021, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc3;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.postgresql.PGProperty;
import org.postgresql.test.TestUtil;
import org.postgresql.test.jdbc2.BaseTest4;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Properties;

/**
 * Tests generated-key retrieval against a mixed-case serial column with the
 * {@code quoteReturningIdentifiers} connection property toggled on and off.
 */
@RunWith(Parameterized.class)
public class TestReturning extends BaseTest4 {

  /** Column-name sets that could be passed to prepareStatement(sql, columnNames). */
  public enum ColumnsReturned {
    Id("Id"),
    id("id"),
    ID("*"),
    QUOTED("\"Id\""),
    NO();

    final String[] columns;

    ColumnsReturned(String... columns) {
      this.columns = columns;
    }

    public int columnsReturned() {
      if (columns.length == 1 && columns[0].charAt(0) == '*') {
        return 100500; // does not matter much, the meaning is "every possible column"
      }
      return columns.length;
    }

    public String[] getColumnNames() {
      if (columnsReturned() == 0) {
        return new String[]{};
      }

      return columns;
    }
  }

  // Fix: idiomatic array declarator and final, since the options never change.
  static final String[] returningOptions = {"true", "false"};

  @Parameterized.Parameters(name = "returningInQuery = {0}, quoteReturning = {1}")
  public static Iterable<Object[]> data() {
    Collection<Object[]> ids = new ArrayList<>();
    for (ColumnsReturned columnsReturned : ColumnsReturned.values()) {
      for (String q : returningOptions) {
        ids.add(new Object[]{columnsReturned, q});
      }
    }
    return ids;
  }

  private final ColumnsReturned columnsReturned;
  private final String quoteReturning;

  public TestReturning(ColumnsReturned columnsReturned, String quoteReturning) {
    this.columnsReturned = columnsReturned;
    this.quoteReturning = quoteReturning;
  }

  // NOTE(review): unlike sibling tests, this deliberately(?) does not call
  // super.updateProperties(props) — confirm whether BaseTest4 defaults are wanted.
  @Override
  protected void updateProperties(Properties props) {
    PGProperty.QUOTE_RETURNING_IDENTIFIERS.set(props, quoteReturning);
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    TestUtil.createTempTable(con, "genkeys", "\"Id\" serial, b varchar(5), c int");
  }

  @Override
  public void tearDown() throws SQLException {
    TestUtil.dropTable(con, "genkeys");
    super.tearDown();
  }

  /**
   * Executes {@code sql} requesting generated keys for {@code columnNames} and
   * asserts the first key equals 1, or that an SQLException was raised when
   * {@code exceptionExpected} is set.
   */
  private void testGeneratedKeys(Connection conn, String sql, String[] columnNames,
      boolean exceptionExpected) throws SQLException {
    try (PreparedStatement stmt = conn.prepareStatement(sql, columnNames)) {
      stmt.execute();
      ResultSet rs = stmt.getGeneratedKeys();
      assertNotNull(rs);
      assertTrue(rs.next());
      assertEquals(1, rs.getInt(1));
      // Fix: previously an expected-but-missing exception passed silently.
      if (exceptionExpected) {
        fail("expected an SQLException for the requested generated-key columns, but none was thrown");
      }
    } catch (SQLException e) {
      if (!exceptionExpected) {
        fail("error getting column names: " + e.getMessage());
      }
    }
  }

  @Test
  public void testMixedCase() throws SQLException {

    String insertSql = "INSERT INTO genkeys (b,c) VALUES ('hello', 1)";

    // Only the exact-case unquoted name (when the driver quotes it) or the
    // pre-quoted name (when it does not) can resolve the "Id" column.
    testGeneratedKeys(con, insertSql, new String[]{"Id"}, "false".equals(quoteReturning));
    testGeneratedKeys(con, insertSql, new String[]{"id"}, true);
    testGeneratedKeys(con, insertSql, new String[]{"ID"}, true);
    testGeneratedKeys(con, insertSql, new String[]{"\"Id\""}, "true".equals(quoteReturning));
    testGeneratedKeys(con, insertSql, new String[]{"bad"}, true);
  }
}
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc3;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.test.jdbc2.BaseTest4;

import org.junit.Test;

import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;

/**
 * Tests JDBC 3 type handling: BOOLEAN binds/reads, byte parameters, boolean
 * OUT parameters, and columns of unknown type.
 */
public class TypesTest extends BaseTest4 {

  private Connection conn;

  @Override
  public void setUp() throws Exception {
    super.setUp();
    conn = con; // alias the inherited connection for brevity
    Statement stmt = conn.createStatement();
    stmt.execute(
        "CREATE OR REPLACE FUNCTION return_bool(boolean) RETURNS boolean AS 'BEGIN RETURN $1; END;' LANGUAGE plpgsql");
    stmt.close();
  }

  @Override
  public void tearDown() throws SQLException {
    Statement stmt = conn.createStatement();
    stmt.execute("DROP FUNCTION return_bool(boolean)");
    stmt.close();
    super.tearDown();
  }

  @Test
  public void testPreparedBoolean() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?,?,?,?");
    pstmt.setNull(1, Types.BOOLEAN);
    pstmt.setObject(2, null, Types.BOOLEAN);
    pstmt.setBoolean(3, true);
    pstmt.setObject(4, Boolean.FALSE);
    ResultSet rs = pstmt.executeQuery();
    assertTrue(rs.next());
    // Fix: assertFalse(...) instead of assertTrue(!...).
    assertFalse(rs.getBoolean(1)); // SQL NULL reads as false ...
    assertTrue(rs.wasNull()); // ... and wasNull() reports the NULL
    assertNull(rs.getObject(2));
    assertTrue(rs.getBoolean(3));
    // Only the V3 protocol return will be strongly typed.
    // The V2 path will return a String because it doesn't know
    // any better.
    if (preferQueryMode != PreferQueryMode.SIMPLE) {
      assertFalse(((Boolean) rs.getObject(4)).booleanValue());
    }
    // Fix: close the leaked result set and statement.
    rs.close();
    pstmt.close();
  }

  @Test
  public void testPreparedByte() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?,?");
    pstmt.setByte(1, (byte) 1);
    pstmt.setObject(2, (byte) 2);
    ResultSet rs = pstmt.executeQuery();
    assertTrue(rs.next());
    assertEquals((byte) 1, rs.getByte(1));
    assertFalse(rs.wasNull());
    assertEquals((byte) 2, rs.getByte(2));
    assertFalse(rs.wasNull());
    rs.close();
    pstmt.close();
  }

  @Test
  public void testCallableBoolean() throws SQLException {
    assumeCallableStatementsSupported();
    CallableStatement cs = conn.prepareCall("{? = call return_bool(?)}");
    cs.registerOutParameter(1, Types.BOOLEAN);
    cs.setBoolean(2, true);
    cs.execute();
    // Fix: assertTrue(...) instead of assertEquals(true, ...).
    assertTrue(cs.getBoolean(1));
    cs.close();
  }

  @Test
  public void testUnknownType() throws SQLException {
    Statement stmt = conn.createStatement();

    ResultSet rs = stmt.executeQuery("select 'foo1' as icon1, 'foo2' as icon2 ");
    assertTrue(rs.next());
    assertEquals("failed", "foo1", rs.getString("icon1"));
    assertEquals("failed", "foo2", rs.getString("icon2"));
    // Fix: close the leaked result set and statement.
    rs.close();
    stmt.close();
  }

}
/*
 * Copyright (c) 2007, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc4;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import org.postgresql.core.ServerVersion;
import org.postgresql.geometric.PGbox;
import org.postgresql.jdbc.PgConnection;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.test.TestUtil;
import org.postgresql.test.jdbc2.BaseTest4;
import org.postgresql.test.util.RegexMatcher;
import org.postgresql.util.PGobject;
import org.postgresql.util.PGtokenizer;

import org.hamcrest.MatcherAssert;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.sql.Array;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.UUID;

/**
 * Tests java.sql.Array support (createArrayOf, getArray, getResultSet) across
 * element types, multi-dimensional arrays, and quoted/composite type names,
 * in both text and binary transfer modes.
 */
@RunWith(Parameterized.class)
public class ArrayTest extends BaseTest4 {

  private Connection conn;

  public ArrayTest(BinaryMode binaryMode) {
    setBinaryMode(binaryMode);
  }

  @Parameterized.Parameters(name = "binary = {0}")
  public static Iterable<Object[]> data() {
    Collection<Object[]> ids = new ArrayList<>();
    for (BinaryMode binaryMode : BinaryMode.values()) {
      ids.add(new Object[]{binaryMode});
    }
    return ids;
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    conn = con;

    TestUtil.createTable(conn, "arrtest",
        "intarr int[], decarr decimal(2,1)[], strarr text[]"
            + (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3) ? ", uuidarr uuid[]" : "")
            + ", floatarr float8[]"
            + ", intarr2 int4[][]");
    TestUtil.createTable(conn, "arrcompprnttest", "id serial, name character(10)");
    TestUtil.createTable(conn, "arrcompchldttest",
        "id serial, name character(10), description character varying, parent integer");
    TestUtil.createTable(conn, "\"CorrectCasing\"", "id serial");
    TestUtil.createTable(conn, "\"Evil.Table\"", "id serial");
  }

  @Override
  public void tearDown() throws SQLException {
    TestUtil.dropTable(conn, "arrtest");
    TestUtil.dropTable(conn, "arrcompprnttest");
    TestUtil.dropTable(conn, "arrcompchldttest");
    TestUtil.dropTable(conn, "\"CorrectCasing\"");
    // Fix: "Evil.Table" is created in setUp but was never dropped, leaking the
    // table into subsequent test runs.
    TestUtil.dropTable(conn, "\"Evil.Table\"");

    super.tearDown();
  }

  @Test
  public void testCreateArrayOfBool() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::bool[]");
    pstmt.setArray(1, conn.unwrap(PgConnection.class).createArrayOf("boolean", new boolean[]{true, true, false}));

    ResultSet rs = pstmt.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);
    Boolean[] out = (Boolean[]) arr.getArray();

    Assert.assertEquals(3, out.length);
    Assert.assertEquals(Boolean.TRUE, out[0]);
    Assert.assertEquals(Boolean.TRUE, out[1]);
    Assert.assertEquals(Boolean.FALSE, out[2]);
  }

  @Test
  public void testCreateArrayOfInt() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::int[]");
    Integer[] in = new Integer[3];
    in[0] = 0;
    in[1] = -1;
    in[2] = 2;
    pstmt.setArray(1, conn.createArrayOf("int4", in));

    ResultSet rs = pstmt.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);
    Integer[] out = (Integer[]) arr.getArray();

    Assert.assertEquals(3, out.length);
    Assert.assertEquals(0, out[0].intValue());
    Assert.assertEquals(-1, out[1].intValue());
    Assert.assertEquals(2, out[2].intValue());
  }

  @Test
  public void testCreateArrayOfBytes() throws SQLException {

    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::bytea[]");
    final byte[][] in = new byte[][]{{0x01, (byte) 0xFF, (byte) 0x12}, {}, {(byte) 0xAC, (byte) 0xE4}, null};
    final Array createdArray = conn.createArrayOf("bytea", in);

    byte[][] inCopy = (byte[][]) createdArray.getArray();

    Assert.assertEquals(4, inCopy.length);

    Assert.assertArrayEquals(in[0], inCopy[0]);
    Assert.assertArrayEquals(in[1], inCopy[1]);
    Assert.assertArrayEquals(in[2], inCopy[2]);
    Assert.assertArrayEquals(in[3], inCopy[3]);
    Assert.assertNull(inCopy[3]);

    pstmt.setArray(1, createdArray);

    ResultSet rs = pstmt.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);

    byte[][] out = (byte[][]) arr.getArray();

    Assert.assertEquals(4, out.length);

    Assert.assertArrayEquals(in[0], out[0]);
    Assert.assertArrayEquals(in[1], out[1]);
    Assert.assertArrayEquals(in[2], out[2]);
    Assert.assertArrayEquals(in[3], out[3]);
    Assert.assertNull(out[3]);
  }

  @Test
  public void testCreateArrayOfBytesFromString() throws SQLException {

    assumeMinimumServerVersion("support for bytea[] as string requires hex string support from 9.0",
        ServerVersion.v9_0);

    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::bytea[]");
    final byte[][] in = new byte[][]{{0x01, (byte) 0xFF, (byte) 0x12}, {}, {(byte) 0xAC, (byte) 0xE4}, null};

    pstmt.setString(1, "{\"\\\\x01ff12\",\"\\\\x\",\"\\\\xace4\",NULL}");

    ResultSet rs = pstmt.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);

    byte[][] out = (byte[][]) arr.getArray();

    Assert.assertEquals(4, out.length);

    Assert.assertArrayEquals(in[0], out[0]);
    Assert.assertArrayEquals(in[1], out[1]);
    Assert.assertArrayEquals(in[2], out[2]);
    Assert.assertArrayEquals(in[3], out[3]);
    Assert.assertNull(out[3]);
  }

  @Test
  public void testCreateArrayOfSmallInt() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::smallint[]");
    Short[] in = new Short[3];
    in[0] = 0;
    in[1] = -1;
    in[2] = 2;
    pstmt.setArray(1, conn.createArrayOf("int2", in));

    ResultSet rs = pstmt.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);
    Short[] out = (Short[]) arr.getArray();

    Assert.assertEquals(3, out.length);
    Assert.assertEquals(0, out[0].shortValue());
    Assert.assertEquals(-1, out[1].shortValue());
    Assert.assertEquals(2, out[2].shortValue());
  }

  @Test
  public void testCreateArrayOfMultiString() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::text[]");
    String[][] in = new String[2][2];
    in[0][0] = "a";
    in[0][1] = "";
    in[1][0] = "\\";
    in[1][1] = "\"\\'z";
    pstmt.setArray(1, conn.createArrayOf("text", in));

    ResultSet rs = pstmt.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);
    String[][] out = (String[][]) arr.getArray();

    Assert.assertEquals(2, out.length);
    Assert.assertEquals(2, out[0].length);
    Assert.assertEquals("a", out[0][0]);
    Assert.assertEquals("", out[0][1]);
    Assert.assertEquals("\\", out[1][0]);
    Assert.assertEquals("\"\\'z", out[1][1]);
  }

  @Test
  public void testCreateArrayOfMultiJson() throws SQLException {
    if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_2)) {
      return;
    }
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::json[]");
    PGobject p1 = new PGobject();
    p1.setType("json");
    p1.setValue("{\"x\": 10}");

    PGobject p2 = new PGobject();
    p2.setType("json");
    p2.setValue("{\"x\": 20}");
    PGobject[] in = new PGobject[]{p1, p2};
    pstmt.setArray(1, conn.createArrayOf("json", in));

    ResultSet rs = pstmt.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);
    ResultSet arrRs = arr.getResultSet();
    Assert.assertTrue(arrRs.next());
    Assert.assertEquals(in[0], arrRs.getObject(2));

    Assert.assertTrue(arrRs.next());
    Assert.assertEquals(in[1], arrRs.getObject(2));
  }

  @Test
  public void testCreateArrayWithNonStandardDelimiter() throws SQLException {
    // box[] uses ';' rather than ',' as its element delimiter.
    PGbox[] in = new PGbox[2];
    in[0] = new PGbox(1, 2, 3, 4);
    in[1] = new PGbox(5, 6, 7, 8);

    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::box[]");
    pstmt.setArray(1, conn.createArrayOf("box", in));
    ResultSet rs = pstmt.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);
    ResultSet arrRs = arr.getResultSet();
    Assert.assertTrue(arrRs.next());
    Assert.assertEquals(in[0], arrRs.getObject(2));
    Assert.assertTrue(arrRs.next());
    Assert.assertEquals(in[1], arrRs.getObject(2));
    Assert.assertFalse(arrRs.next());
  }

  @Test
  public void testCreateArrayOfNull() throws SQLException {
    String sql = "SELECT ?";
    // We must provide the type information for V2 protocol
    if (preferQueryMode == PreferQueryMode.SIMPLE) {
      sql = "SELECT ?::int8[]";
    }

    PreparedStatement pstmt = conn.prepareStatement(sql);
    String[] in = new String[2];
    in[0] = null;
    in[1] = null;
    pstmt.setArray(1, conn.createArrayOf("int8", in));

    ResultSet rs = pstmt.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);
    Long[] out = (Long[]) arr.getArray();

    Assert.assertEquals(2, out.length);
    Assert.assertNull(out[0]);
    Assert.assertNull(out[1]);
  }

  @Test
  public void testCreateEmptyArrayOfIntViaAlias() throws SQLException {
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::int[]");
    Integer[] in = new Integer[0];
    pstmt.setArray(1, conn.createArrayOf("integer", in));

    ResultSet rs = pstmt.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);
    Integer[] out = (Integer[]) arr.getArray();

    Assert.assertEquals(0, out.length);

    ResultSet arrRs = arr.getResultSet();
    Assert.assertFalse(arrRs.next());
  }

  @Test
  public void testCreateArrayWithoutServer() throws SQLException {
    String[][] in = new String[2][2];
    in[0][0] = "a";
    in[0][1] = "";
    in[1][0] = "\\";
    in[1][1] = "\"\\'z";

    Array arr = conn.createArrayOf("varchar", in);
    String[][] out = (String[][]) arr.getArray();

    Assert.assertEquals(2, out.length);
    Assert.assertEquals(2, out[0].length);
    Assert.assertEquals("a", out[0][0]);
    Assert.assertEquals("", out[0][1]);
    Assert.assertEquals("\\", out[1][0]);
    Assert.assertEquals("\"\\'z", out[1][1]);
  }

  @Test
  public void testCreatePrimitiveArray() throws SQLException {
    double[][] in = new double[2][2];
    in[0][0] = 3.5;
    in[0][1] = -4.5;
    in[1][0] = 10.0 / 3;
    in[1][1] = 77;

    Array arr = conn.createArrayOf("float8", in);
    Double[][] out = (Double[][]) arr.getArray();

    Assert.assertEquals(2, out.length);
    Assert.assertEquals(2, out[0].length);
    Assert.assertEquals(3.5, out[0][0], 0.00001);
    Assert.assertEquals(-4.5, out[0][1], 0.00001);
    Assert.assertEquals(10.0 / 3, out[1][0], 0.00001);
    Assert.assertEquals(77, out[1][1], 0.00001);
  }

  @Test
  public void testUUIDArray() throws SQLException {
    Assume.assumeTrue("UUID is not supported in PreferQueryMode.SIMPLE",
        preferQueryMode != PreferQueryMode.SIMPLE);
    Assume.assumeTrue("UUID requires PostgreSQL 8.3+",
        TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3));
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    UUID uuid3 = UUID.randomUUID();

    // insert a uuid array, and check
    PreparedStatement pstmt1 = conn.prepareStatement("INSERT INTO arrtest(uuidarr) VALUES (?)");
    pstmt1.setArray(1, conn.createArrayOf("uuid", new UUID[]{uuid1, uuid2, uuid3}));
    pstmt1.executeUpdate();

    PreparedStatement pstmt2 =
        conn.prepareStatement("SELECT uuidarr FROM arrtest WHERE uuidarr @> ?");
    pstmt2.setObject(1, conn.createArrayOf("uuid", new UUID[]{uuid1}), Types.OTHER);
    ResultSet rs = pstmt2.executeQuery();
    Assert.assertTrue(rs.next());
    Array arr = rs.getArray(1);
    UUID[] out = (UUID[]) arr.getArray();

    Assert.assertEquals(3, out.length);
    Assert.assertEquals(uuid1, out[0]);
    Assert.assertEquals(uuid2, out[1]);
    Assert.assertEquals(uuid3, out[2]);

    // concatenate a uuid, and check
    UUID uuid4 = UUID.randomUUID();
    PreparedStatement pstmt3 =
        conn.prepareStatement("UPDATE arrtest SET uuidarr = uuidarr || ? WHERE uuidarr @> ?");
    pstmt3.setObject(1, uuid4, Types.OTHER);
    pstmt3.setArray(2, conn.createArrayOf("uuid", new UUID[]{uuid1}));
    pstmt3.executeUpdate();

    // --
    pstmt2.setObject(1, conn.createArrayOf("uuid", new UUID[]{uuid4}), Types.OTHER);
    rs = pstmt2.executeQuery();
    Assert.assertTrue(rs.next());
    arr = rs.getArray(1);
    out = (UUID[]) arr.getArray();

    Assert.assertEquals(4, out.length);
    Assert.assertEquals(uuid1, out[0]);
    Assert.assertEquals(uuid2, out[1]);
    Assert.assertEquals(uuid3, out[2]);
    Assert.assertEquals(uuid4, out[3]);
  }

  @Test
  public void testSetObjectFromJavaArray() throws SQLException {
    String[] strArray = new String[]{"a", "b", "c"};
    Object[] objCopy = Arrays.copyOf(strArray, strArray.length, Object[].class);

    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest(strarr) VALUES (?)");

    // cannot handle generic Object[]
    try {
      pstmt.setObject(1, objCopy, Types.ARRAY);
      pstmt.executeUpdate();
      Assert.fail("setObject() with a Java array parameter and Types.ARRAY shouldn't succeed");
    } catch (org.postgresql.util.PSQLException ex) {
      // Expected failure.
    }

    try {
      pstmt.setObject(1, objCopy);
      pstmt.executeUpdate();
      Assert.fail("setObject() with a Java array parameter and no Types argument shouldn't succeed");
    } catch (org.postgresql.util.PSQLException ex) {
      // Expected failure.
    }

    pstmt.setObject(1, strArray);
    pstmt.executeUpdate();

    pstmt.setObject(1, strArray, Types.ARRAY);
    pstmt.executeUpdate();

    // Correct way, though the use of "text" as a type is non-portable.
    // Only supported for JDK 1.6 and JDBC4
    Array sqlArray = conn.createArrayOf("text", strArray);
    pstmt.setArray(1, sqlArray);
    pstmt.executeUpdate();

    pstmt.close();
  }

  @Test
  public void testGetArrayOfComposites() throws SQLException {
    Assume.assumeTrue("array_agg(expression) requires PostgreSQL 8.4+",
        TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_4));

    PreparedStatement insertParentPstmt =
        conn.prepareStatement("INSERT INTO arrcompprnttest (name) "
            + "VALUES ('aParent');");
    insertParentPstmt.execute();

    String[] children = {
        "November 5, 2013",
        "\"A Book Title\"",
        "4\" by 6\"",
        "5\",3\""};

    PreparedStatement insertChildrenPstmt =
        conn.prepareStatement("INSERT INTO arrcompchldttest (name,description,parent) "
            + "VALUES ('child1',?,1),"
            + "('child2',?,1),"
            + "('child3',?,1),"
            + "('child4',?,1);");

    insertChildrenPstmt.setString(1, children[0]);
    insertChildrenPstmt.setString(2, children[1]);
    insertChildrenPstmt.setString(3, children[2]);
    insertChildrenPstmt.setString(4, children[3]);

    insertChildrenPstmt.execute();

    PreparedStatement pstmt = conn.prepareStatement(
        "SELECT arrcompprnttest.name, "
            + "array_agg("
            + "DISTINCT(arrcompchldttest.id, "
            + "arrcompchldttest.name, "
            + "arrcompchldttest.description)) "
            + "AS children "
            + "FROM arrcompprnttest "
            + "LEFT JOIN arrcompchldttest "
            + "ON (arrcompchldttest.parent = arrcompprnttest.id) "
            + "WHERE arrcompprnttest.id=? "
            + "GROUP BY arrcompprnttest.name;");
    pstmt.setInt(1, 1);
    ResultSet rs = pstmt.executeQuery();

    assertNotNull(rs);
    Assert.assertTrue(rs.next());

    Array childrenArray = rs.getArray("children");
    assertNotNull(childrenArray);

    ResultSet rsChildren = childrenArray.getResultSet();
    assertNotNull(rsChildren);
    while (rsChildren.next()) {
      String comp = rsChildren.getString(2);
      PGtokenizer token = new PGtokenizer(PGtokenizer.removePara(comp), ',');
      token.remove("\"", "\""); // remove surrounding double quotes
      if (2 < token.getSize()) {
        int childID = Integer.parseInt(token.getToken(0));
        // remove double quotes escaping with double quotes
        String value = token.getToken(2).replace("\"\"", "\"");
        Assert.assertEquals(children[childID - 1], value);
      } else {
        Assert.fail("Needs to have 3 tokens");
      }
    }
  }

  @Test
  public void testCasingComposite() throws SQLException {
    Assume.assumeTrue("Arrays of composite types requires PostgreSQL 8.3+",
        TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3));

    PGobject cc = new PGobject();
    cc.setType("\"CorrectCasing\"");
    cc.setValue("(1)");
    Object[] in = new Object[1];
    in[0] = cc;

    Array arr = conn.createArrayOf("\"CorrectCasing\"", in);
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::\"CorrectCasing\"[]");
    pstmt.setArray(1, arr);
    ResultSet rs = pstmt.executeQuery();

    Assert.assertTrue(rs.next());
    Object[] resArr = (Object[]) rs.getArray(1).getArray();

    Assert.assertTrue(resArr[0] instanceof PGobject);
    PGobject resObj = (PGobject) resArr[0];
    Assert.assertEquals("(1)", resObj.getValue());
  }

  @Test
  public void testCasingBuiltinAlias() throws SQLException {
    Array arr = conn.createArrayOf("INT", new Integer[]{1, 2, 3});
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::INT[]");
    pstmt.setArray(1, arr);
    ResultSet rs = pstmt.executeQuery();

    Assert.assertTrue(rs.next());
    Integer[] resArr = (Integer[]) rs.getArray(1).getArray();

    Assert.assertArrayEquals(new Integer[]{1, 2, 3}, resArr);
  }

  @Test
  public void testCasingBuiltinNonAlias() throws SQLException {
    Array arr = conn.createArrayOf("INT4", new Integer[]{1, 2, 3});
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::INT4[]");
    pstmt.setArray(1, arr);
    ResultSet rs = pstmt.executeQuery();

    Assert.assertTrue(rs.next());
    Integer[] resArr = (Integer[]) rs.getArray(1).getArray();

    Assert.assertArrayEquals(new Integer[]{1, 2, 3}, resArr);
  }

  @Test
  public void testEvilCasing() throws SQLException {
    Assume.assumeTrue("Arrays of composite types requires PostgreSQL 8.3+",
        TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3));

    PGobject cc = new PGobject();
    cc.setType("\"Evil.Table\"");
    cc.setValue("(1)");
    Object[] in = new Object[1];
    in[0] = cc;

    Array arr = conn.createArrayOf("\"Evil.Table\"", in);
    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::\"Evil.Table\"[]");
    pstmt.setArray(1, arr);
    ResultSet rs = pstmt.executeQuery();

    Assert.assertTrue(rs.next());
    Object[] resArr = (Object[]) rs.getArray(1).getArray();

    Assert.assertTrue(resArr[0] instanceof PGobject);
    PGobject resObj = (PGobject) resArr[0];
    Assert.assertEquals("(1)", resObj.getValue());
  }

  @Test
  public void testToString() throws SQLException {
    Double[] d = new Double[4];

    d[0] = 3.5;
    d[1] = -4.5;
    d[2] = null;
    d[3] = 77.0;

    Array arr = con.createArrayOf("float8", d);
    PreparedStatement pstmt = con.prepareStatement("INSERT INTO arrtest(floatarr) VALUES (?)");
    ResultSet rs = null;

    try {
      pstmt.setArray(1, arr);
      pstmt.execute();
    } finally {
      TestUtil.closeQuietly(pstmt);
    }

    Statement stmt = null;
    try {
      stmt = con.createStatement();

      rs = stmt.executeQuery("select floatarr from arrtest");

      while (rs.next()) {
        Array doubles = rs.getArray(1);
        String actual = doubles.toString();
        if (actual != null) {
          // if a binary array is provided, the string representation looks like [0:1][0:1]={{1,2},{3,4}}
          int idx = actual.indexOf('=');
          if (idx > 0) {
            actual = actual.substring(idx + 1);
          }
          // Remove all double quotes. They do not make a difference here.
          actual = actual.replaceAll("\"", "");
        }
        //the string format may vary based on how data stored
        MatcherAssert.assertThat(actual, RegexMatcher.matchesPattern("\\{3\\.5,-4\\.5,NULL,77(.0)?\\}"));
      }

    } finally {
      TestUtil.closeQuietly(rs);
      TestUtil.closeQuietly(stmt);
    }
  }

  @Test
  public void nullArray() throws SQLException {
    PreparedStatement ps = con.prepareStatement("INSERT INTO arrtest(floatarr) VALUES (?)");

    ps.setNull(1, Types.ARRAY, "float8[]");
    ps.execute();

    ps.close();
    ps = con.prepareStatement("select floatarr from arrtest");
    ResultSet rs = ps.executeQuery();
    Assert.assertTrue("arrtest should contain a row", rs.next());
    Array getArray = rs.getArray(1);
    Assert.assertNull("null array should return null value on getArray", getArray);
    Object getObject = rs.getObject(1);
    Assert.assertNull("null array should return null on getObject", getObject);
  }

  @Test
  public void createNullArray() throws SQLException {
    Array arr = con.createArrayOf("float8", null);
    assertNotNull(arr);
    Assert.assertNull(arr.getArray());
  }

  @Test
  public void multiDimIntArray() throws SQLException {
    Array arr = con.createArrayOf("int4", new int[][]{{1, 2}, {3, 4}});
    PreparedStatement ps = con.prepareStatement("select ?::int4[][]");
    ps.setArray(1, arr);
    ResultSet rs = ps.executeQuery();
    rs.next();
    Array resArray = rs.getArray(1);
    String stringValue = resArray.toString();
    // if a binary array is provided, the string representation looks like [0:1][0:1]={{1,2},{3,4}}
    int idx = stringValue.indexOf('=');
    if (idx > 0) {
      stringValue = stringValue.substring(idx + 1);
    }
    // Both {{"1","2"},{"3","4"}} and {{1,2},{3,4}} are the same array representation
    stringValue = stringValue.replaceAll("\"", "");
    Assert.assertEquals("{{1,2},{3,4}}", stringValue);
    TestUtil.closeQuietly(rs);
    TestUtil.closeQuietly(ps);
  }

  @Test
  public void insertAndQueryMultiDimArray() throws SQLException {
    Array arr = con.createArrayOf("int4", new int[][]{{1, 2}, {3, 4}});
    PreparedStatement insertPs = con.prepareStatement("INSERT INTO arrtest(intarr2) VALUES (?)");
    insertPs.setArray(1, arr);
    insertPs.execute();
    insertPs.close();

    PreparedStatement selectPs = con.prepareStatement("SELECT intarr2 FROM arrtest");
    ResultSet rs = selectPs.executeQuery();
    rs.next();

    Array array = rs.getArray(1);
    Integer[][] secondRowValues = (Integer[][]) array.getArray(2, 1);

    Assert.assertEquals(3, secondRowValues[0][0].intValue());
    Assert.assertEquals(4, secondRowValues[0][1].intValue());
  }

  @Test
  public void testJsonbArray() throws SQLException {
    Assume.assumeTrue("jsonb requires PostgreSQL 9.4+", TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4));
    TestUtil.createTempTable(con, "jsonbarray", "jbarray jsonb[]");
    try (Statement stmt = con.createStatement()) {
      stmt.executeUpdate("insert into jsonbarray values( ARRAY['{\"a\":\"a\"}'::jsonb, '{\"b\":\"b\"}'::jsonb] )");
      try (ResultSet rs = stmt.executeQuery("select jbarray from jsonbarray")) {
        assertTrue(rs.next());
        Array jsonArray = rs.getArray(1);
        assertNotNull(jsonArray);
        assertEquals("jsonb", jsonArray.getBaseTypeName());
      }
    }
  }
}
/*
 * Copyright (c) 2007, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc4;

import org.postgresql.test.TestUtil;
import org.postgresql.test.jdbc2.BaseTest4;

import org.junit.Assert;
import org.junit.Test;

import java.io.ByteArrayInputStream;
import java.nio.ByteBuffer;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Random;

/**
 * Round-trip tests for {@code PreparedStatement#setBinaryStream} with bytea payloads of
 * various sizes, both with a caller-supplied length and with an unknown length.
 */
public class BinaryStreamTest extends BaseTest4 {

  // Deterministic pseudo-random payload; getTestData slices prefixes of it.
  private ByteBuffer testData;

  @Override
  public void setUp() throws Exception {
    super.setUp();
    assumeByteaSupported();
    TestUtil.createTable(con, "images", "img bytea");

    // Fixed seed keeps the payload reproducible across runs.
    Random random = new Random(31459);
    testData = ByteBuffer.allocate(200 * 1024);
    while (testData.remaining() > 0) {
      testData.putLong(random.nextLong());
    }
  }

  @Override
  public void tearDown() throws SQLException {
    TestUtil.dropTable(con, "images");
    super.tearDown();
  }

  /**
   * Inserts {@code data} via setBinaryStream with an explicit length.
   * (Renamed from the misspelled {@code insertStreamKownLength}; private, so no callers break.)
   */
  private void insertStreamKnownLength(byte[] data) throws Exception {
    PreparedStatement updatePS = con.prepareStatement(TestUtil.insertSQL("images", "img", "?"));
    try {
      updatePS.setBinaryStream(1, new ByteArrayInputStream(data), data.length);
      updatePS.executeUpdate();
    } finally {
      updatePS.close();
    }
  }

  /** Inserts {@code data} via the length-less setBinaryStream overload. */
  private void insertStreamUnknownLength(byte[] data) throws Exception {
    PreparedStatement updatePS = con.prepareStatement(TestUtil.insertSQL("images", "img", "?"));
    try {
      updatePS.setBinaryStream(1, new ByteArrayInputStream(data));
      updatePS.executeUpdate();
    } finally {
      updatePS.close();
    }
  }

  /**
   * Asserts that the row in {@code images} holds exactly {@code data}, then clears the
   * table so the next round trip starts from an empty table.
   */
  private void validateContent(byte[] data) throws Exception {
    PreparedStatement selectPS = con.prepareStatement(TestUtil.selectSQL("images", "img"));
    try {
      ResultSet rs = selectPS.executeQuery();
      try {
        rs.next();
        byte[] actualData = rs.getBytes(1);
        Assert.assertArrayEquals("Sent and received data are not the same", data, actualData);
      } finally {
        rs.close();
      }
    } finally {
      selectPS.close();
    }

    PreparedStatement deletePS = con.prepareStatement("DELETE FROM images");
    try {
      deletePS.executeUpdate();
    } finally {
      deletePS.close();
    }
  }

  /** Returns the first {@code size} bytes of the shared deterministic payload. */
  private byte[] getTestData(int size) {
    testData.rewind();
    byte[] data = new byte[size];
    testData.get(data);
    return data;
  }

  @Test
  public void testKnownLengthEmpty() throws Exception {
    byte[] data = new byte[0];
    insertStreamKnownLength(data);
    validateContent(data);
  }

  @Test
  public void testKnownLength2Kb() throws Exception {
    byte[] data = getTestData(2 * 1024);
    insertStreamKnownLength(data);
    validateContent(data);
  }

  @Test
  public void testKnownLength10Kb() throws Exception {
    byte[] data = getTestData(10 * 1024);
    insertStreamKnownLength(data);
    validateContent(data);
  }

  @Test
  public void testKnownLength100Kb() throws Exception {
    byte[] data = getTestData(100 * 1024);
    insertStreamKnownLength(data);
    validateContent(data);
  }

  @Test
  public void testKnownLength200Kb() throws Exception {
    byte[] data = getTestData(200 * 1024);
    insertStreamKnownLength(data);
    validateContent(data);
  }

  @Test
  public void testUnknownLengthEmpty() throws Exception {
    // Fixed: this used getTestData(2 * 1024), which duplicated testUnknownLength2Kb
    // instead of exercising the zero-length stream its name promises.
    byte[] data = new byte[0];
    insertStreamUnknownLength(data);
    validateContent(data);
  }

  @Test
  public void testUnknownLength2Kb() throws Exception {
    byte[] data = getTestData(2 * 1024);
    insertStreamUnknownLength(data);
    validateContent(data);
  }

  @Test
  public void testUnknownLength10Kb() throws Exception {
    byte[] data = getTestData(10 * 1024);
    insertStreamUnknownLength(data);
    validateContent(data);
  }

  @Test
  public void testUnknownLength100Kb() throws Exception {
    byte[] data = getTestData(100 * 1024);
    insertStreamUnknownLength(data);
    validateContent(data);
  }

  @Test
  public void testUnknownLength200Kb() throws Exception {
    byte[] data = getTestData(200 * 1024);
    insertStreamUnknownLength(data);
    validateContent(data);
  }
}
/*
 * Copyright (c) 2007, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc4;

import static org.junit.Assert.assertEquals;

import org.postgresql.PGConnection;
import org.postgresql.PGResultSetMetaData;
import org.postgresql.PGStatement;
import org.postgresql.core.Field;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.test.jdbc2.BaseTest4;

import org.junit.Assume;
import org.junit.Test;

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

/**
 * We don't want to use the binary protocol for one-off queries as it involves another round-trip to
 * the server to 'describe' the query. If we use the query enough times (see
 * {@link PGConnection#setPrepareThreshold(int)} then we'll change to using the binary protocol to
 * save bandwidth and reduce decoding time.
 */
public class BinaryTest extends BaseTest4 {
  private ResultSet results;
  private PreparedStatement statement;

  @Override
  public void setUp() throws Exception {
    super.setUp();
    Assume.assumeTrue("Server-prepared statements are not supported in 'simple protocol only'",
        preferQueryMode != PreferQueryMode.SIMPLE);
    statement = con.prepareStatement("select 1");

    ((PGStatement) statement).setPrepareThreshold(5);
  }

  /**
   * Applies the given prepare threshold to the shared statement, executes it once per entry
   * in {@code expectedFormats} asserting the wire format of each execution, then restores
   * the default threshold of 5.
   */
  private void assertFormatsWithThreshold(int threshold, int... expectedFormats) throws Exception {
    ((PGStatement) statement).setPrepareThreshold(threshold);

    for (int expected : expectedFormats) {
      results = statement.executeQuery();
      assertEquals(expected, getFormat(results));
    }

    ((PGStatement) statement).setPrepareThreshold(5);
  }

  @Test
  public void testPreparedStatement_3() throws Exception {
    // With threshold 3 the first three executions stay text; the fourth is binary.
    assertFormatsWithThreshold(3,
        Field.TEXT_FORMAT, Field.TEXT_FORMAT, Field.TEXT_FORMAT, Field.BINARY_FORMAT);
  }

  @Test
  public void testPreparedStatement_1() throws Exception {
    // With threshold 1 only the first execution is text; all later ones are binary.
    assertFormatsWithThreshold(1,
        Field.TEXT_FORMAT, Field.BINARY_FORMAT, Field.BINARY_FORMAT, Field.BINARY_FORMAT);
  }

  @Test
  public void testPreparedStatement_0() throws Exception {
    // Threshold 0 disables server-side preparation, so every execution stays text.
    assertFormatsWithThreshold(0,
        Field.TEXT_FORMAT, Field.TEXT_FORMAT, Field.TEXT_FORMAT, Field.TEXT_FORMAT);
  }

  @Test
  public void testPreparedStatement_negative1() throws Exception {
    // Threshold -1 forces binary from the very first execution.
    assertFormatsWithThreshold(-1,
        Field.BINARY_FORMAT, Field.BINARY_FORMAT, Field.BINARY_FORMAT, Field.BINARY_FORMAT);
  }

  @Test
  public void testReceiveBinary() throws Exception {
    PreparedStatement ps = con.prepareStatement("select ?");
    for (int i = 0; i < 10; i++) {
      ps.setInt(1, 42 + i);
      ResultSet rs = ps.executeQuery();
      assertEquals("One row should be returned", true, rs.next());
      assertEquals(42 + i, rs.getInt(1));
      rs.close();
    }
    ps.close();
  }

  /** Reports the wire format (text/binary) of the first column of {@code results}. */
  private int getFormat(ResultSet results) throws SQLException {
    return ((PGResultSetMetaData) results.getMetaData()).getFormat(1);
  }
}
/*
 * Copyright (c) 2007, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc4;

import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;

import org.postgresql.test.TestUtil;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.sql.Blob;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

/**
 * This test-case is only for JDBC4 blob methods. Take a look at
 * {@link org.postgresql.test.jdbc2.BlobTest} for base tests concerning blobs
 */
class BlobTest {

  private Connection conn;

  @BeforeEach
  void setUp() throws Exception {
    conn = TestUtil.openDB();
    // Column "lo" holds the large-object OID the blob tests write to.
    TestUtil.createTable(conn, "testblob", "id name,lo oid");
    conn.setAutoCommit(false);
  }

  @AfterEach
  void tearDown() throws Exception {
    conn.setAutoCommit(true);
    try {
      Statement stmt = conn.createStatement();
      try {
        // Unlink the large objects referenced by the table before dropping it.
        stmt.execute("SELECT lo_unlink(lo) FROM testblob");
      } finally {
        try {
          stmt.close();
        } catch (Exception ignored) {
          // Best-effort cleanup: dropTable/closeDB below must still run even if close fails.
        }
      }
    } finally {
      TestUtil.dropTable(conn, "testblob");
      TestUtil.closeDB(conn);
    }
  }

  /** Inserting via the length-less setBlob(int, InputStream) must round-trip all bytes. */
  @Test
  void setBlobWithStream() throws Exception {
    byte[] data = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque bibendum dapibus varius."
        .getBytes(StandardCharsets.UTF_8);
    try (PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?"))) {
      insertPS.setBlob(1, new ByteArrayInputStream(data));
      insertPS.executeUpdate();
    }

    try (Statement selectStmt = conn.createStatement()) {
      try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
        assertTrue(rs.next());

        Blob actualBlob = rs.getBlob(1);
        byte[] actualBytes = actualBlob.getBytes(1, (int) actualBlob.length());

        assertArrayEquals(data, actualBytes);
      }
    }
  }

  /** A length shorter than the stream must truncate the stored blob to that length. */
  @Test
  void setBlobWithStreamAndLength() throws Exception {
    byte[] fullData = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse placerat tristique tellus, id tempus lectus."
        .getBytes(StandardCharsets.UTF_8);
    byte[] data =
        "Lorem ipsum dolor sit amet, consectetur adipiscing elit.".getBytes(StandardCharsets.UTF_8);

    try (PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?"))) {
      insertPS.setBlob(1, new ByteArrayInputStream(fullData), data.length);
      insertPS.executeUpdate();
    }

    try (Statement selectStmt = conn.createStatement()) {
      try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
        assertTrue(rs.next());

        Blob actualBlob = rs.getBlob(1);
        byte[] actualBytes = actualBlob.getBytes(1, (int) actualBlob.length());

        assertArrayEquals(data, actualBytes);
      }
    }
  }

  /** getBinaryStream(pos, len) must expose exactly the requested window of bytes. */
  @Test
  void getBinaryStreamWithBoundaries() throws Exception {
    byte[] data =
        "Cras vestibulum tellus eu sapien imperdiet ornare.".getBytes(StandardCharsets.UTF_8);
    try (PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?"))) {
      insertPS.setBlob(1, new ByteArrayInputStream(data), data.length);
      insertPS.executeUpdate();
    }
    try (Statement selectStmt = conn.createStatement()) {
      try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
        assertTrue(rs.next());

        byte[] actualData = new byte[10];
        Blob actualBlob = rs.getBlob(1);
        InputStream stream = actualBlob.getBinaryStream(6, 10);
        try {
          // Fixed: read(byte[]) may return fewer bytes than requested, which the old
          // code silently ignored; readNBytes blocks until the buffer is full or EOF,
          // and the returned count is asserted so a short read fails loudly.
          assertEquals(actualData.length, stream.readNBytes(actualData, 0, actualData.length),
              "Should read the full 10-byte window");
          assertEquals(-1, stream.read(new byte[1]), "Stream should be at end");
        } finally {
          stream.close();
        }
        assertEquals("vestibulum", new String(actualData, StandardCharsets.UTF_8));
      }
    }
  }

  /** Byte-at-a-time variant: reading past the requested window must hit EOF. */
  @Test
  void getBinaryStreamWithBoundaries2() throws Exception {
    byte[] data =
        "Cras vestibulum tellus eu sapien imperdiet ornare.".getBytes(StandardCharsets.UTF_8);

    try (PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?"))) {
      insertPS.setBlob(1, new ByteArrayInputStream(data), data.length);
      insertPS.executeUpdate();
    }

    try (Statement selectStmt = conn.createStatement()) {
      try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
        assertTrue(rs.next());

        byte[] actualData = new byte[9];
        Blob actualBlob = rs.getBlob(1);
        try (InputStream stream = actualBlob.getBinaryStream(6, 10)) {
          // read 9 bytes 1 at a time
          for (int i = 0; i < 9; i++) {
            actualData[i] = (byte) stream.read();
          }
          /* try to read past the end and make sure we get 1 byte */
          assertEquals(1, stream.read(new byte[2]), "There should be 1 byte left");
          /* now read one more and we should get an EOF */
          assertEquals(-1, stream.read(new byte[1]), "Stream should be at end");
        }
        assertEquals("vestibulu", new String(actualData, StandardCharsets.UTF_8));
      }
    }
  }

  /** After Blob.free(), any further access must throw SQLException. */
  @Test
  void free() throws SQLException {
    try (Statement stmt = conn.createStatement()) {
      stmt.execute("INSERT INTO testblob(lo) VALUES(lo_creat(-1))");
      try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob")) {
        assertTrue(rs.next());

        Blob blob = rs.getBlob(1);
        blob.free();
        try {
          blob.length();
          fail("Should have thrown an Exception because it was freed.");
        } catch (SQLException sqle) {
          // expected
        }
      }
    }
  }
}
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc4;

import org.postgresql.test.TestUtil;
import org.postgresql.test.jdbc2.BaseTest4;

import org.junit.Assert;
import org.junit.Test;

import java.io.Reader;
import java.io.StringReader;
import java.sql.PreparedStatement;
import java.sql.SQLFeatureNotSupportedException;

/**
 * Round-trip tests for {@code PreparedStatement#setCharacterStream} over a text column,
 * covering the int-length, long-length and unknown-length overloads for payloads from
 * null/empty up to 200Kb. The long-length overload is expected to be unsupported.
 */
public class CharacterStreamTest extends BaseTest4 {

  private static final String TEST_TABLE_NAME = "charstream";
  private static final String TEST_COLUMN_NAME = "cs";

  private static final String INSERT_SQL =
      String.format("INSERT INTO %s (%s) VALUES (?)", TEST_TABLE_NAME, TEST_COLUMN_NAME);
  private static final String SELECT_SQL =
      String.format("SELECT %s FROM %s", TEST_COLUMN_NAME, TEST_TABLE_NAME);

  @Override
  public void setUp() throws Exception {
    super.setUp();
    TestUtil.createTempTable(con, TEST_TABLE_NAME, "cs text");
  }

  /** Inserts via setCharacterStream(int, Reader, int); a null payload becomes a null reader. */
  private void insertStreamKnownIntLength(String data) throws Exception {
    PreparedStatement insertPS = con.prepareStatement(INSERT_SQL);
    try {
      Reader reader = data != null ? new StringReader(data) : null;
      int length = data != null ? data.length() : 0;
      insertPS.setCharacterStream(1, reader, length);
      insertPS.executeUpdate();
    } finally {
      TestUtil.closeQuietly(insertPS);
    }
  }

  /** Inserts via setCharacterStream(int, Reader, long); expected to be unsupported. */
  private void insertStreamKnownLongLength(String data) throws Exception {
    PreparedStatement insertPS = con.prepareStatement(INSERT_SQL);
    try {
      Reader reader = data != null ? new StringReader(data) : null;
      long length = data != null ? data.length() : 0;
      insertPS.setCharacterStream(1, reader, length);
      insertPS.executeUpdate();
    } finally {
      TestUtil.closeQuietly(insertPS);
    }
  }

  /** Inserts via the length-less setCharacterStream(int, Reader) overload. */
  private void insertStreamUnknownLength(String data) throws Exception {
    PreparedStatement insertPS = con.prepareStatement(INSERT_SQL);
    try {
      Reader reader = data != null ? new StringReader(data) : null;
      insertPS.setCharacterStream(1, reader);
      insertPS.executeUpdate();
    } finally {
      TestUtil.closeQuietly(insertPS);
    }
  }

  /** Asserts that the stored column value equals {@code data} (null compares as null). */
  private void validateContent(String data) throws Exception {
    String actualData = TestUtil.queryForString(con, SELECT_SQL);
    Assert.assertEquals("Sent and received data are not the same", data, actualData);
  }

  /**
   * Builds a string of exactly {@code size} characters: whole copies of a fixed
   * sentence while one still fits, then '.' padding up to the requested size.
   */
  private String getTestData(int size) {
    final String piece = "This is a test string.\n";
    StringBuilder out = new StringBuilder(size);

    while (out.length() + piece.length() < size) {
      out.append(piece);
    }
    while (out.length() < size) {
      out.append('.');
    }

    return out.toString();
  }

  // Shared round-trip bodies; each @Test below delegates to one of these.

  private void roundTripKnownIntLength(String data) throws Exception {
    insertStreamKnownIntLength(data);
    validateContent(data);
  }

  private void roundTripKnownLongLength(String data) throws Exception {
    insertStreamKnownLongLength(data);
    validateContent(data);
  }

  private void roundTripUnknownLength(String data) throws Exception {
    insertStreamUnknownLength(data);
    validateContent(data);
  }

  @Test
  public void testKnownIntLengthNull() throws Exception {
    roundTripKnownIntLength(null);
  }

  @Test(expected = SQLFeatureNotSupportedException.class)
  public void testKnownLongLengthNull() throws Exception {
    roundTripKnownLongLength(null);
  }

  @Test
  public void testUnknownLengthNull() throws Exception {
    roundTripUnknownLength(null);
  }

  @Test
  public void testKnownIntLengthEmpty() throws Exception {
    roundTripKnownIntLength("");
  }

  @Test(expected = SQLFeatureNotSupportedException.class)
  public void testKnownLongLengthEmpty() throws Exception {
    roundTripKnownLongLength("");
  }

  @Test
  public void testUnknownLengthEmpty() throws Exception {
    roundTripUnknownLength("");
  }

  @Test
  public void testKnownIntLength2Kb() throws Exception {
    roundTripKnownIntLength(getTestData(2 * 1024));
  }

  @Test(expected = SQLFeatureNotSupportedException.class)
  public void testKnownLongLength2Kb() throws Exception {
    roundTripKnownLongLength(getTestData(2 * 1024));
  }

  @Test
  public void testUnknownLength2Kb() throws Exception {
    roundTripUnknownLength(getTestData(2 * 1024));
  }

  @Test
  public void testKnownIntLength10Kb() throws Exception {
    roundTripKnownIntLength(getTestData(10 * 1024));
  }

  @Test(expected = SQLFeatureNotSupportedException.class)
  public void testKnownLongLength10Kb() throws Exception {
    roundTripKnownLongLength(getTestData(10 * 1024));
  }

  @Test
  public void testUnknownLength10Kb() throws Exception {
    roundTripUnknownLength(getTestData(10 * 1024));
  }

  @Test
  public void testKnownIntLength100Kb() throws Exception {
    roundTripKnownIntLength(getTestData(100 * 1024));
  }

  @Test(expected = SQLFeatureNotSupportedException.class)
  public void testKnownLongLength100Kb() throws Exception {
    roundTripKnownLongLength(getTestData(100 * 1024));
  }

  @Test
  public void testUnknownLength100Kb() throws Exception {
    roundTripUnknownLength(getTestData(100 * 1024));
  }

  @Test
  public void testKnownIntLength200Kb() throws Exception {
    roundTripKnownIntLength(getTestData(200 * 1024));
  }

  @Test(expected = SQLFeatureNotSupportedException.class)
  public void testKnownLongLength200Kb() throws Exception {
    roundTripKnownLongLength(getTestData(200 * 1024));
  }

  @Test
  public void testUnknownLength200Kb() throws Exception {
    roundTripUnknownLength(getTestData(200 * 1024));
  }
}
con.getClientInfo().getProperty("ApplicationName")); + } + + @Test + public void testExplicitSetAppNameNotificationIsParsed() throws SQLException { + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + return; + } + + String appName = "test-42"; + + Statement s = con.createStatement(); + s.execute("set application_name='" + appName + "'"); + s.close(); + assertEquals("application_name was set to " + appName + ", and it should be visible via " + + "con.getClientInfo", appName, con.getClientInfo("ApplicationName")); + assertEquals("application_name was set to " + appName + ", and it should be visible via " + + "con.getClientInfo", appName, con.getClientInfo().get("ApplicationName")); + } + + @Test + public void testSetAppNameProps() throws SQLException { + if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) { + return; + } + + Properties props = new Properties(); + props.put("ApplicationName", "my app"); + con.setClientInfo(props); + assertEquals("my app", getAppName()); + assertEquals("my app", con.getClientInfo("ApplicationName")); + assertEquals("my app", con.getClientInfo().getProperty("ApplicationName")); + } + + /** + * Test that no exception is thrown when an unknown property is set. + */ + @Test + public void testWarningOnUnknownName() throws SQLException { + try { + con.setClientInfo("NonexistentClientInfoName", "NoValue"); + } catch (SQLClientInfoException e) { + fail("Trying to set a nonexistent name must not throw an exception (spec)"); + } + assertNotNull(con.getWarnings()); + } + + /** + * Test that a name missing in the properties given to setClientInfo should be unset (spec). 
/*
 * Copyright (c) 2021, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc4;

import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.postgresql.test.TestUtil;
import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
import org.postgresql.test.util.StrangeProxyServer;

import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

import java.sql.Connection;
import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

/**
 * Verifies that {@code Connection.isValid(seconds)} honors whichever is smaller of the
 * network timeout and the validation timeout when the connection goes dead behind a proxy.
 */
@DisabledIfServerVersionBelow("9.4")
public class ConnectionValidTimeoutTest {

  /**
   * Parameters: {networkTimeoutMillis, validationTimeoutSeconds, expectedMaxValidationTimeMillis}.
   * A 0 entry means "no timeout" for that dimension.
   */
  public static Iterable<Object[]> data() {
    return Arrays.asList(new Object[][]{
        {500, 1, 600},
        {1500, 1, 1100},
        {0, 1, 1100},
        {500, 0, 600},
    });
  }

  @MethodSource("data")
  @ParameterizedTest(name = "networkTimeoutMillis={0}, validationTimeoutSeconds={1}, expectedMaxValidationTimeMillis={2}")
  @Timeout(30)
  void isValidRespectsSmallerTimeout(int networkTimeoutMillis, int validationTimeoutSeconds,
      int expectedMaxValidationTimeMillis) throws Exception {
    try (StrangeProxyServer proxyServer = new StrangeProxyServer(TestUtil.getServer(), TestUtil.getPort())) {
      final Properties props = new Properties();
      props.setProperty(TestUtil.SERVER_HOST_PORT_PROP, String.format("%s:%s", "localhost", proxyServer.getServerPort()));
      try (Connection conn = TestUtil.openDB(props)) {
        assertTrue(conn.isValid(validationTimeoutSeconds), "Connection through proxy should be valid");

        conn.setNetworkTimeout(null, networkTimeoutMillis);
        assertTrue(conn.isValid(validationTimeoutSeconds), "Connection through proxy should still be valid");

        // From here on the proxy stops forwarding, so the connection is effectively dead.
        proxyServer.stopForwardingOlderClients();

        // Fixed: use the monotonic clock for the elapsed-time measurement.
        // System.currentTimeMillis() tracks wall-clock time, which can jump
        // (NTP adjustment, suspend/resume) and make this timing assertion flaky.
        long startNanos = System.nanoTime();
        boolean result = conn.isValid(validationTimeoutSeconds);
        long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);

        assertFalse(result, "Broken connection should not be valid");

        assertTrue(elapsed <= expectedMaxValidationTimeMillis,
            String.format(
                "Connection validation should not take longer than %d ms"
                    + " when network timeout is %d ms and validation timeout is %d s"
                    + " (actual result: %d ms)",
                expectedMaxValidationTimeMillis,
                networkTimeoutMillis,
                validationTimeoutSeconds,
                elapsed)
        );
      }
    }
  }
}
+ */ + +package org.postgresql.test.jdbc4; + +import static org.hamcrest.CoreMatchers.hasItem; +import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGProperty; +import org.postgresql.core.ServerVersion; +import org.postgresql.jdbc.PgConnection; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +/** + * Tests that database objects for which the current user has no privileges are filtered out from + * the DatabaseMetaData depending on whether the connection parameter hideUnprivilegedObjects is + * set to true. + */ +public class DatabaseMetaDataHideUnprivilegedObjectsTest { + public static final String COLUMNS = "digit int4, name text"; + private static Connection hidingCon; + private static Connection nonHidingCon; + private static Connection privilegedCon; + private static PgConnection pgConnection; + private static DatabaseMetaData hidingDatabaseMetaData; + private static DatabaseMetaData nonHidingDatabaseMetaData; + + @BeforeAll + static void setUp() throws Exception { + Properties props = new Properties(); + privilegedCon = TestUtil.openPrivilegedDB(); + pgConnection = privilegedCon.unwrap(PgConnection.class); + Statement stmt = privilegedCon.createStatement(); + + createTestDataObjectsWithRangeOfPrivilegesInSchema("high_privileges_schema"); + // Grant Test User ALL privileges on schema. 
+ stmt.executeUpdate("GRANT ALL ON SCHEMA high_privileges_schema TO " + TestUtil.getUser()); + stmt.executeUpdate("REVOKE ALL ON SCHEMA high_privileges_schema FROM public"); + + createTestDataObjectsWithRangeOfPrivilegesInSchema("low_privileges_schema"); + // Grant Test User USAGE privileges on schema. + stmt.executeUpdate("GRANT USAGE ON SCHEMA low_privileges_schema TO " + TestUtil.getUser()); + stmt.executeUpdate("REVOKE ALL ON SCHEMA low_privileges_schema FROM public"); + + createTestDataObjectsWithRangeOfPrivilegesInSchema("no_privileges_schema"); + // Revoke ALL privileges from Test User USAGE on schema. + stmt.executeUpdate("REVOKE ALL ON SCHEMA no_privileges_schema FROM " + TestUtil.getUser()); + stmt.executeUpdate("REVOKE ALL ON SCHEMA no_privileges_schema FROM public"); + + stmt.close(); + + nonHidingDatabaseMetaData = getNonHidingDatabaseMetaData(props); + hidingDatabaseMetaData = getHidingDatabaseMetaData(props); + } + + private static DatabaseMetaData getHidingDatabaseMetaData(Properties props) throws Exception { + PGProperty.HIDE_UNPRIVILEGED_OBJECTS.set(props, true); + hidingCon = TestUtil.openDB(props); + if (isSuperUser(hidingCon)) { + fail("Test for hiding database objects will not work while:" + TestUtil.getUser() + + " has a SUPERUSER role."); + } + return hidingCon.getMetaData(); + } + + private static DatabaseMetaData getNonHidingDatabaseMetaData(Properties props) throws Exception { + nonHidingCon = TestUtil.openDB(props); + return nonHidingCon.getMetaData(); + } + + private static void createTestDataObjectsWithRangeOfPrivilegesInSchema(String schema) + throws SQLException { + TestUtil.createSchema(privilegedCon, schema); + createSimpleTablesInSchema(schema, + new String[]{"owned_table", "all_grants_table", "insert_granted_table", + "select_granted_table", "no_grants_table"}); + + Statement stmt = privilegedCon.createStatement(); + stmt.executeUpdate( + "CREATE FUNCTION " + schema + "." 
+ + "execute_granted_add_function(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT"); + stmt.executeUpdate( + "CREATE FUNCTION " + schema + "." + + "no_grants_add_function(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT"); + + if (pgConnection.haveMinimumServerVersion(ServerVersion.v11)) { + stmt.executeUpdate( + "CREATE PROCEDURE " + schema + "." + + "execute_granted_insert_procedure( a integer, b integer) LANGUAGE SQL AS 'select $1 + $2;'"); + stmt.executeUpdate( + "CREATE PROCEDURE " + schema + "." + + "no_grants_insert_procedure( a integer, b integer) LANGUAGE SQL AS 'select $1 + $2;'"); + + } + stmt.executeUpdate( + "CREATE OR REPLACE VIEW " + schema + "." + "select_granted_view AS SELECT name FROM " + + schema + "." + "select_granted_table"); + stmt.executeUpdate( + "CREATE OR REPLACE VIEW " + schema + "." + "no_grants_view AS SELECT name FROM " + schema + + "." + "owned_table"); + stmt.executeUpdate( + "CREATE TYPE " + schema + "." + "usage_granted_composite_type AS (f1 int, f2 text)"); + stmt.executeUpdate( + "CREATE TYPE " + schema + "." + "no_grants_composite_type AS (f1 int, f2 text)"); + stmt.executeUpdate( + "CREATE DOMAIN " + schema + "." + "usage_granted_us_postal_code_domain CHAR(5) NOT NULL"); + stmt.executeUpdate( + "CREATE DOMAIN " + schema + "." + "no_grants_us_postal_code_domain AS CHAR(5) NOT NULL"); + + if (pgConnection.haveMinimumServerVersion(ServerVersion.v9_2)) { + stmt.executeUpdate( + "REVOKE ALL ON TYPE " + schema + "." + + "usage_granted_composite_type FROM public RESTRICT"); + stmt.executeUpdate( + "REVOKE ALL ON TYPE " + schema + "." + "no_grants_composite_type FROM public RESTRICT"); + stmt.executeUpdate("GRANT USAGE on TYPE " + schema + "." + "usage_granted_composite_type TO " + + TestUtil.getUser()); + stmt.executeUpdate( + "REVOKE ALL ON TYPE " + schema + "." 
+ + "usage_granted_us_postal_code_domain FROM public RESTRICT"); + stmt.executeUpdate( + "REVOKE ALL ON TYPE " + schema + "." + + "no_grants_us_postal_code_domain FROM public RESTRICT"); + stmt.executeUpdate( + "GRANT USAGE on TYPE " + schema + "." + "usage_granted_us_postal_code_domain TO " + + TestUtil.getUser()); + } + revokeAllOnFunctions(schema, new String[]{"execute_granted_add_function(integer, integer)", + "no_grants_add_function(integer, integer)"}); + + revokeAllOnTables(schema, + new String[]{"owned_table", "all_grants_table", "insert_granted_table", + "select_granted_table", "no_grants_table", "select_granted_view", "no_grants_view"}); + + stmt.executeUpdate( + "GRANT ALL ON FUNCTION " + schema + "." + + "execute_granted_add_function(integer, integer) TO " + + TestUtil.getUser()); + + if (pgConnection.haveMinimumServerVersion(ServerVersion.v11)) { + revokeAllOnProcedures(schema, new String[]{"execute_granted_insert_procedure(integer, integer)", + "no_grants_insert_procedure(integer, integer)"}); + stmt.executeUpdate( + "GRANT ALL ON PROCEDURE " + schema + "." + + "execute_granted_insert_procedure(integer, integer) TO " + + TestUtil.getUser()); + + } + stmt.executeUpdate( + "ALTER TABLE " + schema + "." + "owned_table OWNER TO " + TestUtil.getUser()); + stmt.executeUpdate( + "GRANT ALL ON TABLE " + schema + "." + "all_grants_table TO " + TestUtil.getUser()); + stmt.executeUpdate("GRANT INSERT ON TABLE " + schema + "." + "insert_granted_table TO " + + TestUtil.getUser()); + stmt.executeUpdate("GRANT SELECT ON TABLE " + schema + "." + "select_granted_table TO " + + TestUtil.getUser()); + stmt.executeUpdate("GRANT SELECT ON TABLE " + schema + "." 
+ "select_granted_view TO " + + TestUtil.getUser()); + stmt.close(); + } + + private static void revokeAllOnProcedures(String schema, String[] procedures + ) throws SQLException { + Statement stmt = privilegedCon.createStatement(); + for (String procedure : procedures) { + stmt.executeUpdate( + "REVOKE ALL ON PROCEDURE " + schema + "." + procedure + " FROM public RESTRICT"); + stmt.executeUpdate( + "REVOKE ALL ON PROCEDURE " + schema + "." + procedure + " FROM " + TestUtil.getUser() + + " RESTRICT"); + } + stmt.close(); + } + + private static void revokeAllOnFunctions(String schema, String[] functions + ) throws SQLException { + Statement stmt = privilegedCon.createStatement(); + for (String function : functions) { + stmt.executeUpdate( + "REVOKE ALL ON FUNCTION " + schema + "." + function + " FROM public RESTRICT"); + stmt.executeUpdate("REVOKE ALL ON FUNCTION " + schema + "." + + function + " FROM " + TestUtil.getUser() + + " RESTRICT"); + } + stmt.close(); + } + + private static void revokeAllOnTables(String schema, String[] tables + ) throws SQLException { + Statement stmt = privilegedCon.createStatement(); + for (String table : tables) { + stmt.executeUpdate("REVOKE ALL ON TABLE " + schema + "." + table + " FROM public RESTRICT"); + stmt.executeUpdate( + "REVOKE ALL ON TABLE " + schema + "." + table + " FROM " + TestUtil.getUser() + + " RESTRICT"); + } + stmt.close(); + } + + private static void createSimpleTablesInSchema(String schema, String[] tables + ) throws SQLException { + for (String tableName : tables) { + TestUtil.createTable(privilegedCon, schema + "." 
+ tableName, COLUMNS); + } + } + + @AfterAll + static void tearDown() throws SQLException { + TestUtil.closeDB(hidingCon); + TestUtil.closeDB(nonHidingCon); + TestUtil.dropSchema(privilegedCon, "high_privileges_schema"); + TestUtil.dropSchema(privilegedCon, "low_privileges_schema"); + TestUtil.dropSchema(privilegedCon, "no_privileges_schema"); + TestUtil.closeDB(privilegedCon); + } + + private static boolean isSuperUser(Connection connection) throws SQLException { + // Check if we're operating as a superuser. + Statement st = connection.createStatement(); + st.executeQuery("SHOW is_superuser;"); + ResultSet rs = st.getResultSet(); + rs.next(); // One row is guaranteed + boolean connIsSuper = "on".equalsIgnoreCase(rs.getString(1)); + st.close(); + return connIsSuper; + } + + @Test + void getSchemas() throws SQLException { + List schemasWithHiding = getSchemaNames(hidingDatabaseMetaData); + assertThat(schemasWithHiding, + hasItems("pg_catalog", "information_schema", + "high_privileges_schema", "low_privileges_schema")); + assertThat(schemasWithHiding, + not(hasItem("no_privileges_schema"))); + + List schemasWithNoHiding = getSchemaNames(nonHidingDatabaseMetaData); + assertThat(schemasWithNoHiding, + hasItems("pg_catalog", "information_schema", + "high_privileges_schema", "low_privileges_schema", "no_privileges_schema")); + } + + List getSchemaNames(DatabaseMetaData databaseMetaData) throws SQLException { + List schemaNames = new ArrayList<>(); + ResultSet rs = databaseMetaData.getSchemas(); + while (rs.next()) { + schemaNames.add(rs.getString("TABLE_SCHEM")); + } + return schemaNames; + } + + @Test + void getTables() throws SQLException { + List tablesWithHiding = getTableNames(hidingDatabaseMetaData, "high_privileges_schema"); + + assertThat(tablesWithHiding, + hasItems( + "owned_table", + "all_grants_table", + "insert_granted_table", + "select_granted_table")); + assertThat(tablesWithHiding, + not(hasItem("no_grants_table"))); + + List tablesWithNoHiding = + 
getTableNames(nonHidingDatabaseMetaData, "high_privileges_schema"); + assertThat(tablesWithNoHiding, + hasItems( + "owned_table", + "all_grants_table", + "insert_granted_table", + "select_granted_table", + "no_grants_table")); + + tablesWithHiding = getTableNames(hidingDatabaseMetaData, "low_privileges_schema"); + + assertThat(tablesWithHiding, + hasItems( + "owned_table", + "all_grants_table", + "insert_granted_table", + "select_granted_table")); + assertThat(tablesWithHiding, + not(hasItem("no_grants_table"))); + + tablesWithNoHiding = + getTableNames(nonHidingDatabaseMetaData, "low_privileges_schema"); + assertThat(tablesWithNoHiding, + hasItems( + "owned_table", + "all_grants_table", + "insert_granted_table", + "select_granted_table", + "no_grants_table")); + + // Or should the tables names not be returned because the schema is not visible? + tablesWithHiding = getTableNames(hidingDatabaseMetaData, "no_privileges_schema"); + + assertThat(tablesWithHiding, + hasItems( + "owned_table", + "all_grants_table", + "insert_granted_table", + "select_granted_table")); + assertThat(tablesWithHiding, + not(hasItem("no_grants_table"))); + + tablesWithNoHiding = + getTableNames(nonHidingDatabaseMetaData, "no_privileges_schema"); + assertThat(tablesWithNoHiding, + hasItems( + "owned_table", + "all_grants_table", + "insert_granted_table", + "select_granted_table", + "no_grants_table")); + + } + + List getTableNames(DatabaseMetaData databaseMetaData, String schemaPattern) + throws SQLException { + List tableNames = new ArrayList<>(); + ResultSet rs = databaseMetaData.getTables(null, schemaPattern, null, new String[]{"TABLE"}); + while (rs.next()) { + tableNames.add(rs.getString("TABLE_NAME")); + } + return tableNames; + } + + @Test + void getViews() throws SQLException { + List viewsWithHiding = getViewNames(hidingDatabaseMetaData, "high_privileges_schema"); + + assertThat(viewsWithHiding, + hasItems( + "select_granted_view")); + assertThat(viewsWithHiding, + 
not(hasItem("no_grants_view"))); + + List viewsWithNoHiding = + getViewNames(nonHidingDatabaseMetaData, "high_privileges_schema"); + assertThat(viewsWithNoHiding, + hasItems( + "select_granted_view", + "no_grants_view")); + + viewsWithHiding = getViewNames(hidingDatabaseMetaData, "low_privileges_schema"); + + assertThat(viewsWithHiding, + hasItems( + "select_granted_view")); + assertThat(viewsWithHiding, + not(hasItem("no_grants_view"))); + + viewsWithNoHiding = + getViewNames(nonHidingDatabaseMetaData, "low_privileges_schema"); + assertThat(viewsWithNoHiding, + hasItems( + "select_granted_view", + "no_grants_view")); + + // Or should the view names not be returned because the schema is not visible? + viewsWithHiding = getViewNames(hidingDatabaseMetaData, "no_privileges_schema"); + + assertThat(viewsWithHiding, + hasItems( + "select_granted_view")); + assertThat(viewsWithHiding, + not(hasItem("no_grants_view"))); + + viewsWithNoHiding = + getViewNames(nonHidingDatabaseMetaData, "no_privileges_schema"); + assertThat(viewsWithNoHiding, + hasItems( + "select_granted_view", + "no_grants_view")); + + } + + List getViewNames(DatabaseMetaData databaseMetaData, String schemaPattern) + throws SQLException { + List viewNames = new ArrayList<>(); + ResultSet rs = databaseMetaData.getTables(null, schemaPattern, null, new String[]{"VIEW"}); + while (rs.next()) { + viewNames.add(rs.getString("TABLE_NAME")); + } + return viewNames; + } + + @Test + void getFunctions() throws SQLException { + List functionsWithHiding = + getFunctionNames(hidingDatabaseMetaData, "high_privileges_schema"); + assertThat(functionsWithHiding, + hasItem("execute_granted_add_function")); + assertThat(functionsWithHiding, + not(hasItem("no_grants_add_function"))); + + List functionsWithNoHiding = + getFunctionNames(nonHidingDatabaseMetaData, "high_privileges_schema"); + assertThat(functionsWithNoHiding, + hasItems("execute_granted_add_function", "no_grants_add_function")); + + functionsWithHiding = + 
getFunctionNames(hidingDatabaseMetaData, "low_privileges_schema"); + assertThat(functionsWithHiding, + hasItem("execute_granted_add_function")); + assertThat(functionsWithHiding, + not(hasItem("no_grants_add_function"))); + + functionsWithNoHiding = + getFunctionNames(nonHidingDatabaseMetaData, "low_privileges_schema"); + assertThat(functionsWithNoHiding, + hasItems("execute_granted_add_function", "no_grants_add_function")); + + // Or should the function names not be returned because the schema is not visible? + functionsWithHiding = + getFunctionNames(hidingDatabaseMetaData, "no_privileges_schema"); + assertThat(functionsWithHiding, + hasItem("execute_granted_add_function")); + assertThat(functionsWithHiding, + not(hasItem("no_grants_add_function"))); + + functionsWithNoHiding = + getFunctionNames(nonHidingDatabaseMetaData, "no_privileges_schema"); + assertThat(functionsWithNoHiding, + hasItems("execute_granted_add_function", "no_grants_add_function")); + } + + List getFunctionNames(DatabaseMetaData databaseMetaData, String schemaPattern) + throws SQLException { + List functionNames = new ArrayList<>(); + ResultSet rs = databaseMetaData.getFunctions(null, schemaPattern, null); + while (rs.next()) { + functionNames.add(rs.getString("FUNCTION_NAME")); + } + return functionNames; + } + + @Test + void getProcedures() throws SQLException { + String executeGranted = TestUtil.haveMinimumServerVersion(hidingCon, ServerVersion.v11) ? "execute_granted_insert_procedure" : "execute_granted_add_function"; + String noGrants = TestUtil.haveMinimumServerVersion(hidingCon, ServerVersion.v11) ? 
"no_grants_insert_procedure" : "no_grants_add_function"; + + List proceduresWithHiding = + getProcedureNames(hidingDatabaseMetaData, "high_privileges_schema"); + assertThat(proceduresWithHiding, + hasItem(executeGranted)); + assertThat(proceduresWithHiding, + not(hasItem(noGrants))); + + List proceduresWithNoHiding = + getProcedureNames(nonHidingDatabaseMetaData, "high_privileges_schema"); + assertThat(proceduresWithNoHiding, + hasItems(executeGranted, noGrants)); + + proceduresWithHiding = + getProcedureNames(hidingDatabaseMetaData, "low_privileges_schema"); + assertThat(proceduresWithHiding, + hasItem(executeGranted)); + assertThat(proceduresWithHiding, + not(hasItem(noGrants))); + + proceduresWithNoHiding = + getProcedureNames(nonHidingDatabaseMetaData, "low_privileges_schema"); + assertThat(proceduresWithNoHiding, + hasItems(executeGranted, noGrants)); + + // Or should the function names not be returned because the schema is not visible? + proceduresWithHiding = + getProcedureNames(hidingDatabaseMetaData, "no_privileges_schema"); + assertThat(proceduresWithHiding, + hasItem(executeGranted)); + assertThat(proceduresWithHiding, + not(hasItem(noGrants))); + + proceduresWithNoHiding = + getProcedureNames(nonHidingDatabaseMetaData, "no_privileges_schema"); + assertThat(proceduresWithNoHiding, + hasItems(executeGranted, noGrants)); + + } + + List getProcedureNames(DatabaseMetaData databaseMetaData, String schemaPattern) + throws SQLException { + List procedureNames = new ArrayList<>(); + ResultSet rs = databaseMetaData.getProcedures(null, schemaPattern, null); + while (rs.next()) { + procedureNames.add(rs.getString("PROCEDURE_NAME")); + } + return procedureNames; + } + + /* + * According to the JDBC JavaDoc, the applicable UDTs are: JAVA_OBJECT, STRUCT, or DISTINCT. 
+ */ + @Test + void getUDTs() throws SQLException { + if (pgConnection.haveMinimumServerVersion(ServerVersion.v9_2)) { + List typesWithHiding = getTypeNames(hidingDatabaseMetaData, "high_privileges_schema"); + assertThat(typesWithHiding, + hasItems("usage_granted_composite_type", "usage_granted_us_postal_code_domain")); + assertThat(typesWithHiding, + not(hasItems("no_grants_composite_type", "no_grants_us_postal_code_domain"))); + + typesWithHiding = getTypeNames(hidingDatabaseMetaData, "low_privileges_schema"); + assertThat(typesWithHiding, + hasItems("usage_granted_composite_type", "usage_granted_us_postal_code_domain")); + assertThat(typesWithHiding, + not(hasItems("no_grants_composite_type", "no_grants_us_postal_code_domain"))); + + // Or should the types names not be returned because the schema is not visible? + typesWithHiding = getTypeNames(hidingDatabaseMetaData, "no_privileges_schema"); + assertThat(typesWithHiding, + hasItems("usage_granted_composite_type", "usage_granted_us_postal_code_domain")); + assertThat(typesWithHiding, + not(hasItems("no_grants_composite_type", "no_grants_us_postal_code_domain"))); + } + + List typesWithNoHiding = + getTypeNames(nonHidingDatabaseMetaData, "high_privileges_schema"); + assertThat(typesWithNoHiding, + hasItems("usage_granted_composite_type", "no_grants_composite_type", + "usage_granted_us_postal_code_domain", "no_grants_us_postal_code_domain")); + + typesWithNoHiding = + getTypeNames(nonHidingDatabaseMetaData, "low_privileges_schema"); + assertThat(typesWithNoHiding, + hasItems("usage_granted_composite_type", "no_grants_composite_type", + "usage_granted_us_postal_code_domain", "no_grants_us_postal_code_domain")); + + typesWithNoHiding = + getTypeNames(nonHidingDatabaseMetaData, "no_privileges_schema"); + assertThat(typesWithNoHiding, + hasItems("usage_granted_composite_type", "no_grants_composite_type", + "usage_granted_us_postal_code_domain", "no_grants_us_postal_code_domain")); + } + + /* + From the Postgres JDBC 
driver source code, we are mapping the types: + java.sql.Types.DISTINCT to the Postgres type: TYPTYPE_COMPOSITE 'c' # composite (e.g., table's rowtype) + java.sql.Types.STRUCT to the Postgres type: TYPTYPE_DOMAIN 'd' # domain over another type + */ + List getTypeNames(DatabaseMetaData databaseMetaData, String schemaPattern) throws SQLException { + List typeNames = new ArrayList<>(); + ResultSet rs = databaseMetaData.getUDTs(null, schemaPattern, null, null); + while (rs.next()) { + typeNames.add(rs.getString("TYPE_NAME")); + } + return typeNames; + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataTest.java new file mode 100644 index 0000000..80e16e4 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataTest.java @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc4; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +class DatabaseMetaDataTest { + + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + TestUtil.dropSequence(conn, "sercoltest_a_seq"); + TestUtil.createTable(conn, "sercoltest", "a serial, b int"); + TestUtil.createSchema(conn, "hasfunctions"); + TestUtil.createSchema(conn, "nofunctions"); + TestUtil.createSchema(conn, "hasprocedures"); + TestUtil.createSchema(conn, "noprocedures"); + TestUtil.execute(conn, "create function hasfunctions.addfunction (integer, integer) " + + "RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE"); + if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11)) { + TestUtil.execute(conn, "create procedure hasprocedures.addprocedure() " + + "LANGUAGE plpgsql AS $$ BEGIN SELECT 1; END; $$"); + } + } + + @AfterEach + void tearDown() throws Exception { + TestUtil.dropSequence(conn, "sercoltest_a_seq"); + TestUtil.dropTable(conn, "sercoltest"); + TestUtil.dropSchema(conn, "hasfunctions"); + TestUtil.dropSchema(conn, 
"nofunctions"); + TestUtil.dropSchema(conn, "hasprocedures"); + TestUtil.dropSchema(conn, "noprocedures"); + TestUtil.closeDB(conn); + } + + @Test + void getClientInfoProperties() throws Exception { + DatabaseMetaData dbmd = conn.getMetaData(); + + ResultSet rs = dbmd.getClientInfoProperties(); + if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_0)) { + assertFalse(rs.next()); + return; + } + + assertTrue(rs.next()); + assertEquals("ApplicationName", rs.getString("NAME")); + } + + @Test + void getColumnsForAutoIncrement() throws Exception { + DatabaseMetaData dbmd = conn.getMetaData(); + + ResultSet rs = dbmd.getColumns("%", "%", "sercoltest", "%"); + assertTrue(rs.next()); + assertEquals("a", rs.getString("COLUMN_NAME")); + assertEquals("YES", rs.getString("IS_AUTOINCREMENT")); + + assertTrue(rs.next()); + assertEquals("b", rs.getString("COLUMN_NAME")); + assertEquals("NO", rs.getString("IS_AUTOINCREMENT")); + + assertFalse(rs.next()); + } + + @Test + void getSchemas() throws SQLException { + DatabaseMetaData dbmd = conn.getMetaData(); + + ResultSet rs = dbmd.getSchemas("", "publ%"); + + assertTrue(rs.next()); + assertEquals("public", rs.getString("TABLE_SCHEM")); + assertNull(rs.getString("TABLE_CATALOG")); + assertFalse(rs.next()); + } + + @Test + void getFunctionsInSchemaForFunctions() throws SQLException { + DatabaseMetaData dbmd = conn.getMetaData(); + + try (ResultSet rs = dbmd.getFunctions("", "hasfunctions", "")) { + List list = assertFunctionRSAndReturnList(rs); + assertEquals(1, list.size(), "There should be one function in the hasfunctions schema"); + assertListContains("getFunctions('', 'hasfunctions', '') must contain addfunction", list, "hasfunctions", "addfunction"); + } + + try (ResultSet rs = dbmd.getFunctions("", "hasfunctions", "addfunction")) { + List list = assertFunctionRSAndReturnList(rs); + assertEquals(1, list.size(), "There should be one function in the hasfunctions schema with name addfunction"); + 
assertListContains("getFunctions('', 'hasfunctions', 'addfunction') must contain addfunction", list, "hasfunctions", "addfunction"); + } + + try (ResultSet rs = dbmd.getFunctions("", "nofunctions", "")) { + boolean hasFunctions = rs.next(); + assertFalse(hasFunctions, "There should be no functions in the nofunctions schema"); + } + } + + @Test + void getFunctionsInSchemaForProcedures() throws SQLException { + // Due to the introduction of actual stored procedures in PostgreSQL 11, getFunctions should not return procedures for PostgreSQL versions 11+ + // On older installation we do not create the procedures so the below schemas should all be empty + DatabaseMetaData dbmd = conn.getMetaData(); + + // Search for functions in schema "hasprocedures" + try (ResultSet rs = dbmd.getFunctions("", "hasprocedures", null)) { + assertFalse(rs.next(), "The hasprocedures schema not return procedures from getFunctions"); + } + // Search for functions in schema "noprocedures" (which should never expect records) + try (ResultSet rs = dbmd.getFunctions("", "noprocedures", null)) { + assertFalse(rs.next(), "The noprocedures schema should not have functions"); + } + // Search for functions by procedure name "addprocedure" + try (ResultSet rs = dbmd.getFunctions("", "hasprocedures", "addprocedure")) { + assertFalse(rs.next(), "Should not return procedures from getFunctions by schema + name"); + } + } + + @Test + void getProceduresInSchemaForFunctions() throws SQLException { + // Due to the introduction of actual stored procedures in PostgreSQL 11, getProcedures should not return functions for PostgreSQL versions 11+ + DatabaseMetaData dbmd = conn.getMetaData(); + + // Search for procedures in schema "hasfunctions" (which should expect a record only for PostgreSQL < 11) + try (ResultSet rs = dbmd.getProcedures("", "hasfunctions", null)) { + if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11)) { + assertFalse(rs.next(), "PostgreSQL11+ should not return functions from 
getProcedures"); + } else { + // PostgreSQL prior to 11 should return functions from getProcedures + assertProcedureRS(rs); + } + } + + // Search for procedures in schema "nofunctions" (which should never expect records) + try (ResultSet rs = dbmd.getProcedures("", "nofunctions", null)) { + assertFalse(rs.next(), "getProcedures(...) should not return procedures for schema nofunctions"); + } + + // Search for procedures by function name "addfunction" within schema "hasfunctions" (which should expect a record for PostgreSQL < 11) + try (ResultSet rs = dbmd.getProcedures("", "hasfunctions", "addfunction")) { + if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11)) { + assertFalse(rs.next(), "PostgreSQL11+ should not return functions from getProcedures"); + } else { + // PostgreSQL prior to 11 should return functions from getProcedures + assertProcedureRS(rs); + } + } + + // Search for procedures by function name "addfunction" within schema "nofunctions" (which should never expect records) + try (ResultSet rs = dbmd.getProcedures("", "nofunctions", "addfunction")) { + assertFalse(rs.next(), "getProcedures(...) 
should not return procedures for schema nofunctions + addfunction"); + } + } + + @Test + void getProceduresInSchemaForProcedures() throws SQLException { + // Only run this test for PostgreSQL version 11+; assertions for versions prior would be vacuously true as we don't create a procedure in the setup for older versions + Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11)); + + DatabaseMetaData dbmd = conn.getMetaData(); + + try (ResultSet rs = dbmd.getProcedures("", "hasprocedures", null)) { + int count = assertProcedureRS(rs); + assertEquals(1, count, "getProcedures() should be non-empty for the hasprocedures schema"); + } + + try (ResultSet rs = dbmd.getProcedures("", "noprocedures", null)) { + assertFalse(rs.next(), "getProcedures() should be empty for the hasprocedures schema"); + } + + try (ResultSet rs = dbmd.getProcedures("", "hasfunctions", null)) { + assertFalse(rs.next(), "getProcedures() should be empty for the nofunctions schema"); + } + + try (ResultSet rs = dbmd.getProcedures("", "nofunctions", null)) { + assertFalse(rs.next(), "getProcedures() should be empty for the nofunctions schema"); + } + } + + @Test + void getFunctionsWithBlankPatterns() throws SQLException { + int minFuncCount = 1000; + DatabaseMetaData dbmd = conn.getMetaData(); + + final int totalCount; + try (ResultSet rs = dbmd.getFunctions("", "", "")) { + List list = assertFunctionRSAndReturnList(rs); + totalCount = list.size(); // Rest of this test will validate against this value + assertThat(totalCount > minFuncCount, is(true)); + assertListContains("getFunctions('', '', '') must contain addfunction", list, "hasfunctions", "addfunction"); + } + + // Should be same as blank pattern + try (ResultSet rs = dbmd.getFunctions(null, null, null)) { + int count = assertGetFunctionRS(rs); + assertThat(count, is(totalCount)); + } + + // Catalog parameter has no affect on our getFunctions filtering + try (ResultSet rs = dbmd.getFunctions("ANYTHING_WILL_WORK", 
null, null)) { + int count = assertGetFunctionRS(rs); + assertThat(count, is(totalCount)); + } + + // Filter by schema + try (ResultSet rs = dbmd.getFunctions("", "pg_catalog", null)) { + int count = assertGetFunctionRS(rs); + assertThat(count > minFuncCount, is(true)); + } + + // Filter by schema and function name + try (ResultSet rs = dbmd.getFunctions("", "pg_catalog", "abs")) { + int count = assertGetFunctionRS(rs); + assertThat(count >= 1, is(true)); + } + + // Filter by function name only + try (ResultSet rs = dbmd.getFunctions("", "", "abs")) { + int count = assertGetFunctionRS(rs); + assertThat(count >= 1, is(true)); + } + } + + private static class CatalogObject implements Comparable { + private final String catalog; + private final String schema; + private final String name; + private final String specificName; + + private CatalogObject(String catalog, String schema, String name, String specificName) { + this.catalog = catalog; + this.schema = schema; + this.name = name; + this.specificName = specificName; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + (catalog == null ? 0 : catalog.hashCode()); + result = prime * result + (name == null ? 0 : name.hashCode()); + result = prime * result + (schema == null ? 0 : schema.hashCode()); + result = prime * result + (specificName == null ? 
0 : specificName.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } else if (obj == this) { + return true; + } + return compareTo((CatalogObject) obj) == 0; + } + + @Override + public int compareTo(CatalogObject other) { + int comp = catalog.compareTo(other.catalog); + if (comp != 0) { + return comp; + } + comp = schema.compareTo(other.schema); + if (comp != 0) { + return comp; + } + comp = name.compareTo(other.name); + if (comp != 0) { + return comp; + } + comp = specificName.compareTo(other.specificName); + if (comp != 0) { + return comp; + } + return 0; + } + } + + /** Assert some basic result from ResultSet of a GetFunctions method. Return the total row count. */ + private int assertGetFunctionRS(ResultSet rs) throws SQLException { + return assertFunctionRSAndReturnList(rs).size(); + } + + private List assertFunctionRSAndReturnList(ResultSet rs) throws SQLException { + // There should be at least one row + assertThat(rs.next(), is(true)); + assertThat(rs.getString("FUNCTION_CAT"), is(System.getProperty("database"))); + assertThat(rs.getString("FUNCTION_SCHEM"), notNullValue()); + assertThat(rs.getString("FUNCTION_NAME"), notNullValue()); + assertThat(rs.getShort("FUNCTION_TYPE") >= 0, is(true)); + assertThat(rs.getString("SPECIFIC_NAME"), notNullValue()); + + // Ensure there is enough column and column value retrieve by index should be same as column name (ordered) + assertThat(rs.getMetaData().getColumnCount(), is(6)); + assertThat(rs.getString(1), is(rs.getString("FUNCTION_CAT"))); + assertThat(rs.getString(2), is(rs.getString("FUNCTION_SCHEM"))); + assertThat(rs.getString(3), is(rs.getString("FUNCTION_NAME"))); + assertThat(rs.getString(4), is(rs.getString("REMARKS"))); + assertThat(rs.getShort(5), is(rs.getShort("FUNCTION_TYPE"))); + assertThat(rs.getString(6), is(rs.getString("SPECIFIC_NAME"))); + + // Get all result and assert they are ordered per 
javadoc spec: + // FUNCTION_CAT, FUNCTION_SCHEM, FUNCTION_NAME and SPECIFIC_NAME + List result = new ArrayList<>(); + do { + CatalogObject obj = new CatalogObject( + rs.getString("FUNCTION_CAT"), + rs.getString("FUNCTION_SCHEM"), + rs.getString("FUNCTION_NAME"), + rs.getString("SPECIFIC_NAME")); + result.add(obj); + } while (rs.next()); + + List orderedResult = new ArrayList<>(result); + Collections.sort(orderedResult); + assertThat(result, is(orderedResult)); + + return result; + } + + private int assertProcedureRS(ResultSet rs) throws SQLException { + return assertProcedureRSAndReturnList(rs).size(); + } + + private List assertProcedureRSAndReturnList(ResultSet rs) throws SQLException { + // There should be at least one row + assertThat(rs.next(), is(true)); + assertThat(rs.getString("PROCEDURE_CAT"), nullValue()); + assertThat(rs.getString("PROCEDURE_SCHEM"), notNullValue()); + assertThat(rs.getString("PROCEDURE_NAME"), notNullValue()); + assertThat(rs.getShort("PROCEDURE_TYPE") >= 0, is(true)); + assertThat(rs.getString("SPECIFIC_NAME"), notNullValue()); + + // Ensure there is enough column and column value retrieve by index should be same as column name (ordered) + assertThat(rs.getMetaData().getColumnCount(), is(9)); + assertThat(rs.getString(1), is(rs.getString("PROCEDURE_CAT"))); + assertThat(rs.getString(2), is(rs.getString("PROCEDURE_SCHEM"))); + assertThat(rs.getString(3), is(rs.getString("PROCEDURE_NAME"))); + // Per JDBC spec, indexes 4, 5, and 6 are reserved for future use + assertThat(rs.getString(7), is(rs.getString("REMARKS"))); + assertThat(rs.getShort(8), is(rs.getShort("PROCEDURE_TYPE"))); + assertThat(rs.getString(9), is(rs.getString("SPECIFIC_NAME"))); + + // Get all result and assert they are ordered per javadoc spec: + // FUNCTION_CAT, FUNCTION_SCHEM, FUNCTION_NAME and SPECIFIC_NAME + List result = new ArrayList<>(); + do { + CatalogObject obj = new CatalogObject( + rs.getString("PROCEDURE_CAT"), + rs.getString("PROCEDURE_SCHEM"), + 
rs.getString("PROCEDURE_NAME"), + rs.getString("SPECIFIC_NAME")); + result.add(obj); + } while (rs.next()); + + List orderedResult = new ArrayList<>(result); + Collections.sort(orderedResult); + assertThat(result, is(orderedResult)); + + return result; + } + + private void assertListContains(String message, List list, String schema, String name) throws SQLException { + boolean found = list.stream().anyMatch(item -> item.schema.equals(schema) && item.name.equals(name)); + assertTrue(found, message + "; schema=" + schema + " name=" + name); + } + + @Test + void getFunctionsWithSpecificTypes() throws SQLException { + // These function creation are borrow from jdbc2/DatabaseMetaDataTest + // We modify to ensure new function created are returned by getFunctions() + + DatabaseMetaData dbmd = conn.getMetaData(); + if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_4)) { + Statement stmt = conn.createStatement(); + stmt.execute( + "CREATE OR REPLACE FUNCTION getfunc_f1(int, varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL"); + ResultSet rs = dbmd.getFunctions("", "", "getfunc_f1"); + assertThat(rs.next(), is(true)); + assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f1")); + assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionNoTable)); + assertThat(rs.next(), is(false)); + rs.close(); + stmt.execute("DROP FUNCTION getfunc_f1(int, varchar)"); + + stmt.execute( + "CREATE OR REPLACE FUNCTION getfunc_f3(IN a int, INOUT b varchar, OUT c timestamptz) AS $f$ BEGIN b := 'a'; c := now(); return; END; $f$ LANGUAGE plpgsql"); + rs = dbmd.getFunctions("", "", "getfunc_f3"); + assertThat(rs.next(), is(true)); + assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f3")); + assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionNoTable)); + assertThat(rs.next(), is(false)); + rs.close(); + stmt.execute("DROP FUNCTION getfunc_f3(int, varchar)"); + + // RETURNS TABLE requires PostgreSQL 8.4+ + stmt.execute( + "CREATE OR 
REPLACE FUNCTION getfunc_f5() RETURNS TABLE (i int) LANGUAGE sql AS 'SELECT 1'"); + + rs = dbmd.getFunctions("", "", "getfunc_f5"); + assertThat(rs.next(), is(true)); + assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f5")); + assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionReturnsTable)); + assertThat(rs.next(), is(false)); + rs.close(); + stmt.execute("DROP FUNCTION getfunc_f5()"); + } else { + // For PG 8.3 or 8.2 it will resulted in unknown function type + Statement stmt = conn.createStatement(); + stmt.execute( + "CREATE OR REPLACE FUNCTION getfunc_f1(int, varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL"); + ResultSet rs = dbmd.getFunctions("", "", "getfunc_f1"); + assertThat(rs.next(), is(true)); + assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f1")); + assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionResultUnknown)); + assertThat(rs.next(), is(false)); + rs.close(); + stmt.execute("DROP FUNCTION getfunc_f1(int, varchar)"); + + stmt.execute( + "CREATE OR REPLACE FUNCTION getfunc_f3(IN a int, INOUT b varchar, OUT c timestamptz) AS $f$ BEGIN b := 'a'; c := now(); return; END; $f$ LANGUAGE plpgsql"); + rs = dbmd.getFunctions("", "", "getfunc_f3"); + assertThat(rs.next(), is(true)); + assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f3")); + assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionResultUnknown)); + assertThat(rs.next(), is(false)); + rs.close(); + stmt.execute("DROP FUNCTION getfunc_f3(int, varchar)"); + } + } + + @Test + void sortedDataTypes() throws SQLException { + // https://github.com/pgjdbc/pgjdbc/issues/716 + DatabaseMetaData dbmd = conn.getMetaData(); + ResultSet rs = dbmd.getTypeInfo(); + int lastType = Integer.MIN_VALUE; + while (rs.next()) { + int type = rs.getInt("DATA_TYPE"); + assertTrue(lastType <= type); + lastType = type; + } + } + + @Test + void getSqlTypes() throws SQLException { + if (TestUtil.haveMinimumServerVersion(conn, 
ServerVersion.v10)) { + try (Connection privileged = TestUtil.openPrivilegedDB()) { + try (Statement stmt = privileged.createStatement()) { + // create a function called array_in + stmt.execute("CREATE OR REPLACE FUNCTION public.array_in(anyarray, oid, integer)\n" + + " RETURNS anyarray\n" + + " LANGUAGE internal\n" + + " STABLE PARALLEL SAFE STRICT\n" + + "AS $function$array_in$function$"); + } + DatabaseMetaData dbmd = privileged.getMetaData(); + ResultSet rs = dbmd.getTypeInfo(); + try (Statement stmt = privileged.createStatement()) { + stmt.execute("drop function public.array_in(anyarray, oid, integer)"); + } + } + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/IsValidTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/IsValidTest.java new file mode 100644 index 0000000..5be6ad8 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/IsValidTest.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc4; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.postgresql.core.ServerVersion; +import org.postgresql.core.TransactionState; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Assume; +import org.junit.Test; + +import java.sql.Connection; +import java.sql.SQLException; + +public class IsValidTest extends BaseTest4 { + @Test + public void testIsValidShouldNotModifyTransactionStateOutsideTransaction() throws SQLException { + TransactionState initialTransactionState = TestUtil.getTransactionState(con); + assertTrue("Connection should be valid", con.isValid(0)); + TestUtil.assertTransactionState("Transaction state should not be modified by non-transactional Connection.isValid(...)", con, initialTransactionState); + } + + @Test + public void testIsValidShouldNotModifyTransactionStateInEmptyTransaction() throws SQLException { + con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + con.setAutoCommit(false); + TransactionState transactionState = TestUtil.getTransactionState(con); + assertTrue("Connection should be valid", con.isValid(0)); + TestUtil.assertTransactionState("Transaction state should not be modified by Connection.isValid(...) within an empty transaction", con, transactionState); + } + + @Test + public void testIsValidShouldNotModifyTransactionStateInNonEmptyTransaction() throws SQLException { + con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + con.setAutoCommit(false); + TestUtil.executeQuery(con, "SELECT 1"); + TransactionState transactionState = TestUtil.getTransactionState(con); + assertTrue("Connection should be valid", con.isValid(0)); + TestUtil.assertTransactionState("Transaction state should not be modified by Connection.isValid(...) 
within a non-empty transaction", con, transactionState); + } + + @Test + public void testIsValidRemoteClose() throws SQLException, InterruptedException { + Assume.assumeTrue("Unable to use pg_terminate_backend(...) before version 8.4", TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)); + + boolean wasTerminated = TestUtil.terminateBackend(con); + assertTrue("The backend should be terminated", wasTerminated); + + // Keeps checking for up to 5-seconds that the connection is marked invalid + for (int i = 0; i < 500; i++) { + if (!con.isValid(0)) { + break; + } + // Wait a bit to give the connection a chance to gracefully handle the termination + Thread.sleep(10); + } + assertFalse("The terminated connection should not be valid", con.isValid(0)); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/Jdbc4TestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/Jdbc4TestSuite.java new file mode 100644 index 0000000..908f5f8 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/Jdbc4TestSuite.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc4; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +/* + * Executes all known tests for JDBC4 + */ +@RunWith(Suite.class) +@Suite.SuiteClasses({ + ArrayTest.class, + BinaryStreamTest.class, + BinaryTest.class, + BlobTest.class, + CharacterStreamTest.class, + ClientInfoTest.class, + ConnectionValidTimeoutTest.class, + DatabaseMetaDataHideUnprivilegedObjectsTest.class, + DatabaseMetaDataTest.class, + IsValidTest.class, + JsonbTest.class, + PGCopyInputStreamTest.class, + UUIDTest.class, + WrapperTest.class, + XmlTest.class, +}) +public class Jdbc4TestSuite { +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/JsonbTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/JsonbTest.java new file mode 100644 index 0000000..6248ed2 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/JsonbTest.java @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc4; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.Array; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; + +@RunWith(Parameterized.class) +public class JsonbTest extends BaseTest4 { + public JsonbTest(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + Assume.assumeTrue("jsonb requires PostgreSQL 9.4+", TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4)); + TestUtil.createTable(con, "jsonbtest", "detail jsonb"); + Statement stmt = con.createStatement(); + stmt.executeUpdate("INSERT INTO jsonbtest (detail) VALUES ('{\"a\": 1}')"); + stmt.executeUpdate("INSERT INTO jsonbtest (detail) VALUES ('{\"b\": 1}')"); + stmt.executeUpdate("INSERT INTO jsonbtest (detail) VALUES ('{\"c\": 1}')"); + stmt.close(); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "jsonbtest"); + super.tearDown(); + } + + @Test + public void testJsonbNonPreparedStatement() throws SQLException { + Statement stmt = con.createStatement(); + + ResultSet rs = stmt.executeQuery("SELECT count(1) FROM jsonbtest WHERE detail ? 
'a' = false;"); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + stmt.close(); + } + + @Test + public void testJsonbPreparedStatement() throws SQLException { + PreparedStatement stmt = con.prepareStatement("SELECT count(1) FROM jsonbtest WHERE detail ?? 'a' = false;"); + ResultSet rs = stmt.executeQuery(); + assertTrue(rs.next()); + assertEquals(2, rs.getInt(1)); + rs.close(); + stmt.close(); + } + + @Test + public void jsonbArray() throws SQLException { + jsonArrayGet("jsonb", String.class); + } + + @Test + public void jsonArray() throws SQLException { + jsonArrayGet("json", String.class); + } + + private void jsonArrayGet(String type, Class arrayElement) throws SQLException { + PreparedStatement stmt = con.prepareStatement("SELECT '{[2],[3]}'::" + type + "[]"); + ResultSet rs = stmt.executeQuery(); + assertTrue(rs.next()); + Array array = rs.getArray(1); + Object[] objectArray = (Object[]) array.getArray(); + Assert.assertEquals( + "'{[2],[3]}'::" + type + "[] should come up as Java array with two entries", + "[[2], [3]]", + Arrays.deepToString(objectArray) + ); + + Assert.assertEquals( + type + " array entries should come up as strings", + arrayElement.getName() + ", " + arrayElement.getName(), + objectArray[0].getClass().getName() + ", " + objectArray[1].getClass().getName() + ); + rs.close(); + stmt.close(); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/PGCopyInputStreamTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/PGCopyInputStreamTest.java new file mode 100644 index 0000000..c92cc50 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/PGCopyInputStreamTest.java @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc4; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.PGConnection; +import org.postgresql.copy.PGCopyInputStream; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + +class PGCopyInputStreamTest { + private static final int NUM_TEST_ROWS = 4; + /** + * COPY .. TO STDOUT terminates each row of data with a LF regardless of platform so the size of + * each output row will always be two, one byte for the character and one for the LF. + */ + private static final int COPY_ROW_SIZE = 2; // One character plus newline + private static final int COPY_DATA_SIZE = NUM_TEST_ROWS * COPY_ROW_SIZE; + private static final String COPY_SQL = String.format("COPY (SELECT i FROM generate_series(0, %d - 1) i) TO STDOUT", NUM_TEST_ROWS); + + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + } + + @AfterEach + void tearDown() throws SQLException { + TestUtil.closeDB(conn); + } + + @Test + void readBytesCorrectlyHandlesEof() throws SQLException, IOException { + PGConnection pgConn = conn.unwrap(PGConnection.class); + try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) { + // large enough to read everything on the next step + byte[] buf = new byte[COPY_DATA_SIZE + 100]; + assertEquals(COPY_DATA_SIZE, in.read(buf), "First read should get the entire table into the byte array"); + assertEquals(-1, in.read(buf), "Subsequent read should return -1 to indicate stream is finished"); + } + } + + @Test + void readBytesCorrectlyReadsDataInChunks() throws 
SQLException, IOException { + PGConnection pgConn = conn.unwrap(PGConnection.class); + try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) { + // Read in row sized chunks + List chunks = readFully(in, COPY_ROW_SIZE); + assertEquals(NUM_TEST_ROWS, chunks.size(), "Should read one chunk per row"); + assertEquals("0\n1\n2\n3\n", chunksToString(chunks), "Entire table should have be read"); + } + } + + @Test + void copyAPI() throws SQLException, IOException { + PGConnection pgConn = conn.unwrap(PGConnection.class); + try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) { + List chunks = readFromCopyFully(in); + assertEquals(NUM_TEST_ROWS, chunks.size(), "Should read one chunk per row"); + assertEquals("0\n1\n2\n3\n", chunksToString(chunks), "Entire table should have be read"); + } + } + + @Test + void mixedAPI() throws SQLException, IOException { + PGConnection pgConn = conn.unwrap(PGConnection.class); + try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) { + // First read using java.io.InputStream API + byte[] firstChar = new byte[1]; + in.read(firstChar); + assertArrayEquals("0".getBytes(), firstChar, "IO API should read first character"); + + // Read remainder of first row using CopyOut API + assertArrayEquals("\n".getBytes(), in.readFromCopy(), "readFromCopy() should return remainder of first row"); + + // Then read the rest using CopyOut API + List chunks = readFromCopyFully(in); + assertEquals(NUM_TEST_ROWS - 1, chunks.size(), "Should read one chunk per row"); + assertEquals("1\n2\n3\n", chunksToString(chunks), "Rest of table should have be read"); + } + } + + private static List readFully(PGCopyInputStream in, int size) throws SQLException, IOException { + List chunks = new ArrayList<>(); + do { + byte[] buf = new byte[size]; + if (in.read(buf) <= 0) { + break; + } + chunks.add(buf); + } while (true); + return chunks; + } + + private static List readFromCopyFully(PGCopyInputStream in) throws SQLException, 
IOException { + List chunks = new ArrayList<>(); + byte[] buf; + while ((buf = in.readFromCopy()) != null) { + chunks.add(buf); + } + return chunks; + } + + private static String chunksToString(List chunks) { + ByteArrayOutputStream out = new ByteArrayOutputStream(); + chunks.forEach(chunk -> out.write(chunk, 0, chunk.length)); + return new String(out.toByteArray(), StandardCharsets.UTF_8); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/UUIDTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/UUIDTest.java new file mode 100644 index 0000000..05d834c --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/UUIDTest.java @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2008, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc4; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.postgresql.core.ServerVersion; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; +import org.postgresql.util.PSQLState; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collection; +import java.util.UUID; + +@RunWith(Parameterized.class) +public class UUIDTest extends BaseTest4 { + + public UUIDTest(BinaryMode binaryMode, StringType stringType) { + setBinaryMode(binaryMode); + setStringType(stringType); + } + + @Parameterized.Parameters(name = "binary={0}, stringType={1}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + for (StringType stringType : StringType.values()) { + ids.add(new 
Object[]{binaryMode, stringType}); + } + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + assumeMinimumServerVersion(ServerVersion.v8_3); + + Statement stmt = con.createStatement(); + stmt.execute("CREATE TEMP TABLE uuidtest(id uuid)"); + stmt.close(); + } + + @Override + public void tearDown() throws SQLException { + Statement stmt = con.createStatement(); + stmt.execute("DROP TABLE IF EXISTS uuidtest"); + stmt.close(); + super.tearDown(); + } + + @Test + public void testUUID() throws SQLException { + UUID uuid = UUID.randomUUID(); + PreparedStatement ps = con.prepareStatement("INSERT INTO uuidtest VALUES (?)"); + ps.setObject(1, uuid, Types.OTHER); + ps.executeUpdate(); + ps.close(); + + Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT id FROM uuidtest"); + assertTrue(rs.next()); + + UUID uuid2 = (UUID) rs.getObject(1); + assertEquals(uuid, rs.getObject(1)); + assertEquals(uuid.toString(), rs.getString(1)); + + rs.close(); + stmt.close(); + } + + @Test + public void testUUIDString() throws SQLException { + String uuid = "0dcdf03a-058c-4fa3-b210-8385cb6810d5"; + PreparedStatement ps = con.prepareStatement("INSERT INTO uuidtest VALUES (?)"); + ps.setString(1, uuid); + try { + ps.executeUpdate(); + if (getStringType() == StringType.VARCHAR && preferQueryMode != PreferQueryMode.SIMPLE) { + Assert.fail( + "setString(, uuid) should fail to insert value into UUID column when stringType=varchar." 
+ + " Expecting error <>"); + } + } catch (SQLException e) { + if (getStringType() == StringType.VARCHAR + && PSQLState.DATATYPE_MISMATCH.getState().equals(e.getSQLState())) { + // The following error is expected in stringType=varchar mode + // ERROR: column "id" is of type uuid but expression is of type character varying + return; + } + throw e; + } finally { + TestUtil.closeQuietly(ps); + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/WrapperTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/WrapperTest.java new file mode 100644 index 0000000..4d631f3 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/WrapperTest.java @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc4; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGConnection; +import org.postgresql.PGStatement; +import org.postgresql.ds.PGSimpleDataSource; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; + +class WrapperTest { + + private Connection conn; + private Statement statement; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + statement = conn.prepareStatement("SELECT 1"); + } + + @AfterEach + void tearDown() throws SQLException { + statement.close(); + TestUtil.closeDB(conn); + } + + /** + * This interface is private, and so cannot be supported by any wrapper. 
+ */ + private interface PrivateInterface { + } + + @Test + void connectionIsWrapperForPrivate() throws SQLException { + assertFalse(conn.isWrapperFor(PrivateInterface.class)); + } + + @Test + void connectionIsWrapperForConnection() throws SQLException { + assertTrue(conn.isWrapperFor(Connection.class)); + } + + @Test + void connectionIsWrapperForPGConnection() throws SQLException { + assertTrue(conn.isWrapperFor(PGConnection.class)); + } + + @Test + void connectionUnwrapPrivate() throws SQLException { + try { + conn.unwrap(PrivateInterface.class); + fail("unwrap of non-wrapped interface should fail"); + } catch (SQLException e) { + } + } + + @Test + void connectionUnwrapConnection() throws SQLException { + Object v = conn.unwrap(Connection.class); + assertNotNull(v); + assertTrue(v instanceof Connection, "connection.unwrap(PGConnection.class) should return PGConnection instance" + + ", actual instance is " + v); + } + + @Test + void connectionUnwrapPGConnection() throws SQLException { + Object v = conn.unwrap(PGConnection.class); + assertNotNull(v); + assertTrue(v instanceof PGConnection, "connection.unwrap(PGConnection.class) should return PGConnection instance" + + ", actual instance is " + v); + } + + @Test + void connectionUnwrapPGDataSource() throws SQLException { + PGSimpleDataSource dataSource = new PGSimpleDataSource(); + dataSource.setDatabaseName(TestUtil.getDatabase()); + dataSource.setServerName(TestUtil.getServer()); + dataSource.setPortNumber(TestUtil.getPort()); + Connection connection = dataSource.getConnection(TestUtil.getUser(), TestUtil.getPassword()); + assertNotNull(connection, "Unable to obtain a connection from PGSimpleDataSource"); + Object v = connection.unwrap(PGConnection.class); + assertTrue(v instanceof PGConnection, + "connection.unwrap(PGConnection.class) should return PGConnection instance" + + ", actual instance is " + v); + } + + @Test + void statementIsWrapperForPrivate() throws SQLException { + 
assertFalse(statement.isWrapperFor(PrivateInterface.class), "Should not be a wrapper for PrivateInterface"); + } + + @Test + void statementIsWrapperForStatement() throws SQLException { + assertTrue(statement.isWrapperFor(Statement.class), "Should be a wrapper for Statement"); + } + + @Test + void statementIsWrapperForPGStatement() throws SQLException { + assertTrue(statement.isWrapperFor(PGStatement.class), "Should be a wrapper for PGStatement"); + } + + @Test + void statementUnwrapPrivate() throws SQLException { + try { + statement.unwrap(PrivateInterface.class); + fail("unwrap of non-wrapped interface should fail"); + } catch (SQLException e) { + } + } + + @Test + void statementUnwrapStatement() throws SQLException { + Object v = statement.unwrap(Statement.class); + assertNotNull(v); + assertTrue(v instanceof Statement, "Should be instance of Statement, actual instance of " + v); + } + + @Test + void statementUnwrapPGStatement() throws SQLException { + Object v = statement.unwrap(PGStatement.class); + assertNotNull(v); + assertTrue(v instanceof PGStatement, "Should be instance of PGStatement, actual instance of " + v); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/XmlTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/XmlTest.java new file mode 100644 index 0000000..74a4925 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/XmlTest.java @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2008, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc4; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Test; +import org.w3c.dom.Node; + +import java.io.IOException; +import java.io.StringReader; +import java.io.StringWriter; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Types; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import javax.xml.transform.ErrorListener; +import javax.xml.transform.Result; +import javax.xml.transform.Source; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMResult; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.sax.SAXResult; +import javax.xml.transform.sax.SAXSource; +import javax.xml.transform.stax.StAXResult; +import javax.xml.transform.stax.StAXSource; +import javax.xml.transform.stream.StreamResult; +import javax.xml.transform.stream.StreamSource; + +public class XmlTest extends BaseTest4 { + private static final String _xsl = + "B"; + private static final String _xmlDocument = "12"; + private static final String _xmlFragment = "fg"; + + private final Transformer xslTransformer; + private final Transformer identityTransformer; + + public XmlTest() throws Exception { + TransformerFactory factory = TransformerFactory.newInstance(); + xslTransformer = factory.newTransformer(new StreamSource(new StringReader(_xsl))); + 
xslTransformer.setErrorListener(new Ignorer()); + identityTransformer = factory.newTransformer(); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + assumeMinimumServerVersion(ServerVersion.v8_3); + assumeTrue("Server has been compiled --with-libxml", isXmlEnabled(con)); + + Statement stmt = con.createStatement(); + stmt.execute("CREATE TEMP TABLE xmltest(id int primary key, val xml)"); + stmt.execute("INSERT INTO xmltest VALUES (1, '" + _xmlDocument + "')"); + stmt.execute("INSERT INTO xmltest VALUES (2, '" + _xmlFragment + "')"); + stmt.close(); + } + + private static boolean isXmlEnabled(Connection conn) { + try { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT 'b'::xml"); + rs.close(); + stmt.close(); + return true; + } catch (SQLException sqle) { + return false; + } + } + + @Override + public void tearDown() throws SQLException { + Statement stmt = con.createStatement(); + stmt.execute("DROP TABLE IF EXISTS xmltest"); + stmt.close(); + super.tearDown(); + } + + private ResultSet getRS() throws SQLException { + Statement stmt = con.createStatement(); + return stmt.executeQuery("SELECT val FROM xmltest"); + } + + @Test + public void testUpdateRS() throws SQLException { + Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE); + ResultSet rs = stmt.executeQuery("SELECT id, val FROM xmltest"); + assertTrue(rs.next()); + SQLXML xml = rs.getSQLXML(2); + rs.updateSQLXML(2, xml); + rs.updateRow(); + } + + @Test + public void testDOMParse() throws SQLException { + ResultSet rs = getRS(); + + assertTrue(rs.next()); + SQLXML xml = rs.getSQLXML(1); + DOMSource source = xml.getSource(DOMSource.class); + Node doc = source.getNode(); + Node root = doc.getFirstChild(); + assertEquals("a", root.getNodeName()); + Node first = root.getFirstChild(); + assertEquals("b", first.getNodeName()); + assertEquals("1", first.getTextContent()); + Node last = root.getLastChild(); + 
assertEquals("b", last.getNodeName()); + assertEquals("2", last.getTextContent()); + + assertTrue(rs.next()); + try { + xml = rs.getSQLXML(1); + source = xml.getSource(DOMSource.class); + fail("Can't retrieve a fragment."); + } catch (SQLException sqle) { + } + } + + private void transform(Source source) throws Exception { + StringWriter writer = new StringWriter(); + StreamResult result = new StreamResult(writer); + xslTransformer.transform(source, result); + assertEquals("B1B2", writer.toString()); + } + + private void testRead(Class sourceClass) throws Exception { + ResultSet rs = getRS(); + + assertTrue(rs.next()); + SQLXML xml = rs.getSQLXML(1); + Source source = xml.getSource(sourceClass); + transform(source); + + assertTrue(rs.next()); + xml = rs.getSQLXML(1); + try { + source = xml.getSource(sourceClass); + transform(source); + fail("Can't transform a fragment."); + } catch (Exception sqle) { + } + } + + @Test + public void testDOMRead() throws Exception { + testRead(DOMSource.class); + } + + @Test + public void testSAXRead() throws Exception { + testRead(SAXSource.class); + } + + @Test + public void testStAXRead() throws Exception { + testRead(StAXSource.class); + } + + @Test + public void testStreamRead() throws Exception { + testRead(StreamSource.class); + } + + private void testWrite(Class resultClass) throws Exception { + Statement stmt = con.createStatement(); + stmt.execute("DELETE FROM xmltest"); + stmt.close(); + + PreparedStatement ps = con.prepareStatement("INSERT INTO xmltest VALUES (?,?)"); + SQLXML xml = con.createSQLXML(); + Result result = xml.setResult(resultClass); + + Source source = new StreamSource(new StringReader(_xmlDocument)); + identityTransformer.transform(source, result); + + ps.setInt(1, 1); + ps.setSQLXML(2, xml); + ps.executeUpdate(); + ps.close(); + + ResultSet rs = getRS(); + assertTrue(rs.next()); + + // DOMResults tack on the additional header. 
+ // + String header = ""; + if (DOMResult.class.equals(resultClass)) { + header = ""; + } + + assertEquals(header + _xmlDocument, rs.getString(1)); + xml = rs.getSQLXML(1); + assertEquals(header + _xmlDocument, xml.getString()); + + assertTrue(!rs.next()); + } + + @Test + public void testDomWrite() throws Exception { + testWrite(DOMResult.class); + } + + @Test + public void testStAXWrite() throws Exception { + testWrite(StAXResult.class); + } + + @Test + public void testStreamWrite() throws Exception { + testWrite(StreamResult.class); + } + + @Test + public void testSAXWrite() throws Exception { + testWrite(SAXResult.class); + } + + @Test + public void testFree() throws SQLException { + ResultSet rs = getRS(); + assertTrue(rs.next()); + SQLXML xml = rs.getSQLXML(1); + xml.free(); + xml.free(); + try { + xml.getString(); + fail("Not freed."); + } catch (SQLException sqle) { + } + } + + @Test + public void testGetObject() throws SQLException { + ResultSet rs = getRS(); + assertTrue(rs.next()); + SQLXML xml = (SQLXML) rs.getObject(1); + } + + private SQLXML newConsumableSQLXML(String content) throws Exception { + SQLXML xml = (SQLXML) Proxy.newProxyInstance(getClass().getClassLoader(), new Class[]{SQLXML.class}, new InvocationHandler() { + SQLXML xml = con.createSQLXML(); + boolean consumed = false; + Set consumingMethods = new HashSet<>(Arrays.asList( + SQLXML.class.getMethod("getBinaryStream"), + SQLXML.class.getMethod("getCharacterStream"), + SQLXML.class.getMethod("getString") + )); + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + if (consumingMethods.contains(method)) { + if (consumed) { + fail("SQLXML-object already consumed"); + } else { + consumed = true; + } + } + return method.invoke(xml, args); + } + }); + xml.setString(content); + return xml; + } + + @Test + public void testSet() throws Exception { + Statement stmt = con.createStatement(); + stmt.execute("DELETE FROM xmltest"); + stmt.close(); + + 
PreparedStatement ps = con.prepareStatement("INSERT INTO xmltest VALUES (?,?)"); + ps.setInt(1, 1); + ps.setSQLXML(2, newConsumableSQLXML(_xmlDocument)); + assertEquals(1, ps.executeUpdate()); + ps.setInt(1, 2); + ps.setObject(2, newConsumableSQLXML(_xmlDocument)); + assertEquals(1, ps.executeUpdate()); + ResultSet rs = getRS(); + assertTrue(rs.next()); + Object o = rs.getObject(1); + assertTrue(o instanceof SQLXML); + assertEquals(_xmlDocument, ((SQLXML) o).getString()); + assertTrue(rs.next()); + assertEquals(_xmlDocument, rs.getSQLXML(1).getString()); + assertTrue(!rs.next()); + } + + @Test + public void testSetNull() throws SQLException { + Statement stmt = con.createStatement(); + stmt.execute("DELETE FROM xmltest"); + stmt.close(); + + PreparedStatement ps = con.prepareStatement("INSERT INTO xmltest VALUES (?,?)"); + ps.setInt(1, 1); + ps.setNull(2, Types.SQLXML); + ps.executeUpdate(); + ps.setInt(1, 2); + ps.setObject(2, null, Types.SQLXML); + ps.executeUpdate(); + SQLXML xml = con.createSQLXML(); + xml.setString(null); + ps.setInt(1, 3); + ps.setObject(2, xml); + ps.executeUpdate(); + ps.close(); + + ResultSet rs = getRS(); + assertTrue(rs.next()); + assertNull(rs.getObject(1)); + assertTrue(rs.next()); + assertNull(rs.getSQLXML(1)); + assertTrue(rs.next()); + assertNull(rs.getSQLXML("val")); + assertTrue(!rs.next()); + } + + @Test + public void testEmpty() throws SQLException, IOException { + SQLXML xml = con.createSQLXML(); + + try { + xml.getString(); + fail("Cannot retrieve data from an uninitialized object."); + } catch (SQLException sqle) { + } + + try { + xml.getSource(null); + fail("Cannot retrieve data from an uninitialized object."); + } catch (SQLException sqle) { + } + } + + @Test + public void testDoubleSet() throws SQLException { + SQLXML xml = con.createSQLXML(); + + xml.setString(""); + + try { + xml.setString(""); + fail("Can't set a value after its been initialized."); + } catch (SQLException sqle) { + } + + ResultSet rs = getRS(); + 
assertTrue(rs.next()); + xml = rs.getSQLXML(1); + try { + xml.setString(""); + fail("Can't set a value after its been initialized."); + } catch (SQLException sqle) { + } + } + + // Don't print warning and errors to System.err, it just + // clutters the display. + static class Ignorer implements ErrorListener { + @Override + public void error(TransformerException t) { + } + + @Override + public void fatalError(TransformerException t) { + } + + @Override + public void warning(TransformerException t) { + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/AbortTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/AbortTest.java new file mode 100644 index 0000000..e619cd9 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/AbortTest.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2010, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc4.jdbc41; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Test; + +import java.sql.SQLException; +import java.sql.Statement; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +public class AbortTest extends BaseTest4 { + + private static final int SLEEP_SECONDS = 30; + private static final int SLEEP_MILLISECONDS = SLEEP_SECONDS * 1000; + + @Test + public void testAbort() throws SQLException, InterruptedException, ExecutionException { + final ExecutorService executor = Executors.newFixedThreadPool(2); + long startTime = System.currentTimeMillis(); + Future workerFuture = executor.submit(new Callable() { + public SQLException call() { + try { + Statement stmt = con.createStatement(); + 
stmt.execute("SELECT pg_sleep(" + SLEEP_SECONDS + ")"); + } catch (SQLException e) { + return e; + } + return null; + } + }); + Future abortFuture = executor.submit(new Callable() { + public SQLException call() { + ExecutorService abortExecutor = Executors.newSingleThreadExecutor(); + try { + con.abort(abortExecutor); + } catch (SQLException e) { + return e; + } + abortExecutor.shutdown(); + try { + abortExecutor.awaitTermination(SLEEP_SECONDS, TimeUnit.SECONDS); + } catch (InterruptedException e) { + } + return null; + } + }); + SQLException workerException = workerFuture.get(); + long endTime = System.currentTimeMillis(); + SQLException abortException = abortFuture.get(); + if (abortException != null) { + throw abortException; + } + if (workerException == null) { + fail("Statement execution should have been aborted, thus throwing an exception"); + } + // suppose that if it took at least 95% of sleep time, aborting has failed and we've waited the + // full time + assertTrue(endTime - startTime < SLEEP_MILLISECONDS * 95 / 100); + assertTrue(con.isClosed()); + } + + /** + * According to the javadoc, calling abort on a closed connection is a no-op. 
+ */ + @Test + public void testAbortOnClosedConnection() throws SQLException { + con.close(); + try { + con.abort(Executors.newSingleThreadExecutor()); + } catch (SQLException e) { + fail(e.getMessage()); + } + } + + /** + * According to the javadoc, calling abort when the {@code executor} is {@code null} + * results in SQLException + */ + @Test(expected = SQLException.class) + public void abortWithNullExecutor() throws SQLException { + con.abort(null); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/CloseOnCompletionTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/CloseOnCompletionTest.java new file mode 100644 index 0000000..b6ce2aa --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/CloseOnCompletionTest.java @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc4.jdbc41; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +class CloseOnCompletionTest { + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + TestUtil.createTable(conn, "table1", "id integer"); + } + + @AfterEach + void tearDown() throws SQLException { + TestUtil.dropTable(conn, "table1"); + TestUtil.closeDB(conn); + } + + /** + * Test that the statement is not automatically closed if we do not ask for it. 
+ */ + @Test + void withoutCloseOnCompletion() throws SQLException { + Statement stmt = conn.createStatement(); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "*")); + rs.close(); + assertFalse(stmt.isClosed()); + } + + /** + * Test the behavior of closeOnCompletion with a single result set. + */ + @Test + void singleResultSet() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.closeOnCompletion(); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "*")); + rs.close(); + assertTrue(stmt.isClosed()); + } + + /** + * Test the behavior of closeOnCompletion with a multiple result sets. + */ + @Test + void multipleResultSet() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.closeOnCompletion(); + + stmt.execute(TestUtil.selectSQL("table1", "*") + ";" + TestUtil.selectSQL("table1", "*") + ";"); + ResultSet rs = stmt.getResultSet(); + rs.close(); + assertFalse(stmt.isClosed()); + stmt.getMoreResults(); + rs = stmt.getResultSet(); + rs.close(); + assertTrue(stmt.isClosed()); + } + + /** + * Test that when execution does not produce any result sets, closeOnCompletion has no effect + * (spec). + */ + @Test + void noResultSet() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.closeOnCompletion(); + + stmt.executeUpdate(TestUtil.insertSQL("table1", "1")); + assertFalse(stmt.isClosed()); + } + + @Test + void executeTwice() throws SQLException { + PreparedStatement s = conn.prepareStatement("SELECT 1"); + + s.executeQuery(); + s.executeQuery(); + + } + + @Test + void closeOnCompletionExecuteTwice() throws SQLException { + PreparedStatement s = conn.prepareStatement("SELECT 1"); + + /* + once we set close on completion we should only be able to execute one as the second execution + will close the resultsets from the first one which will close the statement. 
+ */ + + s.closeOnCompletion(); + s.executeQuery(); + try { + s.executeQuery(); + } catch (SQLException ex) { + assertEquals(PSQLState.OBJECT_NOT_IN_STATE.getState(), ex.getSQLState(), "Expecting <>"); + } + + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/DriverSupportsClassUnloadingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/DriverSupportsClassUnloadingTest.java new file mode 100644 index 0000000..b1ac622 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/DriverSupportsClassUnloadingTest.java @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2023, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc4.jdbc41; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.Driver; +import org.postgresql.jdbc.PgConnection; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.runner.RunWith; +import se.jiderhamn.classloader.PackagesLoadedOutsideClassLoader; +import se.jiderhamn.classloader.leak.JUnitClassloaderRunner; +import se.jiderhamn.classloader.leak.LeakPreventor; +import se.jiderhamn.classloader.leak.Leaks; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Types; + +@RunWith(JUnitClassloaderRunner.class) +@LeakPreventor(DriverSupportsClassUnloadingTest.LeakPreventor.class) +@PackagesLoadedOutsideClassLoader( + packages = {"java.", "javax.", "jdk.", "com.sun.", "sun.", "org.w3c", "org.junit.", "junit.", + "se.jiderhamn."} +) +class DriverSupportsClassUnloadingTest { + // See 
https://github.com/mjiderhamn/classloader-leak-prevention/tree/master/classloader-leak-test-framework#verifying-prevention-measures + public static class LeakPreventor implements Runnable { + @Override + public void run() { + try { + if (Driver.isRegistered()) { + Driver.deregister(); + } + for (int i = 0; i < 3; i++) { + // Allow cleanup thread to detect and close the leaked connection + JUnitClassloaderRunner.forceGc(); + // JUnitClassloaderRunner uses finalizers + System.runFinalization(); + } + // Allow for the cleanup thread to terminate + Thread.sleep(2000); + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + } + + @BeforeAll + static void setSmallCleanupThreadTtl() { + // Make the tests faster + System.setProperty("pgjdbc.config.cleanup.thread.ttl", "100"); + } + + @AfterAll + static void resetCleanupThreadTtl() { + System.clearProperty("pgjdbc.config.cleanup.thread.ttl"); + } + + @Test + @Leaks(dumpHeapOnError = true) + void driverUnloadsWhenConnectionLeaks() throws SQLException, InterruptedException { + if (!Driver.isRegistered()) { + Driver.register(); + } + // This code intentionally leaks connection, prepared statement to verify if the classes + // will still be able to unload + Connection con = TestUtil.openDB(); + PreparedStatement ps = con.prepareStatement("select 1 c1, 'hello' c2"); + // TODO: getMetaData throws AssertionError, however, it should probably not + if (con.unwrap(PgConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) { + ResultSetMetaData md = ps.getMetaData(); + assertEquals( + Types.INTEGER, + md.getColumnType(1), + ".getColumnType for column 1 c1 should be INTEGER" + ); + } + + // This is to trigger "query timeout" code to increase the chances for memory leaks + ps.setQueryTimeout(1000); + ResultSet rs = ps.executeQuery(); + rs.next(); + assertEquals(1, rs.getInt(1), ".getInt for column c1"); + } + + @Test + @Leaks(dumpHeapOnError = true) + void driverUnloadsWhenConnectionClosedExplicitly() throws 
SQLException { + if (!Driver.isRegistered()) { + Driver.register(); + } + // This code intentionally leaks connection, prepared statement to verify if the classes + // will still be able to unload + try (Connection con = TestUtil.openDB()) { + try (PreparedStatement ps = con.prepareStatement("select 1 c1, 'hello' c2")) { + // TODO: getMetaData throws AssertionError, however, it should probably not + if (con.unwrap(PgConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) { + ResultSetMetaData md = ps.getMetaData(); + assertEquals( + Types.INTEGER, + md.getColumnType(1), + ".getColumnType for column 1 c1 should be INTEGER" + ); + } + + // This is to trigger "query timeout" code to increase the chances for memory leaks + ps.setQueryTimeout(1000); + try (ResultSet rs = ps.executeQuery()) { + rs.next(); + assertEquals(1, rs.getInt(1), ".getInt for column c1"); + } + } + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/GetObjectTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/GetObjectTest.java new file mode 100644 index 0000000..e3ad502 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/GetObjectTest.java @@ -0,0 +1,1016 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc4.jdbc41; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.core.BaseConnection; +import org.postgresql.core.ServerVersion; +import org.postgresql.geometric.PGbox; +import org.postgresql.geometric.PGcircle; +import org.postgresql.geometric.PGline; +import org.postgresql.geometric.PGlseg; +import org.postgresql.geometric.PGpath; +import org.postgresql.geometric.PGpoint; +import org.postgresql.geometric.PGpolygon; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PGInterval; +import org.postgresql.util.PGmoney; +import org.postgresql.util.PGobject; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.Date; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.Calendar; +import java.util.GregorianCalendar; +import java.util.TimeZone; +import java.util.UUID; + +import javax.sql.rowset.serial.SerialBlob; +import javax.sql.rowset.serial.SerialClob; + +class GetObjectTest { + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); // +0000 always + private static final TimeZone GMT03 = TimeZone.getTimeZone("GMT+03"); // +0300 always + private static final TimeZone GMT05 = TimeZone.getTimeZone("GMT-05"); // -0500 always + private static final TimeZone 
GMT13 = TimeZone.getTimeZone("GMT+13"); // +1300 always + + private Connection conn; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + TestUtil.createTable(conn, "table1", "varchar_column varchar(16), " + + "char_column char(10), " + + "boolean_column boolean," + + "smallint_column smallint," + + "integer_column integer," + + "bigint_column bigint," + + "decimal_column decimal," + + "numeric_column numeric," + // smallserial requires 9.2 or later + + (((BaseConnection) conn).haveMinimumServerVersion(ServerVersion.v9_2) ? "smallserial_column smallserial," : "") + + "serial_column serial," + + "bigserial_column bigserial," + + "real_column real," + + "double_column double precision," + + "timestamp_without_time_zone_column timestamp without time zone," + + "timestamp_with_time_zone_column timestamp with time zone," + + "date_column date," + + "time_without_time_zone_column time without time zone," + + "time_with_time_zone_column time with time zone," + + "blob_column bytea," + + "lob_column oid," + + "array_column text[]," + + "point_column point," + + "line_column line," + + "lseg_column lseg," + + "box_column box," + + "path_column path," + + "polygon_column polygon," + + "circle_column circle," + + "money_column money," + + "interval_column interval," + + (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3) ? "uuid_column uuid," : "") + + "inet_column inet," + + "cidr_column cidr," + + "macaddr_column macaddr" + + (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3) ? ",xml_column xml" : "") + ); + } + + @AfterEach + void tearDown() throws SQLException { + TestUtil.dropTable(conn, "table1"); + TestUtil.closeDB(conn); + } + + /** + * Test the behavior getObject for string columns. 
+ */ + @Test + void getString() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "varchar_column,char_column", "'varchar_value','char_value'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "varchar_column, char_column")); + try { + assertTrue(rs.next()); + assertEquals("varchar_value", rs.getObject("varchar_column", String.class)); + assertEquals("varchar_value", rs.getObject(1, String.class)); + assertEquals("char_value", rs.getObject("char_column", String.class)); + assertEquals("char_value", rs.getObject(2, String.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for string columns. + */ + @Test + void getClob() throws SQLException { + Statement stmt = conn.createStatement(); + conn.setAutoCommit(false); + try { + char[] data = new char[]{'d', 'e', 'a', 'd', 'b', 'e', 'e', 'f'}; + PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("table1", "lob_column", "?")); + try { + insertPS.setObject(1, new SerialClob(data), Types.CLOB); + insertPS.executeUpdate(); + } finally { + insertPS.close(); + } + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "lob_column")); + try { + assertTrue(rs.next()); + Clob blob = rs.getObject("lob_column", Clob.class); + assertEquals(data.length, blob.length()); + assertEquals(new String(data), blob.getSubString(1, data.length)); + blob.free(); + + blob = rs.getObject(1, Clob.class); + assertEquals(data.length, blob.length()); + assertEquals(new String(data), blob.getSubString(1, data.length)); + blob.free(); + } finally { + rs.close(); + } + } finally { + conn.setAutoCommit(true); + } + } + + /** + * Test the behavior getObject for big decimal columns. 
+ */ + @Test + void getBigDecimal() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "decimal_column,numeric_column", "0.1,0.1")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "decimal_column, numeric_column")); + try { + assertTrue(rs.next()); + assertEquals(new BigDecimal("0.1"), rs.getObject("decimal_column", BigDecimal.class)); + assertEquals(new BigDecimal("0.1"), rs.getObject(1, BigDecimal.class)); + assertEquals(new BigDecimal("0.1"), rs.getObject("numeric_column", BigDecimal.class)); + assertEquals(new BigDecimal("0.1"), rs.getObject(2, BigDecimal.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for timestamp columns. + */ + @Test + void getTimestamp() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_without_time_zone_column", "TIMESTAMP '2004-10-19 10:23:54'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_without_time_zone_column")); + try { + assertTrue(rs.next()); + Calendar calendar = GregorianCalendar.getInstance(); + calendar.clear(); + calendar.set(Calendar.YEAR, 2004); + calendar.set(Calendar.MONTH, Calendar.OCTOBER); + calendar.set(Calendar.DAY_OF_MONTH, 19); + calendar.set(Calendar.HOUR_OF_DAY, 10); + calendar.set(Calendar.MINUTE, 23); + calendar.set(Calendar.SECOND, 54); + Timestamp expectedNoZone = new Timestamp(calendar.getTimeInMillis()); + assertEquals(expectedNoZone, rs.getObject("timestamp_without_time_zone_column", Timestamp.class)); + assertEquals(expectedNoZone, rs.getObject(1, Timestamp.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for timestamp columns. 
+ */ + @Test + void getJavaUtilDate() throws SQLException { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("select TIMESTAMP '2004-10-19 10:23:54'::timestamp as timestamp_without_time_zone_column" + + ", null::timestamp as null_timestamp"); + try { + assertTrue(rs.next()); + Calendar calendar = GregorianCalendar.getInstance(); + calendar.clear(); + calendar.set(Calendar.YEAR, 2004); + calendar.set(Calendar.MONTH, Calendar.OCTOBER); + calendar.set(Calendar.DAY_OF_MONTH, 19); + calendar.set(Calendar.HOUR_OF_DAY, 10); + calendar.set(Calendar.MINUTE, 23); + calendar.set(Calendar.SECOND, 54); + java.util.Date expected = new java.util.Date(calendar.getTimeInMillis()); + assertEquals(expected, rs.getObject("timestamp_without_time_zone_column", java.util.Date.class)); + assertEquals(expected, rs.getObject(1, java.util.Date.class)); + assertNull(rs.getObject(2, java.util.Date.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for timestamp columns. 
+ */ + @Test + void getTimestampWithTimeZone() throws SQLException { + runGetTimestampWithTimeZone(UTC, "Z"); + runGetTimestampWithTimeZone(GMT03, "+03:00"); + runGetTimestampWithTimeZone(GMT05, "-05:00"); + runGetTimestampWithTimeZone(GMT13, "+13:00"); + } + + private void runGetTimestampWithTimeZone(TimeZone timeZone, String zoneString) throws SQLException { + Statement stmt = conn.createStatement(); + try { + stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_with_time_zone_column", "TIMESTAMP WITH TIME ZONE '2004-10-19 10:23:54" + zoneString + "'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_with_time_zone_column")); + try { + assertTrue(rs.next()); + + Calendar calendar = GregorianCalendar.getInstance(timeZone); + calendar.clear(); + calendar.set(Calendar.YEAR, 2004); + calendar.set(Calendar.MONTH, Calendar.OCTOBER); + calendar.set(Calendar.DAY_OF_MONTH, 19); + calendar.set(Calendar.HOUR_OF_DAY, 10); + calendar.set(Calendar.MINUTE, 23); + calendar.set(Calendar.SECOND, 54); + Timestamp expectedWithZone = new Timestamp(calendar.getTimeInMillis()); + assertEquals(expectedWithZone, rs.getObject("timestamp_with_time_zone_column", Timestamp.class)); + assertEquals(expectedWithZone, rs.getObject(1, Timestamp.class)); + } finally { + rs.close(); + } + stmt.executeUpdate("DELETE FROM table1"); + } finally { + stmt.close(); + } + } + + /** + * Test the behavior getObject for timestamp columns. 
+ */ + @Test + void getCalendar() throws SQLException { + Statement stmt = conn.createStatement(); + + ResultSet rs = stmt.executeQuery("select TIMESTAMP '2004-10-19 10:23:54'::timestamp as timestamp_without_time_zone_column" + + ", TIMESTAMP '2004-10-19 10:23:54+02'::timestamp as timestamp_with_time_zone_column, null::timestamp as null_timestamp"); + try { + assertTrue(rs.next()); + Calendar calendar = GregorianCalendar.getInstance(); + calendar.clear(); + calendar.set(Calendar.YEAR, 2004); + calendar.set(Calendar.MONTH, Calendar.OCTOBER); + calendar.set(Calendar.DAY_OF_MONTH, 19); + calendar.set(Calendar.HOUR_OF_DAY, 10); + calendar.set(Calendar.MINUTE, 23); + calendar.set(Calendar.SECOND, 54); + long expected = calendar.getTimeInMillis(); + assertEquals(expected, rs.getObject("timestamp_without_time_zone_column", Calendar.class).getTimeInMillis()); + assertEquals(expected, rs.getObject(1, Calendar.class).getTimeInMillis()); + assertNull(rs.getObject(3, Calendar.class)); + calendar.setTimeZone(TimeZone.getTimeZone("GMT+2:00")); + expected = calendar.getTimeInMillis(); + assertEquals(expected, rs.getObject("timestamp_with_time_zone_column", Calendar.class).getTimeInMillis()); + assertEquals(expected, rs.getObject(2, Calendar.class).getTimeInMillis()); + assertNull(rs.getObject(3, Calendar.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for date columns. 
+ */ + @Test + void getDate() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "date_column", "DATE '1999-01-08'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "date_column")); + try { + assertTrue(rs.next()); + Calendar calendar = GregorianCalendar.getInstance(); + calendar.clear(); + calendar.set(Calendar.YEAR, 1999); + calendar.set(Calendar.MONTH, Calendar.JANUARY); + calendar.set(Calendar.DAY_OF_MONTH, 8); + Date expectedNoZone = new Date(calendar.getTimeInMillis()); + assertEquals(expectedNoZone, rs.getObject("date_column", Date.class)); + assertEquals(expectedNoZone, rs.getObject(1, Date.class)); + } finally { + rs.close(); + } + } + + @Test + void getNullDate() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "date_column", "NULL")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "date_column")); + try { + assertTrue(rs.next()); + Date date = rs.getObject(1, Date.class); + assertTrue(rs.wasNull()); + } finally { + rs.close(); + } + } + + @Test + void getNullTimestamp() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_without_time_zone_column", "NULL")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_without_time_zone_column")); + try { + assertTrue(rs.next()); + java.util.Date ts = rs.getObject(1, java.util.Date.class); + assertTrue(rs.wasNull()); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for time columns. 
+ */ + @Test + void getTime() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "time_without_time_zone_column", "TIME '04:05:06'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_without_time_zone_column")); + try { + assertTrue(rs.next()); + Calendar calendar = GregorianCalendar.getInstance(); + calendar.clear(); + calendar.set(Calendar.YEAR, 1970); + calendar.set(Calendar.MONTH, Calendar.JANUARY); + calendar.set(Calendar.DAY_OF_MONTH, 1); + calendar.set(Calendar.HOUR, 4); + calendar.set(Calendar.MINUTE, 5); + calendar.set(Calendar.SECOND, 6); + Time expectedNoZone = new Time(calendar.getTimeInMillis()); + assertEquals(expectedNoZone, rs.getObject("time_without_time_zone_column", Time.class)); + assertEquals(expectedNoZone, rs.getObject(1, Time.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for small integer columns. + */ + @Test + void getShort() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column", "1")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column")); + try { + assertTrue(rs.next()); + assertEquals(Short.valueOf((short) 1), rs.getObject("smallint_column", Short.class)); + assertEquals(Short.valueOf((short) 1), rs.getObject(1, Short.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for small integer columns. 
+ */ + @Test + void getShortNull() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column", "NULL")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column")); + try { + assertTrue(rs.next()); + assertNull(rs.getObject("smallint_column", Short.class)); + assertNull(rs.getObject(1, Short.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for integer columns. + */ + @Test + void getInteger() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column, integer_column", "1, 2")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column, integer_column")); + try { + assertTrue(rs.next()); + assertEquals(Integer.valueOf(1), rs.getObject("smallint_column", Integer.class)); + assertEquals(Integer.valueOf(1), rs.getObject(1, Integer.class)); + assertEquals(Integer.valueOf(2), rs.getObject("integer_column", Integer.class)); + assertEquals(Integer.valueOf(2), rs.getObject(2, Integer.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for integer columns. + */ + @Test + void getIntegerNull() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column, integer_column", "NULL, NULL")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column, integer_column")); + try { + assertTrue(rs.next()); + assertNull(rs.getObject("smallint_column", Integer.class)); + assertNull(rs.getObject(1, Integer.class)); + assertNull(rs.getObject("integer_column", Integer.class)); + assertNull(rs.getObject(2, Integer.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for long columns. 
+ */ + @Test + void getBigInteger() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "bigint_column", "2147483648")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "bigint_column")); + try { + assertTrue(rs.next()); + assertEquals(BigInteger.valueOf(2147483648L), rs.getObject("bigint_column", BigInteger.class)); + assertEquals(BigInteger.valueOf(2147483648L), rs.getObject(1, BigInteger.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for long columns. + */ + @Test + void getLong() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "bigint_column", "2147483648")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "bigint_column")); + try { + assertTrue(rs.next()); + assertEquals(Long.valueOf(2147483648L), rs.getObject("bigint_column", Long.class)); + assertEquals(Long.valueOf(2147483648L), rs.getObject(1, Long.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for long columns. + */ + @Test + void getLongNull() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "bigint_column", "NULL")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "bigint_column")); + try { + assertTrue(rs.next()); + assertNull(rs.getObject("bigint_column", Long.class)); + assertNull(rs.getObject(1, Long.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for double columns. 
+ */ + @Test + void getDouble() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "double_column", "1.0")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "double_column")); + try { + assertTrue(rs.next()); + assertEquals(Double.valueOf(1.0d), rs.getObject("double_column", Double.class)); + assertEquals(Double.valueOf(1.0d), rs.getObject(1, Double.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for double columns. + */ + @Test + void getDoubleNull() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "double_column", "NULL")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "double_column")); + try { + assertTrue(rs.next()); + assertNull(rs.getObject("double_column", Double.class)); + assertNull(rs.getObject(1, Double.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for float columns. + */ + @Test + void getFloat() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "real_column", "1.0")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "real_column")); + try { + assertTrue(rs.next()); + assertEquals(Float.valueOf(1.0f), rs.getObject("real_column", Float.class)); + assertEquals(Float.valueOf(1.0f), rs.getObject(1, Float.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for float columns. 
+ */ + @Test + void getFloatNull() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "real_column", "NULL")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "real_column")); + try { + assertTrue(rs.next()); + assertNull(rs.getObject("real_column", Float.class)); + assertNull(rs.getObject(1, Float.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for serial columns. + */ + @Test + void getSerial() throws SQLException { + if (!((BaseConnection) conn).haveMinimumServerVersion(ServerVersion.v9_2)) { + // smallserial requires 9.2 or later + return; + } + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "smallserial_column, serial_column", "1, 2")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallserial_column, serial_column")); + try { + assertTrue(rs.next()); + assertEquals(Integer.valueOf(1), rs.getObject("smallserial_column", Integer.class)); + assertEquals(Integer.valueOf(1), rs.getObject(1, Integer.class)); + assertEquals(Integer.valueOf(2), rs.getObject("serial_column", Integer.class)); + assertEquals(Integer.valueOf(2), rs.getObject(2, Integer.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for boolean columns. + */ + @Test + void getBoolean() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "boolean_column", "TRUE")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "boolean_column")); + try { + assertTrue(rs.next()); + assertTrue(rs.getObject("boolean_column", Boolean.class)); + assertTrue(rs.getObject(1, Boolean.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for boolean columns. 
+   */
+  @Test
+  void getBooleanNull() throws SQLException {
+    Statement stmt = conn.createStatement();
+    stmt.executeUpdate(TestUtil.insertSQL("table1", "boolean_column", "NULL"));
+
+    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "boolean_column"));
+    try {
+      assertTrue(rs.next());
+      assertNull(rs.getObject("boolean_column", Boolean.class));
+      assertNull(rs.getObject(1, Boolean.class));
+    } finally {
+      rs.close();
+    }
+  }
+
+  /**
+   * Test the behavior getObject for blob columns.
+   */
+  @Test
+  void getBlob() throws SQLException {
+    Statement stmt = conn.createStatement();
+    // Blob access requires a transaction: large objects are only valid inside one.
+    conn.setAutoCommit(false);
+    try {
+      byte[] data = new byte[]{(byte) 0xDE, (byte) 0xAD, (byte) 0xBE, (byte) 0xEF};
+      PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("table1", "lob_column", "?"));
+      try {
+        insertPS.setObject(1, new SerialBlob(data), Types.BLOB);
+        insertPS.executeUpdate();
+      } finally {
+        insertPS.close();
+      }
+
+      ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "lob_column"));
+      try {
+        assertTrue(rs.next());
+        Blob blob = rs.getObject("lob_column", Blob.class);
+        assertEquals(data.length, blob.length());
+        assertArrayEquals(data, blob.getBytes(1, data.length));
+        blob.free();
+
+        blob = rs.getObject(1, Blob.class);
+        assertEquals(data.length, blob.length());
+        assertArrayEquals(data, blob.getBytes(1, data.length));
+        blob.free();
+      } finally {
+        rs.close();
+      }
+    } finally {
+      conn.setAutoCommit(true);
+    }
+  }
+
+  /**
+   * Test the behavior getObject for array columns.
+ */ + @Test + void getArray() throws SQLException { + Statement stmt = conn.createStatement(); + String[] data = new String[]{"java", "jdbc"}; + stmt.executeUpdate(TestUtil.insertSQL("table1", "array_column", "'{\"java\", \"jdbc\"}'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "array_column")); + try { + assertTrue(rs.next()); + Array array = rs.getObject("array_column", Array.class); + assertArrayEquals(data, (String[]) array.getArray()); + array.free(); + + array = rs.getObject(1, Array.class); + assertArrayEquals(data, (String[]) array.getArray()); + array.free(); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for xml columns. + */ + @Test + void getXml() throws SQLException { + if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3)) { + // XML column requires PostgreSQL 8.3+ + return; + } + Statement stmt = conn.createStatement(); + String content = "Manual"; + stmt.executeUpdate(TestUtil.insertSQL("table1", "xml_column", "XMLPARSE (DOCUMENT 'Manual')")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "xml_column")); + try { + assertTrue(rs.next()); + SQLXML sqlXml = rs.getObject("xml_column", SQLXML.class); + assertEquals(content, sqlXml.getString()); + sqlXml.free(); + + sqlXml = rs.getObject(1, SQLXML.class); + assertEquals(content, sqlXml.getString()); + sqlXml.free(); + } finally { + rs.close(); + } + } + + /** + *

+   * <p>Test the behavior getObject for money columns.</p>
+   *
+   * <p>The test is ignored as it is locale-dependent.</p>

+ */ + @Disabled + @Test + void getMoney() throws SQLException { + Statement stmt = conn.createStatement(); + String expected = "12.34"; + stmt.executeUpdate(TestUtil.insertSQL("table1", "money_column", "'12.34'::float8::numeric::money")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "money_column")); + try { + assertTrue(rs.next()); + PGmoney money = rs.getObject("money_column", PGmoney.class); + assertTrue(money.getValue().endsWith(expected)); + + money = rs.getObject(1, PGmoney.class); + assertTrue(money.getValue().endsWith(expected)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for point columns. + */ + @Test + void getPoint() throws SQLException { + Statement stmt = conn.createStatement(); + PGpoint expected = new PGpoint(1.0d, 2.0d); + stmt.executeUpdate(TestUtil.insertSQL("table1", "point_column", "point '(1, 2)'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "point_column")); + try { + assertTrue(rs.next()); + assertEquals(expected, rs.getObject("point_column", PGpoint.class)); + assertEquals(expected, rs.getObject(1, PGpoint.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for line columns. + */ + @Test + void getLine() throws SQLException { + if (!((BaseConnection) conn).haveMinimumServerVersion(ServerVersion.v9_4)) { + // only 9.4 and later ship with full line support by default + return; + } + + Statement stmt = conn.createStatement(); + PGline expected = new PGline(1.0d, 2.0d, 3.0d); + stmt.executeUpdate(TestUtil.insertSQL("table1", "line_column", "line '{1, 2, 3}'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "line_column")); + try { + assertTrue(rs.next()); + assertEquals(expected, rs.getObject("line_column", PGline.class)); + assertEquals(expected, rs.getObject(1, PGline.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for lseg columns. 
+ */ + @Test + void getLineseg() throws SQLException { + Statement stmt = conn.createStatement(); + PGlseg expected = new PGlseg(1.0d, 2.0d, 3.0d, 4.0d); + stmt.executeUpdate(TestUtil.insertSQL("table1", "lseg_column", "lseg '[(1, 2), (3, 4)]'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "lseg_column")); + try { + assertTrue(rs.next()); + assertEquals(expected, rs.getObject("lseg_column", PGlseg.class)); + assertEquals(expected, rs.getObject(1, PGlseg.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for box columns. + */ + @Test + void getBox() throws SQLException { + Statement stmt = conn.createStatement(); + PGbox expected = new PGbox(1.0d, 2.0d, 3.0d, 4.0d); + stmt.executeUpdate(TestUtil.insertSQL("table1", "box_column", "box '((1, 2), (3, 4))'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "box_column")); + try { + assertTrue(rs.next()); + assertEquals(expected, rs.getObject("box_column", PGbox.class)); + assertEquals(expected, rs.getObject(1, PGbox.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for path columns. + */ + @Test + void getPath() throws SQLException { + Statement stmt = conn.createStatement(); + PGpath expected = new PGpath(new PGpoint[]{new PGpoint(1.0d, 2.0d), new PGpoint(3.0d, 4.0d)}, true); + stmt.executeUpdate(TestUtil.insertSQL("table1", "path_column", "path '[(1, 2), (3, 4)]'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "path_column")); + try { + assertTrue(rs.next()); + assertEquals(expected, rs.getObject("path_column", PGpath.class)); + assertEquals(expected, rs.getObject(1, PGpath.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for polygon columns. 
+ */ + @Test + void getPolygon() throws SQLException { + Statement stmt = conn.createStatement(); + PGpolygon expected = new PGpolygon(new PGpoint[]{new PGpoint(1.0d, 2.0d), new PGpoint(3.0d, 4.0d)}); + stmt.executeUpdate(TestUtil.insertSQL("table1", "polygon_column", "polygon '((1, 2), (3, 4))'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "polygon_column")); + try { + assertTrue(rs.next()); + assertEquals(expected, rs.getObject("polygon_column", PGpolygon.class)); + assertEquals(expected, rs.getObject(1, PGpolygon.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for circle columns. + */ + @Test + void getCircle() throws SQLException { + Statement stmt = conn.createStatement(); + PGcircle expected = new PGcircle(1.0d, 2.0d, 3.0d); + stmt.executeUpdate(TestUtil.insertSQL("table1", "circle_column", "circle '<(1, 2), 3>'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "circle_column")); + try { + assertTrue(rs.next()); + assertEquals(expected, rs.getObject("circle_column", PGcircle.class)); + assertEquals(expected, rs.getObject(1, PGcircle.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for interval columns. + */ + @Test + void getInterval() throws SQLException { + Statement stmt = conn.createStatement(); + PGInterval expected = new PGInterval(0, 0, 3, 4, 5, 6.0d); + stmt.executeUpdate(TestUtil.insertSQL("table1", "interval_column", "interval '3 4:05:06'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "interval_column")); + try { + assertTrue(rs.next()); + assertEquals(expected, rs.getObject("interval_column", PGInterval.class)); + assertEquals(expected, rs.getObject(1, PGInterval.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for uuid columns. 
+ */ + @Test + void getUuid() throws SQLException { + if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3)) { + // UUID requires PostgreSQL 8.3+ + return; + } + Statement stmt = conn.createStatement(); + String expected = "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11"; + stmt.executeUpdate(TestUtil.insertSQL("table1", "uuid_column", "'" + expected + "'")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "uuid_column")); + try { + assertTrue(rs.next()); + assertEquals(UUID.fromString(expected), rs.getObject("uuid_column", UUID.class)); + assertEquals(UUID.fromString(expected), rs.getObject(1, UUID.class)); + } finally { + rs.close(); + } + } + + /** + * Test the behavior getObject for inet columns. + */ + @Test + void getInetAddressNull() throws SQLException, UnknownHostException { + Statement stmt = conn.createStatement(); + stmt.executeUpdate(TestUtil.insertSQL("table1", "inet_column", "NULL")); + + ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "inet_column")); + try { + assertTrue(rs.next()); + assertNull(rs.getObject("inet_column", InetAddress.class)); + assertNull(rs.getObject(1, InetAddress.class)); + } finally { + rs.close(); + } + } + + private void testInet(String inet, InetAddress expectedAddr, String expectedText) throws SQLException, UnknownHostException { + PGobject expectedObj = new PGobject(); + expectedObj.setType("inet"); + expectedObj.setValue(expectedText); + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT '" + inet + "'::inet AS inet_column"); + try { + assertTrue(rs.next()); + assertEquals(expectedText, rs.getString(1), "The string value of the inet should match when fetched via getString(...)"); + assertEquals(expectedText, rs.getString("inet_column"), "The string value of the inet should match when fetched via getString(...)"); + assertEquals(expectedObj, rs.getObject(1), "The object value of the inet should match when fetched via getObject(...)"); + 
assertEquals(expectedObj, rs.getObject("inet_column"), "The object value of the inet should match when fetched via getObject(...)"); + assertEquals(expectedAddr, rs.getObject("inet_column", InetAddress.class), "The InetAddress value should match when fetched via getObject(..., InetAddress.class)"); + assertEquals(expectedAddr, rs.getObject(1, InetAddress.class), "The InetAddress value should match when fetched via getObject(..., InetAddress.class)"); + } finally { + rs.close(); + stmt.close(); + } + } + + /** + * Test the behavior getObject for ipv4 inet columns. + */ + @Test + void getInet4Address() throws SQLException, UnknownHostException { + String inet = "192.168.100.128"; + InetAddress addr = InetAddress.getByName(inet); + testInet(inet, addr, inet); + testInet(inet + "/16", addr, inet + "/16"); + testInet(inet + "/32", addr, inet); + } + + /** + * Test the behavior getObject for ipv6 inet columns. + */ + @Test + void getInet6Address() throws SQLException, UnknownHostException { + String inet = "2001:4f8:3:ba:2e0:81ff:fe22:d1f1"; + InetAddress addr = InetAddress.getByName(inet); + testInet(inet, addr, inet); + testInet(inet + "/16", addr, inet + "/16"); + testInet(inet + "/128", addr, inet); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/Jdbc41TestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/Jdbc41TestSuite.java new file mode 100644 index 0000000..921e5a4 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/Jdbc41TestSuite.java @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc4.jdbc41; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +/* + * Executes all known tests for JDBC4.1 + */ +@RunWith(Suite.class) +@Suite.SuiteClasses({ + AbortTest.class, + CloseOnCompletionTest.class, + GetObjectTest.class, + NetworkTimeoutTest.class, + SchemaTest.class, +}) +public class Jdbc41TestSuite { + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/NetworkTimeoutTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/NetworkTimeoutTest.java new file mode 100644 index 0000000..24fed8d --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/NetworkTimeoutTest.java @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2007, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc4.jdbc41; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.test.TestUtil; + +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.concurrent.TimeUnit; + +class NetworkTimeoutTest { + @Test + void setNetworkTimeout() throws Exception { + Connection conn = TestUtil.openDB(); + assertDoesNotThrow(() -> { + conn.setNetworkTimeout(null, 0); + }, "Connection.setNetworkTimeout() throw exception"); + } + + @Test + void setNetworkTimeoutInvalid() throws Exception { + Connection conn = TestUtil.openDB(); + try { + conn.setNetworkTimeout(null, -1); + fail("Connection.setNetworkTimeout() did not throw expected exception"); + } catch (SQLException e) { + // Passed + } finally { + TestUtil.closeDB(conn); + } + } + + @Test + void setNetworkTimeoutValid() throws Exception { + Connection conn = TestUtil.openDB(); 
+ assertDoesNotThrow(() -> { + conn.setNetworkTimeout(null, (int) TimeUnit.SECONDS.toMillis(5)); + assertEquals(TimeUnit.SECONDS.toMillis(5), conn.getNetworkTimeout()); + }, "Connection.setNetworkTimeout() throw exception"); + } + + @Test + void setNetworkTimeoutEnforcement() throws Exception { + Connection conn = TestUtil.openDB(); + Statement stmt = null; + try { + conn.setNetworkTimeout(null, (int) TimeUnit.SECONDS.toMillis(1)); + stmt = conn.createStatement(); + stmt.execute("SELECT pg_sleep(2)"); + fail("Connection.setNetworkTimeout() did not throw expected exception"); + } catch (SQLException e) { + // assertTrue(stmt.isClosed()); + assertTrue(conn.isClosed()); + } finally { + TestUtil.closeQuietly(stmt); + TestUtil.closeDB(conn); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/SchemaTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/SchemaTest.java new file mode 100644 index 0000000..be64b47 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/SchemaTest.java @@ -0,0 +1,317 @@ +/* + * Copyright (c) 2010, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc4.jdbc41; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.Properties; + +class SchemaTest { + private Connection conn; + private boolean dropUserSchema; + + @BeforeEach + void setUp() throws Exception { + conn = TestUtil.openDB(); + Statement stmt = conn.createStatement(); + try { + stmt.execute("CREATE SCHEMA " + TestUtil.getUser()); + dropUserSchema = true; + } catch (SQLException e) { + /* assume schema existed */ + } + stmt.execute("CREATE SCHEMA schema1"); + stmt.execute("CREATE SCHEMA schema2"); + stmt.execute("CREATE SCHEMA \"schema 3\""); + stmt.execute("CREATE SCHEMA \"schema \"\"4\""); + stmt.execute("CREATE SCHEMA \"schema '5\""); + stmt.execute("CREATE SCHEMA \"schema ,6\""); + stmt.execute("CREATE SCHEMA \"UpperCase\""); + TestUtil.createTable(conn, "schema1.table1", "id integer"); + TestUtil.createTable(conn, "schema2.table2", "id integer"); + TestUtil.createTable(conn, "\"UpperCase\".table3", "id integer"); + TestUtil.createTable(conn, "schema1.sptest", "id integer"); + TestUtil.createTable(conn, "schema2.sptest", "id varchar"); + } + + @AfterEach + void tearDown() throws SQLException { + conn.setAutoCommit(true); + 
conn.setSchema(null); + Statement stmt = conn.createStatement(); + if (dropUserSchema) { + stmt.execute("DROP SCHEMA " + TestUtil.getUser() + " CASCADE"); + } + stmt.execute("DROP SCHEMA schema1 CASCADE"); + stmt.execute("DROP SCHEMA schema2 CASCADE"); + stmt.execute("DROP SCHEMA \"schema 3\" CASCADE"); + stmt.execute("DROP SCHEMA \"schema \"\"4\" CASCADE"); + stmt.execute("DROP SCHEMA \"schema '5\" CASCADE"); + stmt.execute("DROP SCHEMA \"schema ,6\""); + stmt.execute("DROP SCHEMA \"UpperCase\" CASCADE"); + TestUtil.closeDB(conn); + } + + /** + * Test that what you set is what you get. + */ + @Test + void getSetSchema() throws SQLException { + conn.setSchema("schema1"); + assertEquals("schema1", conn.getSchema()); + conn.setSchema("schema2"); + assertEquals("schema2", conn.getSchema()); + conn.setSchema("schema 3"); + assertEquals("schema 3", conn.getSchema()); + conn.setSchema("schema \"4"); + assertEquals("schema \"4", conn.getSchema()); + conn.setSchema("schema '5"); + assertEquals("schema '5", conn.getSchema()); + conn.setSchema("UpperCase"); + assertEquals("UpperCase", conn.getSchema()); + } + + /** + * Test that setting the schema allows to access objects of this schema without prefix, hide + * objects from other schemas but doesn't prevent to prefix-access to them. 
+ */ + @Test + void usingSchema() throws SQLException { + Statement stmt = conn.createStatement(); + try { + assertDoesNotThrow(() -> { + conn.setSchema("schema1"); + stmt.executeQuery(TestUtil.selectSQL("table1", "*")); + stmt.executeQuery(TestUtil.selectSQL("schema2.table2", "*")); + try { + stmt.executeQuery(TestUtil.selectSQL("table2", "*")); + fail("Objects of schema2 should not be visible without prefix"); + } catch (SQLException e) { + // expected + } + + conn.setSchema("schema2"); + stmt.executeQuery(TestUtil.selectSQL("table2", "*")); + stmt.executeQuery(TestUtil.selectSQL("schema1.table1", "*")); + try { + stmt.executeQuery(TestUtil.selectSQL("table1", "*")); + fail("Objects of schema1 should not be visible without prefix"); + } catch (SQLException e) { + // expected + } + + conn.setSchema("UpperCase"); + stmt.executeQuery(TestUtil.selectSQL("table3", "*")); + stmt.executeQuery(TestUtil.selectSQL("schema1.table1", "*")); + try { + stmt.executeQuery(TestUtil.selectSQL("table1", "*")); + fail("Objects of schema1 should not be visible without prefix"); + } catch (SQLException e) { + // expected + } + }, "Could not find expected schema elements: "); + } finally { + try { + stmt.close(); + } catch (SQLException e) { + } + } + } + + /** + * Test that get schema returns the schema with the highest priority in the search path. 
+ */ + @Test + void multipleSearchPath() throws SQLException { + execute("SET search_path TO schema1,schema2"); + assertEquals("schema1", conn.getSchema()); + + execute("SET search_path TO \"schema ,6\",schema2"); + assertEquals("schema ,6", conn.getSchema()); + } + + @Test + void schemaInProperties() throws Exception { + Properties properties = new Properties(); + properties.setProperty("currentSchema", "schema1"); + Connection conn = TestUtil.openDB(properties); + try { + assertEquals("schema1", conn.getSchema()); + + Statement stmt = conn.createStatement(); + stmt.executeQuery(TestUtil.selectSQL("table1", "*")); + stmt.executeQuery(TestUtil.selectSQL("schema2.table2", "*")); + try { + stmt.executeQuery(TestUtil.selectSQL("table2", "*")); + fail("Objects of schema2 should not be visible without prefix"); + } catch (SQLException e) { + // expected + } + } finally { + TestUtil.closeDB(conn); + } + } + + @Test + public void schemaPath$User() throws Exception { + execute("SET search_path TO \"$user\",public,schema2"); + assertEquals(TestUtil.getUser(), conn.getSchema()); + } + + private void execute(String sql) throws SQLException { + Statement stmt = conn.createStatement(); + try { + stmt.execute(sql); + } finally { + try { + stmt.close(); + } catch (SQLException e) { + } + } + } + + @Test + void searchPathPreparedStatementAutoCommitFalse() throws SQLException { + conn.setAutoCommit(false); + searchPathPreparedStatementAutoCommitTrue(); + } + + @Test + void searchPathPreparedStatementAutoCommitTrue() throws SQLException { + searchPathPreparedStatement(); + } + + @Test + void searchPathPreparedStatement() throws SQLException { + execute("set search_path to schema1,public"); + PreparedStatement ps = conn.prepareStatement("select * from sptest"); + for (int i = 0; i < 10; i++) { + ps.execute(); + } + assertColType(ps, "sptest should point to schema1.sptest, thus column type should be INT", + Types.INTEGER); + ps.close(); + execute("set search_path to schema2,public"); 
+ ps = conn.prepareStatement("select * from sptest"); + assertColType(ps, "sptest should point to schema2.sptest, thus column type should be VARCHAR", + Types.VARCHAR); + ps.close(); + } + + @Test + void currentSchemaPropertyVisibilityTableDuringFunctionCreation() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema1,schema2"); + Connection connection = TestUtil.openDB(properties); + + TestUtil.execute(connection, "create table schema1.check_table (test_col text)"); + TestUtil.execute(connection, "insert into schema1.check_table (test_col) values ('test_value')"); + TestUtil.execute(connection, "create or replace function schema2.check_fun () returns text as $$" + + " select test_col from check_table" + + "$$ language sql stable"); + connection.close(); + } + + @Test + void currentSchemaPropertyNotVisibilityTableDuringFunctionCreation() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema2"); + + try (Connection connection = TestUtil.openDB(properties)) { + TestUtil.execute(connection, "create table schema1.check_table (test_col text)"); + TestUtil.execute(connection, "insert into schema1.check_table (test_col) values ('test_value')"); + TestUtil.execute(connection, "create or replace function schema2.check_fun (txt text) returns text as $$" + + " select test_col from check_table" + + "$$ language sql immutable"); + } catch (PSQLException e) { + String sqlState = e.getSQLState(); + String message = e.getMessage(); + assertThat("Test creates function in schema 'schema2' and this function try use table \"check_table\" " + + "from schema 'schema1'. 
We expect here sql error code - " + + PSQLState.UNDEFINED_TABLE + ", because search_path does not contains schema 'schema1' and " + + "postgres does not see table \"check_table\"", + sqlState, + equalTo(PSQLState.UNDEFINED_TABLE.getState()) + ); + assertThat( + "Test creates function in schema 'schema2' and this function try use table \"check_table\" " + + "from schema 'schema1'. We expect here that sql error message will be contains \"check_table\", " + + "because search_path does not contains schema 'schema1' and postgres does not see " + + "table \"check_table\"", + message, + containsString("\"check_table\"") + ); + } + } + + @Test + void currentSchemaPropertyVisibilityFunction() throws SQLException { + currentSchemaPropertyVisibilityTableDuringFunctionCreation(); + Properties properties = new Properties(); + properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema1,schema2"); + Connection connection = TestUtil.openDB(properties); + + TestUtil.execute(connection, "select check_fun()"); + connection.close(); + } + + @Test + void currentSchemaPropertyNotVisibilityTableInsideFunction() throws SQLException { + currentSchemaPropertyVisibilityTableDuringFunctionCreation(); + Properties properties = new Properties(); + properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema2"); + + try (Connection connection = TestUtil.openDB(properties)) { + TestUtil.execute(connection, "select check_fun()"); + } catch (PSQLException e) { + String sqlState = e.getSQLState(); + String message = e.getMessage(); + assertThat("Test call function in schema 'schema2' and this function uses table \"check_table\" " + + "from schema 'schema1'. 
We expect here sql error code - " + PSQLState.UNDEFINED_TABLE + ", " + + "because search_path does not contains schema 'schema1' and postgres does not see table \"check_table\".", + sqlState, + equalTo(PSQLState.UNDEFINED_TABLE.getState()) + ); + assertThat( + "Test call function in schema 'schema2' and this function uses table \"check_table\" " + + "from schema 'schema1'. We expect here that sql error message will be contains \"check_table\", because " + + " search_path does not contains schema 'schema1' and postgres does not see table \"check_table\"", + message, + containsString("\"check_table\"") + ); + } + } + + private void assertColType(PreparedStatement ps, String message, int expected) throws SQLException { + ResultSet rs = ps.executeQuery(); + ResultSetMetaData md = rs.getMetaData(); + int columnType = md.getColumnType(1); + assertEquals(expected, columnType, message); + rs.close(); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/AdaptiveFetchSizeTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/AdaptiveFetchSizeTest.java new file mode 100644 index 0000000..ae19db7 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/AdaptiveFetchSizeTest.java @@ -0,0 +1,308 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
/*
 * Copyright (c) 2020, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc42;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.postgresql.PGConnection;
import org.postgresql.PGProperty;
import org.postgresql.jdbc.PreferQueryMode;
import org.postgresql.test.TestUtil;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Test;

import java.lang.management.ManagementFactory;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Properties;

/**
 * Integration tests for adaptive fetch process.
 */
class AdaptiveFetchSizeTest {

  private Connection connection;
  private PreparedStatement statement;
  private ResultSet resultSet;

  private final String table = "test_adaptive_fetch";
  private final String columns = "value VARCHAR";

  /**
   * Drop table and close connection. Result set and statement are closed first so no
   * resources leak even when a test aborts mid-fetch.
   */
  @AfterEach
  void tearDown() throws SQLException {
    if (connection != null && !connection.isClosed()) {
      connection.setAutoCommit(true);
      if (resultSet != null) {
        resultSet.close();
      }
      if (statement != null) {
        statement.close();
      }
      TestUtil.dropTable(connection, table);
      TestUtil.closeDB(connection);
    }
  }

  /**
   * Simple integration test. At start created is table with rows sizes like 4 x 35B, 1 x 40B,
   * 45 x 30B. Starting fetch uses the default fetch size, so it returns 4 rows. After reading
   * 4 rows, a new fetch size is computed: the biggest row so far was 35B, so 300/35B = 8 rows.
   * The first row of that fetch is 40B, which changes fetch size to 7 (300/40B = 7); later
   * fetches keep 7 to the end.
   * Checked is:
   * - the starting 4 rows report fetch size 4;
   * - the next 8 rows report fetch size 8;
   * - the remaining 38 rows report fetch size 7;
   * - all 50 rows were read.
   */
  @Test
  void adaptiveFetching() throws SQLException {
    int startFetchSize = 4;
    int expectedFirstSize = 8;
    int expectedSecondSize = 7;
    int expectedCounter = 50;
    int resultCounter = 0;

    Properties properties = new Properties();
    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
    PGProperty.MAX_RESULT_BUFFER.set(properties, "300");
    PGProperty.ADAPTIVE_FETCH.set(properties, true);

    openConnectionAndCreateTable(properties);

    for (int i = 0; i < expectedCounter; i++) {
      // row 4 is the single oversized row that forces the fetch size down to 7
      addStringWithSize(i == 4 ? 40 : 35);
    }

    executeFetchingQuery();

    resultCounter += readRows(4, startFetchSize);
    resultCounter += readRows(8, expectedFirstSize);
    resultCounter += readRemainingRows(expectedSecondSize);

    assertEquals(expectedCounter, resultCounter);
  }

  /**
   * Checks that the minimum size is honored during adaptive fetching. The table gets 50 rows
   * sized 1x270B, 49x10B. The starting fetch uses the default size 4. As the first row is
   * 270B, the computed size would be 1 (300/270 = 1), but adaptiveFetchMinimum = 10 forces the
   * next fetches to use size 10, which then stays to the end.
   * Checked is:
   * - the starting 4 rows report fetch size 4;
   * - the next 46 rows report fetch size 10;
   * - all 50 rows were read.
   */
  @Test
  void adaptiveFetchingWithMinimumSize() throws SQLException {
    int startFetchSize = 4;
    int expectedSize = 10;
    int expectedCounter = 50;
    int resultCounter = 0;

    Properties properties = new Properties();
    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
    PGProperty.MAX_RESULT_BUFFER.set(properties, "300");
    PGProperty.ADAPTIVE_FETCH.set(properties, true);
    PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, expectedSize);

    openConnectionAndCreateTable(properties);

    for (int i = 0; i < expectedCounter; i++) {
      addStringWithSize(i == 0 ? 270 : 10);
    }

    executeFetchingQuery();

    resultCounter += readRows(4, startFetchSize);
    resultCounter += readRemainingRows(expectedSize);

    assertEquals(expectedCounter, resultCounter);
  }

  /**
   * Checks that the maximum size is honored during adaptive fetching. The table gets 50 rows
   * sized 4x10B, 46x30B. The starting fetch uses the default size 4. As the first fetch only
   * sees 10B rows, the computed size would be 30 (300/10 = 30), but adaptiveFetchMaximum = 10
   * caps the next fetches at 10 (otherwise later rows would exceed maxResultBuffer), which
   * then stays to the end.
   * Checked is:
   * - the starting 4 rows report fetch size 4;
   * - the next 46 rows report fetch size 10;
   * - all 50 rows were read.
   */
  @Test
  void adaptiveFetchingWithMaximumSize() throws SQLException {
    int startFetchSize = 4;
    int expectedSize = 10;
    int expectedCounter = 50;
    int resultCounter = 0;

    Properties properties = new Properties();
    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
    PGProperty.MAX_RESULT_BUFFER.set(properties, "300");
    PGProperty.ADAPTIVE_FETCH.set(properties, true);
    PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, expectedSize);

    openConnectionAndCreateTable(properties);

    for (int i = 0; i < expectedCounter; i++) {
      addStringWithSize(i < 4 ? 10 : 30);
    }

    executeFetchingQuery();

    resultCounter += readRows(4, startFetchSize);
    resultCounter += readRemainingRows(expectedSize);

    assertEquals(expectedCounter, resultCounter);
  }

  /**
   * Fetching with the maximum possible buffer. The table gets 1000 rows of 10B each. The
   * starting fetch uses the default size 4; the next fetch size is computed from
   * maxResultBuffer ("90p" = 90% of max heap), so the second fetch most probably reads
   * everything that is left.
   * Checked is:
   * - the starting 4 rows report fetch size 4;
   * - the next 996 rows report a fetch size computed from the max heap;
   * - all 1000 rows were read.
   */
  @Test
  void adaptiveFetchingWithMoreData() throws SQLException {
    int startFetchSize = 4;
    int expectedCounter = 1000;
    int resultCounter = 0;
    // mirrors the driver's size computation: 90% of max heap divided by the 10B row size
    int expectedSize = (int) (
        (long) (0.90 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()) / 10);

    Properties properties = new Properties();
    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
    PGProperty.MAX_RESULT_BUFFER.set(properties, "90p");
    PGProperty.ADAPTIVE_FETCH.set(properties, true);

    openConnectionAndCreateTable(properties);

    for (int i = 0; i < expectedCounter; i++) {
      addStringWithSize(10);
    }

    executeFetchingQuery();

    resultCounter += readRows(4, startFetchSize);
    resultCounter += readRemainingRows(expectedSize);

    assertEquals(expectedCounter, resultCounter);
  }

  /**
   * Read exactly {@code rows} rows from the open result set, asserting every row is actually
   * present (the original loops ignored {@code next()}'s return value, so a short result set
   * would have checked the fetch size of an exhausted cursor) and that each row is reported
   * with {@code expectedFetchSize}.
   *
   * @param rows number of rows to consume
   * @param expectedFetchSize fetch size expected while reading these rows
   * @return the number of rows read (equal to {@code rows} when all assertions pass)
   */
  private int readRows(int rows, int expectedFetchSize) throws SQLException {
    int counter = 0;
    for (int i = 0; i < rows; i++) {
      assertTrue(resultSet.next(),
          "Result set ended after " + counter + " of " + rows + " expected rows");
      counter++;
      assertEquals(expectedFetchSize, resultSet.getFetchSize());
    }
    return counter;
  }

  /**
   * Read all remaining rows, asserting each one is reported with {@code expectedFetchSize}.
   *
   * @param expectedFetchSize fetch size expected for every remaining row
   * @return the number of rows read
   */
  private int readRemainingRows(int expectedFetchSize) throws SQLException {
    int counter = 0;
    while (resultSet.next()) {
      counter++;
      assertEquals(expectedFetchSize, resultSet.getFetchSize());
    }
    return counter;
  }

  /**
   * Execute query, which gonna be fetched. Sets auto commit to false to make fetching
   * happen.
   */
  private void executeFetchingQuery() throws SQLException {
    connection.setAutoCommit(false);

    statement = connection.prepareStatement("SELECT * FROM " + table);
    resultSet = statement.executeQuery();
  }

  /**
   * Insert string with given size to a table.
   *
   * @param size desired size of a string to be inserted in the table
   */
  private void addStringWithSize(int size) throws SQLException {
    // single-quoted literal of `size` repeated characters
    String insert = TestUtil.insertSQL(table, "value", "'" + "H".repeat(size) + "'");
    TestUtil.execute(connection, insert);
  }

  /**
   * Open connection, check if fetch can be performed and create table.
   *
   * @param properties Properties to be used during opening connection.
   */
  private void openConnectionAndCreateTable(Properties properties) throws SQLException {
    connection = TestUtil.openDB(properties);
    // After opening connection we should check if will be possible to do a fetch
    checkIfFetchTestCanBePerformed(connection);
    TestUtil.createTable(connection, table, columns);
  }

  /**
   * Check if a fetch can be performed - PreferQueryMode is not set to Simple.
   *
   * @param connection Connection to be checked.
   */
  private void checkIfFetchTestCanBePerformed(Connection connection) throws SQLException {
    PGConnection pgConnection = connection.unwrap(PGConnection.class);
    PreferQueryMode preferQueryMode =
        pgConnection == null ? PreferQueryMode.EXTENDED : pgConnection.getPreferQueryMode();
    Assumptions.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE,
        "Fetching tests can't be performed in simple mode");
  }

}

// ---- next file: pgjdbc/src/test/java/org/postgresql/test/jdbc42/CustomizeDefaultFetchSizeTest.java ----
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc42;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;

import org.postgresql.PGProperty;
import org.postgresql.test.TestUtil;

import org.hamcrest.CoreMatchers;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;

import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;

/**
 * Tests that the defaultRowFetchSize connection property is propagated to statements
 * created from that connection, and that invalid values are rejected at connect time.
 */
class CustomizeDefaultFetchSizeTest {

  private Connection connection;

  @AfterEach
  void tearDown() throws Exception {
    if (connection != null) {
      TestUtil.closeDB(connection);
    }
  }

  /**
   * A plain Statement must inherit the fetch size configured on the connection.
   */
  @Test
  void setPredefineDefaultFetchSizeOnStatement() throws Exception {
    final int waitFetchSize = 13;
    Properties properties = new Properties();
    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, waitFetchSize);

    connection = TestUtil.openDB(properties);

    final int resultFetchSize;
    // try-with-resources: the statement is closed even if getFetchSize() throws
    try (Statement statement = connection.createStatement()) {
      resultFetchSize = statement.getFetchSize();
    }

    assertThat(
        "PGProperty.DEFAULT_ROW_FETCH_SIZE should be propagate to Statement that was create from connection "
            + "on that define it parameter",
        resultFetchSize, CoreMatchers.equalTo(waitFetchSize));
  }

  /**
   * A CallableStatement must inherit the fetch size configured on the connection.
   */
  @Test
  void setPredefineDefaultFetchSizeOnPreparedStatement() throws Exception {
    final int waitFetchSize = 14;

    Properties properties = new Properties();
    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, waitFetchSize);

    connection = TestUtil.openDB(properties);

    final int resultFetchSize;
    // the original never closed this statement; try-with-resources fixes the leak
    try (CallableStatement statement = connection.prepareCall("{ call unnest(array[1, 2, 3, 5])}")) {
      resultFetchSize = statement.getFetchSize();
    }

    assertThat(
        "PGProperty.DEFAULT_ROW_FETCH_SIZE should be propagate to CallableStatement that was create from connection "
            + "on that define it parameter",
        resultFetchSize,
        CoreMatchers.equalTo(waitFetchSize));
  }

  /**
   * A negative defaultRowFetchSize must be rejected when the connection is opened,
   * not later when a statement is created.
   */
  @Test
  void notAvailableSpecifyNegativeFetchSize() throws Exception {
    assertThrows(SQLException.class, () -> {
      Properties properties = new Properties();
      PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, Integer.MIN_VALUE);

      connection = TestUtil.openDB(properties);

      fail(
          "On step initialize connection we know about not valid parameter PGProperty.DEFAULT_ROW_FETCH_SIZE they can't be negative, "
              + "so we should throw correspond exception about it rather than fall with exception in runtime for example during create statement");
    });
  }
}

// ---- next file: pgjdbc/src/test/java/org/postgresql/test/jdbc42/DatabaseMetaDataTest.java ----
/*
 * Copyright (c) 2007, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc42;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.postgresql.core.ServerVersion;
import org.postgresql.core.TypeInfo;
import org.postgresql.jdbc.PgConnection;
import org.postgresql.test.TestUtil;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;

/**
 * JDBC 4.2 DatabaseMetaData tests: column metadata for decimals, array/enum type
 * resolution across schemas, and OID <-> int conversions in TypeInfo.
 */
class DatabaseMetaDataTest {

  private Connection conn;

  @BeforeEach
  void setUp() throws Exception {
    conn = TestUtil.openDB();
    TestUtil.createSchema(conn, "test_schema");
    TestUtil.createEnumType(conn, "test_schema.test_enum", "'val'");
    TestUtil.createTable(conn, "test_schema.off_path_table", "var test_schema.test_enum[]");
    // "_test_enum" deliberately collides with the implicit array-type name of "test_enum"
    TestUtil.createEnumType(conn, "_test_enum", "'evil'");
    TestUtil.createEnumType(conn, "test_enum", "'other'");
    TestUtil.createTable(conn, "on_path_table", "a test_schema.test_enum[], b _test_enum, c test_enum[]");
    TestUtil.createTable(conn, "decimaltest", "a decimal, b decimal(10, 5)");
  }

  @AfterEach
  void tearDown() throws Exception {
    TestUtil.dropTable(conn, "decimaltest");
    TestUtil.dropTable(conn, "on_path_table");
    TestUtil.dropType(conn, "test_enum");
    TestUtil.dropType(conn, "_test_enum");
    TestUtil.dropSchema(conn, "test_schema");
    TestUtil.closeDB(conn);
  }

  /**
   * A decimal column without a declared scale must report DECIMAL_DIGITS as SQL NULL
   * (getInt returns 0 and wasNull() is true); a scaled decimal reports its real scale.
   */
  @Test
  void getColumnsForNullScale() throws Exception {
    DatabaseMetaData dbmd = conn.getMetaData();

    // try-with-resources: the original leaked this ResultSet on assertion failure
    try (ResultSet rs = dbmd.getColumns("%", "%", "decimaltest", "%")) {
      assertTrue(rs.next());
      assertEquals("a", rs.getString("COLUMN_NAME"));

      assertEquals(0, rs.getInt("DECIMAL_DIGITS"));
      assertTrue(rs.wasNull());

      assertTrue(rs.next());
      assertEquals("b", rs.getString("COLUMN_NAME"));
      assertEquals(5, rs.getInt("DECIMAL_DIGITS"));
      assertFalse(rs.wasNull());

      assertFalse(rs.next());
    }
  }

  /**
   * Array columns of types living in a schema that is not on search_path must still be
   * reported with the correct qualified type name and SQL type ARRAY.
   */
  @Test
  void getCorrectSQLTypeForOffPathTypes() throws Exception {
    DatabaseMetaData dbmd = conn.getMetaData();

    try (ResultSet rs = dbmd.getColumns("%", "%", "off_path_table", "%")) {
      assertTrue(rs.next());
      assertEquals("var", rs.getString("COLUMN_NAME"));
      assertEquals("\"test_schema\".\"_test_enum\"", rs.getString("TYPE_NAME"), "Detects correct off-path type name");
      assertEquals(Types.ARRAY, rs.getInt("DATA_TYPE"), "Detects correct SQL type for off-path types");

      assertFalse(rs.next());
    }
  }

  /**
   * When a user type's name shadows an implicit array-type name, column metadata must
   * still resolve each column to the right type.
   */
  @Test
  void getCorrectSQLTypeForShadowedTypes() throws Exception {
    DatabaseMetaData dbmd = conn.getMetaData();

    try (ResultSet rs = dbmd.getColumns("%", "%", "on_path_table", "%")) {
      assertTrue(rs.next());
      assertEquals("a", rs.getString("COLUMN_NAME"));
      assertEquals("\"test_schema\".\"_test_enum\"", rs.getString("TYPE_NAME"), "Correctly maps types from other schemas");
      assertEquals(Types.ARRAY, rs.getInt("DATA_TYPE"));

      assertTrue(rs.next());
      assertEquals("b", rs.getString("COLUMN_NAME"));
      // = TYPE _test_enum AS ENUM ('evil')
      assertEquals("_test_enum", rs.getString("TYPE_NAME"));
      assertEquals(Types.VARCHAR, rs.getInt("DATA_TYPE"));

      assertTrue(rs.next());
      assertEquals("c", rs.getString("COLUMN_NAME"));
      // = array of TYPE test_enum AS ENUM ('value'); PostgreSQL 16 changed the
      // disambiguation scheme for shadowed array type names
      if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v16)) {
        assertEquals("_test_enum_1", rs.getString("TYPE_NAME"), "Correctly detects shadowed array type name");
      } else {
        assertEquals("___test_enum", rs.getString("TYPE_NAME"), "Correctly detects shadowed array type name");
      }
      assertEquals(Types.ARRAY, rs.getInt("DATA_TYPE"), "Correctly detects type of shadowed name");

      assertFalse(rs.next());
    }
  }

  /**
   * Looking up an OID above Integer.MAX_VALUE must either succeed or fail with
   * PSQLState.NO_DATA — never with an arithmetic/overflow failure.
   */
  @Test
  void largeOidIsHandledCorrectly() throws SQLException {
    TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();

    try {
      ti.getSQLType((int) 4294967295L); // (presumably) unused OID 4294967295, which is 2**32 - 1
    } catch (PSQLException ex) {
      // fixed: JUnit's assertEquals takes (expected, actual) — the original had them swapped,
      // which produces a misleading failure message
      assertEquals(PSQLState.NO_DATA.getState(), ex.getSQLState());
    }
  }

  /**
   * Round-trips between the signed-int and unsigned-long representations of an OID,
   * including the boundary values 0, 2^31-1, 2^31 and 2^32-1.
   */
  @Test
  void oidConversion() throws SQLException {
    TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
    int oid = 0;
    long loid = 0;
    assertEquals(oid, ti.longOidToInt(loid));
    assertEquals(loid, ti.intOidToLong(oid));

    oid = Integer.MAX_VALUE;
    loid = Integer.MAX_VALUE;
    assertEquals(oid, ti.longOidToInt(loid));
    assertEquals(loid, ti.intOidToLong(oid));

    // 2^31 wraps to Integer.MIN_VALUE in the signed-int representation
    oid = Integer.MIN_VALUE;
    loid = 1L << 31;
    assertEquals(oid, ti.longOidToInt(loid));
    assertEquals(loid, ti.intOidToLong(oid));

    // 2^32 - 1 wraps to -1
    oid = -1;
    loid = 0xFFFFFFFFL;
    assertEquals(oid, ti.longOidToInt(loid));
    assertEquals(loid, ti.intOidToLong(oid));
  }

  @Test
  void oidConversionThrowsForNegativeLongValues() throws SQLException {
    assertThrows(PSQLException.class, () -> {
      TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
      ti.longOidToInt(-1);
    });
  }

  @Test
  void oidConversionThrowsForTooLargeLongValues() throws SQLException {
    assertThrows(PSQLException.class, () -> {
      TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
      ti.longOidToInt(1L << 32);
    });
  }
}

// ---- next file: pgjdbc/src/test/java/org/postgresql/test/jdbc42/GetObject310InfinityTests.java ----
/*
 * Copyright (c) 2017, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc42;

import org.postgresql.core.ServerVersion;
import org.postgresql.test.TestUtil;
import org.postgresql.test.jdbc2.BaseTest4;

import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.lang.reflect.Field;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

/**
 * Verifies that 'infinity'/'-infinity' date and timestamp values map to the MAX/MIN
 * constants of the requested java.time class via getObject(int, Class).
 */
@RunWith(Parameterized.class)
public class GetObject310InfinityTests extends BaseTest4 {
  private final String expression;
  private final String pgType;
  // raw Class replaced with Class<?> — erasure-identical, so the reflective
  // Parameterized constructor lookup is unaffected
  private final Class<?> klass;
  private final Object expectedValue;

  public GetObject310InfinityTests(BinaryMode binaryMode, String expression,
      String pgType, Class<?> klass, Object expectedValue) {
    setBinaryMode(binaryMode);
    this.expression = expression;
    this.pgType = pgType;
    this.klass = klass;
    this.expectedValue = expectedValue;
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    Assume.assumeTrue("PostgreSQL 8.3 does not support 'infinity' for 'date'",
        !"date".equals(pgType) || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
  }

  /**
   * Builds the cross product of binary mode x expression x pgType x target class,
   * skipping combinations the driver does not support, and derives the expected value
   * from the target class's MIN/MAX constant.
   */
  @Parameterized.Parameters(name = "binary = {0}, expr = {1}, pgType = {2}, klass = {3}")
  public static Iterable<Object[]> data() throws IllegalAccessException {
    Collection<Object[]> ids = new ArrayList<>();
    for (BinaryMode binaryMode : BinaryMode.values()) {
      for (String expression : Arrays.asList("-infinity", "infinity")) {
        for (String pgType : Arrays.asList("date", "timestamp",
            "timestamp with time zone")) {
          for (Class<?> klass : Arrays.<Class<?>>asList(LocalDate.class, LocalDateTime.class,
              OffsetDateTime.class)) {
            if (klass.equals(LocalDate.class) && !"date".equals(pgType)) {
              continue;
            }
            if (klass.equals(LocalDateTime.class) && !pgType.startsWith("timestamp")) {
              continue;
            }
            if (klass.equals(OffsetDateTime.class) && !pgType.startsWith("timestamp")) {
              continue;
            }
            if (klass.equals(LocalDateTime.class) && "timestamp with time zone".equals(pgType)) {
              // org.postgresql.util.PSQLException: Cannot convert the column of type TIMESTAMPTZ to requested type timestamp.
              continue;
            }
            Field field = null;
            try {
              // '-infinity' maps to the class's MIN constant, 'infinity' to MAX
              field = klass.getField(expression.startsWith("-") ? "MIN" : "MAX");
            } catch (NoSuchFieldException e) {
              throw new IllegalStateException("No min/max field in " + klass, e);
            }
            Object expected = field.get(null);
            ids.add(new Object[]{binaryMode, expression, pgType, klass, expected});
          }
        }
      }
    }
    return ids;
  }

  @Test
  public void test() throws SQLException {
    // try-with-resources: the original leaked both the statement and the result set
    try (PreparedStatement stmt = con.prepareStatement("select '" + expression + "'::" + pgType);
         ResultSet rs = stmt.executeQuery()) {
      rs.next();
      Object res = rs.getObject(1, klass);
      Assert.assertEquals(expectedValue, res);
    }
  }
}

// ---- next file: pgjdbc/src/test/java/org/postgresql/test/jdbc42/GetObject310Test.java ----
+ */ + +package org.postgresql.test.jdbc42; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.time.Duration; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.chrono.IsoChronology; +import java.time.chrono.IsoEra; +import java.time.temporal.Temporal; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.TimeZone; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +@RunWith(Parameterized.class) +public class GetObject310Test extends BaseTest4 { + + private static final TimeZone saveTZ = TimeZone.getDefault(); + + private static final ZoneOffset UTC = ZoneOffset.UTC; // +0000 always + private static final ZoneOffset GMT03 = ZoneOffset.of("+03:00"); // +0300 always + private static final ZoneOffset GMT05 = ZoneOffset.of("-05:00"); // -0500 always + private static final ZoneOffset GMT13 = ZoneOffset.of("+13:00"); // +1300 always + + private static final IsoChronology ISO = IsoChronology.INSTANCE; + + public GetObject310Test(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public 
static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "table1", "timestamp_without_time_zone_column timestamp without time zone," + + "timestamp_with_time_zone_column timestamp with time zone," + + "date_column date," + + "time_without_time_zone_column time without time zone," + + "time_with_time_zone_column time with time zone" + ); + } + + @Override + public void tearDown() throws SQLException { + TimeZone.setDefault(saveTZ); + TestUtil.dropTable(con, "table1"); + super.tearDown(); + } + + /** + * Test the behavior getObject for date columns. + */ + @Test + public void testGetLocalDate() throws SQLException { + assumeTrue(TestUtil.haveIntegerDateTimes(con)); + + List zoneIdsToTest = new ArrayList<>(); + zoneIdsToTest.add("Africa/Casablanca"); // It is something like GMT+0..GMT+1 + zoneIdsToTest.add("America/Adak"); // It is something like GMT-10..GMT-9 + zoneIdsToTest.add("Atlantic/Azores"); // It is something like GMT-1..GMT+0 + zoneIdsToTest.add("Europe/Berlin"); // It is something like GMT+1..GMT+2 + zoneIdsToTest.add("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s + zoneIdsToTest.add("Pacific/Apia"); // It is something like GMT+13..GMT+14 + zoneIdsToTest.add("Pacific/Niue"); // It is something like GMT-11..GMT-11 + for (int i = -12; i <= 13; i++) { + zoneIdsToTest.add(String.format("GMT%+02d", i)); + } + + List datesToTest = Arrays.asList("1998-01-08", + // Some random dates + "1981-12-11", "2022-02-22", + "2015-09-03", "2015-06-30", + "1997-06-30", "1997-07-01", "2012-06-30", "2012-07-01", + "2015-06-30", "2015-07-01", "2005-12-31", "2006-01-01", + "2008-12-31", "2009-01-01", "2015-06-30", "2015-07-31", + "2015-07-31", + + // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00 + "2003-03-25", 
"2000-03-26", "2000-03-27", + + // This is a pre-1970 date, so check if it is rounded properly + "1950-07-20", + + // Ensure the calendar is proleptic + "1582-01-01", "1582-12-31", + "1582-09-30", "1582-10-16", + + // https://github.com/pgjdbc/pgjdbc/issues/2221 + "0001-01-01", + "1000-01-01", "1000-06-01", "0999-12-31", + + // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00 + "2000-10-28", "2000-10-29", "2000-10-30"); + + for (String zoneId : zoneIdsToTest) { + ZoneId zone = ZoneId.of(zoneId); + for (String date : datesToTest) { + localDate(zone, date); + } + } + } + + public void localDate(ZoneId zoneId, String date) throws SQLException { + TimeZone.setDefault(TimeZone.getTimeZone(zoneId)); + try (Statement stmt = con.createStatement() ) { + stmt.executeUpdate(TestUtil.insertSQL("table1", "date_column", "DATE '" + date + "'")); + + try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "date_column")) ) { + assertTrue(rs.next()); + LocalDate localDate = LocalDate.parse(date); + assertEquals(localDate, rs.getObject("date_column", LocalDate.class)); + assertEquals(localDate, rs.getObject(1, LocalDate.class)); + } + stmt.executeUpdate("DELETE FROM table1"); + } + } + + /** + * Test the behavior getObject for timetz columns. 
+ */ + @Test + public void testGetOffsetTime() throws SQLException { + List timesToTest = Arrays.asList("00:00:00+00:00", "00:00:00+00:30", + "01:02:03.333444+02:00", "23:59:59.999999-12:00", + "11:22:59.4711-08:00", "23:59:59.0-12:00", + "11:22:59.4711+15:59:12", "23:59:59.0-15:59:12" + ); + + for (String time : timesToTest) { + try (Statement stmt = con.createStatement() ) { + stmt.executeUpdate(TestUtil.insertSQL("table1", "time_with_time_zone_column", "time with time zone '" + time + "'")); + + try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_with_time_zone_column")) ) { + assertTrue(rs.next()); + OffsetTime offsetTime = OffsetTime.parse(time); + assertEquals(offsetTime, rs.getObject("time_with_time_zone_column", OffsetTime.class)); + assertEquals(offsetTime, rs.getObject(1, OffsetTime.class)); + + //Also test that we get the correct values when retrieving the data as OffsetDateTime objects on EPOCH (required by JDBC) + OffsetDateTime offsetDT = offsetTime.atDate(LocalDate.of(1970, 1, 1)); + assertEquals(offsetDT, rs.getObject("time_with_time_zone_column", OffsetDateTime.class)); + assertEquals(offsetDT, rs.getObject(1, OffsetDateTime.class)); + + assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDate.class); + assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalTime.class); + assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDateTime.class); + } + stmt.executeUpdate("DELETE FROM table1"); + } + } + } + + /** + * Test the behavior getObject for time columns. 
+ */
+  @Test
+  public void testGetLocalTime() throws SQLException {
+    try (Statement stmt = con.createStatement() ) { // round-trips TIME WITHOUT TIME ZONE back as java.time.LocalTime
+      stmt.executeUpdate(TestUtil.insertSQL("table1", "time_without_time_zone_column", "TIME '04:05:06.123456'"));
+
+      try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_without_time_zone_column"))) {
+        assertTrue(rs.next());
+        LocalTime localTime = LocalTime.of(4, 5, 6, 123456000); // 123456 micros in the literal == 123456000 nanos
+        assertEquals(localTime, rs.getObject("time_without_time_zone_column", LocalTime.class)); // by column name
+        assertEquals(localTime, rs.getObject(1, LocalTime.class)); // and by column index
+
+        assertDataTypeMismatch(rs, "time_without_time_zone_column", OffsetTime.class); // zone-less column must reject zoned targets
+        assertDataTypeMismatch(rs, "time_without_time_zone_column", OffsetDateTime.class);
+        assertDataTypeMismatch(rs, "time_without_time_zone_column", LocalDate.class); // and date-bearing targets
+        assertDataTypeMismatch(rs, "time_without_time_zone_column", LocalDateTime.class);
+      }
+      stmt.executeUpdate("DELETE FROM table1"); // leave table1 empty for the next test method
+    }
+  }
+
+  /**
+   * Test the behavior getObject for time columns with null.
+   */
+  @Test
+  public void testGetLocalTimeNull() throws SQLException {
+    try (Statement stmt = con.createStatement() ) { // SQL NULL must surface as a Java null for the requested type
+      stmt.executeUpdate(TestUtil.insertSQL("table1", "time_without_time_zone_column", "NULL"));
+
+      try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_without_time_zone_column"))) {
+        assertTrue(rs.next());
+        assertNull(rs.getObject("time_without_time_zone_column", LocalTime.class));
+        assertNull(rs.getObject(1, LocalTime.class));
+      }
+      stmt.executeUpdate("DELETE FROM table1");
+    }
+  }
+
+  /**
+   * Test the behavior getObject for time columns with invalid type.
+ */ + @Test + public void testGetLocalTimeInvalidType() throws SQLException { + try (Statement stmt = con.createStatement() ) { + stmt.executeUpdate(TestUtil.insertSQL("table1", "time_with_time_zone_column", "TIME '04:05:06.123456-08:00'")); + + try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_with_time_zone_column"))) { + assertTrue(rs.next()); + assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalTime.class); + assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDateTime.class); + assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDate.class); + } + stmt.executeUpdate("DELETE FROM table1"); + } + } + + /** + * Test the behavior getObject for timestamp columns. + */ + @Test + public void testGetLocalDateTime() throws SQLException { + assumeTrue(TestUtil.haveIntegerDateTimes(con)); + + List zoneIdsToTest = new ArrayList<>(); + zoneIdsToTest.add("Africa/Casablanca"); // It is something like GMT+0..GMT+1 + zoneIdsToTest.add("America/Adak"); // It is something like GMT-10..GMT-9 + zoneIdsToTest.add("Atlantic/Azores"); // It is something like GMT-1..GMT+0 + zoneIdsToTest.add("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s + zoneIdsToTest.add("Pacific/Apia"); // It is something like GMT+13..GMT+14 + zoneIdsToTest.add("Pacific/Niue"); // It is something like GMT-11..GMT-11 + for (int i = -12; i <= 13; i++) { + zoneIdsToTest.add(String.format("GMT%+02d", i)); + } + + List datesToTest = Arrays.asList("2015-09-03T12:00:00", "2015-06-30T23:59:58", + "1997-06-30T23:59:59", "1997-07-01T00:00:00", "2012-06-30T23:59:59", "2012-07-01T00:00:00", + "2015-06-30T23:59:59", "2015-07-01T00:00:00", "2005-12-31T23:59:59", "2006-01-01T00:00:00", + "2008-12-31T23:59:59", "2009-01-01T00:00:00", /* "2015-06-30T23:59:60", */ "2015-07-31T00:00:00", + "2015-07-31T00:00:01", "2015-07-31T00:00:00.000001", + + // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00 + "2000-03-26T01:59:59", 
"2000-03-26T02:00:00", "2000-03-26T02:00:01", "2000-03-26T02:59:59", + "2000-03-26T03:00:00", "2000-03-26T03:00:01", "2000-03-26T03:59:59", "2000-03-26T04:00:00", + "2000-03-26T04:00:01", "2000-03-26T04:00:00.000001", + + // This is a pre-1970 date, so check if it is rounded properly + "1950-07-20T02:00:00", + + // Ensure the calendar is proleptic + "1582-09-30T00:00:00", "1582-10-16T00:00:00", + + // https://github.com/pgjdbc/pgjdbc/issues/2221 + "0001-01-01T00:00:00", + "1000-01-01T00:00:00", + "1000-01-01T23:59:59", "1000-06-01T01:00:00", "0999-12-31T23:59:59", + + // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00 + "2000-10-29T01:59:59", "2000-10-29T02:00:00", "2000-10-29T02:00:01", "2000-10-29T02:59:59", + "2000-10-29T03:00:00", "2000-10-29T03:00:01", "2000-10-29T03:59:59", "2000-10-29T04:00:00", + "2000-10-29T04:00:01", "2000-10-29T04:00:00.000001"); + + for (String zoneId : zoneIdsToTest) { + ZoneId zone = ZoneId.of(zoneId); + for (String date : datesToTest) { + localTimestamps(zone, date); + } + } + } + + public void localTimestamps(ZoneId zoneId, String timestamp) throws SQLException { + TimeZone.setDefault(TimeZone.getTimeZone(zoneId)); + try (Statement stmt = con.createStatement()) { + stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_without_time_zone_column", "TIMESTAMP '" + timestamp + "'")); + + try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_without_time_zone_column"))) { + assertTrue(rs.next()); + LocalDateTime localDateTime = LocalDateTime.parse(timestamp); + assertEquals(localDateTime, rs.getObject("timestamp_without_time_zone_column", LocalDateTime.class)); + assertEquals(localDateTime, rs.getObject(1, LocalDateTime.class)); + + //Also test that we get the correct values when retrieving the data as LocalDate objects + assertEquals(localDateTime.toLocalDate(), rs.getObject("timestamp_without_time_zone_column", LocalDate.class)); + assertEquals(localDateTime.toLocalDate(), 
rs.getObject(1, LocalDate.class)); + + assertDataTypeMismatch(rs, "timestamp_without_time_zone_column", OffsetTime.class); + // TODO: this should also not work, but that's an open discussion (see https://github.com/pgjdbc/pgjdbc/pull/2467): + // assertDataTypeMismatch(rs, "timestamp_without_time_zone_column", OffsetDateTime.class); + } + stmt.executeUpdate("DELETE FROM table1"); + } + } + + /** + * Test the behavior getObject for timestamp with time zone columns. + */ + @Test + public void testGetTimestampWithTimeZone() throws SQLException { + runGetOffsetDateTime(UTC); + runGetOffsetDateTime(GMT03); + runGetOffsetDateTime(GMT05); + runGetOffsetDateTime(GMT13); + } + + private void runGetOffsetDateTime(ZoneOffset offset) throws SQLException { + try (Statement stmt = con.createStatement()) { + stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_with_time_zone_column", "TIMESTAMP WITH TIME ZONE '2004-10-19 10:23:54.123456" + offset.toString() + "'")); + + try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_with_time_zone_column"))) { + assertTrue(rs.next()); + LocalDateTime localDateTime = LocalDateTime.of(2004, 10, 19, 10, 23, 54, 123456000); + + OffsetDateTime offsetDateTime = localDateTime.atOffset(offset).withOffsetSameInstant(ZoneOffset.UTC); + assertEquals(offsetDateTime, rs.getObject("timestamp_with_time_zone_column", OffsetDateTime.class)); + assertEquals(offsetDateTime, rs.getObject(1, OffsetDateTime.class)); + + assertDataTypeMismatch(rs, "timestamp_with_time_zone_column", LocalTime.class); + assertDataTypeMismatch(rs, "timestamp_with_time_zone_column", LocalDateTime.class); + } + stmt.executeUpdate("DELETE FROM table1"); + } + } + + @Test + public void testBcDate() throws SQLException { + try (Statement stmt = con.createStatement(); ResultSet rs = stmt.executeQuery("SELECT '1582-09-30 BC'::date")) { + assertTrue(rs.next()); + LocalDate expected = ISO.date(IsoEra.BCE, 1582, 9, 30); + LocalDate actual = rs.getObject(1, 
LocalDate.class); + assertEquals(expected, actual); + assertFalse(rs.next()); + } + } + + @Test + public void testBcTimestamp() throws SQLException { + try (Statement stmt = con.createStatement(); ResultSet rs = stmt.executeQuery("SELECT '1582-09-30 12:34:56 BC'::timestamp")) { + assertTrue(rs.next()); + LocalDateTime expected = ISO.date(IsoEra.BCE, 1582, 9, 30).atTime(12, 34, 56); + LocalDateTime actual = rs.getObject(1, LocalDateTime.class); + assertEquals(expected, actual); + assertFalse(rs.next()); + } + } + + @Test + public void testBcTimestamptz() throws SQLException { + try (Statement stmt = con.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT '1582-09-30 12:34:56Z BC'::timestamp")) { + assertTrue(rs.next()); + OffsetDateTime expected = ISO.date(IsoEra.BCE, 1582, 9, 30).atTime(OffsetTime.of(12, 34, 56, 0, UTC)); + OffsetDateTime actual = rs.getObject(1, OffsetDateTime.class); + assertEquals(expected, actual); + assertFalse(rs.next()); + } + } + + @Test + public void testProlepticCalendarTimestamp() throws SQLException { + // date time ranges and CTEs are both new with 8.4 + assumeMinimumServerVersion(ServerVersion.v8_4); + LocalDateTime start = LocalDate.of(1582, 9, 30).atStartOfDay(); + LocalDateTime end = LocalDate.of(1582, 10, 16).atStartOfDay(); + long numberOfDays = Duration.between(start, end).toDays() + 1L; + List range = Stream.iterate(start, x -> x.plusDays(1)) + .limit(numberOfDays) + .collect(Collectors.toList()); + + runProlepticTests(LocalDateTime.class, "'1582-09-30 00:00'::timestamp, '1582-10-16 00:00'::timestamp", range); + } + + @Test + public void testProlepticCalendarTimestamptz() throws SQLException { + // date time ranges and CTEs are both new with 8.4 + assumeMinimumServerVersion(ServerVersion.v8_4); + OffsetDateTime start = LocalDate.of(1582, 9, 30).atStartOfDay().atOffset(UTC); + OffsetDateTime end = LocalDate.of(1582, 10, 16).atStartOfDay().atOffset(UTC); + long numberOfDays = Duration.between(start, end).toDays() + 1L; 
+ List range = Stream.iterate(start, x -> x.plusDays(1)) + .limit(numberOfDays) + .collect(Collectors.toList()); + + runProlepticTests(OffsetDateTime.class, "'1582-09-30 00:00:00 Z'::timestamptz, '1582-10-16 00:00:00 Z'::timestamptz", range); + } + + private void runProlepticTests(Class clazz, String selectRange, List range) throws SQLException { + List temporals = new ArrayList<>(range.size()); + + try (PreparedStatement stmt = con.prepareStatement("SELECT * FROM generate_series(" + selectRange + ", '1 day');"); + ResultSet rs = stmt.executeQuery()) { + while (rs.next()) { + T temporal = rs.getObject(1, clazz); + temporals.add(temporal); + } + assertEquals(range, temporals); + } + } + + /** checks if getObject with given column name or index 1 throws an exception with DATA_TYPE_MISMATCH as SQLState */ + private static void assertDataTypeMismatch(ResultSet rs, String columnName, Class typeToGet) { + PSQLException ex = assertThrows(PSQLException.class, () -> rs.getObject(columnName, typeToGet)); + assertEquals(PSQLState.DATA_TYPE_MISMATCH.getState(), ex.getSQLState()); + + ex = assertThrows(PSQLException.class, () -> rs.getObject(1, typeToGet)); + assertEquals(PSQLState.DATA_TYPE_MISMATCH.getState(), ex.getSQLState()); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42CallableStatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42CallableStatementTest.java new file mode 100644 index 0000000..6a55c75 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42CallableStatementTest.java @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc42; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Test; + +import java.sql.CallableStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * Tests for JDBC 4.2 features in {@link org.postgresql.jdbc.PgCallableStatement}. + */ +public class Jdbc42CallableStatementTest extends BaseTest4 { + + @Override + public void setUp() throws Exception { + super.setUp(); + + try (Statement stmt = con.createStatement()) { + stmt.execute( + "CREATE OR REPLACE FUNCTION testspg__getResultSetWithoutArg() " + + "RETURNS refcursor AS ' " + + "declare ref refcursor;" + + "begin OPEN ref FOR SELECT 1; RETURN ref; end; ' LANGUAGE plpgsql;"); + } + } + + final String func = "{ ? 
= call "; + final String pkgName = "testspg__"; + + @Override + public void tearDown() throws SQLException { + try (Statement stmt = con.createStatement()) { + stmt.execute("drop FUNCTION testspg__getResultSetWithoutArg ();"); + } + super.tearDown(); + } + + @Test + public void testGetResultSetWithoutArg() throws SQLException { + assumeCallableStatementsSupported(); + try (CallableStatement call = con.prepareCall(func + pkgName + "getResultSetWithoutArg () }")) { + con.setAutoCommit(false); // ref cursors only work if auto commit is off + call.registerOutParameter(1, Types.REF_CURSOR); + call.execute(); + List values = new ArrayList<>(1); + try (ResultSet rs = call.getObject(1, ResultSet.class)) { + while (rs.next()) { + values.add(rs.getInt(1)); + } + } + assertEquals(Collections.singletonList(1), values); + } finally { + con.setAutoCommit(true); + } + } + + @Test + public void testGetResultSetWithoutArgUnsupportedConversion() throws SQLException { + assumeCallableStatementsSupported(); + try (CallableStatement call = con.prepareCall(func + pkgName + "getResultSetWithoutArg () }")) { + con.setAutoCommit(false); // ref cursors only work if auto commit is off + call.registerOutParameter(1, Types.REF_CURSOR); + call.execute(); + try { + // this should never be allowed even if more types will be implemented in the future + call.getObject(1, ResultSetMetaData.class); + fail("conversion from ResultSet to ResultSetMetaData should not be supported"); + } catch (SQLException e) { + // should reach + } + } finally { + con.setAutoCommit(true); + } + } + + @Test + public void testRegisterOutParameter() throws SQLException { + + CallableStatement cs = null; + + cs = con.prepareCall("{ ? 
= call xxxx.yyyy (?,?,?,?)}"); + cs.registerOutParameter(1, Types.REF_CURSOR); + + cs.setLong(2, 1000L); + cs.setLong(3, 500); + cs.setLong(4, 3000); + cs.setNull(5, Types.NUMERIC); + } + + @Test + public void testRegisterInoutParameter() throws SQLException { + + CallableStatement cs = null; + + cs = con.prepareCall("{call xxxx.yyyy (?)}"); + cs.setNull(1, Types.REF_CURSOR); + cs.registerOutParameter(1, Types.REF_CURSOR); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42TestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42TestSuite.java new file mode 100644 index 0000000..6b471c3 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42TestSuite.java @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc42; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; +import org.junit.runners.Suite.SuiteClasses; + +@RunWith(Suite.class) +@SuiteClasses({ + AdaptiveFetchSizeTest.class, + CustomizeDefaultFetchSizeTest.class, + GetObject310InfinityTests.class, + GetObject310Test.class, + Jdbc42CallableStatementTest.class, + LargeCountJdbc42Test.class, + PreparedStatementTest.class, + SetObject310Test.class, + SimpleJdbc42Test.class, +}) +public class Jdbc42TestSuite { + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/LargeCountJdbc42Test.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/LargeCountJdbc42Test.java new file mode 100644 index 0000000..fef3362 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/LargeCountJdbc42Test.java @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc42; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; +import org.postgresql.util.PSQLState; + +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Properties; + +/** + * Test methods with small counts that return long and failure scenarios. This have two really big + * and slow test, they are ignored for CI but can be tested locally to check that it works. + */ +@RunWith(Parameterized.class) +public class LargeCountJdbc42Test extends BaseTest4 { + + private final boolean insertRewrite; + + public LargeCountJdbc42Test(BinaryMode binaryMode, boolean insertRewrite) { + this.insertRewrite = insertRewrite; + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}, insertRewrite = {1}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BinaryMode binaryMode : BinaryMode.values()) { + for (boolean insertRewrite : new boolean[]{false, true}) { + ids.add(new Object[]{binaryMode, insertRewrite}); + } + } + return ids; + } + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + PGProperty.REWRITE_BATCHED_INSERTS.set(props, insertRewrite); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createUnloggedTable(con, "largetable", "a boolean"); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "largetable"); + TestUtil.closeDB(con); + } + + // ********************* EXECUTE LARGE UPDATES ********************* + // FINEST: simple execute, handler=org.postgresql.jdbc.PgStatement$StatementResultHandler@38cccef, maxRows=0, 
fetchSize=0, flags=21 + // FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={20,20}) + // FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<2147483757>) + // FINEST: FE=> Describe(portal=null) + // FINEST: FE=> Execute(portal=null,limit=1) + // FINEST: FE=> Sync + // FINEST: <=BE ParseComplete [null] + // FINEST: <=BE BindComplete [unnamed] + // FINEST: <=BE NoData + // FINEST: <=BE CommandStatus(INSERT 0 2147483757) + // FINEST: <=BE ReadyForQuery(I) + // FINEST: simple execute, handler=org.postgresql.jdbc.PgStatement$StatementResultHandler@5679c6c6, maxRows=0, fetchSize=0, flags=21 + // FINEST: FE=> Parse(stmt=null,query="delete from largetable",oids={}) + // FINEST: FE=> Bind(stmt=null,portal=null) + // FINEST: FE=> Describe(portal=null) + // FINEST: FE=> Execute(portal=null,limit=1) + // FINEST: FE=> Sync + // FINEST: <=BE ParseComplete [null] + // FINEST: <=BE BindComplete [unnamed] + // FINEST: <=BE NoData + // FINEST: <=BE CommandStatus(DELETE 2147483757) + + /* + * Test PreparedStatement.executeLargeUpdate() and Statement.executeLargeUpdate(String sql) + */ + @Ignore("This is the big and SLOW test") + @Test + public void testExecuteLargeUpdateBIG() throws Exception { + long expected = Integer.MAX_VALUE + 110L; + con.setAutoCommit(false); + // Test PreparedStatement.executeLargeUpdate() + try (PreparedStatement stmt = con.prepareStatement("insert into largetable " + + "select true from generate_series(?, ?)")) { + stmt.setLong(1, 1); + stmt.setLong(2, 2_147_483_757L); // Integer.MAX_VALUE + 110L + long count = stmt.executeLargeUpdate(); + Assert.assertEquals("PreparedStatement 110 rows more than Integer.MAX_VALUE", expected, count); + } + // Test Statement.executeLargeUpdate(String sql) + try (Statement stmt = con.createStatement()) { + long count = stmt.executeLargeUpdate("delete from largetable"); + Assert.assertEquals("Statement 110 rows more than Integer.MAX_VALUE", expected, count); + } + 
con.setAutoCommit(true); + } + + /* + * Test Statement.executeLargeUpdate(String sql) + */ + @Test + public void testExecuteLargeUpdateStatementSMALL() throws Exception { + try (Statement stmt = con.createStatement()) { + long count = stmt.executeLargeUpdate("insert into largetable " + + "select true from generate_series(1, 1010)"); + long expected = 1010L; + Assert.assertEquals("Small long return 1010L", expected, count); + } + } + + /* + * Test PreparedStatement.executeLargeUpdate(); + */ + @Test + public void testExecuteLargeUpdatePreparedStatementSMALL() throws Exception { + try (PreparedStatement stmt = con.prepareStatement("insert into largetable " + + "select true from generate_series(?, ?)")) { + stmt.setLong(1, 1); + stmt.setLong(2, 1010L); + long count = stmt.executeLargeUpdate(); + long expected = 1010L; + Assert.assertEquals("Small long return 1010L", expected, count); + } + } + + /* + * Test Statement.getLargeUpdateCount(); + */ + @Test + public void testGetLargeUpdateCountStatementSMALL() throws Exception { + try (Statement stmt = con.createStatement()) { + boolean isResult = stmt.execute("insert into largetable " + + "select true from generate_series(1, 1010)"); + Assert.assertFalse("False if it is an update count or there are no results", isResult); + long count = stmt.getLargeUpdateCount(); + long expected = 1010L; + Assert.assertEquals("Small long return 1010L", expected, count); + } + } + + /* + * Test PreparedStatement.getLargeUpdateCount(); + */ + @Test + public void testGetLargeUpdateCountPreparedStatementSMALL() throws Exception { + try (PreparedStatement stmt = con.prepareStatement("insert into largetable " + + "select true from generate_series(?, ?)")) { + stmt.setInt(1, 1); + stmt.setInt(2, 1010); + boolean isResult = stmt.execute(); + Assert.assertFalse("False if it is an update count or there are no results", isResult); + long count = stmt.getLargeUpdateCount(); + long expected = 1010L; + Assert.assertEquals("Small long return 1010L", 
expected, count); + } + } + + /* + * Test fail SELECT Statement.executeLargeUpdate(String sql) + */ + @Test + public void testExecuteLargeUpdateStatementSELECT() throws Exception { + try (Statement stmt = con.createStatement()) { + long count = stmt.executeLargeUpdate("select true from generate_series(1, 5)"); + Assert.fail("A result was returned when none was expected. Returned: " + count); + } catch (SQLException e) { + Assert.assertEquals(PSQLState.TOO_MANY_RESULTS.getState(), e.getSQLState()); + } + } + + /* + * Test fail SELECT PreparedStatement.executeLargeUpdate(); + */ + @Test + public void testExecuteLargeUpdatePreparedStatementSELECT() throws Exception { + try (PreparedStatement stmt = con.prepareStatement("select true from generate_series(?, ?)")) { + stmt.setLong(1, 1); + stmt.setLong(2, 5L); + long count = stmt.executeLargeUpdate(); + Assert.fail("A result was returned when none was expected. Returned: " + count); + } catch (SQLException e) { + Assert.assertEquals(PSQLState.TOO_MANY_RESULTS.getState(), e.getSQLState()); + } + } + + /* + * Test Statement.getLargeUpdateCount(); + */ + @Test + public void testGetLargeUpdateCountStatementSELECT() throws Exception { + try (Statement stmt = con.createStatement()) { + boolean isResult = stmt.execute("select true from generate_series(1, 5)"); + Assert.assertTrue("True since this is a SELECT", isResult); + long count = stmt.getLargeUpdateCount(); + long expected = -1L; + Assert.assertEquals("-1 if the current result is a ResultSet object", expected, count); + } + } + + /* + * Test PreparedStatement.getLargeUpdateCount(); + */ + @Test + public void testGetLargeUpdateCountPreparedStatementSELECT() throws Exception { + try (PreparedStatement stmt = con.prepareStatement("select true from generate_series(?, ?)")) { + stmt.setLong(1, 1); + stmt.setLong(2, 5L); + boolean isResult = stmt.execute(); + Assert.assertTrue("True since this is a SELECT", isResult); + long count = stmt.getLargeUpdateCount(); + long expected = 
-1L; + Assert.assertEquals("-1 if the current result is a ResultSet object", expected, count); + } + } + + // ********************* BATCH LARGE UPDATES ********************* + // FINEST: batch execute 3 queries, handler=org.postgresql.jdbc.BatchResultHandler@3d04a311, maxRows=0, fetchSize=0, flags=21 + // FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={23,23}) + // FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<200>) + // FINEST: FE=> Describe(portal=null) + // FINEST: FE=> Execute(portal=null,limit=1) + // FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={23,20}) + // FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<3000000000>) + // FINEST: FE=> Describe(portal=null) + // FINEST: FE=> Execute(portal=null,limit=1) + // FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={23,23}) + // FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<50>) + // FINEST: FE=> Describe(portal=null) + // FINEST: FE=> Execute(portal=null,limit=1) + // FINEST: FE=> Sync + // FINEST: <=BE ParseComplete [null] + // FINEST: <=BE BindComplete [unnamed] + // FINEST: <=BE NoData + // FINEST: <=BE CommandStatus(INSERT 0 200) + // FINEST: <=BE ParseComplete [null] + // FINEST: <=BE BindComplete [unnamed] + // FINEST: <=BE NoData + // FINEST: <=BE CommandStatus(INSERT 0 3000000000) + // FINEST: <=BE ParseComplete [null] + // FINEST: <=BE BindComplete [unnamed] + // FINEST: <=BE NoData + // FINEST: <=BE CommandStatus(INSERT 0 50) + + /* + * Test simple PreparedStatement.executeLargeBatch(); + */ + @Ignore("This is the big and SLOW test") + @Test + public void testExecuteLargeBatchStatementBIG() throws Exception { + con.setAutoCommit(false); + try (PreparedStatement stmt = con.prepareStatement("insert into largetable " + + "select true from generate_series(?, ?)")) { + stmt.setInt(1, 1); + stmt.setInt(2, 200); + 
stmt.addBatch(); // statement one + stmt.setInt(1, 1); + stmt.setLong(2, 3_000_000_000L); + stmt.addBatch(); // statement two + stmt.setInt(1, 1); + stmt.setInt(2, 50); + stmt.addBatch(); // statement three + long[] actual = stmt.executeLargeBatch(); + Assert.assertArrayEquals("Large rows inserted via 3 batch", new long[]{200L, 3_000_000_000L, 50L}, actual); + } + con.setAutoCommit(true); + } + + /* + * Test simple Statement.executeLargeBatch(); + */ + @Test + public void testExecuteLargeBatchStatementSMALL() throws Exception { + try (Statement stmt = con.createStatement()) { + stmt.addBatch("insert into largetable(a) select true"); // statement one + stmt.addBatch("insert into largetable select false"); // statement two + stmt.addBatch("insert into largetable(a) values(true)"); // statement three + stmt.addBatch("insert into largetable values(false)"); // statement four + long[] actual = stmt.executeLargeBatch(); + Assert.assertArrayEquals("Rows inserted via 4 batch", new long[]{1L, 1L, 1L, 1L}, actual); + } + } + + /* + * Test simple PreparedStatement.executeLargeBatch(); + */ + @Test + public void testExecuteLargePreparedStatementStatementSMALL() throws Exception { + try (PreparedStatement stmt = con.prepareStatement("insert into largetable " + + "select true from generate_series(?, ?)")) { + stmt.setInt(1, 1); + stmt.setInt(2, 200); + stmt.addBatch(); // statement one + stmt.setInt(1, 1); + stmt.setInt(2, 100); + stmt.addBatch(); // statement two + stmt.setInt(1, 1); + stmt.setInt(2, 50); + stmt.addBatch(); // statement three + stmt.addBatch(); // statement four, same parms as three + long[] actual = stmt.executeLargeBatch(); + Assert.assertArrayEquals("Rows inserted via 4 batch", new long[]{200L, 100L, 50L, 50L}, actual); + } + } + + /* + * Test loop PreparedStatement.executeLargeBatch(); + */ + @Test + public void testExecuteLargePreparedStatementStatementLoopSMALL() throws Exception { + long[] loop = {200, 100, 50, 300, 20, 60, 2, 4, 8, 16, 32, 64, 128, 256, 
512, 1024, 2048, 4096}; + try (PreparedStatement stmt = con.prepareStatement("insert into largetable " + + "select true from generate_series(?, ?)")) { + for (long i : loop) { + stmt.setInt(1, 1); + stmt.setLong(2, i); + stmt.addBatch(); + } + long[] actual = stmt.executeLargeBatch(); + Assert.assertArrayEquals("Rows inserted via batch", loop, actual); + } + } + + /* + * Test loop PreparedStatement.executeLargeBatch(); + */ + @Test + public void testExecuteLargeBatchValuesInsertSMALL() throws Exception { + boolean[] loop = {true, false, true, false, false, false, true, true, true, true, false, true}; + try (PreparedStatement stmt = con.prepareStatement("insert into largetable values(?)")) { + for (boolean i : loop) { + stmt.setBoolean(1, i); + stmt.addBatch(); + } + long[] actual = stmt.executeLargeBatch(); + Assert.assertEquals("Rows inserted via batch", loop.length, actual.length); + for (long i : actual) { + if (insertRewrite) { + Assert.assertEquals(Statement.SUCCESS_NO_INFO, i); + } else { + Assert.assertEquals(1, i); + } + } + } + } + + /* + * Test null PreparedStatement.executeLargeBatch(); + */ + @Test + public void testNullExecuteLargeBatchStatement() throws Exception { + try (Statement stmt = con.createStatement()) { + long[] actual = stmt.executeLargeBatch(); + Assert.assertArrayEquals("addBatch() not called batchStatements is null", new long[0], actual); + } + } + + /* + * Test empty PreparedStatement.executeLargeBatch(); + */ + @Test + public void testEmptyExecuteLargeBatchStatement() throws Exception { + try (Statement stmt = con.createStatement()) { + stmt.addBatch(""); + stmt.clearBatch(); + long[] actual = stmt.executeLargeBatch(); + Assert.assertArrayEquals("clearBatch() called, batchStatements.isEmpty()", new long[0], actual); + } + } + + /* + * Test null PreparedStatement.executeLargeBatch(); + */ + @Test + public void testNullExecuteLargeBatchPreparedStatement() throws Exception { + try (PreparedStatement stmt = con.prepareStatement("")) { + 
long[] actual = stmt.executeLargeBatch(); + Assert.assertArrayEquals("addBatch() not called batchStatements is null", new long[0], actual); + } + } + + /* + * Test empty PreparedStatement.executeLargeBatch(); + */ + @Test + public void testEmptyExecuteLargeBatchPreparedStatement() throws Exception { + try (PreparedStatement stmt = con.prepareStatement("")) { + stmt.addBatch(); + stmt.clearBatch(); + long[] actual = stmt.executeLargeBatch(); + Assert.assertArrayEquals("clearBatch() called, batchStatements.isEmpty()", new long[0], actual); + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatement64KBindsTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatement64KBindsTest.java new file mode 100644 index 0000000..8f3fb3a --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatement64KBindsTest.java @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2022, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc42; + +import org.postgresql.PGProperty; +import org.postgresql.jdbc.PreferQueryMode; +import org.postgresql.test.jdbc2.BaseTest4; +import org.postgresql.util.PSQLState; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.Array; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Properties; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +@RunWith(Parameterized.class) +public class PreparedStatement64KBindsTest extends BaseTest4 { + private final int numBinds; + private final PreferQueryMode preferQueryMode; + private final BinaryMode binaryMode; + + public PreparedStatement64KBindsTest(int numBinds, PreferQueryMode preferQueryMode, + BinaryMode binaryMode) { + this.numBinds = numBinds; + this.preferQueryMode = preferQueryMode; + this.binaryMode = binaryMode; + } + + @Parameterized.Parameters(name = "numBinds={0}, preferQueryMode={1}, binaryMode={2}}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (PreferQueryMode preferQueryMode : PreferQueryMode.values()) { + for (BinaryMode binaryMode : BinaryMode.values()) { + for (int numBinds : new int[]{32766, 32767, 32768, 65534, 65535, 65536}) { + ids.add(new Object[]{numBinds, preferQueryMode, binaryMode}); + } + } + } + return ids; + } + + @Override + protected void updateProperties(Properties props) { + super.updateProperties(props); + PGProperty.PREFER_QUERY_MODE.set(props, preferQueryMode.value()); + setBinaryMode(binaryMode); + } + + @Test + public void executeWith65535BindsWorks() throws SQLException { + String sql = Collections.nCopies(numBinds, "?").stream() + .collect(Collectors.joining(",", "select ARRAY[", "]")); + + try (PreparedStatement ps = 
con.prepareStatement(sql)) { + for (int i = 1; i <= numBinds; i++) { + ps.setString(i, "v" + i); + } + String expected = Arrays.toString( + IntStream.rangeClosed(1, numBinds) + .mapToObj(i -> "v" + i).toArray() + ); + + try (ResultSet rs = ps.executeQuery()) { + rs.next(); + Array res = rs.getArray(1); + Object[] elements = (Object[]) res.getArray(); + String actual = Arrays.toString(elements); + + if (preferQueryMode == PreferQueryMode.SIMPLE || numBinds <= 65535) { + Assert.assertEquals("SELECT query with " + numBinds + " should work", actual, expected); + } else { + Assert.fail("con.prepareStatement(..." + numBinds + " binds) should fail since the wire protocol allows only 65535 parameters"); + } + } + } catch (SQLException e) { + if (preferQueryMode != PreferQueryMode.SIMPLE && numBinds > 65535) { + Assert.assertEquals( + "con.prepareStatement(..." + numBinds + " binds) should fail since the wire protocol allows only 65535 parameters. SQL State is ", + PSQLState.INVALID_PARAMETER_VALUE.getState(), + e.getSQLState() + ); + } else { + throw e; + } + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatementTest.java new file mode 100644 index 0000000..d31e439 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatementTest.java @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.jdbc42; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.Assert; +import org.junit.Test; + +import java.math.BigDecimal; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Types; +import java.time.LocalTime; +import java.util.Properties; + +public class PreparedStatementTest extends BaseTest4 { + protected void updateProperties(Properties props) { + PGProperty.PREFER_QUERY_MODE.set(props, "simple"); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.createTable(con, "timestamptztable", "tstz timestamptz"); + TestUtil.createTable(con, "timetztable", "ttz timetz"); + TestUtil.createTable(con, "timetable", "id serial, tt time"); + } + + @Override + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "timestamptztable"); + TestUtil.dropTable(con, "timetztable"); + TestUtil.dropTable(con, "timetable"); + super.tearDown(); + } + + @Test + public void testSetNumber() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("SELECT ? 
* 2"); + + pstmt.setBigDecimal(1, new BigDecimal("1.6")); + ResultSet rs = pstmt.executeQuery(); + rs.next(); + BigDecimal d = rs.getBigDecimal(1); + pstmt.close(); + + Assert.assertEquals(new BigDecimal("3.2"), d); + } + + @Test + public void testSetBoolean() throws SQLException { + try (PreparedStatement ps = con.prepareStatement("select false union select (select ?)")) { + ps.setBoolean(1, true); + + try (ResultSet rs = ps.executeQuery()) { + assert (rs.next()); + rs.getBoolean(1); + } + } + } + + @Test + public void testTimestampTzSetNull() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("INSERT INTO timestamptztable (tstz) VALUES (?)"); + + // valid: fully qualified type to setNull() + pstmt.setNull(1, Types.TIMESTAMP_WITH_TIMEZONE); + pstmt.executeUpdate(); + + // valid: fully qualified type to setObject() + pstmt.setObject(1, null, Types.TIMESTAMP_WITH_TIMEZONE); + pstmt.executeUpdate(); + + pstmt.close(); + } + + @Test + public void testTimeTzSetNull() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("INSERT INTO timetztable (ttz) VALUES (?)"); + + // valid: fully qualified type to setNull() + pstmt.setNull(1, Types.TIME_WITH_TIMEZONE); + pstmt.executeUpdate(); + + // valid: fully qualified type to setObject() + pstmt.setObject(1, null, Types.TIME_WITH_TIMEZONE); + pstmt.executeUpdate(); + + pstmt.close(); + } + + @Test + public void testLocalTimeMax() throws SQLException { + PreparedStatement pstmt = con.prepareStatement("INSERT INTO timetable (tt) VALUES (?)"); + + pstmt.setObject(1, LocalTime.MAX); + pstmt.executeUpdate(); + + pstmt.setObject(1, LocalTime.MIN); + pstmt.executeUpdate(); + + ResultSet rs = con.createStatement().executeQuery("select tt from timetable order by id asc"); + Assert.assertTrue(rs.next()); + LocalTime localTime = (LocalTime) rs.getObject(1, LocalTime.class); + Assert.assertEquals(LocalTime.MAX, localTime); + + Assert.assertTrue(rs.next()); + localTime = (LocalTime) rs.getObject(1, 
LocalTime.class); + Assert.assertEquals(LocalTime.MIN, localTime); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310InfinityTests.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310InfinityTests.java new file mode 100644 index 0000000..86457aa --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310InfinityTests.java @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc42; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import org.postgresql.core.ServerVersion; +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.After; +import org.junit.Assume; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.Collection; + +@RunWith(Parameterized.class) +public class SetObject310InfinityTests extends BaseTest4 { + + public SetObject310InfinityTests(BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(2); + for (BaseTest4.BinaryMode binaryMode : BaseTest4.BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + Assume.assumeTrue("PostgreSQL 8.3 does not support 'infinity' for 'date'", + TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)); + super.setUp(); + TestUtil.createTable(con, "table1", 
"timestamp_without_time_zone_column timestamp without time zone," + + "timestamp_with_time_zone_column timestamp with time zone," + + "date_column date" + ); + } + + @After + public void tearDown() throws SQLException { + TestUtil.dropTable(con, "table1"); + super.tearDown(); + } + + @Test + public void testTimestamptz() throws SQLException { + runTestforType(OffsetDateTime.MAX, OffsetDateTime.MIN, "timestamp_without_time_zone_column", null); + } + + @Test + public void testTimestamp() throws SQLException { + runTestforType(LocalDateTime.MAX, LocalDateTime.MIN, "timestamp_without_time_zone_column", null); + } + + @Test + public void testDate() throws SQLException { + runTestforType(LocalDate.MAX, LocalDate.MIN, "date_column", null); + } + + private void runTestforType(Object max, Object min, String columnName, Integer type) throws SQLException { + insert(max, columnName, type); + String readback = readString(columnName); + assertEquals("infinity", readback); + delete(); + + insert(min, columnName, type); + readback = readString(columnName); + assertEquals("-infinity", readback); + delete(); + } + + private void insert(Object data, String columnName, Integer type) throws SQLException { + PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?")); + try { + if (type != null) { + ps.setObject(1, data, type); + } else { + ps.setObject(1, data); + } + assertEquals(1, ps.executeUpdate()); + } finally { + ps.close(); + } + } + + private String readString(String columnName) throws SQLException { + Statement st = con.createStatement(); + try { + ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName)); + try { + assertNotNull(rs); + assertTrue(rs.next()); + return rs.getString(1); + } finally { + rs.close(); + } + } finally { + st.close(); + } + } + + private void delete() throws SQLException { + Statement st = con.createStatement(); + try { + st.execute("DELETE FROM table1"); + } finally { + st.close(); + } + } + +} diff 
--git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310Test.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310Test.java new file mode 100644 index 0000000..a39057f --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310Test.java @@ -0,0 +1,492 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.jdbc42; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.chrono.IsoChronology; +import java.time.chrono.IsoEra; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.time.format.ResolverStyle; +import java.time.format.SignStyle; +import java.time.temporal.ChronoField; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.TimeZone; + +@RunWith(Parameterized.class) +public class SetObject310Test extends BaseTest4 { + private static final TimeZone saveTZ = TimeZone.getDefault(); + + public static final DateTimeFormatter LOCAL_TIME_FORMATTER = + new 
DateTimeFormatterBuilder() + .parseCaseInsensitive() + .appendValue(ChronoField.YEAR_OF_ERA, 4, 10, SignStyle.EXCEEDS_PAD) + .appendLiteral('-') + .appendValue(ChronoField.MONTH_OF_YEAR, 2) + .appendLiteral('-') + .appendValue(ChronoField.DAY_OF_MONTH, 2) + .appendLiteral(' ') + .append(DateTimeFormatter.ISO_LOCAL_TIME) + .optionalStart() + .appendOffset("+HH:mm", "+00") + .optionalEnd() + .optionalStart() + .appendLiteral(' ') + .appendPattern("GG") + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.LENIENT) + .withChronology(IsoChronology.INSTANCE); + + public SetObject310Test(BaseTest4.BinaryMode binaryMode) { + setBinaryMode(binaryMode); + } + + @Parameterized.Parameters(name = "binary = {0}") + public static Iterable data() { + Collection ids = new ArrayList<>(); + for (BaseTest4.BinaryMode binaryMode : BaseTest4.BinaryMode.values()) { + ids.add(new Object[]{binaryMode}); + } + return ids; + } + + @BeforeClass + public static void createTables() throws Exception { + try (Connection con = TestUtil.openDB()) { + TestUtil.createTable(con, "table1", "timestamp_without_time_zone_column timestamp without time zone," + + "timestamp_with_time_zone_column timestamp with time zone," + + "date_column date," + + "time_without_time_zone_column time without time zone," + + "time_with_time_zone_column time with time zone" + ); + } + } + + @AfterClass + public static void dropTables() throws Exception { + try (Connection con = TestUtil.openDB()) { + TestUtil.dropTable(con, "table1"); + } + TimeZone.setDefault(saveTZ); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + TestUtil.execute(con, "delete from table1"); + } + + private void insert(Object data, String columnName, Integer type) throws SQLException { + PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?")); + try { + if (type != null) { + ps.setObject(1, data, type); + } else { + ps.setObject(1, data); + } + assertEquals(1, ps.executeUpdate()); + 
} finally { + ps.close(); + } + } + + private String readString(String columnName) throws SQLException { + Statement st = con.createStatement(); + try { + ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName)); + try { + assertNotNull(rs); + assertTrue(rs.next()); + return rs.getString(1); + } finally { + rs.close(); + } + } finally { + st.close(); + } + } + + private String insertThenReadStringWithoutType(LocalDateTime data, String columnName) throws SQLException { + insert(data, columnName, null); + return readString(columnName); + } + + private String insertThenReadStringWithType(LocalDateTime data, String columnName) throws SQLException { + insert(data, columnName, Types.TIMESTAMP); + return readString(columnName); + } + + private void insertWithoutType(Object data, String columnName) throws SQLException { + insert(data, columnName, null); + } + + private T insertThenReadWithoutType(Object data, String columnName, Class expectedType) throws SQLException { + return insertThenReadWithoutType(data, columnName, expectedType, true); + } + + private T insertThenReadWithoutType(Object data, String columnName, Class expectedType, boolean checkRoundtrip) throws SQLException { + PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?")); + try { + ps.setObject(1, data); + assertEquals(1, ps.executeUpdate()); + } finally { + ps.close(); + } + + Statement st = con.createStatement(); + try { + ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName)); + try { + assertNotNull(rs); + + assertTrue(rs.next()); + if (checkRoundtrip) { + assertEquals("Roundtrip set/getObject with type should return same result", + data, rs.getObject(1, data.getClass())); + } + return expectedType.cast(rs.getObject(1)); + } finally { + rs.close(); + } + } finally { + st.close(); + } + } + + private T insertThenReadWithType(Object data, int sqlType, String columnName, Class expectedType) throws SQLException { + return 
insertThenReadWithType(data, sqlType, columnName, expectedType, true); + } + + private T insertThenReadWithType(Object data, int sqlType, String columnName, Class expectedType, boolean checkRoundtrip) throws SQLException { + PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?")); + try { + ps.setObject(1, data, sqlType); + assertEquals(1, ps.executeUpdate()); + } finally { + ps.close(); + } + + Statement st = con.createStatement(); + try { + ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName)); + try { + assertNotNull(rs); + + assertTrue(rs.next()); + if (checkRoundtrip) { + assertEquals("Roundtrip set/getObject with type should return same result", + data, rs.getObject(1, data.getClass())); + } + return expectedType.cast(rs.getObject(1)); + } finally { + rs.close(); + } + } finally { + st.close(); + } + } + + private void deleteRows() throws SQLException { + Statement st = con.createStatement(); + try { + st.executeUpdate("DELETE FROM table1"); + } finally { + st.close(); + } + } + + /** + * Test the behavior of setObject for timestamp columns. + */ + @Test + public void testSetLocalDateTime() throws SQLException { + List zoneIdsToTest = getZoneIdsToTest(); + List datesToTest = getDatesToTest(); + + for (String zoneId : zoneIdsToTest) { + ZoneId zone = ZoneId.of(zoneId); + for (String date : datesToTest) { + LocalDateTime localDateTime = LocalDateTime.parse(date); + String expected = date.replace('T', ' '); + localTimestamps(zone, localDateTime, expected); + } + } + } + + /** + * Test the behavior of setObject for timestamp columns. 
+ */ + @Test + public void testSetOffsetDateTime() throws SQLException { + List zoneIdsToTest = getZoneIdsToTest(); + List storeZones = new ArrayList<>(); + for (String zoneId : zoneIdsToTest) { + storeZones.add(TimeZone.getTimeZone(zoneId)); + } + List datesToTest = getDatesToTest(); + + for (TimeZone timeZone : storeZones) { + ZoneId zoneId = timeZone.toZoneId(); + for (String date : datesToTest) { + LocalDateTime localDateTime = LocalDateTime.parse(date); + String expected = date.replace('T', ' '); + offsetTimestamps(zoneId, localDateTime, expected, storeZones); + } + } + } + + private List getDatesToTest() { + return Arrays.asList("2015-09-03T12:00:00", "2015-06-30T23:59:58", + "1997-06-30T23:59:59", "1997-07-01T00:00:00", "2012-06-30T23:59:59", "2012-07-01T00:00:00", + "2015-06-30T23:59:59", "2015-07-01T00:00:00", "2005-12-31T23:59:59", "2006-01-01T00:00:00", + "2008-12-31T23:59:59", "2009-01-01T00:00:00", /* "2015-06-30T23:59:60", */ "2015-07-31T00:00:00", + "2015-07-31T00:00:01", "2015-07-31T00:00:00.000001", + + // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00 + "2000-03-26T01:59:59", "2000-03-26T02:00:00", "2000-03-26T02:00:01", "2000-03-26T02:59:59", + "2000-03-26T03:00:00", "2000-03-26T03:00:01", "2000-03-26T03:59:59", "2000-03-26T04:00:00", + "2000-03-26T04:00:01", "2000-03-26T04:00:00.000001", + + // This is a pre-1970 date, so check if it is rounded properly + "1950-07-20T02:00:00", + + // Ensure the calendar is proleptic + "1582-09-30T00:00:00", "1582-10-16T00:00:00", + + // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00 + "2000-10-29T01:59:59", "2000-10-29T02:00:00", "2000-10-29T02:00:01", "2000-10-29T02:59:59", + "2000-10-29T03:00:00", "2000-10-29T03:00:01", "2000-10-29T03:59:59", "2000-10-29T04:00:00", + "2000-10-29T04:00:01", "2000-10-29T04:00:00.000001"); + } + + private List getZoneIdsToTest() { + List zoneIdsToTest = new ArrayList<>(); + zoneIdsToTest.add("Africa/Casablanca"); 
// It is something like GMT+0..GMT+1 + zoneIdsToTest.add("America/Adak"); // It is something like GMT-10..GMT-9 + zoneIdsToTest.add("Atlantic/Azores"); // It is something like GMT-1..GMT+0 + zoneIdsToTest.add("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s + zoneIdsToTest.add("Pacific/Apia"); // It is something like GMT+13..GMT+14 + zoneIdsToTest.add("Pacific/Niue"); // It is something like GMT-11..GMT-11 + for (int i = -12; i <= 13; i++) { + zoneIdsToTest.add(String.format("GMT%+02d", i)); + } + return zoneIdsToTest; + } + + private void localTimestamps(ZoneId zoneId, LocalDateTime localDateTime, String expected) throws SQLException { + TimeZone.setDefault(TimeZone.getTimeZone(zoneId)); + String readBack = insertThenReadStringWithoutType(localDateTime, "timestamp_without_time_zone_column"); + assertEquals( + "LocalDateTime=" + localDateTime + ", with TimeZone.default=" + zoneId + ", setObject(int, Object)", + expected, readBack); + deleteRows(); + + readBack = insertThenReadStringWithType(localDateTime, "timestamp_without_time_zone_column"); + assertEquals( + "LocalDateTime=" + localDateTime + ", with TimeZone.default=" + zoneId + ", setObject(int, Object, TIMESTAMP)", + expected, readBack); + deleteRows(); + } + + private void offsetTimestamps(ZoneId dataZone, LocalDateTime localDateTime, String expected, List storeZones) throws SQLException { + OffsetDateTime data = localDateTime.atZone(dataZone).toOffsetDateTime(); + try (PreparedStatement ps = con.prepareStatement( + "select ?::timestamp with time zone, ?::timestamp with time zone")) { + for (TimeZone storeZone : storeZones) { + TimeZone.setDefault(storeZone); + ps.setObject(1, data); + ps.setObject(2, data, Types.TIMESTAMP_WITH_TIMEZONE); + try (ResultSet rs = ps.executeQuery()) { + rs.next(); + String noType = rs.getString(1); + OffsetDateTime noTypeRes = parseBackendTimestamp(noType); + assertEquals( + "OffsetDateTime=" + data + " (with ZoneId=" + dataZone + "), with TimeZone.default=" + + 
storeZone + ", setObject(int, Object)", data.toInstant(), + noTypeRes.toInstant()); + String withType = rs.getString(2); + OffsetDateTime withTypeRes = parseBackendTimestamp(withType); + assertEquals( + "OffsetDateTime=" + data + " (with ZoneId=" + dataZone + "), with TimeZone.default=" + + storeZone + ", setObject(int, Object, TIMESTAMP_WITH_TIMEZONE)", + data.toInstant(), withTypeRes.toInstant()); + } + } + } + } + + /** + * Sometimes backend responds like {@code 1950-07-20 16:20:00+03} and sometimes it responds like + * {@code 1582-09-30 13:49:57+02:30:17}, so we need to handle cases when "offset minutes" is missing. + */ + private static OffsetDateTime parseBackendTimestamp(String backendTimestamp) { + String isoTimestamp = backendTimestamp.replace(' ', 'T'); + // If the pattern already has trailing :XX we are fine + // Otherwise add :00 for timezone offset minutes + if (isoTimestamp.charAt(isoTimestamp.length() - 3) != ':') { + isoTimestamp += ":00"; + } + return OffsetDateTime.parse(isoTimestamp); + } + + @Test + public void testLocalDateTimeRounding() throws SQLException { + LocalDateTime dateTime = LocalDateTime.parse("2018-12-31T23:59:59.999999500"); + localTimestamps(ZoneOffset.UTC, dateTime, "2019-01-01 00:00:00"); + } + + @Test + public void testTimeStampRounding() throws SQLException { + // TODO: fix for binary + assumeBinaryModeRegular(); + LocalTime time = LocalTime.parse("23:59:59.999999500"); + Time actual = insertThenReadWithoutType(time, "time_without_time_zone_column", Time.class, false/*no roundtrip*/); + assertEquals(Time.valueOf("24:00:00"), actual); + } + + @Test + public void testTimeStampRoundingWithType() throws SQLException { + // TODO: fix for binary + assumeBinaryModeRegular(); + LocalTime time = LocalTime.parse("23:59:59.999999500"); + Time actual = + insertThenReadWithType(time, Types.TIME, "time_without_time_zone_column", Time.class, false/*no roundtrip*/); + assertEquals(Time.valueOf("24:00:00"), actual); + } + + /** + * Test the 
behavior of setObject for timestamp columns. + */ + @Test + public void testSetLocalDateTimeBc() throws SQLException { + assumeTrue(TestUtil.haveIntegerDateTimes(con)); + + // use BC for funsies + List bcDates = new ArrayList<>(); + bcDates.add(LocalDateTime.parse("1997-06-30T23:59:59.999999").with(ChronoField.ERA, IsoEra.BCE.getValue())); + bcDates.add(LocalDateTime.parse("0997-06-30T23:59:59.999999").with(ChronoField.ERA, IsoEra.BCE.getValue())); + + for (LocalDateTime bcDate : bcDates) { + String expected = LOCAL_TIME_FORMATTER.format(bcDate); + if (expected.endsWith(" BCE")) { + // Java 22.ea.25-open prints "BCE" even though previous releases printed "BC" + // See https://bugs.openjdk.org/browse/JDK-8320747 + expected = expected.substring(0, expected.length() - 1); + } + localTimestamps(ZoneOffset.UTC, bcDate, expected); + } + } + + /** + * Test the behavior setObject for date columns. + */ + @Test + public void testSetLocalDateWithType() throws SQLException { + LocalDate data = LocalDate.parse("1971-12-15"); + java.sql.Date actual = insertThenReadWithType(data, Types.DATE, "date_column", java.sql.Date.class); + java.sql.Date expected = java.sql.Date.valueOf("1971-12-15"); + assertEquals(expected, actual); + } + + /** + * Test the behavior setObject for date columns. + */ + @Test + public void testSetLocalDateWithoutType() throws SQLException { + LocalDate data = LocalDate.parse("1971-12-15"); + java.sql.Date actual = insertThenReadWithoutType(data, "date_column", java.sql.Date.class); + java.sql.Date expected = java.sql.Date.valueOf("1971-12-15"); + assertEquals(expected, actual); + } + + /** + * Test the behavior setObject for time columns. + */ + @Test + public void testSetLocalTimeAndReadBack() throws SQLException { + // TODO: fix for binary mode. 
+ // Avoid micros truncation in org.postgresql.jdbc.PgResultSet#internalGetObject + assumeBinaryModeRegular(); + LocalTime data = LocalTime.parse("16:21:51.123456"); + + insertWithoutType(data, "time_without_time_zone_column"); + + String readBack = readString("time_without_time_zone_column"); + assertEquals("16:21:51.123456", readBack); + } + + /** + * Test the behavior setObject for time columns. + */ + @Test + public void testSetLocalTimeWithType() throws SQLException { + LocalTime data = LocalTime.parse("16:21:51"); + Time actual = insertThenReadWithType(data, Types.TIME, "time_without_time_zone_column", Time.class); + Time expected = Time.valueOf("16:21:51"); + assertEquals(expected, actual); + } + + /** + * Test the behavior setObject for time columns. + */ + @Test + public void testSetLocalTimeWithoutType() throws SQLException { + LocalTime data = LocalTime.parse("16:21:51"); + Time actual = insertThenReadWithoutType(data, "time_without_time_zone_column", Time.class); + Time expected = Time.valueOf("16:21:51"); + assertEquals(expected, actual); + } + + /** + * Test the behavior setObject for time columns. + */ + @Test + public void testSetOffsetTimeWithType() throws SQLException { + OffsetTime data = OffsetTime.parse("16:21:51+12:34"); + insertThenReadWithType(data, Types.TIME, "time_with_time_zone_column", Time.class); + } + + /** + * Test the behavior setObject for time columns. 
  /**
   * Test the behavior setObject for time with time zone columns when no target SQL type is given.
   */
  @Test
  public void testSetOffsetTimeWithoutType() throws SQLException {
    OffsetTime data = OffsetTime.parse("16:21:51+12:34");
    insertThenReadWithoutType(data, "time_with_time_zone_column", Time.class);
  }

}

/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jdbc42;

import static org.junit.Assert.assertTrue;

import org.postgresql.test.jdbc2.BaseTest4;

import org.junit.Test;

/**
 * Most basic test to check that the right package is compiled.
 */
public class SimpleJdbc42Test extends BaseTest4 {

  /**
   * Test presence of JDBC 4.2 specific methods.
   */
  @Test
  public void testSupportsRefCursors() throws Exception {
    // supportsRefCursors() only exists from JDBC 4.2 on, so a successful call proves the
    // JDBC 4.2 surface is present on the compiled driver.
    assertTrue(con.getMetaData().supportsRefCursors());
  }
}
package org.postgresql.test.jdbc42;

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.postgresql.jdbc.TimestampUtils;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.sql.SQLException;
import java.time.LocalTime;
import java.time.OffsetTime;
import java.util.TimeZone;

/**
 * Unit tests for {@link TimestampUtils} conversions between backend time strings and
 * {@code java.time} values, including 500-nanosecond rounding at microsecond precision.
 */
class TimestampUtilsTest {
  private TimestampUtils timestampUtils;

  @BeforeEach
  void setUp() {
    timestampUtils = new TimestampUtils(true, TimeZone::getDefault);
  }

  @Test
  void toStringOfLocalTime() {
    assertToStringOfLocalTime("00:00:00");
    assertToStringOfLocalTime("00:00:00.1");
    assertToStringOfLocalTime("00:00:00.12");
    assertToStringOfLocalTime("00:00:00.123");
    assertToStringOfLocalTime("00:00:00.1234");
    assertToStringOfLocalTime("00:00:00.12345");
    assertToStringOfLocalTime("00:00:00.123456");

    assertToStringOfLocalTime("00:00:00.999999");
    assertToStringOfLocalTime("00:00:00.999999", "00:00:00.999999499", "499 NanoSeconds round down");
    assertToStringOfLocalTime("00:00:01", "00:00:00.999999500", "500 NanoSeconds round up");

    assertToStringOfLocalTime("23:59:59");

    assertToStringOfLocalTime("23:59:59.999999");
    assertToStringOfLocalTime("23:59:59.999999", "23:59:59.999999499", "499 NanoSeconds round down");
    assertToStringOfLocalTime("24:00:00", "23:59:59.999999500", "500 NanoSeconds round up");
    assertToStringOfLocalTime("24:00:00", "23:59:59.999999999", "999 NanoSeconds round up");
  }

  private void assertToStringOfLocalTime(String inputTime) {
    assertToStringOfLocalTime(inputTime, inputTime, null);
  }

  private void assertToStringOfLocalTime(String expectedOutput, String inputTime, String message) {
    // Fix: the original ternary was inverted — it appended ": null" when no message was given
    // and silently dropped the message when one was supplied.
    assertEquals(
        expectedOutput,
        timestampUtils.toString(LocalTime.parse(inputTime)),
        "timestampUtils.toString(LocalTime.parse(" + inputTime + "))"
            + (message == null ? "" : ": " + message));
  }

  @Test
  void toLocalTime() throws SQLException {
    assertToLocalTime("00:00:00");

    assertToLocalTime("00:00:00.1");
    assertToLocalTime("00:00:00.12");
    assertToLocalTime("00:00:00.123");
    assertToLocalTime("00:00:00.1234");
    assertToLocalTime("00:00:00.12345");
    assertToLocalTime("00:00:00.123456");
    assertToLocalTime("00:00:00.999999");

    assertToLocalTime("23:59:59");
    assertToLocalTime("23:59:59.999999"); // 0 NanoSeconds
    assertToLocalTime("23:59:59.9999999"); // 900 NanoSeconds
    assertToLocalTime("23:59:59.99999999"); // 990 NanoSeconds
    assertToLocalTime("23:59:59.999999998"); // 998 NanoSeconds
    assertToLocalTime(LocalTime.MAX.toString(), "24:00:00", "LocalTime can't represent 24:00:00");
  }

  private void assertToLocalTime(String inputTime) throws SQLException {
    assertToLocalTime(inputTime, inputTime, null);
  }

  private void assertToLocalTime(String expectedOutput, String inputTime, String message) throws SQLException {
    // Fix: same inverted ternary as in assertToStringOfLocalTime — swapped branches restored.
    assertEquals(
        LocalTime.parse(expectedOutput),
        timestampUtils.toLocalTime(inputTime),
        "timestampUtils.toLocalTime(" + inputTime + ")"
            + (message == null ? "" : ": " + message));
  }

  @Test
  void toStringOfOffsetTime() {
    assertToStringOfOffsetTime("00:00:00+00", "00:00:00+00:00");
    assertToStringOfOffsetTime("00:00:00.1+01", "00:00:00.1+01:00");
    assertToStringOfOffsetTime("00:00:00.12+12", "00:00:00.12+12:00");
    assertToStringOfOffsetTime("00:00:00.123-01", "00:00:00.123-01:00");
    assertToStringOfOffsetTime("00:00:00.1234-02", "00:00:00.1234-02:00");
    assertToStringOfOffsetTime("00:00:00.12345-12", "00:00:00.12345-12:00");
    assertToStringOfOffsetTime("00:00:00.123456+01:30", "00:00:00.123456+01:30");
    assertToStringOfOffsetTime("00:00:00.123456-12:34", "00:00:00.123456-12:34");

    assertToStringOfOffsetTime("23:59:59+01", "23:59:59+01:00");

    assertToStringOfOffsetTime("23:59:59.999999+01", "23:59:59.999999+01:00");
    assertToStringOfOffsetTime("23:59:59.999999+01", "23:59:59.999999499+01:00"); // 499 NanoSeconds
    assertToStringOfOffsetTime("24:00:00+01", "23:59:59.999999500+01:00"); // 500 NanoSeconds
    assertToStringOfOffsetTime("24:00:00+01", "23:59:59.999999999+01:00"); // 999 NanoSeconds
  }

  private void assertToStringOfOffsetTime(String expectedOutput, String inputTime) {
    assertEquals(expectedOutput,
        timestampUtils.toString(OffsetTime.parse(inputTime)),
        "timestampUtils.toString(OffsetTime.parse(" + inputTime + "))");
  }

  @Test
  void toOffsetTime() throws SQLException {
    assertToOffsetTime("00:00:00+00:00", "00:00:00+00");
    assertToOffsetTime("00:00:00.1+01:00", "00:00:00.1+01");
    assertToOffsetTime("00:00:00.12+12:00", "00:00:00.12+12");
    assertToOffsetTime("00:00:00.123-01:00", "00:00:00.123-01");
    assertToOffsetTime("00:00:00.1234-02:00", "00:00:00.1234-02");
    assertToOffsetTime("00:00:00.12345-12:00", "00:00:00.12345-12");
    assertToOffsetTime("00:00:00.123456+01:30", "00:00:00.123456+01:30");
    assertToOffsetTime("00:00:00.123456-12:34", "00:00:00.123456-12:34");

    assertToOffsetTime("23:59:59.999999+01:00", "23:59:59.999999+01"); // 0 NanoSeconds
    assertToOffsetTime("23:59:59.9999999+01:00", "23:59:59.9999999+01"); // 900 NanoSeconds
    assertToOffsetTime("23:59:59.99999999+01:00", "23:59:59.99999999+01"); // 990 NanoSeconds
    assertToOffsetTime("23:59:59.999999998+01:00", "23:59:59.999999998+01"); // 998 NanoSeconds
    assertToOffsetTime(OffsetTime.MAX.toString(), "24:00:00+01");
  }

  private void assertToOffsetTime(String expectedOutput, String inputTime) throws SQLException {
    assertEquals(OffsetTime.parse(expectedOutput),
        timestampUtils.toOffsetTime(inputTime),
        "timestampUtils.toOffsetTime(" + inputTime + ")");
  }
}

/*
 * Copyright (c) 2017, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.jre8.core;

import org.junit.runner.RunWith;
import org.junit.runners.Suite;

/**
 * @author Joe Kutner on 10/24/17.
 *         Twitter: @codefinger
 */
@RunWith(Suite.class)
@Suite.SuiteClasses({
    SocksProxyTest.class,
})
public class Jre8TestSuite {
}
package org.postgresql.test.jre8.core;

import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.postgresql.test.TestUtil;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;

import java.sql.Connection;
import java.sql.DriverManager;

/**
 * @author Joe Kutner on 10/9/17.
 *         Twitter: @codefinger
 */
class SocksProxyTest {

  /** Clears the SOCKS proxy system properties so other tests are not affected. */
  @AfterEach
  void cleanup() {
    System.clearProperty("socksProxyHost");
    System.clearProperty("socksProxyPort");
    System.clearProperty("socksNonProxyHosts");
  }

  /**
   * Tests the connect method by connecting to the test database. The proxy host is fake, so the
   * connection only succeeds if {@code socksNonProxyHosts} correctly bypasses the proxy.
   */
  @Test
  void connectWithSocksNonProxyHost() throws Exception {
    System.setProperty("socksProxyHost", "fake-socks-proxy");
    System.setProperty("socksProxyPort", "9999");
    System.setProperty("socksNonProxyHosts", TestUtil.getServer());

    TestUtil.initDriver(); // Set up log levels, etc.

    // Fix: use try-with-resources — the original leaked the connection if the
    // assertion failed before reaching con.close().
    try (Connection con =
        DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword())) {
      assertNotNull(con);
    }
  }
}
package org.postgresql.test.osgi;

import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.junit.runners.Suite.SuiteClasses;

/** JUnit 4 suite bundling the OSGi-related tests. */
@RunWith(Suite.class)
@SuiteClasses({
    PGDataSourceFactoryTest.class,
})
public class OsgiTestSuite {

}

/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.osgi;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.postgresql.jdbc2.optional.ConnectionPool;
import org.postgresql.jdbc2.optional.PoolingDataSource;
import org.postgresql.jdbc2.optional.SimpleDataSource;
import org.postgresql.osgi.PGDataSourceFactory;
import org.postgresql.xa.PGXADataSource;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.osgi.service.jdbc.DataSourceFactory;

import java.sql.Driver;
import java.util.Properties;

import javax.sql.ConnectionPoolDataSource;
import javax.sql.DataSource;
import javax.sql.XADataSource;

/**
 * Verifies that {@link PGDataSourceFactory} produces the expected driver and data source
 * implementations for the OSGi {@link DataSourceFactory} contract, and that configuration
 * properties are propagated to the created objects.
 */
class PGDataSourceFactoryTest {

  private DataSourceFactory factory;

  @BeforeEach
  void createFactory() {
    factory = new PGDataSourceFactory();
  }

  @Test
  void createDriverDefault() throws Exception {
    // A null Properties argument must yield the plain PostgreSQL driver.
    Driver driver = factory.createDriver(null);
    assertTrue(driver instanceof org.postgresql.Driver);
  }

  @Test
  void createDataSourceDefault() throws Exception {
    DataSource created = factory.createDataSource(null);
    assertNotNull(created);
  }

  @Test
  void createDataSourceSimple() throws Exception {
    // Without pool sizing properties a SimpleDataSource is produced.
    Properties config = new Properties();
    config.put(DataSourceFactory.JDBC_DATABASE_NAME, "db");
    config.put("currentSchema", "schema");
    DataSource created = factory.createDataSource(config);
    assertNotNull(created);
    assertTrue(created instanceof SimpleDataSource);
    SimpleDataSource simple = (SimpleDataSource) created;
    assertEquals("db", simple.getDatabaseName());
    assertEquals("schema", simple.getCurrentSchema());
  }

  @Test
  void createDataSourcePooling() throws Exception {
    // Pool sizing properties switch the factory over to a PoolingDataSource.
    Properties config = new Properties();
    config.put(DataSourceFactory.JDBC_DATABASE_NAME, "db");
    config.put(DataSourceFactory.JDBC_INITIAL_POOL_SIZE, "5");
    config.put(DataSourceFactory.JDBC_MAX_POOL_SIZE, "10");
    DataSource created = factory.createDataSource(config);
    assertNotNull(created);
    assertTrue(created instanceof PoolingDataSource);
    PoolingDataSource pooling = (PoolingDataSource) created;
    assertEquals("db", pooling.getDatabaseName());
    assertEquals(5, pooling.getInitialConnections());
    assertEquals(10, pooling.getMaxConnections());
  }

  @Test
  void createConnectionPoolDataSourceDefault() throws Exception {
    ConnectionPoolDataSource created = factory.createConnectionPoolDataSource(null);
    assertNotNull(created);
  }

  @Test
  void createConnectionPoolDataSourceConfigured() throws Exception {
    Properties config = new Properties();
    config.put(DataSourceFactory.JDBC_DATABASE_NAME, "db");
    ConnectionPoolDataSource created = factory.createConnectionPoolDataSource(config);
    assertNotNull(created);
    assertTrue(created instanceof ConnectionPool);
    ConnectionPool pool = (ConnectionPool) created;
    assertEquals("db", pool.getDatabaseName());
  }

  @Test
  void createXADataSourceDefault() throws Exception {
    XADataSource created = factory.createXADataSource(null);
    assertNotNull(created);
  }

  @Test
  void createXADataSourceConfigured() throws Exception {
    Properties config = new Properties();
    config.put(DataSourceFactory.JDBC_DATABASE_NAME, "db");
    XADataSource created = factory.createXADataSource(config);
    assertNotNull(created);
    assertTrue(created instanceof PGXADataSource);
    PGXADataSource xa = (PGXADataSource) created;
    assertEquals("db", xa.getDatabaseName());
  }
}
package org.postgresql.test.plugin;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.postgresql.PGProperty;
import org.postgresql.core.ServerVersion;
import org.postgresql.plugin.AuthenticationPlugin;
import org.postgresql.plugin.AuthenticationRequestType;
import org.postgresql.test.TestUtil;
import org.postgresql.util.PSQLException;

import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

import java.sql.Connection;
import java.sql.SQLException;
import java.util.Properties;
import java.util.function.Consumer;

/**
 * Verifies that a custom {@link AuthenticationPlugin} configured via
 * {@code authenticationPluginClassName} is invoked with the expected request type for both
 * MD5 and SCRAM (SASL) authentication.
 */
class AuthenticationPluginTest {
  @BeforeAll
  static void setUp() throws SQLException {
    TestUtil.assumeHaveMinimumServerVersion(ServerVersion.v10);
  }

  /** Test plugin that returns a deterministic password and reports the request type. */
  public static class DummyAuthenticationPlugin implements AuthenticationPlugin {
    // Fix: the original declaration was a raw Consumer — the <AuthenticationRequestType>
    // type argument was lost.
    private static Consumer<AuthenticationRequestType> onGetPassword;

    @Override
    public char[] getPassword(AuthenticationRequestType type) throws PSQLException {
      onGetPassword.accept(type);

      // Ex: "MD5" => "DUMMY-MD5"
      return ("DUMMY-" + type.toString()).toCharArray();
    }
  }

  /**
   * Creates a role whose password matches the plugin's deterministic answer, connects with the
   * plugin enabled, and asserts the plugin was called with {@code expectedType}.
   */
  private void testAuthPlugin(String username, String passwordEncryption, AuthenticationRequestType expectedType) throws SQLException {
    createRole(username, passwordEncryption, "DUMMY-" + expectedType.toString());
    try {
      Properties props = new Properties();
      props.setProperty(PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getName(), DummyAuthenticationPlugin.class.getName());
      PGProperty.USER.set(props, username);

      boolean[] wasCalled = {false};
      DummyAuthenticationPlugin.onGetPassword = type -> {
        wasCalled[0] = true;
        assertEquals(expectedType, type, "The authentication type should match");
      };
      // The connection itself is unused; opening it is what triggers authentication.
      try (Connection conn = TestUtil.openDB(props)) {
        assertTrue(wasCalled[0], "The custom authentication plugin should be invoked");
      }
    } finally {
      dropRole(username);
    }
  }

  @Test
  void authPluginMD5() throws Exception {
    testAuthPlugin("auth_plugin_test_md5", "md5", AuthenticationRequestType.MD5_PASSWORD);
  }

  @Test
  void authPluginSASL() throws Exception {
    testAuthPlugin("auth_plugin_test_sasl", "scram-sha-256", AuthenticationRequestType.SASL);
  }

  /** Creates {@code username} with the given password under the given encryption setting. */
  private static void createRole(String username, String passwordEncryption, String password) throws SQLException {
    try (Connection conn = TestUtil.openPrivilegedDB()) {
      TestUtil.execute(conn, "SET password_encryption='" + passwordEncryption + "'");
      TestUtil.execute(conn, "DROP ROLE IF EXISTS " + username);
      TestUtil.execute(conn, "CREATE USER " + username + " WITH PASSWORD '" + password + "'");
    }
  }

  /** Drops {@code username} if it exists; used for cleanup. */
  private static void dropRole(String username) throws SQLException {
    try (Connection conn = TestUtil.openPrivilegedDB()) {
      TestUtil.execute(conn, "DROP ROLE IF EXISTS " + username);
    }
  }
}
/**
 * A {@link SocketFactory} used by the socket-factory tests. The driver instantiates it
 * reflectively with a single {@code String} argument; the test then inspects the singleton via
 * {@link #getInstance()} to verify the argument was passed through and a socket was created.
 *
 * <p>Only the no-argument {@link #createSocket()} is supported — every other overload throws
 * {@link UnsupportedOperationException}, mirroring what the driver actually calls.</p>
 */
public class CustomSocketFactory extends SocketFactory {

  private static CustomSocketFactory instance;

  private final String argument;
  private int socketCreated;

  public CustomSocketFactory(String argument) {
    // Guard against the driver instantiating the factory more than once per test run.
    if (instance != null) {
      throw new IllegalStateException("Test failed, multiple custom socket factory instantiation");
    }
    instance = this;
    this.argument = argument;
  }

  @Override
  public Socket createSocket(String host, int port) throws IOException, UnknownHostException {
    throw new UnsupportedOperationException();
  }

  @Override
  public Socket createSocket(InetAddress address, int port) throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public Socket createSocket(String host, int port, InetAddress localAddress, int localPort)
      throws IOException, UnknownHostException {
    throw new UnsupportedOperationException();
  }

  @Override
  public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort)
      throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public Socket createSocket() throws IOException {
    socketCreated++;
    return new Socket();
  }

  /** Constructor argument the driver passed in (socketFactoryArg). */
  public String getArgument() {
    return argument;
  }

  /** Number of times {@link #createSocket()} has been invoked. */
  public int getSocketCreated() {
    return socketCreated;
  }

  /** Singleton created by the driver, or {@code null} if not yet instantiated. */
  public static CustomSocketFactory getInstance() {
    return instance;
  }

}
+ */ + @Test + void databaseMetaData() throws Exception { + assertNotNull(CustomSocketFactory.getInstance(), "Custom socket factory not null"); + assertEquals(STRING_ARGUMENT, CustomSocketFactory.getInstance().getArgument()); + assertEquals(1, CustomSocketFactory.getInstance().getSocketCreated()); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/ssl/CommonNameVerifierTest.java b/pgjdbc/src/test/java/org/postgresql/test/ssl/CommonNameVerifierTest.java new file mode 100644 index 0000000..7d28b21 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/ssl/CommonNameVerifierTest.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.ssl; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.ssl.PGjdbcHostnameVerifier; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; + +public class CommonNameVerifierTest { + public static Iterable data() { + return Arrays.asList(new Object[][]{ + {"com", "host.com", -1}, + {"*.com", "host.com", -1}, + {"*.com", "*.*.com", -1}, + {"**.com", "*.com", -1}, + {"a.com", "*.host.com", -1}, + {"host.com", "subhost.host.com", -1}, + {"host.com", "host.com", 0} + }); + } + + @MethodSource("data") + @ParameterizedTest(name = "a={0}, b={1}") + void comparePatterns(String a, String b, int expected) throws Exception { + assertEquals(expected, PGjdbcHostnameVerifier.HOSTNAME_PATTERN_COMPARATOR.compare(a, b), a + " vs " + b); + + assertEquals(-expected, PGjdbcHostnameVerifier.HOSTNAME_PATTERN_COMPARATOR.compare(b, a), b + " vs " + a); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/ssl/LazyKeyManagerTest.java b/pgjdbc/src/test/java/org/postgresql/test/ssl/LazyKeyManagerTest.java new file mode 100644 index 0000000..121ed30 --- /dev/null +++ 
package org.postgresql.test.ssl;

import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;

import org.postgresql.ssl.LazyKeyManager;
import org.postgresql.ssl.PKCS12KeyManager;
import org.postgresql.test.TestUtil;

import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.security.PrivateKey;
import java.security.cert.X509Certificate;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.x500.X500Principal;

/**
 * Tests loading client keys from PKCS#12 and PEM/PKCS#8 test certificates, and the key-type
 * matching performed by {@code chooseClientAlias}.
 */
class LazyKeyManagerTest {

  @Test
  void loadP12Key() throws Exception {
    // Both the private key and its certificate chain must be retrievable from the .p12 store.
    PKCS12KeyManager p12 = new PKCS12KeyManager(
        TestUtil.getSslTestCertPath("goodclient.p12"),
        new TestCallbackHandler("sslpwd"));
    PrivateKey key = p12.getPrivateKey("user");
    assertNotNull(key);
    X509Certificate[] certChain = p12.getCertificateChain("user");
    assertNotNull(certChain);
  }

  @Test
  void loadKey() throws Exception {
    LazyKeyManager keyManager = new LazyKeyManager(
        TestUtil.getSslTestCertPath("goodclient.crt"),
        TestUtil.getSslTestCertPath("goodclient.pk8"),
        new TestCallbackHandler("sslpwd"),
        true);
    PrivateKey key = keyManager.getPrivateKey("user");
    assertNotNull(key);
  }

  @Test
  void chooseClientAlias() throws Exception {
    LazyKeyManager keyManager = new LazyKeyManager(
        TestUtil.getSslTestCertPath("goodclient.crt"),
        TestUtil.getSslTestCertPath("goodclient.pk8"),
        new TestCallbackHandler("sslpwd"),
        true);
    X500Principal testPrincipal = new X500Principal("CN=root certificate, O=PgJdbc test, ST=CA, C=US");
    X500Principal[] issuers = new X500Principal[]{testPrincipal};

    // The test key is RSA: matching must succeed for "RSA" (any case), for any list containing
    // it, and for an empty key-type list; it must fail for a list of only non-matching types.
    String validKeyType = keyManager.chooseClientAlias(new String[]{"RSA"}, issuers, null);
    assertNotNull(validKeyType);

    String ignoresCase = keyManager.chooseClientAlias(new String[]{"rsa"}, issuers, null);
    assertNotNull(ignoresCase);

    String invalidKeyType = keyManager.chooseClientAlias(new String[]{"EC"}, issuers, null);
    assertNull(invalidKeyType);

    String containsValidKeyType = keyManager.chooseClientAlias(new String[]{"EC", "RSA"}, issuers, null);
    assertNotNull(containsValidKeyType);

    String ignoresBlank = keyManager.chooseClientAlias(new String[]{}, issuers, null);
    assertNotNull(ignoresBlank);
  }

  /** Callback handler that answers every {@link PasswordCallback} with a fixed password. */
  public static class TestCallbackHandler implements CallbackHandler {
    char [] password;

    TestCallbackHandler(String password) {
      if (password != null) {
        this.password = password.toCharArray();
      }
    }

    @Override
    public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
      for (Callback cb : callbacks) {
        if (cb instanceof PasswordCallback) {
          if (password != null) {
            ((PasswordCallback) cb).setPassword(password);
          }
          // It is used instead of cons.readPassword(prompt), because the prompt may contain '%'
          // characters
          //pwdCallback.setPassword(cons.readPassword("%s", pwdCallback.getPrompt()));
        } else {
          throw new UnsupportedCallbackException(cb);
        }
      }
    }
  }
}
information. + */ + +package org.postgresql.test.ssl; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.ssl.PGjdbcHostnameVerifier; +import org.postgresql.ssl.jdbc4.LibPQFactory; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; + +public class LibPQFactoryHostNameTest { + public static Iterable data() { + return Arrays.asList(new Object[][]{ + {"host.com", "pattern.com", false}, + {"host.com", ".pattern.com", false}, + {"host.com", "*.pattern.com", false}, + {"host.com", "*.host.com", false}, + {"a.com", "*.host.com", false}, + {".a.com", "*.host.com", false}, + {"longhostname.com", "*.com", true}, + {"longhostname.ru", "*.com", false}, + {"host.com", "host.com", true}, + {"sub.host.com", "host.com", false}, + {"sub.host.com", "sub.host.com", true}, + {"sub.host.com", "*.host.com", true}, + {"Sub.host.com", "sub.host.com", true}, + {"sub.host.com", "Sub.host.com", true}, + {"sub.host.com", "*.hoSt.com", true}, + {"*.host.com", "host.com", false}, + {"sub.sub.host.com", "*.host.com", false}, // Wildcard should cover just one level + {"com", "*", false}, // Wildcard should have al least one dot + }); + } + + @MethodSource("data") + @ParameterizedTest(name = "host={0}, pattern={1}") + void checkPattern(String hostname, String pattern, boolean expected) throws Exception { + assertEquals(expected, LibPQFactory.verifyHostName(hostname, pattern), hostname + ", pattern: " + pattern); + + assertEquals(expected, PGjdbcHostnameVerifier.INSTANCE.verifyHostName(hostname, pattern), hostname + ", pattern: " + pattern); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/ssl/PKCS12KeyTest.java b/pgjdbc/src/test/java/org/postgresql/test/ssl/PKCS12KeyTest.java new file mode 100644 index 0000000..ca6abbb --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/ssl/PKCS12KeyTest.java @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2019, PostgreSQL Global 
/*
 * Copyright (c) 2019, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.ssl;

import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.postgresql.PGProperty;
import org.postgresql.ssl.PKCS12KeyManager;
import org.postgresql.test.TestUtil;

import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.sql.Connection;
import java.util.Properties;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.x500.X500Principal;

/**
 * Tests SSL client authentication using a PKCS#12 keystore, both end-to-end against a
 * database requiring SSL and directly against {@link PKCS12KeyManager}.
 */
class PKCS12KeyTest {

  /** Connects with sslkey pointing at the good PKCS#12 keystore and verifies SSL is used. */
  @Test
  void testGoodClientP12() throws Exception {
    TestUtil.assumeSslTestsEnabled();

    Properties props = new Properties();
    props.put(TestUtil.DATABASE_PROP, "hostssldb");
    PGProperty.SSL_MODE.set(props, "prefer");
    PGProperty.SSL_KEY.set(props, TestUtil.getSslTestCertPath("goodclient.p12"));

    try (Connection conn = TestUtil.openDB(props)) {
      boolean sslUsed = TestUtil.queryForBoolean(conn, "SELECT ssl_is_used()");
      assertTrue(sslUsed, "SSL should be in use");
    }
  }

  /**
   * {@code chooseClientAlias} should return an alias when RSA is requested (regardless of
   * case, when RSA is one of several requested key types, or when no key type is requested),
   * and {@code null} for key types the test key cannot satisfy (EC).
   */
  @Test
  void testChooseClientAlias() throws Exception {
    PKCS12KeyManager pkcs12KeyManager = new PKCS12KeyManager(
        TestUtil.getSslTestCertPath("goodclient.p12"), new TestCallbackHandler("sslpwd"));
    X500Principal testPrincipal =
        new X500Principal("CN=root certificate, O=PgJdbc test, ST=CA, C=US");
    X500Principal[] issuers = new X500Principal[]{testPrincipal};

    String validKeyType = pkcs12KeyManager.chooseClientAlias(new String[]{"RSA"}, issuers, null);
    assertNotNull(validKeyType);

    String ignoresCase = pkcs12KeyManager.chooseClientAlias(new String[]{"rsa"}, issuers, null);
    assertNotNull(ignoresCase);

    String invalidKeyType = pkcs12KeyManager.chooseClientAlias(new String[]{"EC"}, issuers, null);
    assertNull(invalidKeyType);

    String containsValidKeyType =
        pkcs12KeyManager.chooseClientAlias(new String[]{"EC", "RSA"}, issuers, null);
    assertNotNull(containsValidKeyType);

    String ignoresBlank = pkcs12KeyManager.chooseClientAlias(new String[]{}, issuers, null);
    assertNotNull(ignoresBlank);
  }

  /**
   * CallbackHandler that answers every {@link PasswordCallback} with a fixed password.
   * NOTE(review): duplicates LazyKeyManagerTest.TestCallbackHandler — consider sharing a
   * single helper class across the SSL tests.
   */
  public static class TestCallbackHandler implements CallbackHandler {
    // Password handed to PasswordCallbacks; null means "leave the callback unanswered".
    char[] password;

    TestCallbackHandler(String password) {
      if (password != null) {
        this.password = password.toCharArray();
      }
    }

    @Override
    public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
      for (Callback callback : callbacks) {
        if (!(callback instanceof PasswordCallback)) {
          throw new UnsupportedCallbackException(callback);
        }
        PasswordCallback pwdCallback = (PasswordCallback) callback;
        if (password != null) {
          pwdCallback.setPassword(password);
          continue;
        }
      }
    }
  }
}
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.test.ssl;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;

import org.postgresql.test.TestUtil;

import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.security.GeneralSecurityException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;

import javax.net.ssl.SSLHandshakeException;

/**
 * Tests {@code org.postgresql.ssl.SingleCertValidatingFactory}: validating the server
 * certificate against a single pre-shared certificate supplied via the
 * {@code sslfactoryarg} property as a file path, a literal string, a system property,
 * or an environment variable.
 */
public class SingleCertValidatingFactoryTest {
  @BeforeAll
  static void setUp() {
    TestUtil.assumeSslTestsEnabled();
  }

  // The valid and invalid server SSL certificates:
  private static final String goodServerCertPath = "../certdir/goodroot.crt";
  private static final String badServerCertPath = "../certdir/badroot.crt";

  private String getGoodServerCert() {
    return loadFile(goodServerCertPath);
  }

  private String getBadServerCert() {
    return loadFile(badServerCertPath);
  }

  protected String getUsername() {
    return System.getProperty("username");
  }

  protected String getPassword() {
    return System.getProperty("password");
  }

  /**
   * Tests whether a given throwable or one of its root causes matches a given class.
   *
   * @param t throwable chain to inspect (may be null)
   * @param expectedThrowable expected exception type (may be null)
   * @return true when {@code t} or any of its causes is an instance of
   *     {@code expectedThrowable}
   */
  private boolean matchesExpected(Throwable t,
      Class<? extends Throwable> expectedThrowable)
      throws SQLException {
    if (t == null || expectedThrowable == null) {
      return false;
    }
    if (expectedThrowable.isAssignableFrom(t.getClass())) {
      return true;
    }
    return matchesExpected(t.getCause(), expectedThrowable);
  }

  protected void testConnect(Properties info, boolean sslExpected) throws SQLException {
    testConnect(info, sslExpected, null);
  }

  /**
   * Connects to the database with the given connection properties and then verifies that
   * connection is using SSL.
   *
   * @param info connection properties (the database name is set here)
   * @param sslExpected whether the established connection must report SSL in use
   * @param expectedThrowable exception type that is allowed (and, when non-null, required)
   *     to abort the connection attempt
   */
  protected void testConnect(Properties info, boolean sslExpected,
      Class<? extends Throwable> expectedThrowable) throws SQLException {
    info.setProperty(TestUtil.DATABASE_PROP, "hostdb");
    try (Connection conn = TestUtil.openDB(info)) {
      Statement stmt = conn.createStatement();
      // Basic SELECT test:
      ResultSet rs = stmt.executeQuery("SELECT 1");
      rs.next();
      assertEquals(1, rs.getInt(1));
      rs.close();
      // Verify SSL usage is as expected:
      rs = stmt.executeQuery("SELECT ssl_is_used()");
      rs.next();
      boolean sslActual = rs.getBoolean(1);
      assertEquals(sslExpected, sslActual);
      stmt.close();
    } catch (Exception e) {
      if (matchesExpected(e, expectedThrowable)) {
        // do nothing and just suppress the exception
        return;
      } else {
        if (e instanceof RuntimeException) {
          throw (RuntimeException) e;
        } else if (e instanceof SQLException) {
          throw (SQLException) e;
        } else {
          throw new RuntimeException(e);
        }
      }
    }

    if (expectedThrowable != null) {
      fail("Expected exception " + expectedThrowable.getName() + " but it did not occur.");
    }
  }

  /**
   * Connect using SSL and attempt to validate the server's certificate but don't actually
   * provide it. This connection attempt should *fail* as the client should reject the server.
   */
  @Test
  void connectSSLWithValidationNoCert() throws SQLException {
    Properties info = new Properties();
    info.setProperty("ssl", "true");
    info.setProperty("sslfactory", "org.postgresql.ssl.DefaultJavaSSLFactory");
    testConnect(info, true, SSLHandshakeException.class);
  }

  /**
   * <p>Connect using SSL and attempt to validate the server's certificate against the wrong
   * pre-shared certificate. This test uses a pre-generated certificate that will *not* match
   * the test PostgreSQL server (the certificate is for properssl.example.com).</p>
   *
   * <p>This connection uses a custom SSLSocketFactory using a custom trust manager that
   * validates the remote server's certificate against the pre-shared certificate.</p>
   *
   * <p>This test should throw an exception as the client should reject the server since the
   * certificate does not match.</p>
   */
  @Test
  void connectSSLWithValidationWrongCert() throws SQLException, IOException {
    Properties info = new Properties();
    info.setProperty("ssl", "true");
    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
    info.setProperty("sslfactoryarg", "file:" + badServerCertPath);
    testConnect(info, true, SSLHandshakeException.class);
  }

  /** A non-existent certificate file must surface as FileNotFoundException. */
  @Test
  void fileCertInvalid() throws SQLException, IOException {
    Properties info = new Properties();
    info.setProperty("ssl", "true");
    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
    info.setProperty("sslfactoryarg", "file:foo/bar/baz");
    testConnect(info, true, FileNotFoundException.class);
  }

  /** A string that is not a certificate must surface as GeneralSecurityException. */
  @Test
  void stringCertInvalid() throws SQLException, IOException {
    Properties info = new Properties();
    info.setProperty("ssl", "true");
    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
    info.setProperty("sslfactoryarg", "foobar!");
    testConnect(info, true, GeneralSecurityException.class);
  }

  /**
   * Connect using SSL and attempt to validate the server's certificate against the proper
   * pre-shared certificate. The certificate is specified as a file path.
   */
  @Test
  void connectSSLWithValidationProperCertFile() throws SQLException, IOException {
    Properties info = new Properties();
    info.setProperty("ssl", "true");
    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
    info.setProperty("sslfactoryarg", "file:" + goodServerCertPath);
    testConnect(info, true);
  }

  /**
   * Connect using SSL and attempt to validate the server's certificate against the proper
   * pre-shared certificate. The certificate is specified as a String (eg. the "----- BEGIN
   * CERTIFICATE ----- ... etc"). Note that the test reads the certificate from a local file.
   */
  @Test
  void connectSSLWithValidationProperCertString() throws SQLException, IOException {
    Properties info = new Properties();
    info.setProperty("ssl", "true");
    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
    info.setProperty("sslfactoryarg", getGoodServerCert());
    testConnect(info, true);
  }

  /**
   * Connect using SSL and attempt to validate the server's certificate against the proper
   * pre-shared certificate. The certificate is specified as a system property.
   */
  @Test
  void connectSSLWithValidationProperCertSysProp() throws SQLException, IOException {
    // System property name we're using for the SSL cert. This can be anything.
    String sysPropName = "org.postgresql.jdbc.test.sslcert";

    try {
      System.setProperty(sysPropName, getGoodServerCert());

      Properties info = new Properties();
      info.setProperty("ssl", "true");
      info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
      info.setProperty("sslfactoryarg", "sys:" + sysPropName);
      testConnect(info, true);
    } finally {
      // Clear it out when we're done:
      System.setProperty(sysPropName, "");
    }
  }

  /**
   * <p>Connect using SSL and attempt to validate the server's certificate against the proper
   * pre-shared certificate. The certificate is specified as an environment variable.</p>
   *
   * <p>Note: To execute this test successfully you need to set the value of the environment
   * variable DATASOURCE_SSL_CERT prior to running the test.</p>
   *
   * <p>Here's one way to do it: $ DATASOURCE_SSL_CERT=$(cat certdir/goodroot.crt) ant clean
   * test</p>
   */
  @Test
  void connectSSLWithValidationProperCertEnvVar() throws SQLException, IOException {
    String envVarName = "DATASOURCE_SSL_CERT";
    if (System.getenv(envVarName) == null) {
      System.out.println(
          "Skipping test connectSSLWithValidationProperCertEnvVar (env variable is not defined)");
      return;
    }

    Properties info = new Properties();
    info.setProperty("ssl", "true");
    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
    info.setProperty("sslfactoryarg", "env:" + envVarName);
    testConnect(info, true);
  }

  /**
   * Connect using SSL using a system property to specify the SSL certificate but not actually
   * having it set. This tests whether the proper exception is thrown.
   */
  @Test
  void connectSSLWithValidationMissingSysProp() throws SQLException, IOException {
    // System property name we're using for the SSL cert. This can be anything.
    String sysPropName = "org.postgresql.jdbc.test.sslcert";

    try {
      System.setProperty(sysPropName, "");

      Properties info = new Properties();
      info.setProperty("ssl", "true");
      info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
      info.setProperty("sslfactoryarg", "sys:" + sysPropName);
      testConnect(info, true, GeneralSecurityException.class);
    } finally {
      // Clear it out when we're done:
      System.setProperty(sysPropName, "");
    }
  }

  /**
   * Connect using SSL using an environment var to specify the SSL certificate but not
   * actually having it set. This tests whether the proper exception is thrown.
   */
  @Test
  void connectSSLWithValidationMissingEnvVar() throws SQLException, IOException {
    // Use an environment variable that does *not* exist:
    String envVarName = "MISSING_DATASOURCE_SSL_CERT";
    if (System.getenv(envVarName) != null) {
      System.out
          .println("Skipping test connectSSLWithValidationMissingEnvVar (env variable is defined)");
      return;
    }

    Properties info = new Properties();
    info.setProperty("ssl", "true");
    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
    info.setProperty("sslfactoryarg", "env:" + envVarName);
    testConnect(info, true, GeneralSecurityException.class);
  }

  ///////////////////////////////////////////////////////////////////

  /**
   * Utility function to load a file as a string.
   *
   * <p>NOTE(review): reads with the platform default charset, as the original did —
   * certificates are ASCII PEM so this is presumed safe; confirm before switching to UTF-8.</p>
   *
   * @param path file to read
   * @return file contents with lines joined by '\n'
   * @throws RuntimeException wrapping any IOException
   */
  public static String loadFile(String path) {
    // try-with-resources replaces the original manual close() inside a finally block,
    // which silently swallowed close failures.
    try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(path)))) {
      StringBuilder sb = new StringBuilder();
      String line;
      while ((line = br.readLine()) != null) {
        sb.append(line);
        sb.append("\n");
      }
      return sb.toString();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
+ */ + +package org.postgresql.test.ssl; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGProperty; +import org.postgresql.jdbc.GSSEncMode; +import org.postgresql.jdbc.SslMode; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.EOFException; +import java.io.FileNotFoundException; +import java.net.SocketException; +import java.security.cert.CertPathValidatorException; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Properties; + +import javax.net.ssl.SSLHandshakeException; + +public class SslTest { + enum Hostname { + GOOD("localhost"), + BAD("127.0.0.1"), + ; + + final String value; + + Hostname(String value) { + this.value = value; + } + } + + enum TestDatabase { + hostdb, + hostnossldb, + hostssldb, + hostsslcertdb, + certdb, + ; + + public static final TestDatabase[] VALUES = values(); + + public boolean requiresClientCert() { + return this == certdb || this == hostsslcertdb; + } + + public boolean requiresSsl() { + return this == certdb || this == hostssldb || this == hostsslcertdb; + } + + public boolean rejectsSsl() { + return this == hostnossldb; + } + } + + enum ClientCertificate { + EMPTY(""), + GOOD("goodclient"), + BAD("badclient"), + ; + + public static final ClientCertificate[] VALUES = values(); + public final String fileName; + + ClientCertificate(String fileName) { + this.fileName = fileName; + } + } + + enum ClientRootCertificate { + EMPTY(""), + GOOD("goodroot"), + BAD("badroot"), + ; + + public static final ClientRootCertificate[] VALUES = values(); + public final String fileName; + + ClientRootCertificate(String fileName) { + this.fileName = fileName; + } + } + + public Hostname host; + 
public TestDatabase db; + public SslMode sslmode; + public ClientCertificate clientCertificate; + public ClientRootCertificate clientRootCertificate; + public GSSEncMode gssEncMode; + + public static Iterable data() { + TestUtil.assumeSslTestsEnabled(); + + Collection tests = new ArrayList<>(); + + for (SslMode sslMode : SslMode.VALUES) { + for (Hostname hostname : Hostname.values()) { + for (TestDatabase database : TestDatabase.VALUES) { + for (ClientCertificate clientCertificate : ClientCertificate.VALUES) { + for (ClientRootCertificate rootCertificate : ClientRootCertificate.VALUES) { + if ((sslMode == SslMode.DISABLE + || database.rejectsSsl()) + && (clientCertificate != ClientCertificate.GOOD + || rootCertificate != ClientRootCertificate.GOOD)) { + // When SSL is disabled, it does not make sense to verify "bad certificates" + // since certificates are NOT used in plaintext connections + continue; + } + if (database.rejectsSsl() + && (sslMode.verifyCertificate() + || hostname == Hostname.BAD) + ) { + // DB would reject SSL connection, so it makes no sense to test cases like verify-full + continue; + } + for (GSSEncMode gssEncMode : GSSEncMode.values()) { + if (gssEncMode == GSSEncMode.REQUIRE) { + // TODO: support gss tests in /certdir/pg_hba.conf + continue; + } + tests.add(new Object[]{hostname, database, sslMode, clientCertificate, rootCertificate, gssEncMode}); + } + } + } + } + } + } + + return tests; + } + + private static boolean contains(String value, String substring) { + return value != null && value.contains(substring); + } + + private void assertClientCertRequired(SQLException e, String caseName) { + if (e == null) { + fail(caseName + " should result in failure of client validation"); + } + assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected"); + } + + private void checkErrorCodes(SQLException e) { + if (e != null && e.getCause() instanceof FileNotFoundException + && 
clientRootCertificate != ClientRootCertificate.EMPTY) { + fail("FileNotFoundException => it looks like a configuration failure"); + } + + if (e == null && sslmode == SslMode.ALLOW && !db.requiresSsl()) { + // allowed to connect with plain connection + return; + } + + if (clientRootCertificate == ClientRootCertificate.EMPTY + && (sslmode == SslMode.VERIFY_CA || sslmode == SslMode.VERIFY_FULL)) { + String caseName = "rootCertificate is missing and sslmode=" + sslmode; + if (e == null) { + fail(caseName + " should result in FileNotFound exception for root certificate"); + } + assertEquals(PSQLState.CONNECTION_FAILURE.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected"); + FileNotFoundException fnf = findCause(e, FileNotFoundException.class); + if (fnf == null) { + fail(caseName + " ==> FileNotFoundException should be present in getCause chain"); + } + return; + } + + if (db.requiresSsl() && sslmode == SslMode.DISABLE) { + String caseName = "sslmode=DISABLE and database " + db + " requires SSL"; + if (e == null) { + fail(caseName + " should result in connection failure"); + } + assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> INVALID_AUTHORIZATION_SPECIFICATION is expected"); + return; + } + + if (db.rejectsSsl() && sslmode.requireEncryption()) { + String caseName = + "database " + db + " rejects SSL, and sslmode " + sslmode + " requires encryption"; + if (e == null) { + fail(caseName + " should result in connection failure"); + } + assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> INVALID_AUTHORIZATION_SPECIFICATION is expected"); + return; + } + + // Server certificate, server hostname, and client certificate can be validated in any order + // So we have three validators and expect at least one of them to match + List errors = null; + try { + if (assertServerCertificate(e)) { + return; + } + } catch (AssertionError ae) { + errors = 
addError(errors, ae); + } + + try { + if (assertServerHostname(e)) { + return; + } + } catch (AssertionError ae) { + errors = addError(errors, ae); + } + + try { + if (assertClientCertificate(e)) { + return; + } + } catch (AssertionError ae) { + errors = addError(errors, ae); + } + + if (sslmode == SslMode.ALLOW && db.requiresSsl()) { + // Allow tries to connect with non-ssl first, and it always throws the first error even after try SSL. + // "If SSL was expected to fail" (e.g. invalid certificate), and db requiresSsl, then ALLOW + // should fail as well + String caseName = + "sslmode=ALLOW and db " + db + " requires SSL, and there are expected SSL failures"; + if (errors == null) { + if (e != null) { + fail(caseName + " ==> connection should be upgraded to SSL with no failures"); + } + } else { + try { + if (e == null) { + fail(caseName + " ==> connection should fail"); + } + assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> INVALID_AUTHORIZATION_SPECIFICATION is expected"); + } catch (AssertionError er) { + for (AssertionError error : errors) { + er.addSuppressed(error); + } + throw er; + } + } + // ALLOW is ok + return; + } + + if (errors == null) { + if (e == null) { + // Assume "no exception" was expected. + // The cases like "successfully connected in sslmode=DISABLE to SSLONLY db" + // should be handled with assertions above + return; + } + fail("SQLException present when it was not expected"); + } + + AssertionError firstError = errors.get(0); + if (errors.size() == 1) { + throw firstError; + } + + for (int i = 1; i < errors.size(); i++) { + AssertionError error = errors.get(i); + firstError.addSuppressed(error); + } + + throw firstError; + } + + private List addError(List errors, AssertionError ae) { + if (errors == null) { + errors = new ArrayList<>(); + } + errors.add(ae); + return errors; + } + + /** + * Checks server certificate validation error. 
+ * + * @param e connection exception or null if no exception + * @return true when validation pass, false when the case is not applicable + * @throws AssertionError when exception does not match expectations + */ + private boolean assertServerCertificate(SQLException e) { + if (clientRootCertificate == ClientRootCertificate.GOOD + || (sslmode != SslMode.VERIFY_CA && sslmode != SslMode.VERIFY_FULL)) { + return false; + } + + String caseName = "Server certificate is " + clientRootCertificate + " + sslmode=" + sslmode; + if (e == null) { + fail(caseName + " should result in failure of server validation"); + } + + assertEquals(PSQLState.CONNECTION_FAILURE.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected"); + CertPathValidatorException validatorEx = findCause(e, CertPathValidatorException.class); + if (validatorEx == null) { + fail(caseName + " ==> exception should be caused by CertPathValidatorException," + + " but no CertPathValidatorException is present in the getCause chain"); + } + assertEquals("NO_TRUST_ANCHOR", validatorEx.getReason().toString(), caseName + " ==> CertPathValidatorException.getReason"); + return true; + } + + /** + * Checks hostname validation error. 
+ * + * @param e connection exception or null if no exception + * @return true when validation pass, false when the case is not applicable + * @throws AssertionError when exception does not match expectations + */ + private boolean assertServerHostname(SQLException e) { + if (sslmode != SslMode.VERIFY_FULL || host != Hostname.BAD) { + return false; + } + + String caseName = "VERIFY_FULL + hostname that does not match server certificate"; + if (e == null) { + fail(caseName + " ==> CONNECTION_FAILURE expected"); + } + assertEquals(PSQLState.CONNECTION_FAILURE.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected"); + String message = e.getMessage(); + if (message == null || !message.contains("PgjdbcHostnameVerifier")) { + fail(caseName + " ==> message should contain" + + " 'PgjdbcHostnameVerifier'. Actual message is " + message); + } + return true; + } + + /** + * Checks client certificate validation error. + * + * @param e connection exception or null if no exception + * @return true when validation pass, false when the case is not applicable + * @throws AssertionError when exception does not match expectations + */ + private boolean assertClientCertificate(SQLException e) { + if (db.requiresClientCert() && clientCertificate == ClientCertificate.EMPTY) { + String caseName = + "client certificate was not sent and database " + db + " requires client certificate"; + assertClientCertRequired(e, caseName); + return true; + } + + if (clientCertificate != ClientCertificate.BAD) { + return false; + } + // Server verifies certificate no matter how it is configured, so sending BAD one + // is doomed to fail + String caseName = "BAD client certificate, and database " + db + " requires one"; + if (e == null) { + fail(caseName + " should result in failure of client validation"); + } + // Note: Java's SSLSocket handshake does NOT process alert messages + // even if they are present on the wire. 
This looks like a perfectly valid + // handshake, however, the subsequent read from the stream (e.g. during startup + // message) discovers the alert message (e.g. "Received fatal alert: decrypt_error") + // and converts that to exception. + // That is why "CONNECTION_UNABLE_TO_CONNECT" is listed here for BAD client cert. + // Ideally, handshake failure should be detected during the handshake, not after sending the startup + // message + if (!PSQLState.CONNECTION_FAILURE.getState().equals(e.getSQLState()) + && !(clientCertificate == ClientCertificate.BAD + && PSQLState.CONNECTION_UNABLE_TO_CONNECT.getState().equals(e.getSQLState())) + ) { + fail(caseName + " ==> CONNECTION_FAILURE(08006)" + + " or CONNECTION_UNABLE_TO_CONNECT(08001) is expected" + + ", got " + e.getSQLState()); + } + + // Three exceptions are possible + // SSLHandshakeException: Received fatal alert: unknown_ca + // EOFException + // SocketException: broken pipe (write failed) + + // decrypt_error does not look to be a valid case, however, we allow it for now + // SSLHandshakeException: Received fatal alert: decrypt_error + + SocketException brokenPipe = findCause(e, SocketException.class); + if (brokenPipe != null) { + if (!contains(brokenPipe.getMessage(), "Broken pipe")) { + fail( + caseName + " ==> server should have terminated the connection (broken pipe expected)" + + ", actual exception was " + brokenPipe.getMessage()); + } + return true; + } + + EOFException eofException = findCause(e, EOFException.class); + if (eofException != null) { + return true; + } + + SSLHandshakeException handshakeException = findCause(e, SSLHandshakeException.class); + if (handshakeException != null) { + final String handshakeMessage = handshakeException.getMessage(); + if (!contains(handshakeMessage, "unknown_ca") + && !contains(handshakeMessage, "decrypt_error")) { + fail( + caseName + + " ==> server should have terminated the connection (expected 'unknown_ca' or 'decrypt_error')" + + ", actual exception was " + 
handshakeMessage); + } + return true; + } + + fail(caseName + " ==> exception should be caused by SocketException(broken pipe)" + + " or EOFException," + + " or SSLHandshakeException. No exceptions of such kind are present in the getCause chain"); + return false; + } + + private static T findCause(Throwable t, + Class cause) { + while (t != null) { + if (cause.isInstance(t)) { + return (T) t; + } + t = t.getCause(); + } + return null; + } + + @MethodSource("data") + @ParameterizedTest(name = "host={0}, db={1} sslMode={2}, cCert={3}, cRootCert={4}, gssEncMode={5}") + void run(Hostname host, TestDatabase db, SslMode sslmode, ClientCertificate clientCertificate, ClientRootCertificate clientRootCertificate, GSSEncMode gssEncMode) throws SQLException { + initSslTest(host, db, sslmode, clientCertificate, clientRootCertificate, gssEncMode); + Properties props = new Properties(); + props.put(TestUtil.SERVER_HOST_PORT_PROP, host.value + ":" + TestUtil.getPort()); + props.put(TestUtil.DATABASE_PROP, db.toString()); + PGProperty.SSL_MODE.set(props, sslmode.value); + PGProperty.GSS_ENC_MODE.set(props, gssEncMode.value); + if (clientCertificate == ClientCertificate.EMPTY) { + PGProperty.SSL_CERT.set(props, ""); + PGProperty.SSL_KEY.set(props, ""); + } else { + PGProperty.SSL_CERT.set(props, TestUtil.getSslTestCertPath(clientCertificate.fileName + ".crt")); + PGProperty.SSL_KEY.set(props, TestUtil.getSslTestCertPath(clientCertificate.fileName + ".pk8")); + } + if (clientRootCertificate == ClientRootCertificate.EMPTY) { + PGProperty.SSL_ROOT_CERT.set(props, ""); + } else { + PGProperty.SSL_ROOT_CERT.set(props, TestUtil.getSslTestCertPath(clientRootCertificate.fileName + ".crt")); + } + + try (Connection conn = TestUtil.openDB(props)) { + boolean sslUsed = TestUtil.queryForBoolean(conn, "SELECT ssl_is_used()"); + if (sslmode == SslMode.ALLOW) { + assertEquals(db.requiresSsl(), sslUsed, "SSL should be used if the DB requires SSL"); + } else { + assertEquals(sslmode != 
/**
 * JUnit 4 suite aggregating the SSL-related tests.
 *
 * <p>NOTE(review): this suite still uses the JUnit 4 {@code @RunWith} runner
 * while the listed test classes appear to have been migrated to JUnit 5 —
 * confirm the suite is still picked up by the build, or migrate it to the
 * junit-platform-suite {@code @Suite} annotation.</p>
 */
@RunWith(Suite.class)
@Suite.SuiteClasses({
    CommonNameVerifierTest.class,
    LazyKeyManagerTest.class,
    LibPQFactoryHostNameTest.class,
    SslTest.class,
})
public class SslTestSuite {
}
+ */ + +package org.postgresql.test.sspi; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.junit.MatcherAssume.assumeThat; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.PGProperty; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PSQLException; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.Statement; +import java.util.Locale; +import java.util.Properties; + +/* +* These tests require a working SSPI authentication setup +* in the database server that allows the executing user +* to authenticate as the "sspiusername" in the build +* configuration. +*/ +class SSPITest { + + /* + * SSPI only exists on Windows. + */ + @BeforeAll + static void checkPlatform() { + assumeThat("SSPI not supported on this platform", + System.getProperty("os.name").toLowerCase(Locale.ROOT), + containsString("windows")); + } + + /* + * Tests that SSPI login succeeds and a query can be run. + */ + @Test + @Disabled + void authorized() throws Exception { + Properties props = new Properties(); + PGProperty.USER.set(props, TestUtil.getSSPIUser()); + + Connection con = TestUtil.openDB(props); + + Statement stmt = con.createStatement(); + stmt.executeQuery("SELECT 1"); + + TestUtil.closeDB(con); + } + + /* + * Tests that SSPI login fails with an unknown/unauthorized + * user name. 
+ */ + @Test + void unauthorized() throws Exception { + Properties props = new Properties(); + PGProperty.USER.set(props, "invalid" + TestUtil.getSSPIUser()); + + try { + Connection con = TestUtil.openDB(props); + TestUtil.closeDB(con); + fail("Expected a PSQLException"); + } catch (PSQLException e) { + assertThat(e.getSQLState(), is(PSQLState.INVALID_PASSWORD.getState())); + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITestSuite.java new file mode 100644 index 0000000..6e1ae33 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITestSuite.java @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.sspi; + +/* + * Executes all known tests for SSPI. + */ +//@RunWith(Suite.class) +//@Suite.SuiteClasses({ SSPITest.class }) +public class SSPITestSuite { + // Empty. +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/BrokenInputStream.java b/pgjdbc/src/test/java/org/postgresql/test/util/BrokenInputStream.java new file mode 100644 index 0000000..9224c05 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/BrokenInputStream.java @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
/**
 * An {@link InputStream} decorator that simulates a failing stream: the first
 * {@code breakOn} invocations of {@link #read()} throw an {@link IOException}
 * (each failed attempt is still counted), after which reads are delegated to
 * the wrapped stream.
 */
public class BrokenInputStream extends InputStream {

  private final InputStream is;
  private final long breakOn;
  private long numRead;

  public BrokenInputStream(InputStream is, long breakOn) {
    this.is = is;
    this.breakOn = breakOn;
    this.numRead = 0;
  }

  @Override
  public int read() throws IOException {
    // Equivalent to the original "breakOn > numRead++": fail while fewer than
    // breakOn read attempts have been made so far.
    long attempt = numRead++;
    if (attempt < breakOn) {
      throw new IOException("I was told to break on " + breakOn);
    }
    return is.read();
  }
}
/**
 * Generates a large tab-separated data file ({@code target/buffer.txt}) with
 * {@link #ROW_COUNT} long rows, used to reproduce issue #366.
 *
 * <p>Created by amozhenin on 30.09.2015.</p>
 */
public class BufferGenerator {
  /** Number of data rows written to the generated file. */
  public static final int ROW_COUNT = 100000;

  public static void main(String[] args) throws Exception {
    // Same seeding strategy as before (current time), without the Date detour.
    Random random = new Random(System.currentTimeMillis());
    File outFile = new File("target", "buffer.txt");
    // mkdirs() also succeeds when intermediate directories are missing.
    outFile.getParentFile().mkdirs();
    // try-with-resources replaces the manual null-checked finally block.
    try (OutputStream out = new BufferedOutputStream(new FileOutputStream(outFile))) {
      for (long i = 0; i < ROW_COUNT; i++) {
        StringBuilder line = new StringBuilder();
        line.append("VERY_LONG_LINE_TO_ASSIST_IN_DETECTION_OF_ISSUE_366_#_").append(i).append('\t');
        int letter = random.nextInt(26); // don't really care about uniformity for a test
        char character = (char) ('A' + letter);
        line.append("VERY_LONG_STRING_TO_REPRODUCE_ISSUE_366_").append(character).append(character);
        line.append(character).append('\t').append(random.nextDouble()).append('\n');
        // StandardCharsets avoids the UnsupportedEncodingException declaration.
        out.write(line.toString().getBytes(StandardCharsets.UTF_8));
      }
    }
  }

}
+ */ + +package org.postgresql.test.util; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.util.ByteBufferByteStreamWriter; +import org.postgresql.util.ByteStreamWriter; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; + +class ByteBufferByteStreamWriterTest { + + private ByteArrayOutputStream targetStream; + private byte[] data; + private ByteBufferByteStreamWriter writer; + + @BeforeEach + void setUp() throws Exception { + targetStream = new ByteArrayOutputStream(); + data = new byte[]{1, 2, 3, 4}; + ByteBuffer buffer = ByteBuffer.wrap(data); + writer = new ByteBufferByteStreamWriter(buffer); + } + + @Test + void reportsLengthCorrectly() { + assertEquals(4, writer.getLength(), "Incorrect length reported"); + } + + @Test + void copiesDataCorrectly() throws IOException { + writer.writeTo(target(targetStream)); + byte[] written = targetStream.toByteArray(); + assertArrayEquals(data, written, "Incorrect data written to target stream"); + } + + @Test + void propagatesException() throws IOException { + final IOException e = new IOException("oh no"); + OutputStream errorStream = new OutputStream() { + @Override + public void write(int b) throws IOException { + throw e; + } + }; + try { + writer.writeTo(target(errorStream)); + fail("No exception thrown"); + } catch (IOException caught) { + assertEquals(caught, e, "Exception was thrown that wasn't the expected one"); + } + } + + private static ByteStreamWriter.ByteStreamTarget target(final OutputStream stream) { + return new ByteStreamWriter.ByteStreamTarget() { + @Override + public OutputStream getOutputStream() { + return stream; + } + }; + } +} diff --git 
a/pgjdbc/src/test/java/org/postgresql/test/util/ByteStreamWriterTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ByteStreamWriterTest.java new file mode 100644 index 0000000..34069e3 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/ByteStreamWriterTest.java @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.util; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.BaseTest4; +import org.postgresql.util.ByteBufferByteStreamWriter; +import org.postgresql.util.ByteStreamWriter; + +import org.junit.Test; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Random; + +public class ByteStreamWriterTest extends BaseTest4 { + + @Override + public void setUp() throws Exception { + super.setUp(); + assumeByteaSupported(); + TestUtil.createTempTable(con, "images", "img bytea"); + } + + private ByteBuffer testData(int size) { + ByteBuffer data = ByteBuffer.allocate(size); + Random random = new Random(31459); + while (data.remaining() > 8) { + data.putLong(random.nextLong()); + } + while (data.remaining() > 0) { + data.put((byte) (random.nextInt() % 256)); + } + data.rewind(); + return data; + } + + private void insertStream(ByteBuffer testData) throws Exception { + insertStream(testData, null); + } + + private void insertStream(ByteBuffer testData, Integer lengthOverride) throws Exception { + insertStream(new TestByteBufferByteStreamWriter(testData, lengthOverride)); + } + + private void insertStream(ByteStreamWriter writer) throws Exception { + PreparedStatement updatePS = 
con.prepareStatement(TestUtil.insertSQL("images", "img", "?")); + try { + updatePS.setObject(1, writer); + updatePS.executeUpdate(); + } finally { + updatePS.close(); + } + } + + private void validateContent(ByteBuffer data) throws Exception { + validateContent(data.array()); + } + + private void validateContent(byte [] data) throws Exception { + PreparedStatement selectPS = con.prepareStatement(TestUtil.selectSQL("images", "img")); + try { + ResultSet rs = selectPS.executeQuery(); + try { + rs.next(); + byte[] actualData = rs.getBytes(1); + assertArrayEquals("Sent and received data are not the same", data, actualData); + } finally { + rs.close(); + } + } finally { + selectPS.close(); + } + } + + @Test + public void testEmpty() throws Exception { + ByteBuffer testData = testData(0); + insertStream(testData); + validateContent(testData); + } + + @Test + public void testLength2Kb() throws Exception { + ByteBuffer testData = testData(2 * 1024); + insertStream(testData); + validateContent(testData); + } + + @Test + public void testLength37b() throws Exception { + ByteBuffer testData = testData(37); + insertStream(testData); + validateContent(testData); + } + + @Test + public void testLength2KbReadOnly() throws Exception { + ByteBuffer testData = testData(2 * 1024); + // Read-only buffer does not provide access to the array, so we test it separately + insertStream(testData.asReadOnlyBuffer()); + validateContent(testData); + } + + @Test + public void testTwoBuffers() throws Exception { + ByteBuffer testData = testData(20); + ByteBuffer part1 = testData.duplicate(); + part1.position(0); + part1.limit(9); + ByteBuffer part2 = testData.duplicate(); + part2.position(part1.limit()); + part2.limit(testData.limit()); + // Read-only buffer does not provide access to the array, so we test it separately + insertStream(ByteStreamWriter.of(part1, part2)); + validateContent(testData); + } + + @Test + public void testThreeBuffersWithReadonly() throws Exception { + ByteBuffer testData 
= testData(20); + ByteBuffer part1 = testData.duplicate(); + part1.position(0); + part1.limit(9); + ByteBuffer part2 = testData.duplicate(); + part2.position(part1.limit()); + part2.limit(15); + ByteBuffer part3 = testData.duplicate(); + part3.position(part2.limit()); + part3.limit(testData.limit()); + // Read-only buffer does not provide access to the array, so we test it separately + insertStream(ByteStreamWriter.of(part1, part2.asReadOnlyBuffer(), part3)); + validateContent(testData); + } + + @Test + public void testLength10Kb() throws Exception { + ByteBuffer testData = testData(10 * 1024); + insertStream(testData); + validateContent(testData); + } + + @Test + public void testLength100Kb() throws Exception { + ByteBuffer testData = testData(100 * 1024); + insertStream(testData); + validateContent(testData); + } + + @Test + public void testLength200Kb() throws Exception { + ByteBuffer testData = testData(200 * 1024); + insertStream(testData); + validateContent(testData); + } + + @Test + public void testLengthGreaterThanContent() throws Exception { + ByteBuffer testData = testData(8); + insertStream(testData, 10); + byte[] expectedData = new byte[10]; + testData.rewind(); + testData.get(expectedData, 0, 8); + // other two bytes are zeroed out, which the jvm does for us automatically + validateContent(expectedData); + } + + @Test + public void testLengthLessThanContent() throws Exception { + ByteBuffer testData = testData(8); + try { + insertStream(testData, 4); + fail("did not throw exception when too much content"); + } catch (SQLException e) { + Throwable cause = e.getCause(); + assertTrue("cause wan't an IOException", cause instanceof IOException); + assertEquals("Incorrect exception message", + cause.getMessage(), "Attempt to write more than the specified 4 bytes"); + } + } + + @Test + public void testIOExceptionPassedThroughAsCause() throws Exception { + IOException e = new IOException("oh no"); + try { + insertStream(new 
ExceptionThrowingByteStreamWriter(e)); + fail("did not throw exception when IOException thrown"); + } catch (SQLException sqle) { + Throwable cause = sqle.getCause(); + assertEquals("Incorrect exception cause", e, cause); + } + } + + @Test + public void testRuntimeExceptionPassedThroughAsIOException() throws Exception { + RuntimeException e = new RuntimeException("oh no"); + try { + insertStream(new ExceptionThrowingByteStreamWriter(e)); + fail("did not throw exception when RuntimeException thrown"); + } catch (SQLException sqle) { + Throwable cause = sqle.getCause(); + assertTrue("cause wan't an IOException", cause instanceof IOException); + assertEquals("Incorrect exception message", + cause.getMessage(), "Error writing bytes to stream"); + Throwable nestedCause = cause.getCause(); + assertEquals("Incorrect exception cause", e, nestedCause); + } + } + + /** + * Allows testing where reported length doesn't match what the stream writer attempts + */ + private static class TestByteBufferByteStreamWriter extends ByteBufferByteStreamWriter { + + private final Integer lengthOverride; + + private TestByteBufferByteStreamWriter(ByteBuffer buf, Integer lengthOverride) { + super(buf); + this.lengthOverride = lengthOverride; + } + + @Override + public int getLength() { + return lengthOverride != null ? 
lengthOverride : super.getLength(); + } + } + + private static class ExceptionThrowingByteStreamWriter implements ByteStreamWriter { + + private final Throwable cause; + + private ExceptionThrowingByteStreamWriter(Throwable cause) { + assertTrue(cause instanceof RuntimeException || cause instanceof IOException); + this.cause = cause; + } + + @Override + public int getLength() { + return 1; + } + + @Override + public void writeTo(ByteStreamTarget target) throws IOException { + if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else if (cause instanceof IOException) { + throw (IOException) cause; + } + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ExpressionPropertiesTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ExpressionPropertiesTest.java new file mode 100644 index 0000000..b30d812 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/ExpressionPropertiesTest.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.postgresql.util.ExpressionProperties; + +import org.junit.jupiter.api.Test; + +import java.util.Properties; + +class ExpressionPropertiesTest { + @Test + void simpleReplace() { + ExpressionProperties p = new ExpressionProperties(); + p.put("server", "app1"); + p.put("file", "pgjdbc_${server}.txt"); + assertEquals("pgjdbc_app1.txt", p.getProperty("file"), "${server} should be replaced"); + } + + @Test + void replacementMissing() { + ExpressionProperties p = new ExpressionProperties(); + p.put("file", "pgjdbc_${server}.txt"); + assertEquals("pgjdbc_${server}.txt", p.getProperty("file"), "${server} should be kept as is as there is no replacement"); + } + + @Test + void multipleReplacements() { + ExpressionProperties p = new ExpressionProperties(); + p.put("server", "app1"); + p.put("file", "${server}${server}${server}${server}${server}"); + assertEquals("app1app1app1app1app1", p.getProperty("file"), "All the ${server} entries should be replaced"); + } + + @Test + void multipleParentProperties() { + Properties p1 = new Properties(); + p1.setProperty("server", "app1_${app.type}"); + Properties p2 = new Properties(); + p2.setProperty("app.type", "production"); + + ExpressionProperties p = new ExpressionProperties(p1, p2); + p.put("file", "pgjdbc_${server}.txt"); + + assertEquals("pgjdbc_app1_production.txt", p.getProperty("file"), "All the ${...} entries should be replaced"); + } + + @Test + void rawValue() { + ExpressionProperties p = new ExpressionProperties(); + p.put("server", "app1"); + p.put("file", "${server}${server}${server}${server}${server}"); + assertEquals("${server}${server}${server}${server}${server}", p.getRawPropertyValue("file"), "No replacements in raw value expected"); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/HostSpecTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/HostSpecTest.java new file mode 
100644 index 0000000..7aad5bc --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/HostSpecTest.java @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2017, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.util.HostSpec; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +/** + * @author Joe Kutner on 10/19/17. + * Twitter: @codefinger + */ +class HostSpecTest { + + @AfterEach + void cleanup() { + System.clearProperty("socksProxyHost"); + System.clearProperty("socksProxyPort"); + System.clearProperty("socksNonProxyHosts"); + } + + @Test + void shouldResolve() throws Exception { + HostSpec hostSpec = new HostSpec("localhost", 5432); + assertTrue(hostSpec.shouldResolve()); + } + + @Test + void shouldResolveWithEmptySocksProxyHost() throws Exception { + System.setProperty("socksProxyHost", ""); + HostSpec hostSpec = new HostSpec("localhost", 5432); + assertTrue(hostSpec.shouldResolve()); + } + + @Test + void shouldResolveWithWhiteSpaceSocksProxyHost() throws Exception { + System.setProperty("socksProxyHost", " "); + HostSpec hostSpec = new HostSpec("localhost", 5432); + assertTrue(hostSpec.shouldResolve()); + } + + @Test + void shouldResolveWithSocksProxyHost() throws Exception { + System.setProperty("socksProxyHost", "fake-socks-proxy"); + HostSpec hostSpec = new HostSpec("example.com", 5432); + assertFalse(hostSpec.shouldResolve()); + } + + @Test + void shouldResolveWithSocksProxyHostWithLocalhost() throws Exception { + System.setProperty("socksProxyHost", "fake-socks-proxy"); + HostSpec hostSpec = new HostSpec("localhost", 5432); + assertTrue(hostSpec.shouldResolve()); + } 
+ + @Test + void shouldResolveWithSocksNonProxyHost() throws Exception { + System.setProperty("socksProxyHost", "fake-socks-proxy"); + System.setProperty("socksNonProxyHosts", "example.com"); + HostSpec hostSpec = new HostSpec("example.com", 5432); + assertTrue(hostSpec.shouldResolve()); + } + + @Test + void shouldResolveWithSocksNonProxyHosts() throws Exception { + System.setProperty("socksProxyHost", "fake-socks-proxy"); + System.setProperty("socksNonProxyHosts", "example.com|localhost"); + HostSpec hostSpec = new HostSpec("example.com", 5432); + assertTrue(hostSpec.shouldResolve()); + } + + @Test + void shouldResolveWithSocksNonProxyHostsNotMatching() throws Exception { + System.setProperty("socksProxyHost", "fake-socks-proxy"); + System.setProperty("socksNonProxyHosts", "example.com|localhost"); + HostSpec hostSpec = new HostSpec("example.org", 5432); + assertFalse(hostSpec.shouldResolve()); + } + + @Test + void shouldReturnEmptyLocalAddressBind() throws Exception { + HostSpec hostSpec = new HostSpec("example.org", 5432); + assertNull(hostSpec.getLocalSocketAddress()); + } + + @Test + void shouldReturnLocalAddressBind() throws Exception { + HostSpec hostSpec = new HostSpec("example.org", 5432, "foo"); + assertEquals("foo", hostSpec.getLocalSocketAddress()); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/LruCacheTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/LruCacheTest.java new file mode 100644 index 0000000..59594eb --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/LruCacheTest.java @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2015, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.util.CanEstimateSize; +import org.postgresql.util.LruCache; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.SQLException; +import java.util.ArrayDeque; +import java.util.Arrays; +import java.util.Deque; + +/** + * Tests {@link org.postgresql.util.LruCache}. + */ +class LruCacheTest { + + private static class Entry implements CanEstimateSize { + private final int id; + + Entry(int id) { + this.id = id; + } + + @Override + public long getSize() { + return id; + } + + @Override + public String toString() { + return "Entry{" + "id=" + id + '}'; + } + } + + private final Integer[] expectCreate = new Integer[1]; + private final Deque expectEvict = new ArrayDeque<>(); + private final Entry dummy = new Entry(-999); + private LruCache cache; + + @BeforeEach + void setUp() throws Exception { + cache = new LruCache<>(4, 1000, false, new LruCache.CreateAction() { + @Override + public Entry create(Integer key) throws SQLException { + assertEquals(expectCreate[0], key, "Unexpected create"); + return new Entry(key); + } + }, new LruCache.EvictAction() { + @Override + public void evict(Entry entry) throws SQLException { + if (expectEvict.isEmpty()) { + fail("Unexpected entry was evicted: " + entry); + } + Entry expected = expectEvict.removeFirst(); + assertEquals(expected, entry, "Unexpected evict"); + } + }); + } + + @Test + void evictsByNumberOfEntries() throws SQLException { + Entry a; + Entry b; + Entry c; + Entry d; + Entry e; + + a = use(1); + b = use(2); + c = use(3); + d = use(4); + e = use(5, a); + } + + @Test + void evictsBySize() throws SQLException { + Entry a; + Entry b; + Entry c; + + a = use(330); + b = use(331); + c = use(332); + use(400, a, b); + } + + @Test + void evictsLeastRecentlyUsed() throws SQLException { + Entry a; + Entry 
b; + Entry c; + Entry d; + + a = use(1); + b = use(2); + c = use(3); + a = use(1); // reuse a + use(5); + d = use(4, b); // expect b to be evicted + } + + @Test + void cyclicReplacement() throws SQLException { + Entry a; + Entry b; + Entry c; + Entry d; + Entry e; + + a = use(1); + b = use(2); + c = use(3); + d = use(4); + e = use(5, a); + + for (int i = 0; i < 1000; i++) { + a = use(1, b); + b = use(2, c); + c = use(3, d); + d = use(4, e); + e = use(5, a); + } + } + + @Test + void duplicateKey() throws SQLException { + Entry a; + + a = use(1); + expectEvict.clear(); + expectEvict.add(a); + // This overwrites the cache, evicting previous entry with exactly the same key + cache.put(1, new Entry(1)); + assertEvict(); + } + + @Test + void caching() throws SQLException { + Entry a; + Entry b; + Entry c; + Entry d; + Entry e; + + a = use(1); + b = use(2); + c = use(3); + d = use(4); + + for (int i = 0; i < 10000; i++) { + c = use(-3); + b = use(-2); + a = use(-1); + e = use(5, d); + c = use(-3); + b = use(-2); + a = use(-1); + d = use(4, e); + } + } + + private Entry use(int expectCreate, Entry... expectEvict) throws SQLException { + this.expectCreate[0] = expectCreate <= 0 ? -1 : expectCreate; + this.expectEvict.clear(); + this.expectEvict.addAll(Arrays.asList(expectEvict)); + Entry a = cache.borrow(Math.abs(expectCreate)); + cache.put(a.id, a); // a + assertEvict(); + return a; + } + + private void assertEvict() { + if (expectEvict.isEmpty()) { + return; + } + fail("Some of the expected evictions not happened: " + expectEvict.toString()); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContext.java b/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContext.java new file mode 100644 index 0000000..cdb947a --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContext.java @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
/**
 * The Context for a trivial JNDI implementation. This is not meant to be very useful, beyond
 * testing JNDI features of the connection pools. It is not a complete JNDI implementations.
 *
 * <p>Fixes applied: generic types restored (raw {@code Class}/{@code Map}/
 * {@code MarshalledObject} before), the deprecated {@code Class.newInstance()}
 * replaced with {@code getDeclaredConstructor().newInstance()}, multi-catch
 * used for deserialization failures, and causes preserved on thrown
 * {@link NamingException}s.</p>
 *
 * @author Aaron Mulder (ammulder@chariotsolutions.com)
 */
public class MiniJndiContext implements Context {
  /** name -> Reference (for Referenceable) or MarshalledObject (for Serializable). */
  private final Map<String, Object> map = new HashMap<>();

  public MiniJndiContext() {
  }

  @Override
  public Object lookup(Name name) throws NamingException {
    return lookup(name.get(0));
  }

  @Override
  public Object lookup(String name) throws NamingException {
    Object o = map.get(name);
    if (o == null) {
      // Lenient: unknown names yield null instead of NameNotFoundException.
      return null;
    }
    if (o instanceof Reference) {
      Reference ref = (Reference) o;
      try {
        Class<?> factoryClass = Class.forName(ref.getFactoryClassName());
        ObjectFactory fac = (ObjectFactory) factoryClass.getDeclaredConstructor().newInstance();
        return fac.getObjectInstance(ref, null, this, null);
      } catch (Exception e) {
        NamingException ne = new NamingException("Unable to dereference to object: " + e);
        ne.initCause(e);
        throw ne;
      }
    } else if (o instanceof MarshalledObject) {
      try {
        return ((MarshalledObject<?>) o).get();
      } catch (IOException | ClassNotFoundException e) {
        NamingException ne = new NamingException("Unable to deserialize object: " + e);
        ne.initCause(e);
        throw ne;
      }
    } else {
      throw new NamingException("JNDI Object is neither Referenceable nor Serializable");
    }
  }

  @Override
  public void bind(Name name, Object obj) throws NamingException {
    // Note: delegates to rebind, so bind silently overwrites existing bindings
    // (a full JNDI implementation would throw NameAlreadyBoundException).
    rebind(name.get(0), obj);
  }

  @Override
  public void bind(String name, Object obj) throws NamingException {
    rebind(name, obj);
  }

  @Override
  public void rebind(Name name, Object obj) throws NamingException {
    rebind(name.get(0), obj);
  }

  @Override
  public void rebind(String name, Object obj) throws NamingException {
    if (obj instanceof Referenceable) {
      Reference ref = ((Referenceable) obj).getReference();
      map.put(name, ref);
    } else if (obj instanceof Serializable) {
      try {
        // Serialize eagerly so later lookups return an independent copy.
        MarshalledObject<Object> mo = new MarshalledObject<>(obj);
        map.put(name, mo);
      } catch (IOException e) {
        NamingException ne = new NamingException("Unable to serialize object to JNDI: " + e);
        ne.initCause(e);
        throw ne;
      }
    } else {
      throw new NamingException(
          "Object to store in JNDI is neither Referenceable nor Serializable");
    }
  }

  @Override
  public void unbind(Name name) throws NamingException {
    unbind(name.get(0));
  }

  @Override
  public void unbind(String name) throws NamingException {
    map.remove(name);
  }

  @Override
  public void rename(Name oldName, Name newName) throws NamingException {
    rename(oldName.get(0), newName.get(0));
  }

  @Override
  public void rename(String oldName, String newName) throws NamingException {
    map.put(newName, map.remove(oldName));
  }

  // The remaining Context operations are unsupported no-ops returning null,
  // which is all the connection-pool tests require.

  @Override
  public NamingEnumeration<NameClassPair> list(Name name) throws NamingException {
    return null;
  }

  @Override
  public NamingEnumeration<NameClassPair> list(String name) throws NamingException {
    return null;
  }

  @Override
  public NamingEnumeration<Binding> listBindings(Name name) throws NamingException {
    return null;
  }

  @Override
  public NamingEnumeration<Binding> listBindings(String name) throws NamingException {
    return null;
  }

  @Override
  public void destroySubcontext(Name name) throws NamingException {
  }

  @Override
  public void destroySubcontext(String name) throws NamingException {
  }

  @Override
  public Context createSubcontext(Name name) throws NamingException {
    return null;
  }

  @Override
  public Context createSubcontext(String name) throws NamingException {
    return null;
  }

  @Override
  public Object lookupLink(Name name) throws NamingException {
    return null;
  }

  @Override
  public Object lookupLink(String name) throws NamingException {
    return null;
  }

  @Override
  public NameParser getNameParser(Name name) throws NamingException {
    return null;
  }

  @Override
  public NameParser getNameParser(String name) throws NamingException {
    return null;
  }

  @Override
  public Name composeName(Name name, Name prefix) throws NamingException {
    return null;
  }

  @Override
  public String composeName(String name, String prefix) throws NamingException {
    return null;
  }

  @Override
  public Object addToEnvironment(String propName, Object propVal) throws NamingException {
    return null;
  }

  @Override
  public Object removeFromEnvironment(String propName) throws NamingException {
    return null;
  }

  @Override
  public Hashtable<?, ?> getEnvironment() throws NamingException {
    return null;
  }

  @Override
  public void close() throws NamingException {
  }

  @Override
  public String getNameInNamespace() throws NamingException {
    return null;
  }
}
This is not meant to be very useful, beyond testing + * JNDI features of the connection pools. + * + * @author Aaron Mulder (ammulder@chariotsolutions.com) + */ +public class MiniJndiContextFactory implements InitialContextFactory { + @Override + public Context getInitialContext(Hashtable environment) throws NamingException { + return new MiniJndiContext(); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ObjectFactoryTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ObjectFactoryTest.java new file mode 100644 index 0000000..a04b8ea --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/ObjectFactoryTest.java @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2022, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.util; + +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import org.postgresql.PGProperty; +import org.postgresql.jdbc.SslMode; +import org.postgresql.test.TestUtil; +import org.postgresql.util.ObjectFactory; +import org.postgresql.util.PSQLState; + +import org.junit.jupiter.api.Test; +import org.opentest4j.MultipleFailuresError; + +import java.sql.SQLException; +import java.util.Properties; + +import javax.net.SocketFactory; + +class ObjectFactoryTest { + Properties props = new Properties(); + + static class BadObject { + static boolean wasInstantiated; + + BadObject() { + wasInstantiated = true; + throw new RuntimeException("I should not be instantiated"); + } + } + + private void testInvalidInstantiation(PGProperty prop, PSQLState expectedSqlState) { + prop.set(props, BadObject.class.getName()); + + BadObject.wasInstantiated = false; + SQLException ex = assertThrows(SQLException.class, () -> { + TestUtil.openDB(props); + }); + + try { + 
assertAll( + () -> assertFalse(BadObject.wasInstantiated, "ObjectFactory should not have " + + "instantiated bad object for " + prop), + () -> assertEquals(expectedSqlState.getState(), ex.getSQLState(), () -> "#getSQLState()"), + () -> { + assertThrows( + ClassCastException.class, + () -> { + throw ex.getCause(); + }, + () -> "Wrong class specified for " + prop.name() + + " => ClassCastException is expected in SQLException#getCause()" + ); + } + ); + } catch (MultipleFailuresError e) { + // Add the original exception so it is easier to understand the reason for the test to fail + e.addSuppressed(ex); + throw e; + } + } + + @Test + void invalidSocketFactory() { + testInvalidInstantiation(PGProperty.SOCKET_FACTORY, PSQLState.CONNECTION_FAILURE); + } + + @Test + void invalidSSLFactory() { + TestUtil.assumeSslTestsEnabled(); + // We need at least "require" to trigger SslSockerFactory instantiation + PGProperty.SSL_MODE.set(props, SslMode.REQUIRE.value); + testInvalidInstantiation(PGProperty.SSL_FACTORY, PSQLState.CONNECTION_FAILURE); + } + + @Test + void invalidAuthenticationPlugin() { + testInvalidInstantiation(PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME, + PSQLState.INVALID_PARAMETER_VALUE); + } + + @Test + void invalidSslHostnameVerifier() { + TestUtil.assumeSslTestsEnabled(); + // Hostname verification is done at verify-full level only + PGProperty.SSL_MODE.set(props, SslMode.VERIFY_FULL.value); + PGProperty.SSL_ROOT_CERT.set(props, TestUtil.getSslTestCertPath("goodroot.crt")); + testInvalidInstantiation(PGProperty.SSL_HOSTNAME_VERIFIER, PSQLState.CONNECTION_FAILURE); + } + + @Test + void instantiateInvalidSocketFactory() { + Properties props = new Properties(); + assertThrows(ClassCastException.class, () -> { + ObjectFactory.instantiate(SocketFactory.class, BadObject.class.getName(), props, + false, null); + }); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyMaxResultBufferParserTest.java 
b/pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyMaxResultBufferParserTest.java new file mode 100644 index 0000000..48be46d --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyMaxResultBufferParserTest.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2019, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.util; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import org.postgresql.util.PGPropertyMaxResultBufferParser; +import org.postgresql.util.PSQLException; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.lang.management.ManagementFactory; +import java.util.Arrays; +import java.util.Collection; + +public class PGPropertyMaxResultBufferParserTest { + public static Collection data() { + Object[][] data = new Object[][]{ + {"100", 100L}, + {"10K", 10L * 1000}, + {"25M", 25L * 1000 * 1000}, + //next two should be too big + {"35G", (long) (0.90 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())}, + {"1T", (long) (0.90 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())}, + //percent test + {"5p", (long) (0.05 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())}, + {"10pct", (long) (0.10 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())}, + {"15percent", + (long) (0.15 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())}, + //for testing empty property + {"", -1}, + {null, -1} + }; + return Arrays.asList(data); + } + + @MethodSource("data") + @ParameterizedTest(name = "{index}: Test with valueToParse={0}, expectedResult={1}") + void getMaxResultBufferValue(String valueToParse, long expectedResult) { + 
assertDoesNotThrow(() -> { + long result = PGPropertyMaxResultBufferParser.parseProperty(valueToParse); + assertEquals(expectedResult, result); + }); + } + + @Test + void getMaxResultBufferValueException() throws PSQLException { + assertThrows(PSQLException.class, () -> { + long ignore = PGPropertyMaxResultBufferParser.parseProperty("abc"); + }); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/PasswordUtilTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/PasswordUtilTest.java new file mode 100644 index 0000000..82080cf --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/PasswordUtilTest.java @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2023, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.util; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.PGConnection; +import org.postgresql.core.Utils; +import org.postgresql.test.TestUtil; +import org.postgresql.util.PasswordUtil; + +import org.junit.jupiter.api.Test; + +import java.security.SecureRandom; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.Properties; + +class PasswordUtilTest { + private static final SecureRandom rng = new SecureRandom(); + + private static String randomSuffix() { + return Long.toHexString(rng.nextLong()); + } + + private void assertValidUsernamePassword(String user, String password) { + Properties props = new Properties(); + props.setProperty("user", user); + props.setProperty("password", password); + try (Connection conn = TestUtil.openDB(props)) { + String actualUser = TestUtil.queryForString(conn, "SELECT USER"); + 
assertEquals(user, actualUser, "User should match"); + } catch (SQLException e) { + throw new RuntimeException("Failed to authenticate using supplied user and password", e); + } + } + + private void assertInvalidUsernamePassword(String user, String password) { + Properties props = new Properties(); + props.setProperty("user", user); + props.setProperty("password", password); + assertThrows(SQLException.class, () -> { + try (Connection conn = TestUtil.openDB(props)) { + conn.getSchema(); // Do something with conn to appease checkstyle + } + }, "User should not be able to authenticate"); + } + + private void assertWiped(char[] passwordChars) { + char[] expected = Arrays.copyOf(passwordChars, passwordChars.length); + Arrays.fill(passwordChars, (char) 0); + assertArrayEquals(expected, passwordChars, "password array should be all zeros after use"); + } + + private void testUserPassword(String encryptionType, String username, String password, + String encodedPassword) throws SQLException { + String escapedUsername = Utils.escapeIdentifier(null, username).toString(); + + try (Connection superConn = TestUtil.openPrivilegedDB()) { + TestUtil.execute(superConn, "CREATE USER " // + + escapedUsername // + + " WITH PASSWORD '" + encodedPassword + "'"); + + String shadowPass = TestUtil.queryForString(superConn, // + "SELECT passwd FROM pg_shadow WHERE usename = ?", username); + assertEquals(shadowPass, encodedPassword, "pg_shadow value of password must match encoded"); + + // We should be able to log in using our new user: + assertValidUsernamePassword(username, password); + // We also check that we cannot log in with the wrong password to ensure that + // the server is not simply trusting everything + assertInvalidUsernamePassword(username, "Bad Password:" + password); + + String newPassword = "mySecretNewPassword" + randomSuffix(); + PGConnection pgConn = superConn.unwrap(PGConnection.class); + char[] newPasswordChars = newPassword.toCharArray(); + 
pgConn.alterUserPassword(username, newPasswordChars, encryptionType); + assertNotEquals(newPassword, String.valueOf(newPasswordChars), "newPassword char[] array should be wiped and not match original after encoding"); + assertWiped(newPasswordChars); + + // We should be able to log in using our new password + assertValidUsernamePassword(username, newPassword); + // We also check that we cannot log in with the wrong password to ensure that + // the server is not simply trusting everything + assertInvalidUsernamePassword(username, "Bad Password:" + newPassword); + } finally { + try (Connection superConn = TestUtil.openPrivilegedDB()) { + TestUtil.execute(superConn, "DROP USER " + escapedUsername); + } catch (Exception ignore) { } + } + } + + private void testUserPassword(String encryptionType, String username, String password) throws SQLException { + char[] passwordChars = password.toCharArray(); + String encodedPassword = PasswordUtil.encodePassword( + username, passwordChars, + encryptionType == null ? 
"md5" : encryptionType); + assertNotEquals(password, String.valueOf(passwordChars), "password char[] array should be wiped and not match original password after encoding"); + assertWiped(passwordChars); + testUserPassword(encryptionType, username, password, encodedPassword); + } + + private void testUserPassword(String encryptionType) throws SQLException { + String username = "test_password_" + randomSuffix(); + String password = "t0pSecret" + randomSuffix(); + + testUserPassword(encryptionType, username, password); + testUserPassword(encryptionType, username, "password with spaces"); + testUserPassword(encryptionType, username, "password with single ' quote'"); + testUserPassword(encryptionType, username, "password with double \" quote'"); + testUserPassword(encryptionType, username + " with spaces", password); + testUserPassword(encryptionType, username + " with single ' quote", password); + testUserPassword(encryptionType, username + " with single \" quote", password); + } + + @Test + void encodePasswordWithServersPasswordEncryption() throws SQLException { + String encryptionType; + try (Connection conn = TestUtil.openPrivilegedDB()) { + encryptionType = TestUtil.queryForString(conn, "SHOW password_encryption"); + } + testUserPassword(encryptionType); + } + + @Test + void alterUserPasswordSupportsNullEncoding() throws SQLException { + testUserPassword(null); + } + + @Test + void mD5() throws SQLException { + testUserPassword("md5"); + } + + @Test + void encryptionTypeValueOfOn() throws SQLException { + testUserPassword("on"); + } + + @Test + void encryptionTypeValueOfOff() throws SQLException { + testUserPassword("off"); + } + + @Test + @DisabledIfServerVersionBelow("10.0") + void scramSha256() throws SQLException { + testUserPassword("scram-sha-256"); + } + + @Test + @DisabledIfServerVersionBelow("10.0") + void customScramParams() throws SQLException { + String username = "test_password_" + randomSuffix(); + String password = "t0pSecret" + randomSuffix(); + 
byte[] salt = new byte[32]; + rng.nextBytes(salt); + int iterations = 12345; + String encodedPassword = PasswordUtil.encodeScramSha256(password.toCharArray(), iterations, salt); + assertTrue(encodedPassword.startsWith("SCRAM-SHA-256$" + iterations + ":"), "encoded password should have custom iteration count"); + testUserPassword("scram-sha-256", username, password, encodedPassword); + } + + @Test + void unknownEncryptionType() throws SQLException { + String username = "test_password_" + randomSuffix(); + String password = "t0pSecret" + randomSuffix(); + char[] passwordChars = password.toCharArray(); + assertThrows(SQLException.class, () -> { + PasswordUtil.encodePassword(username, passwordChars, "not-a-real-encryption-type"); + }); + assertWiped(passwordChars); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/RegexMatcher.java b/pgjdbc/src/test/java/org/postgresql/test/util/RegexMatcher.java new file mode 100644 index 0000000..382eb4c --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/RegexMatcher.java @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2018, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.util; + +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; + +import java.util.regex.Pattern; + +/** + * Provides a matcher for String objects which does a regex comparison. + */ +public final class RegexMatcher extends TypeSafeMatcher { + + private final Pattern pattern; + + /** + * @param pattern + * The pattern to match items on. 
+ */ + private RegexMatcher(Pattern pattern) { + this.pattern = pattern; + } + + public static Matcher matchesPattern(String pattern) { + return new RegexMatcher(Pattern.compile(pattern)); + } + + /** + * {@inheritDoc} + */ + @Override + public void describeTo(Description description) { + description.appendText("matches regex=" + pattern.toString()); + } + + /** + * {@inheritDoc} + */ + @Override + protected boolean matchesSafely(String item) { + return pattern.matcher(item).matches(); + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionParseTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionParseTest.java new file mode 100644 index 0000000..5f11586 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionParseTest.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import org.postgresql.core.ServerVersion; +import org.postgresql.core.Version; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; + +public class ServerVersionParseTest { + public static Iterable data() { + return Arrays.asList(new Object[][]{ + /* 4 part version tests */ + {"7.4.0.0", 70400, null}, + {"9.0.0.0", 90000, null}, + {"9.0.1.0", 90001, null}, + {"9.2.1.0", 90201, null}, + {"7.4.0", 70400, null}, + {"9.0.0", 90000, null}, + {"9.0.1", 90001, null}, + {"9.2.1", 90201, null}, + /* Major only */ + {"7.4", 70400, null}, + {"9.0", 90000, null}, + {"9.2", 90200, null}, + {"9.6", 90600, null}, + {"10", 100000, null}, + {"11", 110000, null}, + {"12", 120000, null}, + /* Multidigit */ + {"9.4.10", 90410, null}, + {"9.20.10", 92010, null}, + /* After 10 */ + {"10.1", 100001, null}, + 
{"10.10", 100010, null}, + {"11.1", 110001, null}, + {"123.20", 1230020, null}, + /* Fail cases */ + {"9.20.100", -1, "Should've rejected three-digit minor version"}, + {"9.100.10", -1, "Should've rejected three-digit second part of major version"}, + {"10.100.10", -1, "10+ version should have 2 components only"}, + {"12345.1", -1, "Too big version number"}, + /* Preparsed */ + {"90104", 90104, null}, + {"090104", 90104, null}, + {"070400", 70400, null}, + {"100004", 100004, null}, + {"10000", 10000, null}, + /* --with-extra-version or beta/devel tags */ + {"9.4devel", 90400, null}, + {"9.4beta1", 90400, null}, + {"10devel", 100000, null}, + {"10beta1", 100000, null}, + {"10.1devel", 100001, null}, + {"10.1beta1", 100001, null}, + {"9.4.1bobs", 90401, null}, + {"9.4.1bobspatched9.4", 90401, null}, + {"9.4.1-bobs-patched-postgres-v2.2", 90401, null}, + + }); + } + + @MethodSource("data") + @ParameterizedTest(name = "str = {0}, expected = {1}") + void run(String versionString, int versionNum, String rejectReason) { + try { + Version version = ServerVersion.from(versionString); + if (rejectReason == null) { + assertEquals(versionNum, version.getVersionNum(), "Parsing " + versionString); + } else { + fail("Should fail to parse " + versionString + ", " + rejectReason); + } + } catch (NumberFormatException e) { + if (rejectReason != null) { + return; + } + throw e; + } + } + +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionTest.java new file mode 100644 index 0000000..99f106d --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionTest.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2004, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.postgresql.core.ServerVersion; + +import org.junit.jupiter.api.Test; + +class ServerVersionTest { + @Test + void versionIncreases() { + ServerVersion prev = null; + for (ServerVersion serverVersion : ServerVersion.values()) { + if (prev != null) { + assertTrue(prev.getVersionNum() < serverVersion.getVersionNum(), + prev + " should be less than " + serverVersion); + } + prev = serverVersion; + } + } + + @Test + void versions() { + assertEquals(ServerVersion.v12.getVersionNum(), ServerVersion.from("12.0").getVersionNum()); + assertEquals(120004, ServerVersion.from("12.4").getVersionNum()); + assertEquals(ServerVersion.v11.getVersionNum(), ServerVersion.from("11.0").getVersionNum()); + assertEquals(110006, ServerVersion.from("11.6").getVersionNum()); + assertEquals(ServerVersion.v10.getVersionNum(), ServerVersion.from("10.0").getVersionNum()); + assertTrue(ServerVersion.v9_6.getVersionNum() < ServerVersion.from("9.6.4").getVersionNum()); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/StrangeProxyServer.java b/pgjdbc/src/test/java/org/postgresql/test/util/StrangeProxyServer.java new file mode 100644 index 0000000..dc34fbe --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/util/StrangeProxyServer.java @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.util; + +import org.postgresql.test.TestUtil; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.SocketTimeoutException; + +/** + * Proxy server that allows for pretending that traffic did not arrive at the + * destination. 
Client connections created prior to a call to + * stopForwardingOlderClients() will not have any subsequent traffic forwarded. + * Bytes are transferred one-by-one. If either side of the connection reaches + * EOF then both sides are immediately closed. + */ +public class StrangeProxyServer implements Closeable { + private final ServerSocket serverSock; + private volatile boolean keepRunning = true; + private volatile long minAcceptedAt; + + public StrangeProxyServer(String destHost, int destPort) throws IOException { + this.serverSock = new ServerSocket(0); + this.serverSock.setSoTimeout(100); + doAsync(() -> { + while (keepRunning) { + try { + Socket sourceSock = serverSock.accept(); + final long acceptedAt = System.currentTimeMillis(); + Socket destSock = new Socket(destHost, destPort); + doAsync(() -> transferOneByOne(acceptedAt, sourceSock, destSock)); + doAsync(() -> transferOneByOne(acceptedAt, destSock, sourceSock)); + } catch (SocketTimeoutException ignore) { + } catch (IOException e) { + throw new RuntimeException(e); + } + } + TestUtil.closeQuietly(serverSock); + }); + } + + public int getServerPort() { + return this.serverSock.getLocalPort(); + } + + @Override + public void close() { + this.keepRunning = false; + } + + public void stopForwardingOlderClients() { + this.minAcceptedAt = System.currentTimeMillis(); + } + + public void stopForwardingAllClients() { + this.minAcceptedAt = Long.MAX_VALUE; + } + + private void doAsync(Runnable task) { + Thread thread = new Thread(task); + thread.setDaemon(true); + thread.start(); + } + + private void transferOneByOne(long acceptedAt, Socket source, Socket dest) { + try { + InputStream in = source.getInputStream(); + OutputStream out = dest.getOutputStream(); + int b; + // As long as we're running try to read + while (keepRunning && (b = in.read()) >= 0) { + // But only write it if the client is newer than the last call to stopForwardingOlderClients() + if (acceptedAt >= minAcceptedAt) { + out.write(b); + } + } + 
} catch (IOException ignore) { + } finally { + TestUtil.closeQuietly(source); + TestUtil.closeQuietly(dest); + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/xa/XADataSourceTest.java b/pgjdbc/src/test/java/org/postgresql/test/xa/XADataSourceTest.java new file mode 100644 index 0000000..1ae284d --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/xa/XADataSourceTest.java @@ -0,0 +1,824 @@ +/* + * Copyright (c) 2009, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.test.xa; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +import org.postgresql.test.TestUtil; +import org.postgresql.test.jdbc2.optional.BaseDataSourceTest; +import org.postgresql.xa.PGXADataSource; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.Arrays; +import java.util.Random; + +import javax.sql.XAConnection; +import javax.sql.XADataSource; +import javax.transaction.xa.XAException; +import javax.transaction.xa.XAResource; +import javax.transaction.xa.Xid; + +public class XADataSourceTest { + + private XADataSource xaDs; + + private Connection dbConn; + private boolean connIsSuper; + + private XAConnection xaconn; + private XAResource xaRes; + private Connection conn; + + public XADataSourceTest() { + xaDs = new PGXADataSource(); + BaseDataSourceTest.setupDataSource((PGXADataSource) xaDs); + } + + @BeforeAll + 
static void beforeClass() throws Exception { + try (Connection con = TestUtil.openDB()) { + assumeTrue(isPreparedTransactionEnabled(con), "max_prepared_transactions should be non-zero for XA tests"); + } + } + + @BeforeEach + void setUp() throws Exception { + dbConn = TestUtil.openDB(); + + // Check if we're operating as a superuser; some tests require it. + Statement st = dbConn.createStatement(); + st.executeQuery("SHOW is_superuser;"); + ResultSet rs = st.getResultSet(); + rs.next(); // One row is guaranteed + connIsSuper = rs.getBoolean(1); // One col is guaranteed + st.close(); + + TestUtil.createTable(dbConn, "testxa1", "foo int"); + TestUtil.createTable(dbConn, "testxa2", "foo int primary key"); + TestUtil.createTable(dbConn, "testxa3", "foo int references testxa2(foo) deferrable"); + + clearAllPrepared(); + + xaconn = xaDs.getXAConnection(); + xaRes = xaconn.getXAResource(); + conn = xaconn.getConnection(); + } + + private static boolean isPreparedTransactionEnabled(Connection connection) throws SQLException { + Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery("SHOW max_prepared_transactions"); + rs.next(); + int mpt = rs.getInt(1); + rs.close(); + stmt.close(); + return mpt > 0; + } + + @AfterEach + void tearDown() throws SQLException { + try { + xaconn.close(); + } catch (Exception ignored) { + } + + clearAllPrepared(); + TestUtil.dropTable(dbConn, "testxa3"); + TestUtil.dropTable(dbConn, "testxa2"); + TestUtil.dropTable(dbConn, "testxa1"); + TestUtil.closeDB(dbConn); + + } + + private void clearAllPrepared() throws SQLException { + Statement st = dbConn.createStatement(); + try { + ResultSet rs = st.executeQuery( + "SELECT x.gid, x.owner = current_user " + + "FROM pg_prepared_xacts x " + + "WHERE x.database = current_database()"); + + Statement st2 = dbConn.createStatement(); + while (rs.next()) { + // TODO: This should really use org.junit.Assume once we move to JUnit 4 + assertTrue(rs.getBoolean(2), + "Only prepared 
xacts owned by current user may be present in db"); + st2.executeUpdate("ROLLBACK PREPARED '" + rs.getString(1) + "'"); + } + st2.close(); + } finally { + st.close(); + } + } + + static class CustomXid implements Xid { + private static Random rand = new Random(System.currentTimeMillis()); + byte[] gtrid = new byte[Xid.MAXGTRIDSIZE]; + byte[] bqual = new byte[Xid.MAXBQUALSIZE]; + + CustomXid(int i) { + rand.nextBytes(gtrid); + gtrid[0] = (byte) i; + gtrid[1] = (byte) i; + gtrid[2] = (byte) i; + gtrid[3] = (byte) i; + gtrid[4] = (byte) i; + bqual[0] = 4; + bqual[1] = 5; + bqual[2] = 6; + } + + @Override + public int getFormatId() { + return 0; + } + + @Override + public byte[] getGlobalTransactionId() { + return gtrid; + } + + @Override + public byte[] getBranchQualifier() { + return bqual; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Xid)) { + return false; + } + + Xid other = (Xid) o; + if (other.getFormatId() != this.getFormatId()) { + return false; + } + if (!Arrays.equals(other.getBranchQualifier(), this.getBranchQualifier())) { + return false; + } + return Arrays.equals(other.getGlobalTransactionId(), this.getGlobalTransactionId()); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + Arrays.hashCode(getBranchQualifier()); + result = prime * result + getFormatId(); + result = prime * result + Arrays.hashCode(getGlobalTransactionId()); + return result; + } + } + + /* + * Check that the equals method works for the connection wrapper returned by + * PGXAConnection.getConnection(). 
+ */ + @Test + void wrapperEquals() throws Exception { + assertEquals(conn, conn, "Wrappers should be equal"); + assertNotEquals(null, conn, "Wrapper should be unequal to null"); + assertNotEquals("dummy string object", conn, "Wrapper should be unequal to unrelated object"); + } + + @Test + void onePhase() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + conn.createStatement().executeQuery("SELECT * FROM testxa1"); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.commit(xid, true); + } + + @Test + void twoPhaseCommit() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + conn.createStatement().executeQuery("SELECT * FROM testxa1"); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + xaRes.commit(xid, false); + } + + @Test + void closeBeforeCommit() throws Exception { + Xid xid = new CustomXid(5); + xaRes.start(xid, XAResource.TMNOFLAGS); + assertEquals(1, conn.createStatement().executeUpdate("INSERT INTO testxa1 VALUES (1)")); + conn.close(); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.commit(xid, true); + + ResultSet rs = dbConn.createStatement().executeQuery("SELECT foo FROM testxa1"); + assertTrue(rs.next()); + assertEquals(1, rs.getInt(1)); + } + + @Test + void recover() throws Exception { + Xid xid = new CustomXid(12345); + xaRes.start(xid, XAResource.TMNOFLAGS); + conn.createStatement().executeQuery("SELECT * FROM testxa1"); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + + { + Xid[] recoveredXidArray = xaRes.recover(XAResource.TMSTARTRSCAN); + + boolean recoveredXid = false; + + for (Xid aRecoveredXidArray : recoveredXidArray) { + if (xid.equals(aRecoveredXidArray)) { + recoveredXid = true; + break; + } + } + + assertTrue(recoveredXid, "Did not recover prepared xid"); + assertEquals(0, xaRes.recover(XAResource.TMNOFLAGS).length); + } + + xaRes.rollback(xid); + + { + Xid[] recoveredXidArray = xaRes.recover(XAResource.TMSTARTRSCAN); + + boolean 
recoveredXid = false;
+
+      for (Xid aRecoveredXidArray : recoveredXidArray) {
+        // Fix: compare the recovered xid against the original xid, not against
+        // the XAResource. xaRes.equals(aXid) can never be true, so the
+        // "Recovered rolled back xid" assertion below passed vacuously and the
+        // test could not detect a rolled-back xid erroneously reappearing.
+        if (xid.equals(aRecoveredXidArray)) {
+          recoveredXid = true;
+          break;
+        }
+      }
+
+      assertFalse(recoveredXid, "Recovered rolled back xid");
+    }
+  }
+
+  @Test
+  void rollback() throws XAException {
+    Xid xid = new CustomXid(3);
+
+    xaRes.start(xid, XAResource.TMNOFLAGS);
+    xaRes.end(xid, XAResource.TMSUCCESS);
+    xaRes.prepare(xid);
+    xaRes.rollback(xid);
+  }
+
+  @Test
+  void rollbackWithoutPrepare() throws XAException {
+    Xid xid = new CustomXid(4);
+
+    xaRes.start(xid, XAResource.TMNOFLAGS);
+    xaRes.end(xid, XAResource.TMSUCCESS);
+    xaRes.rollback(xid);
+  }
+
+  @Test
+  void autoCommit() throws Exception {
+    Xid xid = new CustomXid(6);
+
+    // When not in an XA transaction, autocommit should be true
+    // per normal JDBC rules.
+    assertTrue(conn.getAutoCommit());
+
+    // When in an XA transaction, autocommit should be false
+    xaRes.start(xid, XAResource.TMNOFLAGS);
+    assertFalse(conn.getAutoCommit());
+    xaRes.end(xid, XAResource.TMSUCCESS);
+    assertFalse(conn.getAutoCommit());
+    xaRes.commit(xid, true);
+    assertTrue(conn.getAutoCommit());
+
+    xaRes.start(xid, XAResource.TMNOFLAGS);
+    xaRes.end(xid, XAResource.TMSUCCESS);
+    xaRes.prepare(xid);
+    assertTrue(conn.getAutoCommit());
+    xaRes.commit(xid, false);
+    assertTrue(conn.getAutoCommit());
+
+    // Check that autocommit is reset to true after a 1-phase rollback
+    xaRes.start(xid, XAResource.TMNOFLAGS);
+    xaRes.end(xid, XAResource.TMSUCCESS);
+    xaRes.rollback(xid);
+    assertTrue(conn.getAutoCommit());
+
+    // Check that autocommit is reset to true after a 2-phase rollback
+    xaRes.start(xid, XAResource.TMNOFLAGS);
+    xaRes.end(xid, XAResource.TMSUCCESS);
+    xaRes.prepare(xid);
+    xaRes.rollback(xid);
+    assertTrue(conn.getAutoCommit());
+
+    // Check that autoCommit is set correctly after a getConnection-call
+    conn = xaconn.getConnection();
+    assertTrue(conn.getAutoCommit());
+
+    xaRes.start(xid, XAResource.TMNOFLAGS);
+
conn.createStatement().executeQuery("SELECT * FROM testxa1"); + + Timestamp ts1 = getTransactionTimestamp(conn); + + conn.close(); + conn = xaconn.getConnection(); + assertFalse(conn.getAutoCommit()); + + Timestamp ts2 = getTransactionTimestamp(conn); + + /* + * Check that we're still in the same transaction. close+getConnection() should not rollback the + * XA-transaction implicitly. + */ + assertEquals(ts1, ts2); + + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + xaRes.rollback(xid); + assertTrue(conn.getAutoCommit()); + } + + /** + *

Get the time the current transaction was started from the server.

+ * + *

This can be used to check that transaction doesn't get committed/ rolled back inadvertently, by + * calling this once before and after the suspected piece of code, and check that they match. It's + * a bit iffy, conceivably you might get the same timestamp anyway if the suspected piece of code + * runs fast enough, and/or the server clock is very coarse grained. But it'll do for testing + * purposes.

+ */ + private static Timestamp getTransactionTimestamp(Connection conn) throws SQLException { + ResultSet rs = conn.createStatement().executeQuery("SELECT now()"); + rs.next(); + return rs.getTimestamp(1); + } + + @Test + void endThenJoin() throws XAException { + Xid xid = new CustomXid(5); + + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.start(xid, XAResource.TMJOIN); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.commit(xid, true); + } + + @Test + void restoreOfAutoCommit() throws Exception { + conn.setAutoCommit(false); + + Xid xid = new CustomXid(14); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.commit(xid, true); + + assertFalse( + conn.getAutoCommit(), + "XaResource should have restored connection autocommit mode after commit or rollback to the initial state."); + + // Test true case + conn.setAutoCommit(true); + + xid = new CustomXid(15); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.commit(xid, true); + + assertTrue( + conn.getAutoCommit(), + "XaResource should have restored connection autocommit mode after commit or rollback to the initial state."); + + } + + @Test + void restoreOfAutoCommitEndThenJoin() throws Exception { + // Test with TMJOIN + conn.setAutoCommit(true); + + Xid xid = new CustomXid(16); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.start(xid, XAResource.TMJOIN); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.commit(xid, true); + + assertTrue( + conn.getAutoCommit(), + "XaResource should have restored connection autocommit mode after start(TMNOFLAGS) end() start(TMJOIN) and then commit or rollback to the initial state."); + + } + + /** + * Test how the driver responds to rolling back a transaction that has already been rolled back. + * Check the driver reports the xid does not exist. The db knows the fact. 
ERROR: prepared + * transaction with identifier "blah" does not exist + */ + @Test + void repeatedRolledBack() throws Exception { + Xid xid = new CustomXid(654321); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + // tm crash + xaRes.recover(XAResource.TMSTARTRSCAN); + xaRes.rollback(xid); + try { + xaRes.rollback(xid); + fail("Rollback was successful"); + } catch (XAException xae) { + assertEquals(XAException.XAER_NOTA, xae.errorCode, "Checking the errorCode is XAER_NOTA indicating the " + "xid does not exist."); + } + } + + /** + * Invoking prepare on already prepared {@link Xid} causes {@link XAException} being thrown + * with error code {@link XAException#XAER_PROTO}. + */ + @Test + void preparingPreparedXid() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + try { + xaRes.prepare(xid); + fail("Prepare is expected to fail with XAER_PROTO as xid was already prepared"); + } catch (XAException xae) { + assertEquals(XAException.XAER_PROTO, xae.errorCode, "Prepare call on already prepared xid " + xid + " expects XAER_PROTO"); + } finally { + xaRes.rollback(xid); + } + } + + /** + * Invoking commit on already committed {@link Xid} causes {@link XAException} being thrown + * with error code {@link XAException#XAER_NOTA}. + */ + @Test + void committingCommittedXid() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + xaRes.commit(xid, false); + + try { + xaRes.commit(xid, false); + fail("Commit is expected to fail with XAER_NOTA as xid was already committed"); + } catch (XAException xae) { + assertEquals(XAException.XAER_NOTA, xae.errorCode, "Commit call on already committed xid " + xid + " expects XAER_NOTA"); + } + } + + /** + * Invoking commit on {@link Xid} committed by different connection. 
+ * That different connection could be for example transaction manager recovery. + */ + @Test + void commitByDifferentConnection() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + + XADataSource secondDs = null; + try { + secondDs = new PGXADataSource(); + BaseDataSourceTest.setupDataSource((PGXADataSource) secondDs); + XAResource secondXaRes = secondDs.getXAConnection().getXAResource(); + secondXaRes.recover(XAResource.TMSTARTRSCAN | XAResource.TMENDRSCAN); + secondXaRes.commit(xid, false); + } finally { + if (secondDs != null) { + secondDs.getXAConnection().close(); + } + } + + try { + xaRes.commit(xid, false); + fail("Commit is expected to fail with XAER_RMERR as somebody else already committed"); + } catch (XAException xae) { + assertEquals(XAException.XAER_RMERR, xae.errorCode, "Commit call on already committed xid " + xid + " expects XAER_RMERR"); + } + } + + /** + * Invoking rollback on {@link Xid} rolled-back by different connection. + * That different connection could be for example transaction manager recovery. 
+ */ + @Test + void rollbackByDifferentConnection() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + + XADataSource secondDs = null; + try { + secondDs = new PGXADataSource(); + BaseDataSourceTest.setupDataSource((PGXADataSource) secondDs); + XAResource secondXaRes = secondDs.getXAConnection().getXAResource(); + secondXaRes.recover(XAResource.TMSTARTRSCAN | XAResource.TMENDRSCAN); + secondXaRes.rollback(xid); + } finally { + if (secondDs != null) { + secondDs.getXAConnection().close(); + } + } + + try { + xaRes.rollback(xid); + fail("Rollback is expected to fail with XAER_RMERR as somebody else already rolled-back"); + } catch (XAException xae) { + assertEquals(XAException.XAER_RMERR, xae.errorCode, "Rollback call on already rolled-back xid " + xid + " expects XAER_RMERR"); + } + } + + /** + * One-phase commit of prepared {@link Xid} should throw exception. + */ + @Test + void onePhaseCommitOfPrepared() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + + try { + xaRes.commit(xid, true); + fail("One-phase commit is expected to fail with XAER_PROTO when called on prepared xid"); + } catch (XAException xae) { + assertEquals(XAException.XAER_PROTO, xae.errorCode, "One-phase commit of prepared xid " + xid + " expects XAER_PROTO"); + } + } + + /** + * Invoking one-phase commit on already one-phase committed {@link Xid} causes + * {@link XAException} being thrown with error code {@link XAException#XAER_NOTA}. 
+ */ + @Test + void onePhaseCommittingCommittedXid() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.commit(xid, true); + + try { + xaRes.commit(xid, true); + fail("One-phase commit is expected to fail with XAER_NOTA as xid was already committed"); + } catch (XAException xae) { + assertEquals(XAException.XAER_NOTA, xae.errorCode, "One-phase commit call on already committed xid " + xid + " expects XAER_NOTA"); + } + } + + /** + * When unknown xid is tried to be prepared the expected {@link XAException#errorCode} + * is {@link XAException#XAER_NOTA}. + */ + @Test + void prepareUnknownXid() throws Exception { + Xid xid = new CustomXid(1); + try { + xaRes.prepare(xid); + fail("Prepare is expected to fail with XAER_NOTA as used unknown xid"); + } catch (XAException xae) { + assertEquals(XAException.XAER_NOTA, xae.errorCode, "Prepare call on unknown xid " + xid + " expects XAER_NOTA"); + } + } + + /** + * When unknown xid is tried to be committed the expected {@link XAException#errorCode} + * is {@link XAException#XAER_NOTA}. + */ + @Test + void commitUnknownXid() throws Exception { + Xid xid = new CustomXid(1); + Xid unknownXid = new CustomXid(42); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + try { + xaRes.commit(unknownXid, false); + fail("Commit is expected to fail with XAER_NOTA as used unknown xid"); + } catch (XAException xae) { + assertEquals(XAException.XAER_NOTA, xae.errorCode, "Commit call on unknown xid " + unknownXid + " expects XAER_NOTA"); + } finally { + xaRes.rollback(xid); + } + } + + /** + * When unknown xid is tried to be committed with one-phase commit optimization + * the expected {@link XAException#errorCode} is {@link XAException#XAER_NOTA}. 
+ */ + @Test + void onePhaseCommitUnknownXid() throws Exception { + Xid xid = new CustomXid(1); + Xid unknownXid = new CustomXid(42); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + try { + xaRes.commit(unknownXid, true); + fail("One-phase commit is expected to fail with XAER_NOTA as used unknown xid"); + } catch (XAException xae) { + assertEquals(XAException.XAER_NOTA, xae.errorCode, "Commit call on unknown xid " + unknownXid + " expects XAER_NOTA"); + } finally { + xaRes.rollback(xid); + } + } + + /** + * When unknown xid is tried to be rolled-back the expected {@link XAException#errorCode} + * is {@link XAException#XAER_NOTA}. + */ + @Test + void rollbackUnknownXid() throws Exception { + Xid xid = new CustomXid(1); + Xid unknownXid = new CustomXid(42); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + try { + xaRes.rollback(unknownXid); + fail("Rollback is expected to fail as used unknown xid"); + } catch (XAException xae) { + assertEquals(XAException.XAER_NOTA, xae.errorCode, "Commit call on unknown xid " + unknownXid + " expects XAER_NOTA"); + } finally { + xaRes.rollback(xid); + } + } + + /** + * When trying to commit xid which was already removed by arbitrary action of database. + * Resource manager can't expect state of the {@link Xid}. + */ + @Test + void databaseRemovesPreparedBeforeCommit() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + + clearAllPrepared(); + + try { + xaRes.commit(xid, false); + fail("Commit is expected to fail as committed xid was removed before"); + } catch (XAException xae) { + assertEquals(XAException.XAER_RMERR, xae.errorCode, "Commit call on xid " + xid + " not known to DB expects XAER_RMERR"); + } + } + + /** + * When trying to rollback xid which was already removed by arbitrary action of database. 
+ * Resource manager can't expect state of the {@link Xid}. + */ + @Test + void databaseRemovesPreparedBeforeRollback() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + + clearAllPrepared(); + + try { + xaRes.rollback(xid); + fail("Rollback is expected to fail as committed xid was removed before"); + } catch (XAException xae) { + assertEquals(XAException.XAER_RMERR, xae.errorCode, "Rollback call on xid " + xid + " not known to DB expects XAER_RMERR"); + } + } + + /** + * When trying to commit and connection issue happens then + * {@link XAException} error code {@link XAException#XAER_RMFAIL} is expected. + */ + @Test + void networkIssueOnCommit() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + + xaconn.close(); + + try { + xaRes.commit(xid, false); + fail("Commit is expected to fail as connection was closed"); + } catch (XAException xae) { + assertEquals(XAException.XAER_RMFAIL, xae.errorCode, "Commit call on closed connection expects XAER_RMFAIL"); + } + } + + /** + * When trying to one-phase commit and connection issue happens then + * {@link XAException} error code {@link XAException#XAER_RMFAIL} is expected. + */ + @Test + void networkIssueOnOnePhaseCommit() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + + xaconn.close(); + + try { + xaRes.commit(xid, true); + fail("One-phase commit is expected to fail as connection was closed"); + } catch (XAException xae) { + assertEquals(XAException.XAER_RMFAIL, xae.errorCode, "One-phase commit call on closed connection expects XAER_RMFAIL"); + } + } + + /** + * When trying to rollback and connection issue happens then + * {@link XAException} error code {@link XAException#XAER_RMFAIL} is expected. 
+ */ + @Test + void networkIssueOnRollback() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + xaRes.end(xid, XAResource.TMSUCCESS); + xaRes.prepare(xid); + + xaconn.close(); + + try { + xaRes.rollback(xid); + fail("Rollback is expected to fail as connection was closed"); + } catch (XAException xae) { + assertEquals(XAException.XAER_RMFAIL, xae.errorCode, "Rollback call on closed connection expects XAER_RMFAIL"); + } + } + + /** + * When using deferred constraints a constraint violation can occur on prepare. This has to be + * mapped to the correct XA Error Code + */ + @Test + void mappingOfConstraintViolations() throws Exception { + Xid xid = new CustomXid(1); + xaRes.start(xid, XAResource.TMNOFLAGS); + assertEquals(0, conn.createStatement().executeUpdate("SET CONSTRAINTS ALL DEFERRED")); + assertEquals(1, conn.createStatement().executeUpdate("INSERT INTO testxa3 VALUES (4)")); + xaRes.end(xid, XAResource.TMSUCCESS); + + try { + xaRes.prepare(xid); + + fail("Prepare is expected to fail as an integrity violation occurred"); + } catch (XAException xae) { + assertEquals(XAException.XA_RBINTEGRITY, xae.errorCode, "Prepare call with deferred constraints violations expects XA_RBINTEGRITY"); + } + } + + /* + * We don't support transaction interleaving. 
public void testInterleaving1() throws Exception { + * Xid xid1 = new CustomXid(1); Xid xid2 = new CustomXid(2); + * + * xaRes.start(xid1, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate( + * "UPDATE testxa1 SET foo = 'ccc'"); xaRes.end(xid1, XAResource.TMSUCCESS); + * + * xaRes.start(xid2, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate( + * "UPDATE testxa2 SET foo = 'bbb'"); + * + * xaRes.commit(xid1, true); + * + * xaRes.end(xid2, XAResource.TMSUCCESS); + * + * xaRes.commit(xid2, true); + * + * } public void testInterleaving2() throws Exception { Xid xid1 = new CustomXid(1); Xid xid2 = + * new CustomXid(2); Xid xid3 = new CustomXid(3); + * + * xaRes.start(xid1, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate( + * "UPDATE testxa1 SET foo = 'aa'"); xaRes.end(xid1, XAResource.TMSUCCESS); + * + * xaRes.start(xid2, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate( + * "UPDATE testxa2 SET foo = 'bb'"); xaRes.end(xid2, XAResource.TMSUCCESS); + * + * xaRes.start(xid3, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate( + * "UPDATE testxa3 SET foo = 'cc'"); xaRes.end(xid3, XAResource.TMSUCCESS); + * + * xaRes.commit(xid1, true); xaRes.commit(xid2, true); xaRes.commit(xid3, true); } + */ +} diff --git a/pgjdbc/src/test/java/org/postgresql/test/xa/XATestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/xa/XATestSuite.java new file mode 100644 index 0000000..d4d4cb7 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/test/xa/XATestSuite.java @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2009, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.test.xa; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + XADataSourceTest.class, +}) +public class XATestSuite { +} diff --git a/pgjdbc/src/test/java/org/postgresql/util/Await.java b/pgjdbc/src/test/java/org/postgresql/util/Await.java new file mode 100644 index 0000000..c40c44c --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/util/Await.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2023, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +/* changes were made to move it into the org.postgresql.util package + * + * Copyright 2022 Juan Lopes + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright (c) 2023, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

/* changes were made to move it into the org.postgresql.util package
 *
 * Copyright 2022 Juan Lopes
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.time.Duration;

/**
 * Test helper that blocks the calling thread until a condition holds or a timeout elapses.
 */
public class Await {

  /** Interval between condition checks, in milliseconds. */
  private static final long POLL_INTERVAL_MS = 100;

  /**
   * Polls {@code condition} every 100ms until it becomes {@code true}.
   *
   * @param message description included in the failure message on timeout
   * @param timeout maximum time to wait for the condition
   * @param condition condition to poll; evaluated at least once before any sleep
   * @throws AssertionError if the condition does not hold within {@code timeout}
   * @throws InterruptedException if the thread is interrupted while sleeping
   */
  public static void until(String message, Duration timeout, Condition condition) throws InterruptedException {
    // Use the monotonic clock for the deadline: System.currentTimeMillis() can jump
    // backwards/forwards under NTP adjustments, which would corrupt the timeout.
    // Compare via subtraction as recommended for nanoTime values (wrap-around safe).
    long deadline = System.nanoTime() + timeout.toNanos();
    while (!condition.get()) {
      if (System.nanoTime() - deadline > 0) {
        throw new AssertionError("Condition not met within " + timeout + ": " + message);
      }
      Thread.sleep(POLL_INTERVAL_MS);
    }
  }

  /** Boolean supplier polled by {@link #until}. */
  @FunctionalInterface
  public interface Condition {
    boolean get();
  }
}
/*
 * Copyright (c) 2020, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collection;

/**
 * Round-trip tests for the binary numeric encoding in {@code ByteConverter}.
 *
 * @author Brett Okken
 */
public class BigDecimalByteConverterTest {

  /** Inputs covering scales, signs, and values around the int/long boundaries. */
  public static Iterable<Object[]> data() {
    final Collection<Object[]> numbers = new ArrayList<>();
    // Plain decimal literals (order preserved; it only affects displayed test names).
    for (String value : new String[] {
        "0.1", "0.10", "0.01", "0.001", "0.0001", "0.00001", "1.0",
        "0.000000000000000000000000000000000000000000000000000",
        "0.100000000000000000000000000000000000000000000009900",
        "-1.0", "-1", "1.2", "-2.05",
        "0.000000000000000000000000000990",
        "-0.000000000000000000000000000990",
        "10.0000000000099", ".10000000000000", "1.10000000000000",
        "99999.2", "99999", "-99999.2", "-99999",
        "2147483647", "-2147483648", "2147483648", "-2147483649",
        "9223372036854775807", "-9223372036854775808",
        "9223372036854775808", "-9223372036854775809",
        "10223372036850000000", "19223372036854775807",
        "19223372036854775807.300", "-19223372036854775807.300",
    }) {
      numbers.add(new Object[]{new BigDecimal(value)});
    }
    // Negative scales: unscaledValue * 10^-scale with trailing zeros held implicitly.
    numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(1234567890987654321L), -1)});
    numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(1234567890987654321L), -5)});
    numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(-1234567890987654321L), -3)});
    numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(6), -8)});
    // Values whose scale is forced away from the natural representation.
    numbers.add(new Object[]{new BigDecimal("30000")});
    numbers.add(new Object[]{new BigDecimal("40000").setScale(15)});
    numbers.add(new Object[]{new BigDecimal("20000.000000000000000000")});
    numbers.add(new Object[]{new BigDecimal("9990000").setScale(8)});
    numbers.add(new Object[]{new BigDecimal("1000000").setScale(31)});
    numbers.add(new Object[]{new BigDecimal("10000000000000000000000000000000000000").setScale(14)});
    numbers.add(new Object[]{new BigDecimal("90000000000000000000000000000000000000")});
    return numbers;
  }

  @MethodSource("data")
  @ParameterizedTest(name = "number = {0,number,#,###.##################################################}")
  void binary(BigDecimal number) {
    testBinaryConversion(number);
  }

  @Test
  void bigDecimal10_pow_131072_minus_1() {
    // 10^131072 - 1 stresses the multi-digit encoding path with a huge value.
    testBinaryConversion(new BigDecimal(BigInteger.TEN.pow(131072).subtract(BigInteger.ONE)));
  }

  /** Encodes {@code number} to the binary wire format and asserts the decoded value matches. */
  static void testBinaryConversion(BigDecimal number) {
    final byte[] bytes = ByteConverter.numeric(number);
    final BigDecimal actual = (BigDecimal) ByteConverter.numeric(bytes);
    if (number.scale() >= 0) {
      assertEquals(number, actual);
    } else {
      // A negative scale may decode to an equal value with a different scale;
      // compare plain string forms instead of relying on BigDecimal.equals.
      assertEquals(number.toPlainString(), actual.toPlainString());
    }
  }
}
+ */ + +package org.postgresql.util; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import org.junit.jupiter.api.Test; + +/** + * Tests {@link IntList}. + */ +class IntListTest { + + @Test + void size() { + final IntList list = new IntList(); + assertEquals(0, list.size()); + list.add(3); + assertEquals(1, list.size()); + + for (int i = 0; i < 48; i++) { + list.add(i); + } + assertEquals(49, list.size()); + + list.clear(); + assertEquals(0, list.size()); + } + + @Test + void get_empty() { + final IntList list = new IntList(); + assertThrows(ArrayIndexOutOfBoundsException.class, () -> list.get(0)); + } + + @Test + void get_negative() { + final IntList list = new IntList(); + list.add(3); + assertThrows(ArrayIndexOutOfBoundsException.class, () -> list.get(-1)); + } + + @Test + void get_tooLarge() { + final IntList list = new IntList(); + list.add(3); + assertThrows(ArrayIndexOutOfBoundsException.class, () -> list.get(1)); + } + + @Test + void get() { + final IntList list = new IntList(); + list.add(3); + assertEquals(3, list.get(0)); + + for (int i = 0; i < 1048; i++) { + list.add(i); + } + + assertEquals(3, list.get(0)); + + for (int i = 0; i < 1048; i++) { + assertEquals(i, list.get(i + 1)); + } + + list.clear(); + list.add(4); + assertEquals(4, list.get(0)); + } + + @Test + void toArray() { + int[] emptyArray = new IntList().toArray(); + IntList list = new IntList(); + assertSame(emptyArray, list.toArray(), "emptyList.toArray()"); + + list.add(45); + assertArrayEquals(new int[]{45}, list.toArray()); + + list.clear(); + assertSame(emptyArray, list.toArray(), "emptyList.toArray() after clearing the list"); + + final int[] expected = new int[1048]; + for (int i = 0; i < 1048; i++) { + list.add(i); + expected[i] = i; + } + assertArrayEquals(expected, 
list.toArray()); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/util/LazyCleanerTest.java b/pgjdbc/src/test/java/org/postgresql/util/LazyCleanerTest.java new file mode 100644 index 0000000..fb97b36 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/util/LazyCleanerTest.java @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2023, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +/* changes were made to move it into the org.postgresql.util package + * + * Copyright 2022 Juan Lopes + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright (c) 2023, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

/* changes were made to move it into the org.postgresql.util package
 *
 * Copyright 2022 Juan Lopes
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.postgresql.util;

import static java.time.Duration.ofSeconds;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;

public class LazyCleanerTest {

  @Test
  void phantomCleaner() throws InterruptedException {
    // NOTE(review): generic parameters were restored from context; the extraction
    // stripped angle brackets — confirm against upstream LazyCleaner signatures.
    List<Object> refs = new ArrayList<>(Arrays.asList(
        new Object(), new Object(), new Object()));

    LazyCleaner cleaner = new LazyCleaner(ofSeconds(5), "Cleaner");

    String[] collected = new String[refs.size()];
    List<LazyCleaner.Cleanable<RuntimeException>> cleanables = new ArrayList<>();
    for (int i = 0; i < refs.size(); i++) {
      final int ii = i;
      cleanables.add(cleaner.register(refs.get(i), leak -> {
        collected[ii] = leak ? "LEAK" : "NO LEAK";
        if (ii == 0) {
          // Verifies that an exception in one cleanup action does not kill the thread.
          throw new RuntimeException(
              "Exception from cleanup action to verify if the cleaner thread would survive"
          );
        }
      }));
    }
    assertEquals(
        refs.size(),
        cleaner.getWatchedCount(),
        "All objects are strongly-reachable, so getWatchedCount should reflect it"
    );

    assertTrue(cleaner.isThreadRunning(),
        "cleanup thread should be running, and it should wait for the leaks");

    // Explicitly release the middle object.
    cleanables.get(1).clean();

    assertEquals(
        refs.size() - 1,
        cleaner.getWatchedCount(),
        "One object has been released properly, so getWatchedCount should reflect it"
    );

    // Drop the strong reference to the first object so it can only be detected as a leak.
    refs.set(0, null);
    System.gc();
    System.gc();

    Await.until(
        "One object was released, and another one has leaked, so getWatchedCount should reflect it",
        ofSeconds(5),
        () -> cleaner.getWatchedCount() == refs.size() - 2
    );

    refs.clear();
    System.gc();
    System.gc();

    Await.until(
        "The cleanup thread should detect leaks and terminate within 5-10 seconds after GC",
        ofSeconds(10),
        () -> !cleaner.isThreadRunning()
    );

    assertEquals(
        Arrays.asList("LEAK", "NO LEAK", "LEAK").toString(),
        Arrays.asList(collected).toString(),
        "Second object has been released properly, so it should be reported as NO LEAK"
    );
  }

  @Test
  void getThread() throws InterruptedException {
    String threadName = UUID.randomUUID().toString();
    LazyCleaner cleaner = new LazyCleaner(ofSeconds(5), threadName);
    List<Object> refs = new ArrayList<>();
    refs.add(new Object());
    LazyCleaner.Cleanable<IllegalStateException> cleanable = cleaner.register(
        refs.get(0),
        leak -> {
          throw new IllegalStateException("test exception from CleaningAction");
        }
    );
    assertTrue(cleaner.isThreadRunning(),
        "cleanup thread should be running, and it should wait for the leaks");
    Thread thread = getThreadByName(threadName);
    thread.interrupt();
    Await.until(
        "The cleanup thread should ignore the interrupt since there's one object to monitor",
        ofSeconds(10),
        () -> !thread.isInterrupted()
    );
    assertThrows(
        IllegalStateException.class,
        cleanable::clean,
        "Exception from cleanable.clean() should be rethrown"
    );
    thread.interrupt();
    Await.until(
        "The cleanup thread should exit shortly after interrupt as there's no leaks to monitor",
        ofSeconds(1),
        () -> !cleaner.isThreadRunning()
    );
  }

  /** Finds a live thread by exact name; fails loudly if it is not present. */
  public static Thread getThreadByName(String threadName) {
    for (Thread t : Thread.getAllStackTraces().keySet()) {
      if (threadName.equals(t.getName())) {
        return t;
      }
    }
    throw new IllegalStateException("Cleanup thread " + threadName + " not found");
  }
}
/*
 * Copyright (c) 2017, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

import java.io.OutputStream;
import java.io.PrintStream;

/**
 * A {@link PrintStream} that discards everything written to it.
 *
 * <p>Created by davec on 3/14/17.</p>
 */
public class NullOutputStream extends PrintStream {

  public NullOutputStream(OutputStream out) {
    super(out);
  }

  @Override
  public void write(int b) {
    // intentionally empty: all output is dropped
  }

  @Override
  public void write(byte[] buf, int off, int len) {
    // intentionally empty: all output is dropped
  }
}
" + expected); + } + } + + private void assertGetLongFail(String s) { + try { + long ret = NumberParser.getFastLong(s.getBytes(), Long.MIN_VALUE, Long.MAX_VALUE); + fail("Expected NumberFormatException on parsing \"" + s + "\", but result: " + ret); + } catch (NumberFormatException nfe) { + // ok + } + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/util/OSUtilTest.java b/pgjdbc/src/test/java/org/postgresql/util/OSUtilTest.java new file mode 100644 index 0000000..6c020e8 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/util/OSUtilTest.java @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; +import uk.org.webcompere.systemstubs.environment.EnvironmentVariables; +import uk.org.webcompere.systemstubs.properties.SystemProperties; +import uk.org.webcompere.systemstubs.resource.Resources; + +import java.io.File; + +@StubEnvironmentAndProperties +class OSUtilTest { + + @Test + void getUserConfigRootDirectory() throws Exception { + // windows + Resources.with(new EnvironmentVariables("APPDATA", "C:\\Users\\realuser\\AppData\\Roaming"), + new SystemProperties("os.name", "Windows 10")).execute(() -> { + String result = OSUtil.getUserConfigRootDirectory(); + assertEquals("C:\\Users\\realuser\\AppData\\Roaming" + File.separator + "postgresql", result); + } + ); + // linux + Resources.with(new SystemProperties("os.name", "Linux", "user.home", "/home/realuser")).execute(() -> { + String result = OSUtil.getUserConfigRootDirectory(); + assertEquals("/home/realuser", result); + } + ); + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/util/PGPropertyUtilTest.java b/pgjdbc/src/test/java/org/postgresql/util/PGPropertyUtilTest.java new file mode 100644 index 0000000..b5b7867 --- /dev/null +++ 
/*
 * Copyright (c) 2021, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.postgresql.PGProperty;

import org.junit.jupiter.api.Test;

import java.util.Properties;

class PGPropertyUtilTest {

  /** Runs the consistency check on a Properties holding only the given PGPORT value. */
  private static boolean portCheck(String port) {
    Properties properties = new Properties();
    PGProperty.PG_PORT.set(properties, port);
    return PGPropertyUtil.propertiesConsistencyCheck(properties);
  }

  @Test
  void propertiesConsistencyCheck() {
    // PGPORT must be a number in the valid TCP port range 1..65535.
    assertFalse(portCheck("0"));
    assertTrue(portCheck("1"));
    assertTrue(portCheck("5432"));
    assertTrue(portCheck("65535"));
    assertFalse(portCheck("65536"));
    assertFalse(portCheck("abcdef"));
    // Keys the check does not know about are accepted as-is.
    Properties properties = new Properties();
    properties.setProperty("not-handled-key", "not-handled-value");
    assertTrue(PGPropertyUtil.propertiesConsistencyCheck(properties));
  }

  // Mapping between pg_service.conf keys and PGProperty names, shared by both
  // translation tests below.
  private static final String[][] TRANSLATION_TABLE = {
      {"allowEncodingChanges", "allowEncodingChanges"},
      {"port", "PGPORT"},
      {"host", "PGHOST"},
      {"dbname", "PGDBNAME"},
  };

  @Test
  void translatePGServiceToPGProperty() {
    for (String[] row : TRANSLATION_TABLE) {
      assertEquals(row[1], PGPropertyUtil.translatePGServiceToPGProperty(row[0]));
    }
  }

  @Test
  void translatePGPropertyToPGService() {
    for (String[] row : TRANSLATION_TABLE) {
      assertEquals(row[0], PGPropertyUtil.translatePGPropertyToPGService(row[1]));
    }
  }
}
/*
 * Copyright (c) 2021, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import static org.junit.jupiter.api.Assertions.assertArrayEquals;

import org.junit.jupiter.api.Test;

import java.sql.SQLException;
import java.util.Random;

class PGbyteaTest {

  /** Upper-case hex alphabet. */
  private static final byte[] HEX_DIGITS_U = new byte[]{'0', '1', '2', '3', '4', '5', '6', '7',
      '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};

  /** Lower-case hex alphabet. */
  private static final byte[] HEX_DIGITS_L = new byte[]{'0', '1', '2', '3', '4', '5', '6', '7',
      '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};

  @Test
  void hexDecode_lower() throws SQLException {
    final byte[] data = new byte[1023];
    new Random(7).nextBytes(data);
    final byte[] decoded = PGbytea.toBytes(hexEncode(data, HEX_DIGITS_L));
    assertArrayEquals(data, decoded);
  }

  @Test
  void hexDecode_upper() throws SQLException {
    final byte[] data = new byte[9513];
    new Random(-8).nextBytes(data);
    final byte[] decoded = PGbytea.toBytes(hexEncode(data, HEX_DIGITS_U));
    assertArrayEquals(data, decoded);
  }

  /**
   * Hex-encodes {@code data} in the bytea "hex" wire format: a leading {@code \x}
   * followed by two hex characters per input byte.
   */
  private static byte[] hexEncode(byte[] data, byte[] hexDigits) {
    // 2 characters per byte plus the 2-character "\x" prefix.
    final byte[] encoded = new byte[2 + (data.length << 1)];
    encoded[0] = '\\';
    encoded[1] = 'x';
    for (int i = 0; i < data.length; i++) {
      final int idx = (i << 1) + 2;
      final byte b = data[i];
      encoded[idx] = hexDigits[(b & 0xF0) >>> 4];
      encoded[idx + 1] = hexDigits[b & 0x0F];
    }
    return encoded;
  }
}

/*
 * Copyright (c) 2020, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

class PGtokenizerTest {

  @Test
  void tokenize() {
    PGtokenizer tokenizer = new PGtokenizer("1,2EC1830300027,1,,", ',');
    assertEquals(5, tokenizer.getSize());
  }

  @Test
  void tokenize2() {
    // Delimiters inside quoted sections must not split tokens.
    PGtokenizer tokenizer = new PGtokenizer(",,d,\"f(10\",\"(mime,pdf,pdf)\",test,2018-10-11,1010", ',');
    assertEquals(8, tokenizer.getSize());
  }

  @Test
  void tokenize3() {
    // Unbalanced ')' inside quotes must not confuse parenthesis tracking.
    PGtokenizer tokenizer = new PGtokenizer(",,d,\"f)10\",\"(mime,pdf,pdf)\",test,2018-10-11,1010", ',');
    assertEquals(8, tokenizer.getSize());
  }

  @Test
  void tokenize4() {
    PGtokenizer tokenizer = new PGtokenizer(",,d,\"f()10\",\"(mime,pdf,pdf)\",test,2018-10-11,1010", ',');
    assertEquals(8, tokenizer.getSize());
  }

  @Test
  void removePara() {
    String stripped = PGtokenizer.removePara("(1,2EC1830300027,1,,)");
    assertEquals("1,2EC1830300027,1,,", stripped);
  }

}
/*
 * Copyright (c) 2016, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package org.postgresql.util;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;

import java.io.ByteArrayInputStream;
import java.io.CharArrayReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.nio.charset.MalformedInputException;
import java.util.Arrays;

class ReaderInputStreamTest {
  // 132878 = U+2070E - chosen because it is the first supplementary character
  // in the International Ideographic Core (IICore)
  // see http://www.i18nguy.com/unicode/supplementary-test.html for further explanation

  // Character.highSurrogate(132878) = 0xd841
  private static final char LEADING_SURROGATE = 0xd841;

  // Character.lowSurrogate(132878) = 0xdf0e
  private static final char TRAILING_SURROGATE = 0xdf0e;

  @Test
  @SuppressWarnings("nullability")
  void NullReaderTest() {
    assertThrows(IllegalArgumentException.class, () -> new ReaderInputStream(null));
  }

  @Test
  void cbufTooSmallReaderTest() {
    // A char buffer of size 1 cannot hold a surrogate pair, so it is rejected.
    assertThrows(IllegalArgumentException.class,
        () -> new ReaderInputStream(new StringReader("abc"), 1));
  }

  /**
   * Reads up to 4 bytes from {@code in} and asserts they equal {@code expected}
   * (zero-padded to 4). With no expected bytes, asserts end-of-stream and closes.
   */
  private static void read(InputStream in, int... expected) throws IOException {
    byte[] actual = new byte[4];
    Arrays.fill(actual, (byte) 0x00);
    int bytesRead = in.read(actual);
    int[] actualInts = new int[4];
    for (int i = 0; i < actual.length; i++) {
      actualInts[i] = actual[i] & 0xff;   // unsigned view for readable comparisons
    }
    if (expected.length > 0) {
      // Ensure "expected" has 4 bytes
      expected = Arrays.copyOf(expected, 4);
      assertEquals(Arrays.toString(expected), Arrays.toString(actualInts));
    } else {
      assertEquals(-1, bytesRead, "should be end-of-stream");
      in.close();
    }
  }

  @Test
  void SimpleTest() throws IOException {
    char[] chars = {'a', 'b', 'c'};
    InputStream in = new ReaderInputStream(new CharArrayReader(chars));
    read(in, 0x61, 0x62, 0x63);
    read(in);
  }

  @Test
  void inputSmallerThanCbufsizeTest() throws IOException {
    char[] chars = {'a'};
    InputStream in = new ReaderInputStream(new CharArrayReader(chars), 2);
    read(in, 0x61);
    read(in);
  }

  @Test
  void tooManyReadsTest() throws IOException {
    char[] chars = {'a'};
    InputStream in = new ReaderInputStream(new CharArrayReader(chars), 2);
    read(in, 0x61);
    // Repeated reads past EOF must keep returning -1, not throw.
    assertEquals(-1, in.read(), "should be end-of-stream");
    assertEquals(-1, in.read(), "should be end-of-stream");
    assertEquals(-1, in.read(), "should be end-of-stream");
    in.close();
  }

  @Test
  void surrogatePairSpansCharBufBoundaryTest() throws IOException {
    // The pair straddles the 2-char buffer; the stream must keep it intact (UTF-8: F0 A0 9C 8E).
    char[] chars = {'a', LEADING_SURROGATE, TRAILING_SURROGATE};
    InputStream in = new ReaderInputStream(new CharArrayReader(chars), 2);
    read(in, 0x61, 0xF0, 0xA0, 0x9C);
    read(in, 0x8E);
    read(in);
  }

  @Test
  void invalidInputTest() throws IOException {
    assertThrows(MalformedInputException.class, () -> {
      char[] chars = {'a', LEADING_SURROGATE, LEADING_SURROGATE};
      InputStream in = new ReaderInputStream(new CharArrayReader(chars), 2);
      read(in);
    });
  }

  @Test
  void unmatchedLeadingSurrogateInputTest() throws IOException {
    assertThrows(MalformedInputException.class, () -> {
      char[] chars = {LEADING_SURROGATE};
      InputStream in = new ReaderInputStream(new CharArrayReader(chars), 2);
      read(in, 0x00);
    });
  }

  @Test
  void unmatchedTrailingSurrogateInputTest() throws IOException {
    assertThrows(MalformedInputException.class, () -> {
      char[] chars = {TRAILING_SURROGATE};
      InputStream in = new ReaderInputStream(new CharArrayReader(chars), 2);
      read(in);
    });
  }

  @Test
  @SuppressWarnings("nullness")
  void nullArrayReadTest() throws IOException {
    assertThrows(NullPointerException.class, () -> {
      InputStream in = new ReaderInputStream(new StringReader("abc"));
      in.read(null, 0, 4);
    });
  }

  @Test
  void invalidOffsetArrayReadTest() throws IOException {
    assertThrows(IndexOutOfBoundsException.class, () -> {
      InputStream in = new ReaderInputStream(new StringReader("abc"));
      byte[] bytes = new byte[4];
      in.read(bytes, 5, 4);
    });
  }

  @Test
  void negativeOffsetArrayReadTest() throws IOException {
    assertThrows(IndexOutOfBoundsException.class, () -> {
      InputStream in = new ReaderInputStream(new StringReader("abc"));
      byte[] bytes = new byte[4];
      in.read(bytes, -1, 4);
    });
  }

  @Test
  void invalidLengthArrayReadTest() throws IOException {
    assertThrows(IndexOutOfBoundsException.class, () -> {
      InputStream in = new ReaderInputStream(new StringReader("abc"));
      byte[] bytes = new byte[4];
      in.read(bytes, 1, 4);
    });
  }

  @Test
  void negativeLengthArrayReadTest() throws IOException {
    assertThrows(IndexOutOfBoundsException.class, () -> {
      InputStream in = new ReaderInputStream(new StringReader("abc"));
      byte[] bytes = new byte[4];
      in.read(bytes, 1, -2);
    });
  }

  @Test
  void zeroLengthArrayReadTest() throws IOException {
    InputStream in = new ReaderInputStream(new StringReader("abc"));
    byte[] bytes = new byte[4];
    assertEquals(0, in.read(bytes, 1, 0), "requested 0 byte read");
  }

  @Test
  void singleCharArrayReadTest() throws IOException {
    // A reader that delivers one char per read() still yields a complete UTF-8 sequence.
    Reader reader = new SingleCharPerReadReader(LEADING_SURROGATE, TRAILING_SURROGATE);
    InputStream in = new ReaderInputStream(reader);
    read(in, 0xF0, 0xA0, 0x9C, 0x8E);
    read(in);
  }

  @Test
  void malformedSingleCharArrayReadTest() throws IOException {
    assertThrows(MalformedInputException.class, () -> {
      Reader reader = new SingleCharPerReadReader(LEADING_SURROGATE, LEADING_SURROGATE);
      InputStream in = new ReaderInputStream(reader);
      read(in, 0xF0, 0xA0, 0x9C, 0x8E);
    });
  }

  @Test
  void readsEqualToBlockSizeTest() throws Exception {
    final int blockSize = 8 * 1024;
    final int dataSize = blockSize + 57;
    final byte[] data = new byte[dataSize];
    final byte[] buffer = new byte[blockSize];

    InputStreamReader isr = new InputStreamReader(new ByteArrayInputStream(data), "UTF-8");
    ReaderInputStream in = new ReaderInputStream(isr, blockSize);

    int total = 0;
    // Two block-sized reads must drain the whole input, including the 57-byte tail.
    total += in.read(buffer, 0, blockSize);
    total += in.read(buffer, 0, blockSize);

    assertEquals(dataSize, total, "Data not read completely: missing " + (dataSize - total) + " bytes");
  }

  /** Reader stub that returns at most one char per read() call. */
  private static class SingleCharPerReadReader extends Reader {
    private final char[] data;
    private int next;

    private SingleCharPerReadReader(char... data) {
      this.data = data;
    }

    @Override
    public int read(char[] cbuf, int off, int len) throws IOException {
      if (next < data.length) {
        cbuf[off] = data[next++];
        return 1;
      }
      return -1;
    }

    @Override
    public void close() throws IOException {
    }
  }
}

The tests should be run in isolation to prevent concurrent modification of properties and + * the environment.

+ *

Note: environment mocking works from a single thread only until + * Fix multi-threaded + * environment variable mocking, and Mocked + * static methods are not available in other threads are resolved

+ */ +@Isolated +@ExtendWith(SystemStubsExtension.class) +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD, ElementType.TYPE}) +public @interface StubEnvironmentAndProperties { +} diff --git a/pgjdbc/src/test/java/org/postgresql/util/TestLogHandler.java b/pgjdbc/src/test/java/org/postgresql/util/TestLogHandler.java new file mode 100644 index 0000000..30850a1 --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/util/TestLogHandler.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. + */ + +package org.postgresql.util; + +import java.util.ArrayList; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.logging.Handler; +import java.util.logging.LogRecord; +import java.util.regex.Pattern; + +public class TestLogHandler extends Handler { + public Queue records = new ConcurrentLinkedQueue<>(); + + @Override + public void publish(LogRecord record) { + records.add(record); + } + + @Override + public void flush() { + } + + @Override + public void close() throws SecurityException { + } + + public List getRecordsMatching(Pattern messagePattern) { + List matches = new ArrayList<>(); + for (LogRecord r: this.records) { + String message = r.getMessage(); + if (message != null && messagePattern.matcher(message).find()) { + matches.add(r); + } + } + return matches; + } +} diff --git a/pgjdbc/src/test/java/org/postgresql/util/UnusualBigDecimalByteConverterTest.java b/pgjdbc/src/test/java/org/postgresql/util/UnusualBigDecimalByteConverterTest.java new file mode 100644 index 0000000..1cf056e --- /dev/null +++ b/pgjdbc/src/test/java/org/postgresql/util/UnusualBigDecimalByteConverterTest.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2020, PostgreSQL Global Development Group + * See the LICENSE file in the project root for more information. 
+ */ + +package org.postgresql.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +import java.math.BigDecimal; + +/** + * Tests unusual binary representations of numeric values. + * @author Brett Okken + */ +class UnusualBigDecimalByteConverterTest { + + /** + * Typically a number < 1 would have sections of leading '0' values represented in weight + * rather than including as short values. + */ + @Test + void test_4_leading_0() { + //len 2 + //weight -1 + //scale 5 + final byte[] data = new byte[]{0, 2, -1, -1, 0, 0, 0, 5, 0, 0, 23, 112}; + final BigDecimal actual = (BigDecimal) ByteConverter.numeric(data); + assertEquals(new BigDecimal("0.00006"), actual); + } +} diff --git a/pgjdbc/src/test/resources/pg_service/.pg_service.conf b/pgjdbc/src/test/resources/pg_service/.pg_service.conf new file mode 100644 index 0000000..7fa2b3b --- /dev/null +++ b/pgjdbc/src/test/resources/pg_service/.pg_service.conf @@ -0,0 +1,35 @@ +# not used section +[mydb1] +host=local-somehost1 +port=5433 +# next line has invalid key +user =admin +line with invalid syntax + +[test-service1] +host=local-test-host.test.net +port=5433 + # comment +user=admin +# space after equal sign is intentional +dbname= test_dbname + +[fail-case-1] +# space before equal sign is intentional +host =local-somehost1 +port=5433 +user=admin + +[fail-CASE-2] +host=local-somehost2 + +[fail-case-3] +host + +[ success-case-3 ] +host=local-somehost3 + +[success case 4] +host=local-somehost4 + +[empty-service1] diff --git a/pgjdbc/src/test/resources/pg_service/.pgpass b/pgjdbc/src/test/resources/pg_service/.pgpass new file mode 100644 index 0000000..327e135 --- /dev/null +++ b/pgjdbc/src/test/resources/pg_service/.pgpass @@ -0,0 +1,25 @@ + +# hostname:port:database:username:password + +localhost:5432:postgres:postgres:postgres1 +localhost2:5432:postgres:postgres:postgres\ +localhost3:5432:postgres:postgres:postgres\: 
+localhost4:5432:postgres:postgres:postgres1\:: +localhost5:5432:postgres:postgres:postgres5: +localhost6:5432:postgres:postgres:post\\gres\\ +localhost7:5432:postgres:postgres: ab cd +# NB! no spaces at the end of line +localhost8:5432:postgres:postgres: + +::1:1234:colon:db:colon:user:pass\:pass +::1:12345:colon\:db:colon\:user:pass\:pass1 + +::1:1234:slash\db:slash\user:pass\\pass +\:\:1:12345:slash\\db:slash\\user:pass\\pass1 + +*:5432:postgres:postgres:anyhost5 +localhost11:*:postgres:postgres:anyport5 +localhost12:5432:*:postgres:anydb5 +localhost13:5432:postgres:*:anyuser5 + +*:*:*:*:absolute-any diff --git a/pgjdbc/src/test/resources/pg_service/pg_service.conf b/pgjdbc/src/test/resources/pg_service/pg_service.conf new file mode 100644 index 0000000..0e3aa67 --- /dev/null +++ b/pgjdbc/src/test/resources/pg_service/pg_service.conf @@ -0,0 +1,8 @@ +[test-service1] +host=global-test-host.test.net +port=5433 + # comment +user=admin +dbname=test_dbname + +[empty-service1] diff --git a/pgjdbc/src/test/resources/pg_service/pgpassfileEnv.conf b/pgjdbc/src/test/resources/pg_service/pgpassfileEnv.conf new file mode 100644 index 0000000..3f19950 --- /dev/null +++ b/pgjdbc/src/test/resources/pg_service/pgpassfileEnv.conf @@ -0,0 +1 @@ +localhost:5432:postgres1:postgres2:postgres3 diff --git a/pgjdbc/src/test/resources/pg_service/pgpassfileProps.conf b/pgjdbc/src/test/resources/pg_service/pgpassfileProps.conf new file mode 100644 index 0000000..6d46415 --- /dev/null +++ b/pgjdbc/src/test/resources/pg_service/pgpassfileProps.conf @@ -0,0 +1,5 @@ +# intentional short line +localhost88 +localhost9\ +# +localhost77:5432:*:postgres11:postgres22 diff --git a/pgjdbc/src/test/resources/pg_service/pgservicefileEnv.conf b/pgjdbc/src/test/resources/pg_service/pgservicefileEnv.conf new file mode 100644 index 0000000..32ff545 --- /dev/null +++ b/pgjdbc/src/test/resources/pg_service/pgservicefileEnv.conf @@ -0,0 +1,20 @@ + +# comment +[test-service1] + 
host=pgservicefileEnv-test-host.test.net + port=5433 + # comment +user=admin +dbname=test_dbname +sslmode=disable + +# duplicate service section is intentional +[test-service1] +host=another-pgservicefileEnv-test-host.test.net + +[mydb2] +host=global-somehost2 +port=5433 +user=admin + +[empty-service1] diff --git a/pgjdbc/src/test/resources/pg_service/pgservicefileProps.conf b/pgjdbc/src/test/resources/pg_service/pgservicefileProps.conf new file mode 100644 index 0000000..922a6b5 --- /dev/null +++ b/pgjdbc/src/test/resources/pg_service/pgservicefileProps.conf @@ -0,0 +1,29 @@ +# comment +[mydb1] +host=global-somehost1 +port=5433 +user=admin + +[test-service1] +host=pgservicefileProps-test-host.test.net +port=5433 + # comment +user=admin +dbname=test_dbname + +[empty-service1] + +[mydb2] +host=global-somehost2 +port=5433 +user=admin + +[driverTestService1] +host=test-host1 +port=5444 +dbname=testdb1 +[driverTestService2] +host=test-host1,[::1],test-host2 +# intentional: less ports than hosts +port=5541,5542 +dbname=testdb1 diff --git a/pgjdbc/src/test/resources/pg_service/postgresql/.pg_service.conf b/pgjdbc/src/test/resources/pg_service/postgresql/.pg_service.conf new file mode 100644 index 0000000..7fa2b3b --- /dev/null +++ b/pgjdbc/src/test/resources/pg_service/postgresql/.pg_service.conf @@ -0,0 +1,35 @@ +# not used section +[mydb1] +host=local-somehost1 +port=5433 +# next line has invalid key +user =admin +line with invalid syntax + +[test-service1] +host=local-test-host.test.net +port=5433 + # comment +user=admin +# space after equal sign is intentional +dbname= test_dbname + +[fail-case-1] +# space before equal sign is intentional +host =local-somehost1 +port=5433 +user=admin + +[fail-CASE-2] +host=local-somehost2 + +[fail-case-3] +host + +[ success-case-3 ] +host=local-somehost3 + +[success case 4] +host=local-somehost4 + +[empty-service1] diff --git a/pgjdbc/src/test/resources/pg_service/postgresql/pgpass.conf 
b/pgjdbc/src/test/resources/pg_service/postgresql/pgpass.conf new file mode 100644 index 0000000..327e135 --- /dev/null +++ b/pgjdbc/src/test/resources/pg_service/postgresql/pgpass.conf @@ -0,0 +1,25 @@ + +# hostname:port:database:username:password + +localhost:5432:postgres:postgres:postgres1 +localhost2:5432:postgres:postgres:postgres\ +localhost3:5432:postgres:postgres:postgres\: +localhost4:5432:postgres:postgres:postgres1\:: +localhost5:5432:postgres:postgres:postgres5: +localhost6:5432:postgres:postgres:post\\gres\\ +localhost7:5432:postgres:postgres: ab cd +# NB! no spaces at the end of line +localhost8:5432:postgres:postgres: + +::1:1234:colon:db:colon:user:pass\:pass +::1:12345:colon\:db:colon\:user:pass\:pass1 + +::1:1234:slash\db:slash\user:pass\\pass +\:\:1:12345:slash\\db:slash\\user:pass\\pass1 + +*:5432:postgres:postgres:anyhost5 +localhost11:*:postgres:postgres:anyport5 +localhost12:5432:*:postgres:anydb5 +localhost13:5432:postgres:*:anyuser5 + +*:*:*:*:absolute-any diff --git a/pgjdbc/src/test/resources/test-file.xml b/pgjdbc/src/test/resources/test-file.xml new file mode 100644 index 0000000..01f6fd4 --- /dev/null +++ b/pgjdbc/src/test/resources/test-file.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/saslprep/build.gradle b/saslprep/build.gradle new file mode 100644 index 0000000..6dec168 --- /dev/null +++ b/saslprep/build.gradle @@ -0,0 +1,3 @@ +dependencies { + api project(':stringprep') +} diff --git a/saslprep/src/main/java/com/ongres/saslprep/SASLprep.java b/saslprep/src/main/java/com/ongres/saslprep/SASLprep.java new file mode 100644 index 0000000..bfd58ef --- /dev/null +++ b/saslprep/src/main/java/com/ongres/saslprep/SASLprep.java @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2019 OnGres, Inc. 
+ * SPDX-License-Identifier: BSD-2-Clause + */ + +package com.ongres.saslprep; + +import java.util.EnumSet; +import java.util.Set; + +import com.ongres.stringprep.Option; +import com.ongres.stringprep.Profile; +import com.ongres.stringprep.ProfileName; +import com.ongres.stringprep.Tables; + +/** + * SASLprep: Stringprep Profile for User Names and Passwords. + * + *

The use of simple user names and passwords in authentication and authorization is pervasive on + * the Internet. To increase the likelihood that user name and password input and comparison work in + * ways that make sense for typical users throughout the world, this document defines rules for + * preparing internationalized user names and passwords for comparison. For simplicity and + * implementation ease, a single algorithm is defined for both user names and passwords. + */ +@ProfileName("SASLprep") +public final class SASLprep implements Profile { + + private final EnumSet

    + *
  • The SecureRandom used ({@link SecureRandom} by default) are thread-safe too. + * The contract of {@link java.util.Random} marks it as thread-safe, so inherited classes are also expected + * to maintain it. + *
  • + *
  • No external nonceSupplier is provided; or if provided, it is thread-safe.
  • + *
+ * So this class, once instantiated via the {@link Builder#setup()}} method, can serve for multiple users and + * authentications. + */ +public class ScramClient { + /** + * Length (in characters, bytes) of the nonce generated by default (if no nonce supplier is provided) + */ + public static final int DEFAULT_NONCE_LENGTH = 24; + + /** + * Select whether this client will support channel binding or not + */ + public enum ChannelBinding { + /** + * Don't use channel binding. Server must support at least one non-channel binding mechanism. + */ + NO(Gs2CbindFlag.CLIENT_NOT), + + /** + * Force use of channel binding. Server must support at least one channel binding mechanism. + * Channel binding data will need to be provided as part of the ClientFirstMessage. + */ + YES(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED), + + /** + * Channel binding is preferred. Non-channel binding mechanisms will be used if either the server does not + * support channel binding, or no channel binding data is provided as part of the ClientFirstMessage + */ + IF_SERVER_SUPPORTS_IT(Gs2CbindFlag.CLIENT_YES_SERVER_NOT) + ; + + private final Gs2CbindFlag gs2CbindFlag; + + ChannelBinding(Gs2CbindFlag gs2CbindFlag) { + this.gs2CbindFlag = gs2CbindFlag; + } + + public Gs2CbindFlag gs2CbindFlag() { + return gs2CbindFlag; + } + } + + private final ChannelBinding channelBinding; + private final StringPreparation stringPreparation; + private final ScramMechanism scramMechanism; + private final SecureRandom secureRandom; + private final NonceSupplier nonceSupplier; + + private ScramClient( + ChannelBinding channelBinding, StringPreparation stringPreparation, + ScramMechanism nonChannelBindingMechanism, ScramMechanism channelBindingMechanism, + SecureRandom secureRandom, NonceSupplier nonceSupplier + ) { + assert null != channelBinding : "channelBinding"; + assert null != stringPreparation : "stringPreparation"; + assert null != nonChannelBindingMechanism || null != channelBindingMechanism + : "Either a 
channel-binding or a non-binding mechanism must be present"; + assert null != secureRandom : "secureRandom"; + assert null != nonceSupplier : "nonceSupplier"; + + + this.channelBinding = channelBinding; + this.stringPreparation = stringPreparation; + this.scramMechanism = null != nonChannelBindingMechanism ? nonChannelBindingMechanism : channelBindingMechanism; + this.secureRandom = secureRandom; + this.nonceSupplier = nonceSupplier; + } + + /** + * Selects for the client whether to use channel binding. + * Refer to {@link ChannelBinding} documentation for the description of the possible values. + * @param channelBinding The channel binding setting + * @return The next step in the chain (PreBuilder1). + * @throws IllegalArgumentException If channelBinding is null + */ + public static PreBuilder1 channelBinding(ChannelBinding channelBinding) throws IllegalArgumentException { + return new PreBuilder1(checkNotNull(channelBinding, "channelBinding")); + } + + /** + * This class is not meant to be used directly. + * Use {@link ScramClient#channelBinding(ChannelBinding)} instead. + */ + public static class PreBuilder1 { + protected final ChannelBinding channelBinding; + + private PreBuilder1(ChannelBinding channelBinding) { + this.channelBinding = channelBinding; + } + + /** + * Selects the string preparation algorithm to use by the client. + * @param stringPreparation The string preparation algorithm + * @throws IllegalArgumentException If stringPreparation is null + */ + public PreBuilder2 stringPreparation(StringPreparation stringPreparation) throws IllegalArgumentException { + return new PreBuilder2(channelBinding, checkNotNull(stringPreparation, "stringPreparation")); + } + } + + /** + * This class is not meant to be used directly. + * Use {@link ScramClient#channelBinding(ChannelBinding)}.{#stringPreparation(StringPreparation)} instead. 
+ */ + public static class PreBuilder2 extends PreBuilder1 { + protected final StringPreparation stringPreparation; + protected ScramMechanism nonChannelBindingMechanism = null; + protected ScramMechanism channelBindingMechanism = null; + + private PreBuilder2(ChannelBinding channelBinding, StringPreparation stringPreparation) { + super(channelBinding); + this.stringPreparation = stringPreparation; + } + + /** + * Inform the client of the SCRAM mechanisms supported by the server. + * Based on this list, the channel binding settings previously specified, + * and the relative strength of the supported SCRAM mechanisms for this client, + * the client will have enough data to select which mechanism to use for future interactions with the server. + * All names provided here need to be standar IANA Registry names for SCRAM mechanisms, or will be ignored. + * + * @see + * SASL SCRAM Family Mechanisms + * + * @param serverMechanisms One or more IANA-registered SCRAM mechanism names, as advertised by the server + * @throws IllegalArgumentException If no server mechanisms are provided + */ + public Builder selectMechanismBasedOnServerAdvertised(String... 
serverMechanisms) { + checkArgument(null != serverMechanisms && serverMechanisms.length > 0, "serverMechanisms"); + + nonChannelBindingMechanism = ScramMechanisms.selectMatchingMechanism(false, serverMechanisms); + if(channelBinding == ChannelBinding.NO && null == nonChannelBindingMechanism) { + throw new IllegalArgumentException("Server does not support non channel binding mechanisms"); + } + + channelBindingMechanism = ScramMechanisms.selectMatchingMechanism(true, serverMechanisms); + if(channelBinding == ChannelBinding.YES && null == channelBindingMechanism) { + throw new IllegalArgumentException("Server does not support channel binding mechanisms"); + } + + if(null == channelBindingMechanism && null == nonChannelBindingMechanism) { + throw new IllegalArgumentException("There are no matching mechanisms between client and server"); + } + + return new Builder(channelBinding, stringPreparation, nonChannelBindingMechanism, channelBindingMechanism); + } + + /** + * Inform the client of the SCRAM mechanisms supported by the server. + * Calls {@link Builder#selectMechanismBasedOnServerAdvertised(String...)} + * with the results of splitting the received comma-separated values. + * @param serverMechanismsCsv A CSV (Comma-Separated Values) String, containining all the SCRAM mechanisms + * supported by the server + * @throws IllegalArgumentException If selectMechanismBasedOnServerAdvertisedCsv is null + */ + public Builder selectMechanismBasedOnServerAdvertisedCsv(String serverMechanismsCsv) + throws IllegalArgumentException { + return selectMechanismBasedOnServerAdvertised( + checkNotNull(serverMechanismsCsv, "serverMechanismsCsv").split(",") + ); + } + + /** + * Select a fixed client mechanism. It must be compatible with the channel binding selection previously + * performed. 
If automatic selection based on server advertised mechanisms is preferred, please use methods + * {@link Builder#selectMechanismBasedOnServerAdvertised(String...)} or + * {@link Builder#selectMechanismBasedOnServerAdvertisedCsv(String)}. + * @param scramMechanism The selected scram mechanism + * @throws IllegalArgumentException If the selected mechanism is null or not compatible with the prior + * channel binding selection, + * or channel binding selection is dependent on the server advertised methods + */ + public Builder selectClientMechanism(ScramMechanism scramMechanism) { + checkNotNull(scramMechanism, "scramMechanism"); + if(channelBinding == ChannelBinding.IF_SERVER_SUPPORTS_IT) { + throw new IllegalArgumentException( + "If server selection is considered, no direct client selection should be performed" + ); + } + if( + channelBinding == ChannelBinding.YES && ! scramMechanism.supportsChannelBinding() + || + channelBinding == ChannelBinding.NO && scramMechanism.supportsChannelBinding() + ) { + throw new IllegalArgumentException("Incompatible selection of mechanism and channel binding"); + } + + if(scramMechanism.supportsChannelBinding()) { + return new Builder(channelBinding, stringPreparation, null, scramMechanism); + } else { + return new Builder(channelBinding, stringPreparation, scramMechanism, null); + } + } + } + + /** + * This class is not meant to be used directly. + * Use instead {@link ScramClient#channelBinding(ChannelBinding)} and chained methods. 
+ */ + public static class Builder extends PreBuilder2 { + private final ScramMechanism nonChannelBindingMechanism; + private final ScramMechanism channelBindingMechanism; + + private SecureRandom secureRandom = new SecureRandom(); + private NonceSupplier nonceSupplier; + private int nonceLength = DEFAULT_NONCE_LENGTH; + + private Builder( + ChannelBinding channelBinding, StringPreparation stringPreparation, + ScramMechanism nonChannelBindingMechanism, ScramMechanism channelBindingMechanism + ) { + super(channelBinding, stringPreparation); + this.nonChannelBindingMechanism = nonChannelBindingMechanism; + this.channelBindingMechanism = channelBindingMechanism; + } + + /** + * Optional call. Selects a non-default SecureRandom instance, + * based on the given algorithm and optionally provider. + * This SecureRandom instance will be used to generate secure random values, + * like the ones required to generate the nonce + * (unless an external nonce provider is given via {@link Builder#nonceSupplier(NonceSupplier)}). + * Algorithm and provider names are those supported by the {@link SecureRandom} class. + * @param algorithm The name of the algorithm to use. + * @param provider The name of the provider of SecureRandom. Might be null. + * @return The same class + * @throws IllegalArgumentException If algorithm is null, or either the algorithm or provider are not supported + */ + public Builder secureRandomAlgorithmProvider(String algorithm, String provider) + throws IllegalArgumentException { + checkNotNull(algorithm, "algorithm"); + try { + secureRandom = null == provider ? + SecureRandom.getInstance(algorithm) : + SecureRandom.getInstance(algorithm, provider); + } catch (NoSuchAlgorithmException | NoSuchProviderException e) { + throw new IllegalArgumentException("Invalid algorithm or provider", e); + } + + return this; + } + + /** + * Optional call. The client will use a default nonce generator, + * unless an external one is provided by this method. 
* + * @param nonceSupplier A supplier of valid nonce Strings. + * Please note that according to the + * SCRAM RFC + * only ASCII printable characters (except the comma, ',') are permitted on a nonce. + * Length is not limited. + * @return The same class + * @throws IllegalArgumentException If nonceSupplier is null + */ + public Builder nonceSupplier(NonceSupplier nonceSupplier) throws IllegalArgumentException { + this.nonceSupplier = checkNotNull(nonceSupplier, "nonceSupplier"); + + return this; + } + + /** + * Sets a non-default ({@link ScramClient#DEFAULT_NONCE_LENGTH}) length for the nonce generation, + * if no alternate nonceSupplier is provided via {@link Builder#nonceSupplier(NonceSupplier)}. + * @param length The length of the nonce. Must be positive and greater than 0 + * @return The same class + * @throws IllegalArgumentException If length is less than 1 + */ + public Builder nonceLength(int length) throws IllegalArgumentException { + this.nonceLength = gt0(length, "length"); + + return this; + } + + /** + * Gets the client, fully constructed and configured, with the provided channel binding, string preparation + * properties, and the selected SCRAM mechanism based on server supported mechanisms. + * If no SecureRandom algorithm and provider were provided, a default one would be used. + * If no nonceSupplier was provided, a default nonce generator would be used, + * of the {@link ScramClient#DEFAULT_NONCE_LENGTH} length, unless {@link Builder#nonceLength(int)} is called. + * @return The fully built instance. + */ + public ScramClient setup() { + return new ScramClient( + channelBinding, stringPreparation, nonChannelBindingMechanism, channelBindingMechanism, + secureRandom, + nonceSupplier != null ? 
nonceSupplier : new NonceSupplier() { + @Override + public String get() { + return CryptoUtil.nonce(nonceLength, secureRandom); + } + } + + ); + } + } + + public StringPreparation getStringPreparation() { + return stringPreparation; + } + + public ScramMechanism getScramMechanism() { + return scramMechanism; + } + + /** + * List all the supported SCRAM mechanisms by this client implementation + * @return A list of the IANA-registered, SCRAM supported mechanisms + */ + public static List supportedMechanisms() { + List supportedMechanisms = new ArrayList<>(); + for (ScramMechanisms scramMechanisms : ScramMechanisms.values()) { + supportedMechanisms.add(scramMechanisms.getName()); + } + return supportedMechanisms; + } + + /** + * Instantiates a {@link ScramSession} for the specified user and this parametrized generator. + * @param user The username of the authentication exchange + * @return The ScramSession instance + */ + public ScramSession scramSession(String user) { + return new ScramSession(scramMechanism, stringPreparation, checkNotEmpty(user, "user"), nonceSupplier.get()); + } +} diff --git a/scram-client/src/main/java/com/ongres/scram/client/ScramSession.java b/scram-client/src/main/java/com/ongres/scram/client/ScramSession.java new file mode 100644 index 0000000..1a834e4 --- /dev/null +++ b/scram-client/src/main/java/com/ongres/scram/client/ScramSession.java @@ -0,0 +1,282 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.client; + + +import static com.ongres.scram.common.util.Preconditions.checkNotEmpty; +import static com.ongres.scram.common.util.Preconditions.checkNotNull; + +import com.ongres.scram.common.ScramFunctions; +import com.ongres.scram.common.ScramMechanism; +import com.ongres.scram.common.bouncycastle.base64.Base64; +import com.ongres.scram.common.exception.ScramInvalidServerSignatureException; +import com.ongres.scram.common.exception.ScramParseException; +import com.ongres.scram.common.exception.ScramServerErrorException; +import com.ongres.scram.common.gssapi.Gs2CbindFlag; +import com.ongres.scram.common.message.ClientFinalMessage; +import com.ongres.scram.common.message.ClientFirstMessage; +import com.ongres.scram.common.message.ServerFinalMessage; +import com.ongres.scram.common.message.ServerFirstMessage; +import com.ongres.scram.common.stringprep.StringPreparation; + + +/** + * A class that represents a SCRAM client. Use this class to perform a SCRAM negotiation with a SCRAM server. + * This class performs an authentication execution for a given user, and has state related to it. 
+ * Thus, it cannot be shared across users or authentication executions. + */ +public class ScramSession { + private final ScramMechanism scramMechanism; + private final StringPreparation stringPreparation; + private final String user; + private final String nonce; + private ClientFirstMessage clientFirstMessage; + private String serverFirstMessageString; + + /** + * Constructs a SCRAM client, to perform an authentication for a given user. + * This class can be instantiated directly, + * but it is recommended that a {@link ScramClient} is used instead. + * @param scramMechanism The SCRAM mechanism that will be using this client + * @param stringPreparation + * @param user + * @param nonce + */ + public ScramSession(ScramMechanism scramMechanism, StringPreparation stringPreparation, String user, String nonce) { + this.scramMechanism = checkNotNull(scramMechanism, "scramMechanism"); + this.stringPreparation = checkNotNull(stringPreparation, "stringPreparation"); + this.user = checkNotEmpty(user, "user"); + this.nonce = checkNotEmpty(nonce, "nonce"); + } + + private String setAndReturnClientFirstMessage(ClientFirstMessage clientFirstMessage) { + this.clientFirstMessage = clientFirstMessage; + + return clientFirstMessage.toString(); + } + + /** + * Returns the text representation of a SCRAM client-first-message, with the GSS-API header values indicated. + * @param gs2CbindFlag The channel binding flag + * @param cbindName The channel binding algorithm name, if channel binding is supported, or null + * @param authzid The optional + * @return The message + */ + public String clientFirstMessage(Gs2CbindFlag gs2CbindFlag, String cbindName, String authzid) { + return setAndReturnClientFirstMessage(new ClientFirstMessage(gs2CbindFlag, authzid, cbindName, user, nonce)); + } + + /** + * Returns the text representation of a SCRAM client-first-message, with no channel binding nor authzid. 
+ * @return The message + */ + public String clientFirstMessage() { + return setAndReturnClientFirstMessage(new ClientFirstMessage(user, nonce)); + } + + /** + * Process a received server-first-message. + * Generate by calling {@link #receiveServerFirstMessage(String)}. + */ + public class ServerFirstProcessor { + private final ServerFirstMessage serverFirstMessage; + + private ServerFirstProcessor(String receivedServerFirstMessage) throws ScramParseException { + serverFirstMessageString = receivedServerFirstMessage; + serverFirstMessage = ServerFirstMessage.parseFrom(receivedServerFirstMessage, nonce); + } + + public String getSalt() { + return serverFirstMessage.getSalt(); + } + + public int getIteration() { + return serverFirstMessage.getIteration(); + } + + /** + * Generates a {@link ClientFinalProcessor}, that allows to generate the client-final-message and also + * receive and parse the server-first-message. It is based on the user's password. + * @param password The user's password + * @return The handler + * @throws IllegalArgumentException If the message is null or empty + */ + public ClientFinalProcessor clientFinalProcessor(String password) throws IllegalArgumentException { + return new ClientFinalProcessor( + serverFirstMessage.getNonce(), + checkNotEmpty(password, "password"), + getSalt(), + getIteration() + ); + } + + /** + * Generates a {@link ClientFinalProcessor}, that allows to generate the client-final-message and also + * receive and parse the server-first-message. It is based on the clientKey and storedKey, + * which, if available, provide an optimized path versus providing the original user's password. + * @param clientKey The client key, as per the SCRAM algorithm. + * It can be generated with: + * {@link ScramFunctions#clientKey(ScramMechanism, StringPreparation, String, byte[], int)} + * @param storedKey The stored key, as per the SCRAM algorithm. 
+ * It can be generated from the client key with: + * {@link ScramFunctions#storedKey(ScramMechanism, byte[])} + * @return The handler + * @throws IllegalArgumentException If the message is null or empty + */ + public ClientFinalProcessor clientFinalProcessor(byte[] clientKey, byte[] storedKey) + throws IllegalArgumentException { + return new ClientFinalProcessor( + serverFirstMessage.getNonce(), + checkNotNull(clientKey, "clientKey"), + checkNotNull(storedKey, "storedKey") + ); + } + } + + /** + * Processor that allows to generate the client-final-message, + * as well as process the server-final-message and verify server's signature. + * Generate the processor by calling either {@link ServerFirstProcessor#clientFinalProcessor(String)} + * or {@link ServerFirstProcessor#clientFinalProcessor(byte[], byte[])}. + */ + public class ClientFinalProcessor { + private final String nonce; + private final byte[] clientKey; + private final byte[] storedKey; + private final byte[] serverKey; + private String authMessage; + + private ClientFinalProcessor(String nonce, byte[] clientKey, byte[] storedKey, byte[] serverKey) { + assert null != clientKey : "clientKey"; + assert null != storedKey : "storedKey"; + assert null != serverKey : "serverKey"; + + this.nonce = nonce; + this.clientKey = clientKey; + this.storedKey = storedKey; + this.serverKey = serverKey; + } + + private ClientFinalProcessor(String nonce, byte[] clientKey, byte[] serverKey) { + this(nonce, clientKey, ScramFunctions.storedKey(scramMechanism, clientKey), serverKey); + } + + private ClientFinalProcessor(String nonce, byte[] saltedPassword) { + this( + nonce, + ScramFunctions.clientKey(scramMechanism, saltedPassword), + ScramFunctions.serverKey(scramMechanism, saltedPassword) + ); + } + + private ClientFinalProcessor(String nonce, String password, String salt, int iteration) { + this( + nonce, + ScramFunctions.saltedPassword( + scramMechanism, stringPreparation, password, Base64.decode(salt), iteration + ) + ); 
+ } + + private synchronized void generateAndCacheAuthMessage(byte[] cbindData) { + if(null != authMessage) { + return; + } + + authMessage = clientFirstMessage.writeToWithoutGs2Header(new StringBuffer()) + .append(",") + .append(serverFirstMessageString) + .append(",") + .append(ClientFinalMessage.writeToWithoutProof(clientFirstMessage.getGs2Header(), cbindData, nonce)) + .toString(); + } + + /** + * Generates the SCRAM representation of the client-final-message, including the given channel-binding data. + * @param cbindData The bytes of the channel-binding data + * @return The message + */ + public String clientFinalMessage(byte[] cbindData) { + if(null == authMessage) { + generateAndCacheAuthMessage(cbindData); + } + + ClientFinalMessage clientFinalMessage = new ClientFinalMessage( + clientFirstMessage.getGs2Header(), + cbindData, + nonce, + ScramFunctions.clientProof( + clientKey, + ScramFunctions.clientSignature(scramMechanism, storedKey, authMessage) + ) + ); + + return clientFinalMessage.toString(); + } + + /** + * Generates the SCRAM representation of the client-final-message. + * @return The message + */ + public String clientFinalMessage() { + return clientFinalMessage(null); + } + + /** + * Receive and process the server-final-message. + * Server SCRAM signatures is verified. 
+ * @param serverFinalMessage The received server-final-message + * @throws ScramParseException If the message is not a valid server-final-message + * @throws ScramServerErrorException If the server-final-message contained an error + * @throws ScramInvalidServerSignatureException If the server SCRAM signature does not verify + * @throws IllegalArgumentException If the message is null or empty + */ + public void receiveServerFinalMessage(String serverFinalMessage) + throws ScramParseException, ScramServerErrorException, ScramInvalidServerSignatureException, + IllegalArgumentException { + checkNotEmpty(serverFinalMessage, "serverFinalMessage"); + + ServerFinalMessage message = ServerFinalMessage.parseFrom(serverFinalMessage); + if(message.isError()) { + throw new ScramServerErrorException(message.getError()); + } + if(! ScramFunctions.verifyServerSignature( + scramMechanism, serverKey, authMessage, message.getVerifier() + )) { + throw new ScramInvalidServerSignatureException("Invalid server SCRAM signature"); + } + } + } + + /** + * Constructs a handler for the server-first-message, from its String representation. 
+ * @param serverFirstMessage The message + * @return The handler + * @throws ScramParseException If the message is not a valid server-first-message + * @throws IllegalArgumentException If the message is null or empty + */ + public ServerFirstProcessor receiveServerFirstMessage(String serverFirstMessage) + throws ScramParseException, IllegalArgumentException { + return new ServerFirstProcessor(checkNotEmpty(serverFirstMessage, "serverFirstMessage")); + } +} diff --git a/scram-client/src/main/java/module-info.java b/scram-client/src/main/java/module-info.java new file mode 100644 index 0000000..77442db --- /dev/null +++ b/scram-client/src/main/java/module-info.java @@ -0,0 +1,4 @@ +module org.xbib.scram.client { + requires transitive org.xbib.scram.common; + exports com.ongres.scram.client; +} diff --git a/scram-client/src/test/java/com/ongres/scram/client/ScramClientTest.java b/scram-client/src/test/java/com/ongres/scram/client/ScramClientTest.java new file mode 100644 index 0000000..2c92e17 --- /dev/null +++ b/scram-client/src/test/java/com/ongres/scram/client/ScramClientTest.java @@ -0,0 +1,201 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.client; + + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.util.Arrays; + +import org.junit.Test; + +import com.ongres.scram.common.ScramMechanisms; +import com.ongres.scram.common.stringprep.StringPreparations; +import com.ongres.scram.common.util.CryptoUtil; + + +public class ScramClientTest { + @Test + public void getValid() { + ScramClient client1 = ScramClient + .channelBinding(ScramClient.ChannelBinding.NO) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1") + .setup(); + ScramClient client2 = ScramClient + .channelBinding(ScramClient.ChannelBinding.YES) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1", "SCRAM-SHA-256-PLUS") + .nonceLength(64) + .setup(); + ScramClient client3 = ScramClient + .channelBinding(ScramClient.ChannelBinding.IF_SERVER_SUPPORTS_IT) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1", "SCRAM-SHA-1-PLUS") + .nonceSupplier + (new NonceSupplier() { + @Override + public String get() { + return CryptoUtil.nonce(36); + } + }) + .setup(); + ScramClient client4 = ScramClient + .channelBinding(ScramClient.ChannelBinding.IF_SERVER_SUPPORTS_IT) + 
.stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertisedCsv("SCRAM-SHA-1,SCRAM-SHA-256-PLUS") + .secureRandomAlgorithmProvider("SHA1PRNG", "SUN") + .nonceLength(64) + .setup(); + ScramClient client5 = ScramClient + .channelBinding(ScramClient.ChannelBinding.IF_SERVER_SUPPORTS_IT) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertisedCsv("SCRAM-SHA-1,SCRAM-SHA-256-PLUS") + .secureRandomAlgorithmProvider("SHA1PRNG", null) + .nonceLength(64) + .setup(); + ScramClient client6 = ScramClient + .channelBinding(ScramClient.ChannelBinding.NO) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectClientMechanism(ScramMechanisms.SCRAM_SHA_1) + .setup(); + ScramClient client7 = ScramClient + .channelBinding(ScramClient.ChannelBinding.YES) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectClientMechanism(ScramMechanisms.SCRAM_SHA_256_PLUS) + .setup(); + + for (ScramClient client : new ScramClient[] { + client1, client2, client3, client4, client5, client6, client7 + }) { + assertNotNull(client); + } + } + + @Test + public void getInvalid() { + int n = 0; + + try { + assertNotNull(ScramClient + .channelBinding(ScramClient.ChannelBinding.NO) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1-PLUS") + .setup() + ); + } catch (IllegalArgumentException e) { n++; } + try { + assertNotNull(ScramClient + .channelBinding(ScramClient.ChannelBinding.YES) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1-PLUS,SCRAM-SAH-256-PLUS") + .setup() + ); + } catch (IllegalArgumentException e) { n++; } + try { + assertNotNull(ScramClient + .channelBinding(ScramClient.ChannelBinding.IF_SERVER_SUPPORTS_IT) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("INVALID-SCRAM-MECHANISM") + .setup() + ); + } catch 
(IllegalArgumentException e) { n++; } + try { + assertNotNull(ScramClient + .channelBinding(ScramClient.ChannelBinding.IF_SERVER_SUPPORTS_IT) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1", "SCRAM-SHA-1-PLUS") + .nonceSupplier(null) + .setup() + ); + } catch (IllegalArgumentException e) { n++; } + try { + assertNotNull(ScramClient + .channelBinding(ScramClient.ChannelBinding.IF_SERVER_SUPPORTS_IT) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1", "SCRAM-SHA-1-PLUS") + .nonceLength(0) + .setup() + ); + } catch (IllegalArgumentException e) { n++; } + try { + assertNotNull(ScramClient + .channelBinding(ScramClient.ChannelBinding.IF_SERVER_SUPPORTS_IT) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1", "SCRAM-SHA-1-PLUS") + .secureRandomAlgorithmProvider("Invalid algorithm", null) + .setup() + ); + } catch (IllegalArgumentException e) { n++; } + try { + assertNotNull(ScramClient + .channelBinding(ScramClient.ChannelBinding.IF_SERVER_SUPPORTS_IT) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1", "SCRAM-SHA-1-PLUS") + .secureRandomAlgorithmProvider("SHA1PRNG", "Invalid provider") + .setup() + ); + } catch (IllegalArgumentException e) { n++; } + try { + assertNotNull(ScramClient + .channelBinding(ScramClient.ChannelBinding.YES) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectClientMechanism(ScramMechanisms.SCRAM_SHA_1) + .setup() + ); + } catch (IllegalArgumentException e) { n++; } + try { + assertNotNull(ScramClient + .channelBinding(ScramClient.ChannelBinding.NO) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectClientMechanism(ScramMechanisms.SCRAM_SHA_1_PLUS) + .setup() + ); + } catch (IllegalArgumentException e) { n++; } + try { + assertNotNull(ScramClient + 
.channelBinding(ScramClient.ChannelBinding.IF_SERVER_SUPPORTS_IT) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectClientMechanism(ScramMechanisms.SCRAM_SHA_1) + .setup() + ); + } catch (IllegalArgumentException e) { n++; } + + assertEquals(10, n); + } + + @Test + public void supportedMechanismsTestAll() { + String[] expecteds = new String[] { "SCRAM-SHA-1", "SCRAM-SHA-1-PLUS", "SCRAM-SHA-256", "SCRAM-SHA-256-PLUS" }; + Arrays.sort(expecteds); + String[] actuals = ScramClient.supportedMechanisms().toArray(new String[0]); + Arrays.sort(actuals); + assertArrayEquals( + expecteds, + actuals + ); + } +} diff --git a/scram-client/src/test/java/com/ongres/scram/client/ScramSessionTest.java b/scram-client/src/test/java/com/ongres/scram/client/ScramSessionTest.java new file mode 100644 index 0000000..40b670d --- /dev/null +++ b/scram-client/src/test/java/com/ongres/scram/client/ScramSessionTest.java @@ -0,0 +1,67 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.client; + + +import com.ongres.scram.common.exception.ScramInvalidServerSignatureException; +import com.ongres.scram.common.exception.ScramParseException; +import com.ongres.scram.common.exception.ScramServerErrorException; +import com.ongres.scram.common.stringprep.StringPreparations; +import org.junit.Test; + +import static com.ongres.scram.common.RfcExampleSha1.*; +import static org.junit.Assert.*; + +public class ScramSessionTest { + private final ScramClient scramClient = ScramClient + .channelBinding(ScramClient.ChannelBinding.NO) + .stringPreparation(StringPreparations.NO_PREPARATION) + .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1") + .nonceSupplier + (new NonceSupplier() { + @Override + public String get() { + return CLIENT_NONCE; + } + }) + .setup(); + + @Test + public void completeTest() + throws ScramParseException, ScramInvalidServerSignatureException, ScramServerErrorException { + ScramSession scramSession = scramClient.scramSession(USER); + assertEquals(CLIENT_FIRST_MESSAGE, scramSession.clientFirstMessage()); + + ScramSession.ServerFirstProcessor serverFirstProcessor = scramSession.receiveServerFirstMessage( + SERVER_FIRST_MESSAGE + ); + assertEquals(SERVER_SALT, serverFirstProcessor.getSalt()); + assertEquals(SERVER_ITERATIONS, serverFirstProcessor.getIteration()); + + ScramSession.ClientFinalProcessor clientFinalProcessor = serverFirstProcessor.clientFinalProcessor(PASSWORD); + 
assertEquals(CLIENT_FINAL_MESSAGE, clientFinalProcessor.clientFinalMessage()); + + clientFinalProcessor.receiveServerFinalMessage(SERVER_FINAL_MESSAGE); + } +} diff --git a/scram-common/build.gradle b/scram-common/build.gradle new file mode 100644 index 0000000..c137ec1 --- /dev/null +++ b/scram-common/build.gradle @@ -0,0 +1,3 @@ +dependencies { + api project(':saslprep') +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/ScramAttributeValue.java b/scram-common/src/main/java/com/ongres/scram/common/ScramAttributeValue.java new file mode 100644 index 0000000..48b77a3 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/ScramAttributeValue.java @@ -0,0 +1,59 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common; + + +import com.ongres.scram.common.exception.ScramParseException; +import com.ongres.scram.common.util.AbstractCharAttributeValue; + +import static com.ongres.scram.common.util.Preconditions.checkNotNull; + + +/** + * Parse and write SCRAM Attribute-Value pairs. + */ +public class ScramAttributeValue extends AbstractCharAttributeValue { + public ScramAttributeValue(ScramAttributes attribute, String value) { + super(attribute, checkNotNull(value, "value")); + } + + public static StringBuffer writeTo(StringBuffer sb, ScramAttributes attribute, String value) { + return new ScramAttributeValue(attribute, value).writeTo(sb); + } + + /** + * Parses a potential ScramAttributeValue String. + * @param value The string that contains the Attribute-Value pair. 
+ * @return The parsed class + * @throws ScramParseException If the argument is empty or an invalid Attribute-Value + */ + public static ScramAttributeValue parse(String value) + throws ScramParseException { + if(null == value || value.length() < 3 || value.charAt(1) != '=') { + throw new ScramParseException("Invalid ScramAttributeValue '" + value + "'"); + } + + return new ScramAttributeValue(ScramAttributes.byChar(value.charAt(0)), value.substring(2)); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/ScramAttributes.java b/scram-common/src/main/java/com/ongres/scram/common/ScramAttributes.java new file mode 100644 index 0000000..84dea0d --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/ScramAttributes.java @@ -0,0 +1,158 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common; + + +import com.ongres.scram.common.exception.ScramParseException; +import com.ongres.scram.common.util.CharAttribute; + +import java.util.HashMap; +import java.util.Map; + +import static com.ongres.scram.common.util.Preconditions.checkNotNull; + + +/** + * SCRAM Attributes as defined in Section 5.1 of the RFC. + * + * Not all the available attributes may be available in this implementation. + */ +public enum ScramAttributes implements CharAttribute { + /** + * This attribute specifies the name of the user whose password is used for authentication + * (a.k.a. "authentication identity" [RFC4422]). + * If the "a" attribute is not specified (which would normally be the case), this username is also the identity + * that will be associated with the connection subsequent to authentication and authorization. + * + * The client SHOULD prepare the username using the "SASLprep" profile + * [RFC4013] of the "stringprep" algorithm + * [RFC3454] treating it as a query string + * (i.e., unassigned Unicode code points are allowed). + * + * The characters ',' or '=' in usernames are sent as '=2C' and '=3D' respectively. + */ + USERNAME('n'), + + /** + * This is an optional attribute, and is part of the GS2 [RFC5801] + * bridge between the GSS-API and SASL. This attribute specifies an authorization identity. 
+ * A client may include it in its first message to the server if it wants to authenticate as one user, + * but subsequently act as a different user. This is typically used by an administrator to perform some management + * task on behalf of another user, or by a proxy in some situations. + * + * If this attribute is omitted (as it normally would be), the authorization identity is assumed to be derived + * from the username specified with the (required) "n" attribute. + * + * The server always authenticates the user specified by the "n" attribute. + * If the "a" attribute specifies a different user, the server associates that identity with the connection after + * successful authentication and authorization checks. + * + * The syntax of this field is the same as that of the "n" field with respect to quoting of '=' and ','. + */ + AUTHZID('a'), + + /** + * This attribute specifies a sequence of random printable ASCII characters excluding ',' + * (which forms the nonce used as input to the hash function). No quoting is applied to this string. + */ + NONCE('r'), + + /** + * This REQUIRED attribute specifies the base64-encoded GS2 header and channel binding data. + * The attribute data consist of: + *
+ * <ul>
+ * <li>
+ * the GS2 header from the client's first message + * (recall that the GS2 header contains a channel binding flag and an optional authzid). + * This header is going to include channel binding type prefix + * (see [RFC5056]), + * if and only if the client is using channel binding; + *
+ * </li>
+ * <li>
+ * followed by the external channel's channel binding data, + * if and only if the client is using channel binding. + *
+ * </li>
+ * </ul>
+ */ + CHANNEL_BINDING('c'), + + /** + * This attribute specifies the base64-encoded salt used by the server for this user. + */ + SALT('s'), + + /** + * This attribute specifies an iteration count for the selected hash function and user. + */ + ITERATION('i'), + + /** + * This attribute specifies a base64-encoded ClientProof. + */ + CLIENT_PROOF('p'), + + /** + * This attribute specifies a base64-encoded ServerSignature. + */ + SERVER_SIGNATURE('v'), + + /** + * This attribute specifies an error that occurred during authentication exchange. + * Can help diagnose the reason for the authentication exchange failure. + */ + ERROR('e') + ; + + private final char attributeChar; + + ScramAttributes(char attributeChar) { + this.attributeChar = checkNotNull(attributeChar, "attributeChar"); + } + + @Override + public char getChar() { + return attributeChar; + } + + private static final Map REVERSE_MAPPING = new HashMap(); + static { + for(ScramAttributes scramAttribute : values()) { + REVERSE_MAPPING.put(scramAttribute.getChar(), scramAttribute); + } + } + + /** + * Find a SCRAMAttribute by its character. + * @param c The character. + * @return The SCRAMAttribute that has that character. + * @throws ScramParseException If no SCRAMAttribute has this character. + */ + public static ScramAttributes byChar(char c) throws ScramParseException { + if(! REVERSE_MAPPING.containsKey(c)) { + throw new ScramParseException("Attribute with char '" + c + "' does not exist"); + } + + return REVERSE_MAPPING.get(c); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/ScramFunctions.java b/scram-common/src/main/java/com/ongres/scram/common/ScramFunctions.java new file mode 100644 index 0000000..e52167d --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/ScramFunctions.java @@ -0,0 +1,253 @@ +/* + * Copyright 2017, OnGres. 
+ * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common; + + +import com.ongres.scram.common.stringprep.StringPreparation; +import com.ongres.scram.common.util.CryptoUtil; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +/** + * Utility functions (e.g. crypto) for SCRAM. 
+ */ +public class ScramFunctions { + private static final byte[] CLIENT_KEY_HMAC_KEY = "Client Key".getBytes(StandardCharsets.UTF_8); + private static final byte[] SERVER_KEY_HMAC_KEY = "Server Key".getBytes(StandardCharsets.UTF_8); + + public ScramFunctions() { + } + + /** + * Compute the salted password, based on the given SCRAM mechanism, the String preparation algorithm, + * the provided salt and the number of iterations. + * + * {@code + * SaltedPassword := Hi(Normalize(password), salt, i) + * } + * + * @param scramMechanism The SCRAM mechanism + * @param stringPreparation The String preparation + * @param password The non-salted password + * @param salt The bytes representing the salt + * @param iteration The number of iterations + * @return The salted password + */ + public static byte[] saltedPassword( + ScramMechanism scramMechanism, StringPreparation stringPreparation, String password, byte[] salt, + int iteration + ) { + return scramMechanism.saltedPassword(stringPreparation, password, salt, iteration); + } + + /** + * Computes the HMAC of the message and key, using the given SCRAM mechanism. + * @param scramMechanism The SCRAM mechanism + * @param message The message to compute the HMAC + * @param key The key used to initialize the MAC + * @return The computed HMAC + */ + public static byte[] hmac(ScramMechanism scramMechanism, byte[] message, byte[] key) { + return scramMechanism.hmac(key, message); + } + + /** + * Generates a client key, from the salted password. + * + * {@code + * ClientKey := HMAC(SaltedPassword, "Client Key") + * } + * + * @param scramMechanism The SCRAM mechanism + * @param saltedPassword The salted password + * @return The client key + */ + public static byte[] clientKey(ScramMechanism scramMechanism, byte[] saltedPassword) { + return hmac(scramMechanism, CLIENT_KEY_HMAC_KEY, saltedPassword); + } + + /** + * Generates a client key from the password and salt. 
+ * + * {@code + * SaltedPassword := Hi(Normalize(password), salt, i) + * ClientKey := HMAC(SaltedPassword, "Client Key") + * } + * + * @param scramMechanism The SCRAM mechanism + * @param stringPreparation The String preparation + * @param password The non-salted password + * @param salt The bytes representing the salt + * @param iteration The number of iterations + * @return The client key + */ + public static byte[] clientKey( + ScramMechanism scramMechanism, StringPreparation stringPreparation, String password, byte[] salt, + int iteration + ) { + return clientKey(scramMechanism, saltedPassword(scramMechanism, stringPreparation, password, salt, iteration)); + } + + /** + * Generates a server key, from the salted password. + * + * {@code + * ServerKey := HMAC(SaltedPassword, "Server Key") + * } + * + * @param scramMechanism The SCRAM mechanism + * @param saltedPassword The salted password + * @return The server key + */ + public static byte[] serverKey(ScramMechanism scramMechanism, byte[] saltedPassword) { + return hmac(scramMechanism, SERVER_KEY_HMAC_KEY, saltedPassword); + } + + /** + * Generates a server key from the password and salt. + * + * {@code + * SaltedPassword := Hi(Normalize(password), salt, i) + * ServerKey := HMAC(SaltedPassword, "Server Key") + * } + * + * @param scramMechanism The SCRAM mechanism + * @param stringPreparation The String preparation + * @param password The non-salted password + * @param salt The bytes representing the salt + * @param iteration The number of iterations + * @return The server key + */ + public static byte[] serverKey( + ScramMechanism scramMechanism, StringPreparation stringPreparation, String password, byte[] salt, + int iteration + ) { + return serverKey(scramMechanism, saltedPassword(scramMechanism, stringPreparation, password, salt, iteration)); + } + + /** + * Computes the hash function of a given value, based on the SCRAM mechanism hash function. 
+ * @param scramMechanism The SCRAM mechanism + * @param value The value to hash + * @return The hashed value + */ + public static byte[] hash(ScramMechanism scramMechanism, byte[] value) { + return scramMechanism.digest(value); + } + + /** + * Generates a stored key, from the salted password. + * + * {@code + * StoredKey := H(ClientKey) + * } + * + * @param scramMechanism The SCRAM mechanism + * @param clientKey The client key + * @return The stored key + */ + public static byte[] storedKey(ScramMechanism scramMechanism, byte[] clientKey) { + return hash(scramMechanism, clientKey); + } + + /** + * Computes the SCRAM client signature. + * + * {@code + * ClientSignature := HMAC(StoredKey, AuthMessage) + * } + * + * @param scramMechanism The SCRAM mechanism + * @param storedKey The stored key + * @param authMessage The auth message + * @return The client signature + */ + public static byte[] clientSignature(ScramMechanism scramMechanism, byte[] storedKey, String authMessage) { + return hmac(scramMechanism, authMessage.getBytes(StandardCharsets.UTF_8), storedKey); + } + + /** + * Computes the SCRAM client proof to be sent to the server on the client-final-message. + * + * {@code + * ClientProof := ClientKey XOR ClientSignature + * } + * + * @param clientKey The client key + * @param clientSignature The client signature + * @return The client proof + */ + public static byte[] clientProof(byte[] clientKey, byte[] clientSignature) { + return CryptoUtil.xor(clientKey, clientSignature); + } + + /** + * Compute the SCRAM server signature. 
+ * + * {@code + * ServerSignature := HMAC(ServerKey, AuthMessage) + * } + * + * @param scramMechanism The SCRAM mechanism + * @param serverKey The server key + * @param authMessage The auth message + * @return The server signature + */ + public static byte[] serverSignature(ScramMechanism scramMechanism, byte[] serverKey, String authMessage) { + return clientSignature(scramMechanism, serverKey, authMessage); + } + + /** + * Verifies that a provided client proof is correct. + * @param scramMechanism The SCRAM mechanism + * @param clientProof The provided client proof + * @param storedKey The stored key + * @param authMessage The auth message + * @return True if the client proof is correct + */ + public static boolean verifyClientProof( + ScramMechanism scramMechanism, byte[] clientProof, byte[] storedKey, String authMessage + ) { + byte[] clientSignature = clientSignature(scramMechanism, storedKey, authMessage); + byte[] clientKey = CryptoUtil.xor(clientSignature, clientProof); + byte[] computedStoredKey = hash(scramMechanism, clientKey); + + return Arrays.equals(storedKey, computedStoredKey); + } + + /** + * Verifies that a provided server proof is correct. 
+ * @param scramMechanism The SCRAM mechanism + * @param serverKey The server key + * @param authMessage The auth message + * @param serverSignature The provided server signature + * @return True if the server signature is correct + */ + public static boolean verifyServerSignature( + ScramMechanism scramMechanism, byte[] serverKey, String authMessage, byte[] serverSignature + ) { + return Arrays.equals(serverSignature(scramMechanism, serverKey, authMessage), serverSignature); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/ScramMechanism.java b/scram-common/src/main/java/com/ongres/scram/common/ScramMechanism.java new file mode 100644 index 0000000..8f6fc29 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/ScramMechanism.java @@ -0,0 +1,80 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common; + + +import com.ongres.scram.common.stringprep.StringPreparation; + + +/** + * Definition of the functionality to be provided by every ScramMechanism. + * + * Every ScramMechanism implemented must provide implementations of their respective digest and hmac + * function that will not throw a RuntimeException on any JVM, to guarantee true portability of this library. + */ +public interface ScramMechanism { + /** + * The name of the mechanism, which must be a value registered under IANA: + * + * SASL SCRAM Family Mechanisms + * @return The mechanism name + */ + String getName(); + + /** + * Calculate a message digest, according to the algorithm of the SCRAM mechanism. + * @param message the message + * @return The calculated message digest + * @throws RuntimeException If the algorithm is not provided by current JVM or any included implementations + */ + byte[] digest(byte[] message) throws RuntimeException; + + /** + * Calculate the hmac of a key and a message, according to the algorithm of the SCRAM mechanism. + * @param key the key + * @param message the message + * @return The calculated message hmac instance + * @throws RuntimeException If the algorithm is not provided by current JVM or any included implementations + */ + byte[] hmac(byte[] key, byte[] message) throws RuntimeException; + + /** + * Returns the length of the key length of the algorithm. 
+ * @return The length (in bits) + */ + int algorithmKeyLength(); + + /** + * Whether this mechanism supports channel binding + * @return True if it supports channel binding, false otherwise + */ + boolean supportsChannelBinding(); + + /** + * Compute the salted password + * @return The salted password + */ + byte[] saltedPassword(StringPreparation stringPreparation, String password, + byte[] salt, int iteration); +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/ScramMechanisms.java b/scram-common/src/main/java/com/ongres/scram/common/ScramMechanisms.java new file mode 100644 index 0000000..c865f96 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/ScramMechanisms.java @@ -0,0 +1,220 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common; + + +import static com.ongres.scram.common.util.Preconditions.checkNotNull; +import static com.ongres.scram.common.util.Preconditions.gt0; + +import com.ongres.scram.common.bouncycastle.pbkdf2.DigestFactory; +import com.ongres.scram.common.bouncycastle.pbkdf2.KeyParameter; +import com.ongres.scram.common.bouncycastle.pbkdf2.PBEParametersGenerator; +import com.ongres.scram.common.bouncycastle.pbkdf2.PKCS5S2ParametersGenerator; +import com.ongres.scram.common.stringprep.StringPreparation; +import com.ongres.scram.common.util.CryptoUtil; + +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.HashMap; +import java.util.Map; + +import javax.crypto.Mac; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.SecretKeySpec; + +/** + * SCRAM Mechanisms supported by this library. + * At least, SCRAM-SHA-1 and SCRAM-SHA-256 are provided, since both the hash and the HMAC implementations + * are provided by the Java JDK version 6 or greater. + * + * {@link java.security.MessageDigest}: "Every implementation of the Java platform is required to support the + * following standard MessageDigest algorithms: MD5, SHA-1, SHA-256". + * + * {@link javax.crypto.Mac}: "Every implementation of the Java platform is required to support the following + * standard Mac algorithms: HmacMD5, HmacSHA1, HmacSHA256". 
+ * + * @see + * SASL SCRAM Family Mechanisms + */ +public enum ScramMechanisms implements ScramMechanism { + SCRAM_SHA_1 ( "SHA-1", "SHA-1", 160, "HmacSHA1", false, 1 ), + SCRAM_SHA_1_PLUS ( "SHA-1", "SHA-1", 160, "HmacSHA1", true, 1 ), + SCRAM_SHA_256 ( "SHA-256", "SHA-256", 256, "HmacSHA256", false, 10 ), + SCRAM_SHA_256_PLUS ( "SHA-256", "SHA-256", 256, "HmacSHA256", true, 10 ) + ; + + private static final String SCRAM_MECHANISM_NAME_PREFIX = "SCRAM-"; + private static final String CHANNEL_BINDING_SUFFIX = "-PLUS"; + private static final String PBKDF2_PREFIX_ALGORITHM_NAME = "PBKDF2With"; + private static final Map BY_NAME_MAPPING = valuesAsMap(); + + private final String mechanismName; + private final String hashAlgorithmName; + private final int keyLength; + private final String hmacAlgorithmName; + private final boolean channelBinding; + private final int priority; + + ScramMechanisms( + String name, String hashAlgorithmName, int keyLength, String hmacAlgorithmName, boolean channelBinding, + int priority + ) { + this.mechanismName = SCRAM_MECHANISM_NAME_PREFIX + + checkNotNull(name, "name") + + (channelBinding ? CHANNEL_BINDING_SUFFIX : "") + ; + this.hashAlgorithmName = checkNotNull(hashAlgorithmName, "hashAlgorithmName"); + this.keyLength = gt0(keyLength, "keyLength"); + this.hmacAlgorithmName = checkNotNull(hmacAlgorithmName, "hmacAlgorithmName"); + this.channelBinding = channelBinding; + this.priority = gt0(priority, "priority"); + } + + /** + * Method that returns the name of the hash algorithm. + * It is protected since should be of no interest for direct users. + * The instance is supposed to provide abstractions over the algorithm names, + * and are not meant to be directly exposed. + * @return The name of the hash algorithm + */ + protected String getHashAlgorithmName() { + return hashAlgorithmName; + } + + /** + * Method that returns the name of the HMAC algorithm. + * It is protected since should be of no interest for direct users. 
+ * The instance is supposed to provide abstractions over the algorithm names, + * and are not meant to be directly exposed. + * @return The name of the HMAC algorithm + */ + protected String getHmacAlgorithmName() { + return hmacAlgorithmName; + } + + @Override + public String getName() { + return mechanismName; + } + + @Override + public boolean supportsChannelBinding() { + return channelBinding; + } + + @Override + public int algorithmKeyLength() { + return keyLength; + } + + @Override + public byte[] digest(byte[] message) { + try { + return MessageDigest.getInstance(hashAlgorithmName).digest(message); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("Algorithm " + hashAlgorithmName + " not present in current JVM"); + } + } + + @Override + public byte[] hmac(byte[] key, byte[] message) { + try { + return CryptoUtil.hmac(new SecretKeySpec(key, hmacAlgorithmName), Mac.getInstance(hmacAlgorithmName), message); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("MAC Algorithm " + hmacAlgorithmName + " not present in current JVM"); + } + } + + @Override + public byte[] saltedPassword(StringPreparation stringPreparation, String password, byte[] salt, + int iterations) { + char[] normalizedString = stringPreparation.normalize(password).toCharArray(); + try { + return CryptoUtil.hi( + SecretKeyFactory.getInstance(PBKDF2_PREFIX_ALGORITHM_NAME + hmacAlgorithmName), + algorithmKeyLength(), + normalizedString, + salt, + iterations); + } catch (NoSuchAlgorithmException e) { + if(!ScramMechanisms.SCRAM_SHA_256.getHmacAlgorithmName().equals(getHmacAlgorithmName())) { + throw new RuntimeException("Unsupported PBKDF2 for " + mechanismName); + } + + PBEParametersGenerator generator = new PKCS5S2ParametersGenerator(DigestFactory.createSHA256()); + generator.init(PBEParametersGenerator.PKCS5PasswordToUTF8Bytes(normalizedString), salt, iterations); + KeyParameter params = 
(KeyParameter)generator.generateDerivedParameters(algorithmKeyLength()); + return params.getKey(); + } + } + + /** + * Gets a SCRAM mechanism, given its standard IANA name. + * @param name The standard IANA full name of the mechanism. + * @return An Optional instance that contains the ScramMechanism if it was found, or empty otherwise. + */ + public static ScramMechanisms byName(String name) { + checkNotNull(name, "name"); + + return BY_NAME_MAPPING.get(name); + } + + /** + * This class classifies SCRAM mechanisms by two properties: whether they support channel binding; + * and a priority, which is higher for safer algorithms (like SHA-256 vs SHA-1). + * + * Given a list of SCRAM mechanisms supported by the peer, pick one that matches the channel binding requirements + * and has the highest priority. + * + * @param channelBinding The type of matching mechanism searched for + * @param peerMechanisms The mechanisms supported by the other peer + * @return The selected mechanism, or null if no mechanism matched + */ + public static ScramMechanism selectMatchingMechanism(boolean channelBinding, String... 
peerMechanisms) { + ScramMechanisms selectedScramMechanisms = null; + for (String peerMechanism : peerMechanisms) { + ScramMechanisms matchedScramMechanisms = BY_NAME_MAPPING.get(peerMechanism); + if (matchedScramMechanisms != null) { + for (ScramMechanisms candidateScramMechanisms : ScramMechanisms.values()) { + if (channelBinding == candidateScramMechanisms.channelBinding + && candidateScramMechanisms.mechanismName.equals(matchedScramMechanisms.mechanismName) + && (selectedScramMechanisms == null + || selectedScramMechanisms.priority < candidateScramMechanisms.priority)) { + selectedScramMechanisms = candidateScramMechanisms; + } + } + } + } + return selectedScramMechanisms; + } + + private static Map valuesAsMap() { + Map mapScramMechanisms = new HashMap<>(values().length); + for (ScramMechanisms scramMechanisms : values()) { + mapScramMechanisms.put(scramMechanisms.getName(), scramMechanisms); + } + return mapScramMechanisms; + } + +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/ScramStringFormatting.java b/scram-common/src/main/java/com/ongres/scram/common/ScramStringFormatting.java new file mode 100644 index 0000000..ee4c58f --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/ScramStringFormatting.java @@ -0,0 +1,152 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. 
package com.ongres.scram.common;

import static com.ongres.scram.common.util.Preconditions.checkNotEmpty;
import static com.ongres.scram.common.util.Preconditions.checkNotNull;

import java.nio.charset.StandardCharsets;

import com.ongres.scram.common.bouncycastle.base64.Base64;

/**
 * Class with static methods that provide support for converting to/from saslNames.
 *
 * @see <a href="https://tools.ietf.org/html/rfc5802#section-7">[RFC5802] Section 7: Formal Syntax</a>
 */
public class ScramStringFormatting {

    public ScramStringFormatting() {
    }

    /**
     * Given a value-safe-char (normalized UTF-8 String), returns one where the characters ','
     * and '=' are represented by '=2C' and '=3D', respectively.
     *
     * @param value The value to convert to a saslName
     * @return The saslName, with characters escaped (if any)
     */
    public static String toSaslName(String value) {
        if (value == null || value.isEmpty()) {
            return value;
        }
        StringBuilder escaped = new StringBuilder(value.length());
        boolean modified = false;
        for (int i = 0; i < value.length(); i++) {
            char c = value.charAt(i);
            if (c == ',') {
                escaped.append("=2C");
                modified = true;
            } else if (c == '=') {
                escaped.append("=3D");
                modified = true;
            } else {
                escaped.append(c);
            }
        }
        // Return the original instance unchanged when no escaping was necessary.
        return modified ? escaped.toString() : value;
    }

    /**
     * Given a saslName, returns a non-escaped String.
     *
     * @param value The saslName
     * @return The saslName, unescaped
     * @throws IllegalArgumentException If a ',' character is present, or a '=' is not followed
     *     by either '2C' or '3D'
     */
    public static String fromSaslName(String value) throws IllegalArgumentException {
        if (value == null || value.isEmpty()) {
            return value;
        }
        StringBuilder unescaped = new StringBuilder(value.length());
        boolean modified = false;
        int i = 0;
        while (i < value.length()) {
            char c = value.charAt(i);
            if (c == ',') {
                throw new IllegalArgumentException("Invalid ',' character present in saslName");
            }
            if (c == '=') {
                // A valid escape sequence needs two more characters after the '='.
                if (i + 2 >= value.length()) {
                    throw new IllegalArgumentException("Invalid '=' character present in saslName");
                }
                char first = value.charAt(i + 1);
                char second = value.charAt(i + 2);
                if (first == '2' && second == 'C') {
                    unescaped.append(',');
                } else if (first == '3' && second == 'D') {
                    unescaped.append('=');
                } else {
                    throw new IllegalArgumentException(
                            "Invalid char '=" + first + second + "' found in saslName"
                    );
                }
                i += 3;
                modified = true;
            } else {
                unescaped.append(c);
                i++;
            }
        }
        // Return the original instance unchanged when no escape sequences were present.
        return modified ? unescaped.toString() : value;
    }

    public static String base64Encode(byte[] value) throws IllegalArgumentException {
        return Base64.toBase64String(checkNotNull(value, "value"));
    }

    public static String base64Encode(String value) throws IllegalArgumentException {
        return base64Encode(checkNotEmpty(value, "value").getBytes(StandardCharsets.UTF_8));
    }

    public static byte[] base64Decode(String value) throws IllegalArgumentException {
        return Base64.decode(checkNotEmpty(value, "value"));
    }
}
package com.ongres.scram.common.bouncycastle.base64;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import com.ongres.scram.common.bouncycastle.pbkdf2.Strings;

/**
 * Utility class for converting Base64 data to bytes and back again.
 */
public class Base64 {

    // Shared, stateless encoder instance backing all the static helpers.
    private static final Encoder encoder = new Base64Encoder();

    public Base64() {
    }

    /**
     * Encodes the whole input array as a base 64 string.
     *
     * @param data The data to encode
     * @return The base 64 encoded string
     */
    public static String toBase64String(byte[] data) {
        return toBase64String(data, 0, data.length);
    }

    /**
     * Encodes a region of the input array as a base 64 string.
     *
     * @param data The data to encode
     * @param off Offset of the first byte to encode
     * @param length Number of bytes to encode
     * @return The base 64 encoded string
     */
    public static String toBase64String(byte[] data, int off, int length) {
        return Strings.fromByteArray(encode(data, off, length));
    }

    /**
     * Encodes the input data producing a base 64 encoded byte array.
     *
     * @param data The data to encode
     * @return A byte array containing the base 64 encoded data
     */
    public static byte[] encode(byte[] data) {
        return encode(data, 0, data.length);
    }

    /**
     * Encodes a region of the input data producing a base 64 encoded byte array.
     *
     * @param data The data to encode
     * @param off Offset of the first byte to encode
     * @param length Number of bytes to encode
     * @return A byte array containing the base 64 encoded data
     */
    public static byte[] encode(byte[] data, int off, int length) {
        // Base 64 produces 4 output bytes for every 3 input bytes, rounded up.
        int len = (length + 2) / 3 * 4;
        ByteArrayOutputStream bOut = new ByteArrayOutputStream(len);
        try {
            encoder.encode(data, off, length, bOut);
        } catch (Exception e) {
            throw new EncoderException("exception encoding base64 string: " + e.getMessage(), e);
        }
        return bOut.toByteArray();
    }

    /**
     * Encodes the byte data to base 64, writing it to the given output stream.
     *
     * @param data The data to encode
     * @param out The stream to write to
     * @return The number of bytes produced
     * @throws IOException If writing to the stream fails
     */
    public static int encode(byte[] data, OutputStream out) throws IOException {
        return encoder.encode(data, 0, data.length, out);
    }

    /**
     * Encodes a region of the byte data to base 64, writing it to the given output stream.
     *
     * @param data The data to encode
     * @param off Offset of the first byte to encode
     * @param length Number of bytes to encode
     * @param out The stream to write to
     * @return The number of bytes produced
     * @throws IOException If writing to the stream fails
     */
    public static int encode(byte[] data, int off, int length, OutputStream out) throws IOException {
        return encoder.encode(data, off, length, out);
    }

    /**
     * Decodes the base 64 encoded input data. It is assumed the input data is valid.
     *
     * @param data The base 64 encoded data
     * @return A byte array representing the decoded data
     */
    public static byte[] decode(byte[] data) {
        int len = data.length / 4 * 3;
        ByteArrayOutputStream bOut = new ByteArrayOutputStream(len);
        try {
            encoder.decode(data, 0, data.length, bOut);
        } catch (Exception e) {
            throw new DecoderException("unable to decode base64 data: " + e.getMessage(), e);
        }
        return bOut.toByteArray();
    }

    /**
     * Decodes the base 64 encoded String data - whitespace will be ignored.
     *
     * @param data The base 64 encoded string
     * @return A byte array representing the decoded data
     */
    public static byte[] decode(String data) {
        int len = data.length() / 4 * 3;
        ByteArrayOutputStream bOut = new ByteArrayOutputStream(len);
        try {
            encoder.decode(data, bOut);
        } catch (Exception e) {
            throw new DecoderException("unable to decode base64 string: " + e.getMessage(), e);
        }
        return bOut.toByteArray();
    }

    /**
     * Decodes the base 64 encoded String data, writing it to the given output stream;
     * whitespace characters will be ignored.
     *
     * @param data The base 64 encoded string
     * @param out The output stream to write to
     * @return The number of bytes produced
     * @throws IOException If writing to the stream fails
     */
    public static int decode(String data, OutputStream out) throws IOException {
        return encoder.decode(data, out);
    }

    /**
     * Decodes a region of base 64 data to an output stream.
     *
     * @param base64Data The source data
     * @param start Start position
     * @param length The length
     * @param out The output stream to write to
     * @return The number of bytes produced
     */
    public static int decode(byte[] base64Data, int start, int length, OutputStream out) {
        try {
            return encoder.decode(base64Data, start, length, out);
        } catch (Exception e) {
            throw new DecoderException("unable to decode base64 data: " + e.getMessage(), e);
        }
    }
}
package com.ongres.scram.common.bouncycastle.base64;

import java.io.IOException;
import java.io.OutputStream;

/**
 * A streaming Base64 encoder.
 */
public class Base64Encoder implements Encoder {

    /** The base 64 alphabet: 'A'-'Z', 'a'-'z', '0'-'9', '+' and '/'. */
    protected final byte[] encodingTable = {
        (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G',
        (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N',
        (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U',
        (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z',
        (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g',
        (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n',
        (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u',
        (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z',
        (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4', (byte) '5', (byte) '6',
        (byte) '7', (byte) '8', (byte) '9',
        (byte) '+', (byte) '/'
    };

    /** The character used to pad incomplete final blocks. */
    protected byte padding = (byte) '=';

    /** Reverse lookup table; 0xff marks characters outside the alphabet. */
    protected final byte[] decodingTable = new byte[128];

    protected void initialiseDecodingTable() {
        for (int i = 0; i < decodingTable.length; i++) {
            decodingTable[i] = (byte) 0xff;
        }
        for (int i = 0; i < encodingTable.length; i++) {
            decodingTable[encodingTable[i]] = (byte) i;
        }
    }

    @SuppressWarnings("this-escape")
    public Base64Encoder() {
        initialiseDecodingTable();
    }

    /**
     * Encodes the input data producing a base 64 output stream.
     *
     * @param data The data to encode
     * @param off Offset of the first byte to encode
     * @param length Number of bytes to encode
     * @param out The stream to write the encoded output to
     * @return The number of bytes produced
     * @throws IOException If writing to the stream fails
     */
    public int encode(byte[] data, int off, int length, OutputStream out) throws IOException {
        int modulus = length % 3;
        int dataLength = length - modulus;

        // Encode every complete 3-byte group as 4 output characters.
        for (int i = off; i < off + dataLength; i += 3) {
            int a1 = data[i] & 0xff;
            int a2 = data[i + 1] & 0xff;
            int a3 = data[i + 2] & 0xff;

            out.write(encodingTable[(a1 >>> 2) & 0x3f]);
            out.write(encodingTable[((a1 << 4) | (a2 >>> 4)) & 0x3f]);
            out.write(encodingTable[((a2 << 2) | (a3 >>> 6)) & 0x3f]);
            out.write(encodingTable[a3 & 0x3f]);
        }

        // Encode the 1- or 2-byte tail, padding with '=' to a full 4-character block.
        switch (modulus) {
            case 1: {
                int d1 = data[off + dataLength] & 0xff;
                out.write(encodingTable[(d1 >>> 2) & 0x3f]);
                out.write(encodingTable[(d1 << 4) & 0x3f]);
                out.write(padding);
                out.write(padding);
                break;
            }
            case 2: {
                int d1 = data[off + dataLength] & 0xff;
                int d2 = data[off + dataLength + 1] & 0xff;
                out.write(encodingTable[(d1 >>> 2) & 0x3f]);
                out.write(encodingTable[((d1 << 4) | (d2 >>> 4)) & 0x3f]);
                out.write(encodingTable[(d2 << 2) & 0x3f]);
                out.write(padding);
                break;
            }
            default:
                // modulus == 0: nothing left to do.
                break;
        }

        return (dataLength / 3) * 4 + ((modulus == 0) ? 0 : 4);
    }

    // Whitespace characters that are skipped while decoding.
    private boolean ignore(char c) {
        return c == '\n' || c == '\r' || c == '\t' || c == ' ';
    }

    /**
     * Decodes base 64 encoded byte data, writing it to the given output stream;
     * whitespace characters will be ignored.
     *
     * @param data The base 64 encoded data
     * @param off Offset of the first byte to decode
     * @param length Number of bytes to decode
     * @param out The stream to write the decoded output to
     * @return The number of bytes produced
     * @throws IOException If invalid characters are encountered or writing fails
     */
    public int decode(byte[] data, int off, int length, OutputStream out) throws IOException {
        int outLen = 0;

        // Trim trailing whitespace.
        int end = off + length;
        while (end > off && ignore((char) data[end - 1])) {
            end--;
        }

        // Empty data.
        // NOTE(review): this compares against 0 rather than off (preserved from the original);
        // for off > 0 with all-whitespace input the check never triggers — confirm intent.
        if (end == 0) {
            return 0;
        }

        // Locate the start of the final (possibly padded) 4-character block.
        int i = 0;
        int finish = end;
        while (finish > off && i != 4) {
            if (!ignore((char) data[finish - 1])) {
                i++;
            }
            finish--;
        }

        // Decode all complete blocks before the final one.
        i = nextI(data, off, finish);
        while (i < finish) {
            byte b1 = decodingTable[data[i++]];
            i = nextI(data, i, finish);
            byte b2 = decodingTable[data[i++]];
            i = nextI(data, i, finish);
            byte b3 = decodingTable[data[i++]];
            i = nextI(data, i, finish);
            byte b4 = decodingTable[data[i++]];

            // Any 0xff entry from the decoding table makes the OR negative.
            if ((b1 | b2 | b3 | b4) < 0) {
                throw new IOException("invalid characters encountered in base64 data");
            }

            out.write((b1 << 2) | (b2 >> 4));
            out.write((b2 << 4) | (b3 >> 2));
            out.write((b3 << 6) | b4);
            outLen += 3;

            i = nextI(data, i, finish);
        }

        // Decode the final block, which may contain padding.
        int e0 = nextI(data, i, end);
        int e1 = nextI(data, e0 + 1, end);
        int e2 = nextI(data, e1 + 1, end);
        int e3 = nextI(data, e2 + 1, end);
        outLen += decodeLastBlock(out, (char) data[e0], (char) data[e1], (char) data[e2], (char) data[e3]);

        return outLen;
    }

    // Advances i past ignorable whitespace, stopping at finish.
    private int nextI(byte[] data, int i, int finish) {
        while (i < finish && ignore((char) data[i])) {
            i++;
        }
        return i;
    }

    /**
     * Decodes base 64 encoded String data, writing it to the given output stream;
     * whitespace characters will be ignored.
     *
     * @param data The base 64 encoded string
     * @param out The stream to write the decoded output to
     * @return The number of bytes produced
     * @throws IOException If invalid characters are encountered or writing fails
     */
    public int decode(String data, OutputStream out) throws IOException {
        int length = 0;

        // Trim trailing whitespace.
        int end = data.length();
        while (end > 0 && ignore(data.charAt(end - 1))) {
            end--;
        }

        // Empty data.
        if (end == 0) {
            return 0;
        }

        // Locate the start of the final (possibly padded) 4-character block.
        int i = 0;
        int finish = end;
        while (finish > 0 && i != 4) {
            if (!ignore(data.charAt(finish - 1))) {
                i++;
            }
            finish--;
        }

        // Decode all complete blocks before the final one.
        i = nextI(data, 0, finish);
        while (i < finish) {
            byte b1 = decodingTable[data.charAt(i++)];
            i = nextI(data, i, finish);
            byte b2 = decodingTable[data.charAt(i++)];
            i = nextI(data, i, finish);
            byte b3 = decodingTable[data.charAt(i++)];
            i = nextI(data, i, finish);
            byte b4 = decodingTable[data.charAt(i++)];

            if ((b1 | b2 | b3 | b4) < 0) {
                throw new IOException("invalid characters encountered in base64 data");
            }

            out.write((b1 << 2) | (b2 >> 4));
            out.write((b2 << 4) | (b3 >> 2));
            out.write((b3 << 6) | b4);
            length += 3;

            i = nextI(data, i, finish);
        }

        // Decode the final block, which may contain padding.
        int e0 = nextI(data, i, end);
        int e1 = nextI(data, e0 + 1, end);
        int e2 = nextI(data, e1 + 1, end);
        int e3 = nextI(data, e2 + 1, end);
        length += decodeLastBlock(out, data.charAt(e0), data.charAt(e1), data.charAt(e2), data.charAt(e3));

        return length;
    }

    // Decodes the final 4-character block, handling '=' padding; returns bytes written (1-3).
    private int decodeLastBlock(OutputStream out, char c1, char c2, char c3, char c4) throws IOException {
        if (c3 == padding) {
            // "xx==" form: exactly one output byte; c4 must also be padding.
            if (c4 != padding) {
                throw new IOException("invalid characters encountered at end of base64 data");
            }
            byte b1 = decodingTable[c1];
            byte b2 = decodingTable[c2];
            if ((b1 | b2) < 0) {
                throw new IOException("invalid characters encountered at end of base64 data");
            }
            out.write((b1 << 2) | (b2 >> 4));
            return 1;
        }
        if (c4 == padding) {
            // "xxx=" form: two output bytes.
            byte b1 = decodingTable[c1];
            byte b2 = decodingTable[c2];
            byte b3 = decodingTable[c3];
            if ((b1 | b2 | b3) < 0) {
                throw new IOException("invalid characters encountered at end of base64 data");
            }
            out.write((b1 << 2) | (b2 >> 4));
            out.write((b2 << 4) | (b3 >> 2));
            return 2;
        }
        // Unpadded "xxxx" form: three output bytes.
        byte b1 = decodingTable[c1];
        byte b2 = decodingTable[c2];
        byte b3 = decodingTable[c3];
        byte b4 = decodingTable[c4];
        if ((b1 | b2 | b3 | b4) < 0) {
            throw new IOException("invalid characters encountered at end of base64 data");
        }
        out.write((b1 << 2) | (b2 >> 4));
        out.write((b2 << 4) | (b3 >> 2));
        out.write((b3 << 6) | b4);
        return 3;
    }

    // Advances i past ignorable whitespace, stopping at finish.
    private int nextI(String data, int i, int finish) {
        while (i < finish && ignore(data.charAt(i))) {
            i++;
        }
        return i;
    }
}
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.base64; + +/** + * Exception thrown if an attempt is made to decode invalid data, or some other failure occurs. + */ +@SuppressWarnings("serial") +public class DecoderException + extends IllegalStateException +{ + private Throwable cause; + + DecoderException(String msg, Throwable cause) + { + super(msg); + + this.cause = cause; + } + + public Throwable getCause() + { + return cause; + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/base64/Encoder.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/base64/Encoder.java new file mode 100644 index 0000000..f6d1681 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/base64/Encoder.java @@ -0,0 +1,39 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.base64; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Encode and decode byte arrays (typically from binary to 7-bit ASCII + * encodings). + */ +public interface Encoder +{ + int encode(byte[] data, int off, int length, OutputStream out) throws IOException; + + int decode(byte[] data, int off, int length, OutputStream out) throws IOException; + + int decode(String data, OutputStream out) throws IOException; +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/base64/EncoderException.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/base64/EncoderException.java new file mode 100644 index 0000000..5079281 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/base64/EncoderException.java @@ -0,0 +1,45 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.base64; + +/** + * Exception thrown if an attempt is made to encode invalid data, or some other failure occurs. + */ +@SuppressWarnings("serial") +public class EncoderException + extends IllegalStateException +{ + private Throwable cause; + + EncoderException(String msg, Throwable cause) + { + super(msg); + + this.cause = cause; + } + + public Throwable getCause() + { + return cause; + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Arrays.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Arrays.java new file mode 100644 index 0000000..c9330bf --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Arrays.java @@ -0,0 +1,75 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +/** + * General array utilities. + */ +public final class Arrays +{ + private Arrays() + { + // static class, hide constructor + } + + /** + * Make a copy of a range of bytes from the passed in data array. The range can + * extend beyond the end of the input array, in which case the return array will + * be padded with zeroes. + * + * @param data the array from which the data is to be copied. + * @param from the start index at which the copying should take place. + * @param to the final index of the range (exclusive). + * + * @return a new byte array containing the range given. 
+ */ + public static byte[] copyOfRange(byte[] data, int from, int to) + { + int newLength = getLength(from, to); + + byte[] tmp = new byte[newLength]; + + if (data.length - from < newLength) + { + System.arraycopy(data, from, tmp, 0, data.length - from); + } + else + { + System.arraycopy(data, from, tmp, 0, newLength); + } + + return tmp; + } + + private static int getLength(int from, int to) + { + int newLength = to - from; + if (newLength < 0) + { + StringBuffer sb = new StringBuffer(from); + sb.append(" > ").append(to); + throw new IllegalArgumentException(sb.toString()); + } + return newLength; + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/CipherParameters.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/CipherParameters.java new file mode 100644 index 0000000..3bf19e7 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/CipherParameters.java @@ -0,0 +1,30 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +/** + * all parameter classes implement this. + */ +public interface CipherParameters +{ +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/DataLengthException.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/DataLengthException.java new file mode 100644 index 0000000..d8564fc --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/DataLengthException.java @@ -0,0 +1,52 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +/** + * this exception is thrown if a buffer that is meant to have output + * copied into it turns out to be too short, or if we've been given + * insufficient input. In general this exception will get thrown rather + * than an ArrayOutOfBounds exception. + */ +@SuppressWarnings("serial") +public class DataLengthException + extends RuntimeCryptoException +{ + /** + * base constructor. + */ + public DataLengthException() + { + } + + /** + * create a DataLengthException with the given message. + * + * @param message the message to be carried with the exception. + */ + public DataLengthException( + String message) + { + super(message); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Digest.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Digest.java new file mode 100644 index 0000000..237183a --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Digest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +/** + * interface that a message digest conforms to. + */ +public interface Digest +{ + /** + * return the algorithm name + * + * @return the algorithm name + */ + public String getAlgorithmName(); + + /** + * return the size, in bytes, of the digest produced by this message digest. + * + * @return the size, in bytes, of the digest produced by this message digest. + */ + public int getDigestSize(); + + /** + * update the message digest with a single byte. + * + * @param in the input byte to be entered. + */ + public void update(byte in); + + /** + * update the message digest with a block of bytes. + * + * @param in the byte array containing the data. + * @param inOff the offset into the byte array where the data starts. + * @param len the length of the data. + */ + public void update(byte[] in, int inOff, int len); + + /** + * close the digest, producing the final digest value. The doFinal + * call leaves the digest reset. 
+ * + * @param out the array the digest is to be copied into. + * @param outOff the offset into the out array the digest is to start at. + */ + public int doFinal(byte[] out, int outOff); + + /** + * reset the digest back to it's initial state. + */ + public void reset(); +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/DigestFactory.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/DigestFactory.java new file mode 100644 index 0000000..2f82188 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/DigestFactory.java @@ -0,0 +1,36 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +/** + * Basic factory class for message digests. + */ +public final class DigestFactory +{ + public DigestFactory() {} + + public static Digest createSHA256() + { + return new SHA256Digest(); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/EncodableDigest.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/EncodableDigest.java new file mode 100644 index 0000000..71413d8 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/EncodableDigest.java @@ -0,0 +1,39 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +/** + * Encodable digests allow you to download an encoded copy of their internal state. This is useful for the situation where + * you need to generate a signature on an external device and it allows for "sign with last round", so a copy of the + * internal state of the digest, plus the last few blocks of the message are all that needs to be sent, rather than the + * entire message. + */ +public interface EncodableDigest +{ + /** + * Return an encoded byte array for the digest's internal state + * + * @return an encoding of the digests internal state. + */ + byte[] getEncodedState(); +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/ExtendedDigest.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/ExtendedDigest.java new file mode 100644 index 0000000..ceda3e7 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/ExtendedDigest.java @@ -0,0 +1,35 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +public interface ExtendedDigest + extends Digest +{ + /** + * Return the size in bytes of the internal buffer the digest applies it's compression + * function to. + * + * @return byte length of the digests internal buffer. + */ + public int getByteLength(); +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/GeneralDigest.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/GeneralDigest.java new file mode 100644 index 0000000..e151eb0 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/GeneralDigest.java @@ -0,0 +1,179 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +/** + * base implementation of MD4 family style digest as outlined in + * "Handbook of Applied Cryptography", pages 344 - 347. + */ +public abstract class GeneralDigest + implements ExtendedDigest, Memoable +{ + private static final int BYTE_LENGTH = 64; + + private final byte[] xBuf = new byte[4]; + private int xBufOff; + + private long byteCount; + + /** + * Standard constructor + */ + protected GeneralDigest() + { + xBufOff = 0; + } + + /** + * Copy constructor. We are using copy constructors in place + * of the Object.clone() interface as this interface is not + * supported by J2ME. 
+ */ + @SuppressWarnings("this-escape") + protected GeneralDigest(GeneralDigest t) + { + copyIn(t); + } + + protected GeneralDigest(byte[] encodedState) + { + System.arraycopy(encodedState, 0, xBuf, 0, xBuf.length); + xBufOff = Pack.bigEndianToInt(encodedState, 4); + byteCount = Pack.bigEndianToLong(encodedState, 8); + } + + protected void copyIn(GeneralDigest t) + { + System.arraycopy(t.xBuf, 0, xBuf, 0, t.xBuf.length); + + xBufOff = t.xBufOff; + byteCount = t.byteCount; + } + + public void update( + byte in) + { + xBuf[xBufOff++] = in; + + if (xBufOff == xBuf.length) + { + processWord(xBuf, 0); + xBufOff = 0; + } + + byteCount++; + } + + public void update( + byte[] in, + int inOff, + int len) + { + len = Math.max(0, len); + + // + // fill the current word + // + int i = 0; + if (xBufOff != 0) + { + while (i < len) + { + xBuf[xBufOff++] = in[inOff + i++]; + if (xBufOff == 4) + { + processWord(xBuf, 0); + xBufOff = 0; + break; + } + } + } + + // + // process whole words. + // + int limit = ((len - i) & ~3) + i; + for (; i < limit; i += 4) + { + processWord(in, inOff + i); + } + + // + // load in the remainder. + // + while (i < len) + { + xBuf[xBufOff++] = in[inOff + i++]; + } + + byteCount += len; + } + + public void finish() + { + long bitLength = (byteCount << 3); + + // + // add the pad bytes. 
+ // + update((byte)128); + + while (xBufOff != 0) + { + update((byte)0); + } + + processLength(bitLength); + + processBlock(); + } + + public void reset() + { + byteCount = 0; + + xBufOff = 0; + for (int i = 0; i < xBuf.length; i++) + { + xBuf[i] = 0; + } + } + + protected void populateState(byte[] state) + { + System.arraycopy(xBuf, 0, state, 0, xBufOff); + Pack.intToBigEndian(xBufOff, state, 4); + Pack.longToBigEndian(byteCount, state, 8); + } + + public int getByteLength() + { + return BYTE_LENGTH; + } + + protected abstract void processWord(byte[] in, int inOff); + + protected abstract void processLength(long bitLength); + + protected abstract void processBlock(); +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/HMac.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/HMac.java new file mode 100644 index 0000000..b09fa98 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/HMac.java @@ -0,0 +1,245 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +import java.util.Hashtable; + +/** + * HMAC implementation based on RFC2104 + * + * H(K XOR opad, H(K XOR ipad, text)) + */ +public class HMac + implements Mac +{ + private final static byte IPAD = (byte)0x36; + private final static byte OPAD = (byte)0x5C; + + private Digest digest; + private int digestSize; + private int blockLength; + private Memoable ipadState; + private Memoable opadState; + + private byte[] inputPad; + private byte[] outputBuf; + + private static Hashtable blockLengths; + + static + { + blockLengths = new Hashtable<>(); + + blockLengths.put("GOST3411", Integers.valueOf(32)); + + blockLengths.put("MD2", Integers.valueOf(16)); + blockLengths.put("MD4", Integers.valueOf(64)); + blockLengths.put("MD5", Integers.valueOf(64)); + + blockLengths.put("RIPEMD128", Integers.valueOf(64)); + blockLengths.put("RIPEMD160", Integers.valueOf(64)); + + blockLengths.put("SHA-1", Integers.valueOf(64)); + blockLengths.put("SHA-224", Integers.valueOf(64)); + blockLengths.put("SHA-256", Integers.valueOf(64)); + blockLengths.put("SHA-384", Integers.valueOf(128)); + blockLengths.put("SHA-512", Integers.valueOf(128)); + + blockLengths.put("Tiger", Integers.valueOf(64)); + blockLengths.put("Whirlpool", Integers.valueOf(64)); + } + + private static int getByteLength( + Digest digest) + { + if (digest instanceof ExtendedDigest) + { + return ((ExtendedDigest)digest).getByteLength(); + 
} + + Integer b = blockLengths.get(digest.getAlgorithmName()); + + if (b == null) + { + throw new IllegalArgumentException("unknown digest passed: " + digest.getAlgorithmName()); + } + + return b.intValue(); + } + + /** + * Base constructor for one of the standard digest algorithms that the + * byteLength of the algorithm is know for. + * + * @param digest the digest. + */ + public HMac( + Digest digest) + { + this(digest, getByteLength(digest)); + } + + private HMac( + Digest digest, + int byteLength) + { + this.digest = digest; + this.digestSize = digest.getDigestSize(); + this.blockLength = byteLength; + this.inputPad = new byte[blockLength]; + this.outputBuf = new byte[blockLength + digestSize]; + } + + public String getAlgorithmName() + { + return digest.getAlgorithmName() + "/HMAC"; + } + + public Digest getUnderlyingDigest() + { + return digest; + } + + public void init( + CipherParameters params) + { + digest.reset(); + + byte[] key = ((KeyParameter)params).getKey(); + int keyLength = key.length; + + if (keyLength > blockLength) + { + digest.update(key, 0, keyLength); + digest.doFinal(inputPad, 0); + + keyLength = digestSize; + } + else + { + System.arraycopy(key, 0, inputPad, 0, keyLength); + } + + for (int i = keyLength; i < inputPad.length; i++) + { + inputPad[i] = 0; + } + + System.arraycopy(inputPad, 0, outputBuf, 0, blockLength); + + xorPad(inputPad, blockLength, IPAD); + xorPad(outputBuf, blockLength, OPAD); + + if (digest instanceof Memoable) + { + opadState = ((Memoable)digest).copy(); + + ((Digest)opadState).update(outputBuf, 0, blockLength); + } + + digest.update(inputPad, 0, inputPad.length); + + if (digest instanceof Memoable) + { + ipadState = ((Memoable)digest).copy(); + } + } + + public int getMacSize() + { + return digestSize; + } + + public void update( + byte in) + { + digest.update(in); + } + + public void update( + byte[] in, + int inOff, + int len) + { + digest.update(in, inOff, len); + } + + public int doFinal( + byte[] out, + int 
outOff) + { + digest.doFinal(outputBuf, blockLength); + + if (opadState != null) + { + ((Memoable)digest).reset(opadState); + digest.update(outputBuf, blockLength, digest.getDigestSize()); + } + else + { + digest.update(outputBuf, 0, outputBuf.length); + } + + int len = digest.doFinal(out, outOff); + + for (int i = blockLength; i < outputBuf.length; i++) + { + outputBuf[i] = 0; + } + + if (ipadState != null) + { + ((Memoable)digest).reset(ipadState); + } + else + { + digest.update(inputPad, 0, inputPad.length); + } + + return len; + } + + /** + * Reset the mac generator. + */ + public void reset() + { + /* + * reset the underlying digest. + */ + digest.reset(); + + /* + * reinitialize the digest. + */ + digest.update(inputPad, 0, inputPad.length); + } + + private static void xorPad(byte[] pad, int len, byte n) + { + for (int i = 0; i < len; ++i) + { + pad[i] ^= n; + } + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Integers.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Integers.java new file mode 100644 index 0000000..891bccc --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Integers.java @@ -0,0 +1,38 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. 
/**
 * Boxing helper for {@code int} values, mirroring the small subset of the
 * BouncyCastle Integers utility needed by this package.
 */
public class Integers
{
    /** Stateless; the public constructor is retained for compatibility. */
    public Integers() {
    }

    /**
     * Box the given primitive, reusing the JDK's cached Integer instances
     * where possible.
     *
     * @param value the value to box.
     * @return the boxed value.
     */
    public static Integer valueOf(int value)
    {
        return Integer.valueOf(value);
    }
}
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +public class KeyParameter + implements CipherParameters +{ + private byte[] key; + + public KeyParameter( + byte[] key) + { + this(key, 0, key.length); + } + + public KeyParameter( + byte[] key, + int keyOff, + int keyLen) + { + this.key = new byte[keyLen]; + + System.arraycopy(key, keyOff, this.key, 0, keyLen); + } + + public byte[] getKey() + { + return key; + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Mac.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Mac.java new file mode 100644 index 0000000..01f932f --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/Mac.java @@ -0,0 +1,93 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
/**
 * The base interface for implementations of message authentication codes (MACs).
 */
public interface Mac
{
    /**
     * Initialise the MAC with its key material.
     *
     * @param params the key and other data required by the MAC.
     * @exception IllegalArgumentException if the params argument is
     * inappropriate.
     */
    public void init(CipherParameters params)
        throws IllegalArgumentException;

    /**
     * Return the name of the algorithm the MAC implements.
     *
     * @return the name of the algorithm the MAC implements.
     */
    public String getAlgorithmName();

    /**
     * Return the size of the MAC output (in bytes).
     *
     * @return the MAC output size in bytes.
     */
    public int getMacSize();

    /**
     * Add a single byte to the MAC input.
     *
     * @param in the byte to be processed.
     * @exception IllegalStateException if the MAC is not initialised.
     */
    public void update(byte in)
        throws IllegalStateException;

    /**
     * Add a range of bytes to the MAC input.
     *
     * @param in the array containing the input.
     * @param inOff the index in the array the data begins at.
     * @param len the length of the input starting at inOff.
     * @exception IllegalStateException if the MAC is not initialised.
     * @exception DataLengthException if there isn't enough data in {@code in}.
     */
    public void update(byte[] in, int inOff, int len)
        throws DataLengthException, IllegalStateException;

    /**
     * Compute the final stage of the MAC, writing the output to the out
     * parameter.
     *
     * doFinal leaves the MAC in the same state it was after the last init,
     * so the instance can be reused for another message with the same key.
     *
     * @param out the array the MAC is to be output to.
     * @param outOff the offset into the out buffer the output is to start at.
     * @return the number of bytes written to out.
     * @exception DataLengthException if there isn't enough space in out.
     * @exception IllegalStateException if the MAC is not initialised.
     */
    public int doFinal(byte[] out, int outOff)
        throws DataLengthException, IllegalStateException;

    /**
     * Reset the MAC. At the end of resetting, the MAC should be in the same
     * state it was after the last init (if there was one).
     */
    public void reset();
}
/**
 * Interface for Memoable objects. Memoable objects allow the taking of a snapshot of their internal state
 * via the copy() method and then resetting the object back to that state later using the reset() method.
 */
public interface Memoable
{
    /**
     * Produce a copy of this object with its configuration and in its current state.
     *
     * The returned object may be used simply to store the state, or may be used as a similar object
     * starting from the copied state.
     *
     * @return a snapshot of this object's current state.
     */
    Memoable copy();

    /**
     * Restore a copied object state into this object.
     *
     * Implementations of this method should try to avoid or minimise memory allocation to perform the reset.
     *
     * @param other an object originally {@link #copy() copied} from an object of the same type as this instance.
     * @throws ClassCastException if the provided object is not of the correct type.
     */
    void reset(Memoable other);
}
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.bouncycastle.pbkdf2; + +/** + * super class for all Password Based Encryption (PBE) parameter generator classes. + */ +public abstract class PBEParametersGenerator +{ + protected byte[] password; + protected byte[] salt; + protected int iterationCount; + + /** + * base constructor. + */ + protected PBEParametersGenerator() + { + } + + /** + * initialise the PBE generator. + * + * @param password the password converted into bytes (see below). + * @param salt the salt to be mixed with the password. + * @param iterationCount the number of iterations the "mixing" function + * is to be applied for. + */ + public void init( + byte[] password, + byte[] salt, + int iterationCount) + { + this.password = password; + this.salt = salt; + this.iterationCount = iterationCount; + } + + /** + * return the password byte array. + * + * @return the password byte array. + */ + public byte[] getPassword() + { + return password; + } + + /** + * return the salt byte array. + * + * @return the salt byte array. + */ + public byte[] getSalt() + { + return salt; + } + + /** + * return the iteration count. + * + * @return the iteration count. + */ + public int getIterationCount() + { + return iterationCount; + } + + /** + * generate derived parameters for a key of length keySize. + * + * @param keySize the length, in bits, of the key required. + * @return a parameters object representing a key. 
+ */ + public abstract CipherParameters generateDerivedParameters(int keySize); + + /** + * converts a password to a byte array according to the scheme in + * PKCS5 (UTF-8, no padding) + * + * @param password a character array representing the password. + * @return a byte array representing the password. + */ + public static byte[] PKCS5PasswordToUTF8Bytes( + char[] password) + { + if (password != null) + { + return Strings.toUTF8ByteArray(password); + } + else + { + return new byte[0]; + } + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/PKCS5S2ParametersGenerator.java b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/PKCS5S2ParametersGenerator.java new file mode 100644 index 0000000..2129f4c --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/bouncycastle/pbkdf2/PKCS5S2ParametersGenerator.java @@ -0,0 +1,124 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
/**
 * Generator for PBE derived keys and IVs as defined by PKCS #5 V2.0 Scheme 2
 * (PBKDF2). The pseudo-random function is an HMAC built over the digest
 * supplied at construction time (not necessarily SHA-1).
 *
 * The document this implementation is based on can be found at
 * RSA's PKCS5 page (now RFC 8018).
 */
public class PKCS5S2ParametersGenerator
    extends PBEParametersGenerator
{
    // PRF: HMAC over the constructor-supplied digest.
    private Mac hMac;
    // Scratch buffer holding the latest PRF output U_j; reused across calls.
    private byte[] state;

    public PKCS5S2ParametersGenerator(Digest digest)
    {
        hMac = new HMac(digest);
        state = new byte[hMac.getMacSize()];
    }

    /**
     * PBKDF2's F function: out[outOff..] = U_1 XOR U_2 XOR ... XOR U_c,
     * where U_1 = PRF(P, S || INT(i)) and U_j = PRF(P, U_{j-1}).
     * The PRF key (password) must already be set via hMac.init().
     *
     * @param S the salt (may be null, treated as empty).
     * @param c the iteration count, must be >= 1.
     * @param iBuf the 4-byte big-endian block index INT(i).
     * @param out the output array receiving one hLen-sized block.
     * @param outOff the offset of the block within out.
     */
    private void F(
        byte[] S,
        int c,
        byte[] iBuf,
        byte[] out,
        int outOff)
    {
        if (c == 0)
        {
            throw new IllegalArgumentException("iteration count must be at least 1.");
        }

        if (S != null)
        {
            hMac.update(S, 0, S.length);
        }

        // U_1 = PRF(P, S || INT(i))
        hMac.update(iBuf, 0, iBuf.length);
        hMac.doFinal(state, 0);

        System.arraycopy(state, 0, out, outOff, state.length);

        // U_j = PRF(P, U_{j-1}), XOR-accumulated into the output block.
        for (int count = 1; count < c; count++)
        {
            hMac.update(state, 0, state.length);
            hMac.doFinal(state, 0);

            for (int j = 0; j != state.length; j++)
            {
                out[outOff + j] ^= state[j];
            }
        }
    }

    /**
     * Produce l = ceil(dkLen / hLen) blocks of derived key material.
     * The result is l * hLen bytes; the caller truncates to dkLen.
     */
    private byte[] generateDerivedKey(
        int dkLen)
    {
        int hLen = hMac.getMacSize();
        int l = (dkLen + hLen - 1) / hLen;
        byte[] iBuf = new byte[4];
        byte[] outBytes = new byte[l * hLen];
        int outPos = 0;

        CipherParameters param = new KeyParameter(password);

        hMac.init(param);

        for (int i = 1; i <= l; i++)
        {
            // Increment the value in 'iBuf' (big-endian counter, starts at 1
            // on the first pass as required by PBKDF2).
            int pos = 3;
            while (++iBuf[pos] == 0)
            {
                --pos;
            }

            F(salt, iterationCount, iBuf, outBytes, outPos);
            outPos += hLen;
        }

        return outBytes;
    }

    /**
     * Generate a key parameter derived from the password, salt, and iteration
     * count we are currently initialised with.
     *
     * @param keySize the size of the key we want (in bits)
     * @return a KeyParameter object.
     */
    public CipherParameters generateDerivedParameters(
        int keySize)
    {
        keySize = keySize / 8;

        // Truncate the block-aligned output down to the requested length.
        byte[] dKey = Arrays.copyOfRange(generateDerivedKey(keySize), 0, keySize);

        return new KeyParameter(dKey, 0, keySize);
    }
}
/**
 * Utility methods for converting byte arrays into ints and longs, and back
 * again. All "bigEndian" methods use network byte order (most significant
 * byte first); "littleEndian" methods use least significant byte first.
 */
public abstract class Pack
{
    public Pack() {
    }

    /**
     * Read a big-endian 32-bit int from bs starting at off.
     */
    public static int bigEndianToInt(byte[] bs, int off)
    {
        return (bs[off] << 24)
            | ((bs[off + 1] & 0xff) << 16)
            | ((bs[off + 2] & 0xff) << 8)
            | (bs[off + 3] & 0xff);
    }

    /**
     * Read a big-endian 64-bit long from bs starting at off.
     */
    public static long bigEndianToLong(byte[] bs, int off)
    {
        long hi = bigEndianToInt(bs, off) & 0xffffffffL;
        long lo = bigEndianToInt(bs, off + 4) & 0xffffffffL;
        return (hi << 32) | lo;
    }

    /**
     * Write a 64-bit long into bs at off in big-endian order.
     */
    public static void longToBigEndian(long n, byte[] bs, int off)
    {
        intToBigEndian((int)(n >>> 32), bs, off);
        intToBigEndian((int)n, bs, off + 4);
    }

    /**
     * Encode each long of ns in big-endian order into a freshly allocated
     * byte array of length 8 * ns.length.
     */
    public static byte[] longToBigEndian(long[] ns)
    {
        byte[] bs = new byte[8 * ns.length];
        longToBigEndian(ns, bs, 0);
        return bs;
    }

    /**
     * Encode each long of ns in big-endian order into bs, starting at off.
     */
    public static void longToBigEndian(long[] ns, byte[] bs, int off)
    {
        int pos = off;
        for (long n : ns)
        {
            longToBigEndian(n, bs, pos);
            pos += 8;
        }
    }

    /**
     * Read a little-endian 16-bit short from bs starting at off.
     */
    public static short littleEndianToShort(byte[] bs, int off)
    {
        int lo = bs[off] & 0xff;
        int hi = bs[off + 1] & 0xff;
        return (short)((hi << 8) | lo);
    }

    /**
     * Write a 32-bit int into bs at off in big-endian order.
     */
    public static void intToBigEndian(int n, byte[] bs, int off)
    {
        bs[off] = (byte)(n >>> 24);
        bs[off + 1] = (byte)(n >>> 16);
        bs[off + 2] = (byte)(n >>> 8);
        bs[off + 3] = (byte)n;
    }
}
/**
 * Base unchecked exception for errors raised by the crypto classes in this
 * package.
 */
@SuppressWarnings("serial")
public class RuntimeCryptoException
    extends RuntimeException
{
    /**
     * Create an exception with no detail message.
     */
    public RuntimeCryptoException()
    {
    }

    /**
     * Create an exception carrying a detail message.
     *
     * @param message the message to be carried with the exception.
     */
    public RuntimeCryptoException(
        String message)
    {
        super(message);
    }
}

+ *         block  word  digest
+ * SHA-1   512    32    160
+ * SHA-256 512    32    256
+ * SHA-384 1024   64    384
+ * SHA-512 1024   64    512
+ * 
/**
 * FIPS 180-4 SHA-256 message digest (repackaged from Bouncy Castle).
 *
 * <p>Byte buffering, padding and finalisation are inherited from {@code GeneralDigest};
 * this class supplies the eight 32-bit chaining variables, the 64-word message schedule
 * and the compression function. The in-flight state can be exported with
 * {@link #getEncodedState()} and restored via the {@code byte[]} constructor.
 */
@SuppressWarnings("this-escape")
public class SHA256Digest
    extends GeneralDigest
    implements EncodableDigest
{
    /** SHA-256 produces a 256-bit (32-byte) digest. */
    private static final int DIGEST_LENGTH = 32;

    // The eight 32-bit chaining variables (the running hash between blocks).
    private int H1, H2, H3, H4, H5, H6, H7, H8;

    // Message schedule: words 0..15 hold the current 512-bit block, 16..63 are expanded.
    private int[] X = new int[64];
    // Next free slot in X; once 16 words are buffered a block is compressed.
    private int xOff;

    /**
     * Standard constructor
     */
    public SHA256Digest()
    {
        reset();
    }

    /**
     * Copy constructor. This will copy the state of the provided
     * message digest.
     */
    public SHA256Digest(SHA256Digest t)
    {
        super(t);

        copyIn(t);
    }

    // Clone all SHA-256-specific state (chaining variables and pending schedule words)
    // from another instance; byte-level buffering is copied by super.copyIn.
    private void copyIn(SHA256Digest t)
    {
        super.copyIn(t);

        H1 = t.H1;
        H2 = t.H2;
        H3 = t.H3;
        H4 = t.H4;
        H5 = t.H5;
        H6 = t.H6;
        H7 = t.H7;
        H8 = t.H8;

        System.arraycopy(t.X, 0, X, 0, t.X.length);
        xOff = t.xOff;
    }

    /**
     * State constructor - create a digest initialised with the state of a previous one.
     *
     * <p>Layout (big-endian ints): bytes 0-15 belong to {@code GeneralDigest};
     * 16-47 are H1..H8; 48-51 is {@code xOff}; 52+ are the pending schedule words.
     *
     * @param encodedState the encoded state from the originating digest.
     */
    public SHA256Digest(byte[] encodedState)
    {
        super(encodedState);

        H1 = Pack.bigEndianToInt(encodedState, 16);
        H2 = Pack.bigEndianToInt(encodedState, 20);
        H3 = Pack.bigEndianToInt(encodedState, 24);
        H4 = Pack.bigEndianToInt(encodedState, 28);
        H5 = Pack.bigEndianToInt(encodedState, 32);
        H6 = Pack.bigEndianToInt(encodedState, 36);
        H7 = Pack.bigEndianToInt(encodedState, 40);
        H8 = Pack.bigEndianToInt(encodedState, 44);

        xOff = Pack.bigEndianToInt(encodedState, 48);
        for (int i = 0; i != xOff; i++)
        {
            X[i] = Pack.bigEndianToInt(encodedState, 52 + (i * 4));
        }
    }

    public String getAlgorithmName()
    {
        return "SHA-256";
    }

    public int getDigestSize()
    {
        return DIGEST_LENGTH;
    }

    // Called by GeneralDigest for every complete 4-byte word of input.
    protected void processWord(
        byte[] in,
        int inOff)
    {
        // Note: Inlined for performance
        // X[xOff] = Pack.bigEndianToInt(in, inOff);
        int n = in[inOff] << 24;
        n |= (in[++inOff] & 0xff) << 16;
        n |= (in[++inOff] & 0xff) << 8;
        n |= (in[++inOff] & 0xff);
        X[xOff] = n;

        if (++xOff == 16)
        {
            processBlock();
        }
    }

    // Appends the 64-bit message bit length into words 14/15 of the final block,
    // flushing first if those slots are already occupied.
    protected void processLength(
        long bitLength)
    {
        if (xOff > 14)
        {
            processBlock();
        }

        X[14] = (int)(bitLength >>> 32);
        X[15] = (int)(bitLength & 0xffffffff);
    }

    // Finishes padding, writes the 32-byte digest big-endian at outOff, and resets.
    public int doFinal(
        byte[] out,
        int outOff)
    {
        finish();

        Pack.intToBigEndian(H1, out, outOff);
        Pack.intToBigEndian(H2, out, outOff + 4);
        Pack.intToBigEndian(H3, out, outOff + 8);
        Pack.intToBigEndian(H4, out, outOff + 12);
        Pack.intToBigEndian(H5, out, outOff + 16);
        Pack.intToBigEndian(H6, out, outOff + 20);
        Pack.intToBigEndian(H7, out, outOff + 24);
        Pack.intToBigEndian(H8, out, outOff + 28);

        reset();

        return DIGEST_LENGTH;
    }

    /**
     * reset the chaining variables
     */
    public void reset()
    {
        super.reset();

        /* SHA-256 initial hash value
         * The first 32 bits of the fractional parts of the square roots
         * of the first eight prime numbers
         */
        H1 = 0x6a09e667;
        H2 = 0xbb67ae85;
        H3 = 0x3c6ef372;
        H4 = 0xa54ff53a;
        H5 = 0x510e527f;
        H6 = 0x9b05688c;
        H7 = 0x1f83d9ab;
        H8 = 0x5be0cd19;

        xOff = 0;
        for (int i = 0; i != X.length; i++)
        {
            X[i] = 0;
        }
    }

    // The SHA-256 compression function: consumes one 512-bit block from X,
    // updating H1..H8. The 64 rounds are unrolled 8 at a time; within each group
    // the roles of a..h rotate one position per round instead of being shuffled.
    protected void processBlock()
    {
        //
        // expand 16 word block into 64 word blocks.
        //
        for (int t = 16; t <= 63; t++)
        {
            X[t] = Theta1(X[t - 2]) + X[t - 7] + Theta0(X[t - 15]) + X[t - 16];
        }

        //
        // set up working variables.
        //
        int a = H1;
        int b = H2;
        int c = H3;
        int d = H4;
        int e = H5;
        int f = H6;
        int g = H7;
        int h = H8;

        int t = 0;
        for (int i = 0; i < 8; i++)
        {
            // t = 8 * i
            h += Sum1(e) + Ch(e, f, g) + K[t] + X[t];
            d += h;
            h += Sum0(a) + Maj(a, b, c);
            ++t;

            // t = 8 * i + 1
            g += Sum1(d) + Ch(d, e, f) + K[t] + X[t];
            c += g;
            g += Sum0(h) + Maj(h, a, b);
            ++t;

            // t = 8 * i + 2
            f += Sum1(c) + Ch(c, d, e) + K[t] + X[t];
            b += f;
            f += Sum0(g) + Maj(g, h, a);
            ++t;

            // t = 8 * i + 3
            e += Sum1(b) + Ch(b, c, d) + K[t] + X[t];
            a += e;
            e += Sum0(f) + Maj(f, g, h);
            ++t;

            // t = 8 * i + 4
            d += Sum1(a) + Ch(a, b, c) + K[t] + X[t];
            h += d;
            d += Sum0(e) + Maj(e, f, g);
            ++t;

            // t = 8 * i + 5
            c += Sum1(h) + Ch(h, a, b) + K[t] + X[t];
            g += c;
            c += Sum0(d) + Maj(d, e, f);
            ++t;

            // t = 8 * i + 6
            b += Sum1(g) + Ch(g, h, a) + K[t] + X[t];
            f += b;
            b += Sum0(c) + Maj(c, d, e);
            ++t;

            // t = 8 * i + 7
            a += Sum1(f) + Ch(f, g, h) + K[t] + X[t];
            e += a;
            a += Sum0(b) + Maj(b, c, d);
            ++t;
        }

        H1 += a;
        H2 += b;
        H3 += c;
        H4 += d;
        H5 += e;
        H6 += f;
        H7 += g;
        H8 += h;

        //
        // reset the offset and clean out the word buffer.
        //
        xOff = 0;
        for (int i = 0; i < 16; i++)
        {
            X[i] = 0;
        }
    }

    /* SHA-256 functions (FIPS 180-4: Ch, Maj, Sigma/sigma built from right-rotations) */
    private int Ch(
        int x,
        int y,
        int z)
    {
        return (x & y) ^ ((~x) & z);
    }

    private int Maj(
        int x,
        int y,
        int z)
    {
        return (x & y) ^ (x & z) ^ (y & z);
    }

    // rotr(x,2) ^ rotr(x,13) ^ rotr(x,22)
    private int Sum0(
        int x)
    {
        return ((x >>> 2) | (x << 30)) ^ ((x >>> 13) | (x << 19)) ^ ((x >>> 22) | (x << 10));
    }

    // rotr(x,6) ^ rotr(x,11) ^ rotr(x,25)
    private int Sum1(
        int x)
    {
        return ((x >>> 6) | (x << 26)) ^ ((x >>> 11) | (x << 21)) ^ ((x >>> 25) | (x << 7));
    }

    // rotr(x,7) ^ rotr(x,18) ^ shr(x,3)
    private int Theta0(
        int x)
    {
        return ((x >>> 7) | (x << 25)) ^ ((x >>> 18) | (x << 14)) ^ (x >>> 3);
    }

    // rotr(x,17) ^ rotr(x,19) ^ shr(x,10)
    private int Theta1(
        int x)
    {
        return ((x >>> 17) | (x << 15)) ^ ((x >>> 19) | (x << 13)) ^ (x >>> 10);
    }

    /* SHA-256 Constants
     * (represent the first 32 bits of the fractional parts of the
     * cube roots of the first sixty-four prime numbers)
     */
    static final int K[] = {
        0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
        0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
        0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
        0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
        0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
        0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
        0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
        0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
    };

    // Memoable: snapshot of the full digest state.
    public Memoable copy()
    {
        return new SHA256Digest(this);
    }

    // Memoable: restore this digest from a snapshot previously taken with copy().
    public void reset(Memoable other)
    {
        SHA256Digest d = (SHA256Digest)other;

        copyIn(d);
    }

    // Serialises the state in the layout documented on the byte[] constructor.
    public byte[] getEncodedState()
    {
        byte[] state = new byte[52 + xOff * 4];

        super.populateState(state);

        Pack.intToBigEndian(H1, state, 16);
        Pack.intToBigEndian(H2, state, 20);
        Pack.intToBigEndian(H3, state, 24);
        Pack.intToBigEndian(H4, state, 28);
        Pack.intToBigEndian(H5, state, 32);
        Pack.intToBigEndian(H6, state, 36);
        Pack.intToBigEndian(H7, state, 40);
        Pack.intToBigEndian(H8, state, 44);
        Pack.intToBigEndian(xOff, state, 48);

        for (int i = 0; i != xOff; i++)
        {
            Pack.intToBigEndian(X[i], state, 52 + (i * 4));
        }

        return state;
    }
}
/**
 * String utilities (repackaged from Bouncy Castle).
 *
 * <p>{@link #toUTF8ByteArray(char[])} is a hand-rolled UTF-16 to UTF-8 encoder that,
 * unlike {@link String#getBytes}, fails fast on malformed surrogate pairs instead of
 * silently substituting replacement characters.
 */
public final class Strings
{

    public Strings() {
    }

    /**
     * Encodes the given UTF-16 character array as UTF-8.
     *
     * @param string the characters to encode
     * @return the UTF-8 encoding of {@code string}
     * @throws IllegalStateException if the input contains a malformed surrogate sequence
     */
    public static byte[] toUTF8ByteArray(char[] string)
    {
        ByteArrayOutputStream bOut = new ByteArrayOutputStream();

        try
        {
            toUTF8ByteArray(string, bOut);
        }
        catch (IOException e)
        {
            // Fix: preserve the root cause. A ByteArrayOutputStream will not actually
            // throw, but if it ever does the original exception must not be lost.
            throw new IllegalStateException("cannot encode string to byte array!", e);
        }

        return bOut.toByteArray();
    }

    /**
     * Encodes the given UTF-16 character array as UTF-8 onto a stream.
     *
     * @param string the characters to encode
     * @param sOut   the stream receiving the UTF-8 bytes
     * @throws IOException           if the stream fails
     * @throws IllegalStateException if the input contains a malformed surrogate sequence
     *                               (high surrogate out of range or truncated pair)
     */
    public static void toUTF8ByteArray(char[] string, OutputStream sOut)
        throws IOException
    {
        char[] c = string;
        int i = 0;

        while (i < c.length)
        {
            char ch = c[i];

            if (ch < 0x0080)
            {
                // 1-byte form: U+0000..U+007F
                sOut.write(ch);
            }
            else if (ch < 0x0800)
            {
                // 2-byte form: U+0080..U+07FF
                sOut.write(0xc0 | (ch >> 6));
                sOut.write(0x80 | (ch & 0x3f));
            }
            // surrogate pair
            else if (ch >= 0xD800 && ch <= 0xDFFF)
            {
                // in error - can only happen, if the Java String class has a
                // bug.
                if (i + 1 >= c.length)
                {
                    throw new IllegalStateException("invalid UTF-16 codepoint");
                }
                char W1 = ch;
                ch = c[++i];
                char W2 = ch;
                // in error - can only happen, if the Java String class has a
                // bug. (A lone low surrogate, or W1 not being a high surrogate.)
                if (W1 > 0xDBFF)
                {
                    throw new IllegalStateException("invalid UTF-16 codepoint");
                }
                // 4-byte form: combine the pair into a U+10000..U+10FFFF code point.
                int codePoint = (((W1 & 0x03FF) << 10) | (W2 & 0x03FF)) + 0x10000;
                sOut.write(0xf0 | (codePoint >> 18));
                sOut.write(0x80 | ((codePoint >> 12) & 0x3F));
                sOut.write(0x80 | ((codePoint >> 6) & 0x3F));
                sOut.write(0x80 | (codePoint & 0x3F));
            }
            else
            {
                // 3-byte form: U+0800..U+FFFF excluding surrogates
                sOut.write(0xe0 | (ch >> 12));
                sOut.write(0x80 | ((ch >> 6) & 0x3F));
                sOut.write(0x80 | (ch & 0x3F));
            }

            i++;
        }
    }

    /**
     * Convert an array of 8 bit characters into a string.
     *
     * @param bytes 8 bit characters.
     * @return resulting String.
     */
    public static String fromByteArray(byte[] bytes)
    {
        return new String(asCharArray(bytes));
    }

    /**
     * Do a simple conversion of an array of 8 bit characters into a string
     * (each byte widened to a char as an unsigned value, i.e. ISO-8859-1).
     *
     * @param bytes 8 bit characters.
     * @return resulting String.
     */
    public static char[] asCharArray(byte[] bytes)
    {
        char[] chars = new char[bytes.length];

        for (int i = 0; i != chars.length; i++)
        {
            chars[i] = (char)(bytes[i] & 0xff);
        }

        return chars;
    }
}
/**
 * Base exception for failures during SCRAM processing.
 *
 * <p>SCRAM is a SASL mechanism, so this type extends {@link SaslException} and can be
 * caught by generic SASL error handling.
 */
@SuppressWarnings("serial")
public class ScramException extends SaslException {

    /**
     * Creates a ScramException that carries only a description of the failure.
     *
     * @param detail A String containing details about the exception
     */
    public ScramException(String detail) {
        super(detail);
    }

    /**
     * Creates a ScramException that carries a description plus the underlying cause.
     *
     * @param detail A String containing details about the exception
     * @param ex The root exception
     */
    public ScramException(String detail, Throwable ex) {
        super(detail, ex);
    }
}
+ * + */ + + +package com.ongres.scram.common.exception; + + +/** + * This class represents an error when parsing SCRAM messages + */ +@SuppressWarnings("serial") +public class ScramInvalidServerSignatureException extends ScramException { + /** + * Constructs a new instance of ScramInvalidServerSignatureException with a detailed message. + * @param detail A String containing details about the exception + */ + public ScramInvalidServerSignatureException(String detail) { + super(detail); + } + + /** + * Constructs a new instance of ScramInvalidServerSignatureException with a detailed message and a root cause. + * @param detail A String containing details about the exception + * @param ex The root exception + */ + public ScramInvalidServerSignatureException(String detail, Throwable ex) { + super(detail, ex); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/exception/ScramParseException.java b/scram-common/src/main/java/com/ongres/scram/common/exception/ScramParseException.java new file mode 100644 index 0000000..5b47c21 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/exception/ScramParseException.java @@ -0,0 +1,48 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.exception; + + +/** + * This class represents an error when parsing SCRAM messages + */ +@SuppressWarnings("serial") +public class ScramParseException extends ScramException { + /** + * Constructs a new instance of ScramParseException with a detailed message. + * @param detail A String containing details about the exception + */ + public ScramParseException(String detail) { + super(detail); + } + + /** + * Constructs a new instance of ScramParseException with a detailed message and a root cause. + * @param detail A String containing details about the exception + * @param ex The root exception + */ + public ScramParseException(String detail, Throwable ex) { + super(detail, ex); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/exception/ScramServerErrorException.java b/scram-common/src/main/java/com/ongres/scram/common/exception/ScramServerErrorException.java new file mode 100644 index 0000000..fae45e0 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/exception/ScramServerErrorException.java @@ -0,0 +1,63 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.exception; + + +import com.ongres.scram.common.message.ServerFinalMessage; + + +/** + * This class represents an error when parsing SCRAM messages + */ +@SuppressWarnings("serial") +public class ScramServerErrorException extends ScramException { + private final ServerFinalMessage.Error error; + + private static String toString(ServerFinalMessage.Error error) { + return "Server-final-message is an error message. Error: " + error.getErrorMessage(); + } + + /** + * Constructs a new instance of ScramServerErrorException with a detailed message. + * @param error The SCRAM error in the message + */ + public ScramServerErrorException(ServerFinalMessage.Error error) { + super(toString(error)); + this.error = error; + } + + /** + * Constructs a new instance of ScramServerErrorException with a detailed message and a root cause. 
+ * @param error The SCRAM error in the message + * @param ex The root exception + */ + public ScramServerErrorException(ServerFinalMessage.Error error, Throwable ex) { + super(toString(error), ex); + this.error = error; + } + + public ServerFinalMessage.Error getError() { + return error; + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2AttributeValue.java b/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2AttributeValue.java new file mode 100644 index 0000000..3869738 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2AttributeValue.java @@ -0,0 +1,61 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + + +package com.ongres.scram.common.gssapi; + + +import com.ongres.scram.common.util.AbstractCharAttributeValue; + +/** + * Parse and write GS2 Attribute-Value pairs. + */ +public class Gs2AttributeValue extends AbstractCharAttributeValue { + public Gs2AttributeValue(Gs2Attributes attribute, String value) { + super(attribute, value); + } + + public static StringBuffer writeTo(StringBuffer sb, Gs2Attributes attribute, String value) { + return new Gs2AttributeValue(attribute, value).writeTo(sb); + } + + /** + * Parses a potential Gs2AttributeValue String. + * @param value The string that contains the Attribute-Value pair (where value is optional). + * @return The parsed class, or null if the String was null. + * @throws IllegalArgumentException If the String is an invalid Gs2AttributeValue + */ + public static Gs2AttributeValue parse(String value) throws IllegalArgumentException { + if(null == value) { + return null; + } + + if(value.length() < 1 || value.length() == 2 || (value.length() > 2 && value.charAt(1) != '=')) { + throw new IllegalArgumentException("Invalid Gs2AttributeValue"); + } + + return new Gs2AttributeValue( + Gs2Attributes.byChar(value.charAt(0)), + value.length() > 2 ? value.substring(2) : null + ); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2Attributes.java b/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2Attributes.java new file mode 100644 index 0000000..d101e73 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2Attributes.java @@ -0,0 +1,83 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.gssapi; + + +import com.ongres.scram.common.ScramAttributes; +import com.ongres.scram.common.util.CharAttribute; + + +/** + * Possible values of a GS2 Attribute. + * + * @see [RFC5802] Formal Syntax + */ +public enum Gs2Attributes implements CharAttribute { + /** + * Channel binding attribute. Client doesn't support channel binding. + */ + CLIENT_NOT(Gs2CbindFlag.CLIENT_NOT.getChar()), + + /** + * Channel binding attribute. Client does support channel binding but thinks the server does not. + */ + CLIENT_YES_SERVER_NOT(Gs2CbindFlag.CLIENT_YES_SERVER_NOT.getChar()), + + /** + * Channel binding attribute. Client requires channel binding. The selected channel binding follows "p=". + */ + CHANNEL_BINDING_REQUIRED(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED.getChar()), + + /** + * SCRAM attribute. This attribute specifies an authorization identity. 
+ */ + AUTHZID(ScramAttributes.AUTHZID.getChar()) + ; + + private final char flag; + + Gs2Attributes(char flag) { + this.flag = flag; + } + + @Override + public char getChar() { + return flag; + } + + public static Gs2Attributes byChar(char c) { + switch(c) { + case 'n': return CLIENT_NOT; + case 'y': return CLIENT_YES_SERVER_NOT; + case 'p': return CHANNEL_BINDING_REQUIRED; + case 'a': return AUTHZID; + } + + throw new IllegalArgumentException("Invalid GS2Attribute character '" + c + "'"); + } + + public static Gs2Attributes byGS2CbindFlag(Gs2CbindFlag cbindFlag) { + return byChar(cbindFlag.getChar()); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2CbindFlag.java b/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2CbindFlag.java new file mode 100644 index 0000000..e2025fa --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2CbindFlag.java @@ -0,0 +1,73 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.gssapi; + + +import com.ongres.scram.common.util.CharAttribute; + + +/** + * Possible values of a GS2 Cbind Flag (channel binding; part of GS2 header). + * These values are sent by the client, and so are interpreted from this perspective. + * + * @see [RFC5802] Formal Syntax + */ +public enum Gs2CbindFlag implements CharAttribute { + /** + * Client doesn't support channel binding. + */ + CLIENT_NOT('n'), + + /** + * Client does support channel binding but thinks the server does not. + */ + CLIENT_YES_SERVER_NOT('y'), + + /** + * Client requires channel binding. The selected channel binding follows "p=". + */ + CHANNEL_BINDING_REQUIRED('p') + ; + + private final char flag; + + Gs2CbindFlag(char flag) { + this.flag = flag; + } + + @Override + public char getChar() { + return flag; + } + + public static Gs2CbindFlag byChar(char c) { + switch(c) { + case 'n': return CLIENT_NOT; + case 'y': return CLIENT_YES_SERVER_NOT; + case 'p': return CHANNEL_BINDING_REQUIRED; + } + + throw new IllegalArgumentException("Invalid Gs2CbindFlag character '" + c + "'"); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2Header.java b/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2Header.java new file mode 100644 index 0000000..b794967 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/gssapi/Gs2Header.java @@ -0,0 +1,132 @@ +/* + * Copyright 2017, OnGres. 
+ * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.gssapi; + + +import com.ongres.scram.common.util.StringWritableCsv; +import com.ongres.scram.common.ScramStringFormatting; +import com.ongres.scram.common.util.AbstractStringWritable; + +import static com.ongres.scram.common.util.Preconditions.checkNotNull; + + +/** + * GSS Header. Format: + * + * {@code + * gs2-header = gs2-cbind-flag "," [ authzid ] "," + * gs2-cbind-flag = ("p=" cb-name) / "n" / "y" + * authzid = "a=" saslname + * } + * + * Current implementation does not support channel binding. + * If p is used as the cbind flag, the cb-name value is not validated. 
/**
 * GS2 Header. Format:
 *
 * {@code
 * gs2-header = gs2-cbind-flag "," [ authzid ] ","
 * gs2-cbind-flag = ("p=" cb-name) / "n" / "y"
 * authzid = "a=" saslname
 * }
 *
 * Current implementation does not support channel binding.
 * If p is used as the cbind flag, the cb-name value is not validated.
 *
 * @see <a href="https://tools.ietf.org/html/rfc5802#section-7">[RFC5802] Formal Syntax</a>
 */
public class Gs2Header extends AbstractStringWritable {
    // First header field: the channel-binding flag, optionally carrying a cb-name.
    private final Gs2AttributeValue cbind;
    // Second header field: the optional "a=" authorization identity (null if absent).
    private final Gs2AttributeValue authzid;

    /**
     * Construct and validates a Gs2Header.
     * Only provide the channel binding name if the channel binding flag is set to required.
     * @param cbindFlag The channel binding flag
     * @param cbName The channel-binding name. Should be not null iif channel binding is required
     * @param authzid The optional SASL authorization identity
     * @throws IllegalArgumentException If the channel binding flag and argument are invalid
     */
    public Gs2Header(Gs2CbindFlag cbindFlag, String cbName, String authzid) throws IllegalArgumentException {
        checkNotNull(cbindFlag, "cbindFlag");
        // XOR: cbName must be present exactly when the flag is CHANNEL_BINDING_REQUIRED.
        if(cbindFlag == Gs2CbindFlag.CHANNEL_BINDING_REQUIRED ^ cbName != null) {
            throw new IllegalArgumentException("Specify channel binding flag and value together, or none");
        }
        // TODO: cbName is not being properly validated
        cbind = new Gs2AttributeValue(Gs2Attributes.byGS2CbindFlag(cbindFlag), cbName);

        // The authzid is escaped to SASL-name form (',' and '=' need escaping) before storage.
        this.authzid = authzid == null ?
            null : new Gs2AttributeValue(Gs2Attributes.AUTHZID, ScramStringFormatting.toSaslName(authzid))
        ;
    }

    /**
     * Construct and validates a Gs2Header with no authzid.
     * Only provide the channel binding name if the channel binding flag is set to required.
     * @param cbindFlag The channel binding flag
     * @param cbName The channel-binding name. Should be not null iif channel binding is required
     * @throws IllegalArgumentException If the channel binding flag and argument are invalid
     */
    public Gs2Header(Gs2CbindFlag cbindFlag, String cbName) throws IllegalArgumentException {
        this(cbindFlag, cbName, null);
    }

    /**
     * Construct and validates a Gs2Header with no authzid nor channel binding.
     * @param cbindFlag The channel binding flag
     * @throws IllegalArgumentException If the channel binding is supported (no cbname can be provided here)
     */
    public Gs2Header(Gs2CbindFlag cbindFlag) {
        this(cbindFlag, null, null);
    }

    /** Returns the channel-binding flag, reconstructed from the stored attribute character. */
    public Gs2CbindFlag getChannelBindingFlag() {
        return Gs2CbindFlag.byChar(cbind.getChar());
    }

    /** Returns the channel-binding name, or null when no channel binding was requested. */
    public String getChannelBindingName() {
        return cbind.getValue();
    }

    /** Returns the (SASL-name escaped) authorization identity, or null if none was set. */
    public String getAuthzid() {
        return authzid != null ? authzid.getValue() : null;
    }

    @Override
    public StringBuffer writeTo(StringBuffer sb) {
        // Serialised as "cbind,authzid" (authzid position empty when null).
        return StringWritableCsv.writeTo(sb, cbind, authzid);
    }

    /**
     * Read a Gs2Header from a String. String may contain trailing fields that will be ignored.
     * @param message The String containing the Gs2Header
     * @return The parsed Gs2Header object
     * @throws IllegalArgumentException If the format/values of the String do not conform to a Gs2Header
     */
    public static Gs2Header parseFrom(String message) throws IllegalArgumentException {
        checkNotNull(message, "Null message");

        // Split only the first two CSV fields: cbind flag (+ optional name) and authzid.
        String[] gs2HeaderSplit = StringWritableCsv.parseFrom(message, 2);
        if(gs2HeaderSplit.length == 0) {
            throw new IllegalArgumentException("Invalid number of fields for the GS2 Header");
        }

        // NOTE(review): Gs2AttributeValue.parse returns null for a null input; if
        // gs2HeaderSplit[0] can be null this dereference would NPE — confirm that
        // StringWritableCsv.parseFrom never yields a null first element.
        Gs2AttributeValue gs2cbind = Gs2AttributeValue.parse(gs2HeaderSplit[0]);
        return new Gs2Header(
            Gs2CbindFlag.byChar(gs2cbind.getChar()),
            gs2cbind.getValue(),
            gs2HeaderSplit[1] == null || gs2HeaderSplit[1].isEmpty() ?
                null : Gs2AttributeValue.parse(gs2HeaderSplit[1]).getValue()
        );
    }
}
+ * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.message; + + +import com.ongres.scram.common.ScramAttributeValue; +import com.ongres.scram.common.ScramAttributes; +import com.ongres.scram.common.ScramStringFormatting; +import com.ongres.scram.common.gssapi.Gs2Header; +import com.ongres.scram.common.util.StringWritable; +import com.ongres.scram.common.util.StringWritableCsv; + +import static com.ongres.scram.common.util.Preconditions.checkNotEmpty; +import static com.ongres.scram.common.util.Preconditions.checkNotNull; + + +/** + * Constructs and parses client-final-messages. 
Formal syntax is: + * + * {@code + * client-final-message-without-proof = channel-binding "," nonce ["," extensions] + * client-final-message = client-final-message-without-proof "," proof + * } + * + * Note that extensions are not supported. + * + * @see [RFC5802] Section 7 + */ +public class ClientFinalMessage implements StringWritable { + private final String cbind; + private final String nonce; + private final byte[] proof; + + private static String generateCBind(Gs2Header gs2Header, byte[] cbindData) { + StringBuffer sb = new StringBuffer(); + gs2Header.writeTo(sb) + .append(','); + + if(null != cbindData) { + new ScramAttributeValue( + ScramAttributes.CHANNEL_BINDING, + ScramStringFormatting.base64Encode(cbindData) + ).writeTo(sb); + } + + return sb.toString(); + } + + /** + * Constructus a client-final-message with the provided gs2Header (the same one used in the client-first-message), + * optionally the channel binding data, and the nonce. + * This method is intended to be used by SCRAM clients, and not to be constructed directly. 
+ * @param gs2Header The GSS-API header + * @param cbindData If using channel binding, the channel binding data + * @param nonce The nonce + * @param proof The bytes representing the computed client proof + */ + public ClientFinalMessage(Gs2Header gs2Header, byte[] cbindData, String nonce, byte[] proof) { + this.cbind = generateCBind( + checkNotNull(gs2Header, "gs2Header"), + cbindData + ); + this.nonce = checkNotEmpty(nonce, "nonce"); + this.proof = checkNotNull(proof, "proof"); + } + + private static StringBuffer writeToWithoutProof(StringBuffer sb, String cbind, String nonce) { + return StringWritableCsv.writeTo( + sb, + new ScramAttributeValue(ScramAttributes.CHANNEL_BINDING, ScramStringFormatting.base64Encode(cbind)), + new ScramAttributeValue(ScramAttributes.NONCE, nonce) + ); + } + + private static StringBuffer writeToWithoutProof( + StringBuffer sb, Gs2Header gs2Header, byte[] cbindData, String nonce + ) { + return writeToWithoutProof( + sb, + generateCBind( + checkNotNull(gs2Header, "gs2Header"), + cbindData + ), + nonce + ); + } + + /** + * Returns a StringBuffer filled in with the formatted output of a client-first-message without the proof value. + * This is useful for computing the auth-message, used in turn to compute the proof. 
+ * @param gs2Header The GSS-API header + * @param cbindData The optional channel binding data + * @param nonce The nonce + * @return The String representation of the part of the message that excludes the proof + */ + public static StringBuffer writeToWithoutProof(Gs2Header gs2Header, byte[] cbindData, String nonce) { + return writeToWithoutProof(new StringBuffer(), gs2Header, cbindData, nonce); + } + + @Override + public StringBuffer writeTo(StringBuffer sb) { + writeToWithoutProof(sb, cbind, nonce); + + return StringWritableCsv.writeTo( + sb, + null, // This marks the position of writeToWithoutProof, required for the "," + new ScramAttributeValue(ScramAttributes.CLIENT_PROOF, ScramStringFormatting.base64Encode(proof)) + ); + } + + @Override + public String toString() { + return writeTo(new StringBuffer()).toString(); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/message/ClientFirstMessage.java b/scram-common/src/main/java/com/ongres/scram/common/message/ClientFirstMessage.java new file mode 100644 index 0000000..b2e9291 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/message/ClientFirstMessage.java @@ -0,0 +1,200 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.message; + + +import com.ongres.scram.common.ScramAttributeValue; +import com.ongres.scram.common.ScramAttributes; +import com.ongres.scram.common.ScramStringFormatting; +import com.ongres.scram.common.exception.ScramParseException; +import com.ongres.scram.common.gssapi.Gs2CbindFlag; +import com.ongres.scram.common.gssapi.Gs2Header; +import com.ongres.scram.common.util.StringWritable; +import com.ongres.scram.common.util.StringWritableCsv; + +import static com.ongres.scram.common.util.Preconditions.checkNotEmpty; +import static com.ongres.scram.common.util.Preconditions.checkNotNull; + + +/** + * Constructs and parses client-first-messages. + * Message contains a {@link Gs2Header}, a username and a nonce. Formal syntax is: + * + * {@code + * client-first-message-bare = [reserved-mext ","] username "," nonce ["," extensions] + client-first-message = gs2-header client-first-message-bare + * } + * + * Note that extensions are not supported. + * + * @see [RFC5802] Section 7 + */ +public class ClientFirstMessage implements StringWritable { + private final Gs2Header gs2Header; + private final String user; + private final String nonce; + + /** + * Constructs a client-first-message for the given user, nonce and gs2Header. + * This constructor is intended to be instantiated by a scram client, and not directly. + * The client should be providing the header, and nonce (and probably the user too). 
+ * @param gs2Header The GSS-API header + * @param user The SCRAM user + * @param nonce The nonce for this session + * @throws IllegalArgumentException If any of the arguments is null or empty + */ + public ClientFirstMessage(Gs2Header gs2Header, String user, String nonce) throws IllegalArgumentException { + this.gs2Header = checkNotNull(gs2Header, "gs2Header"); + this.user = checkNotEmpty(user, "user"); + this.nonce = checkNotEmpty(nonce, "nonce"); + } + + private static Gs2Header gs2Header(Gs2CbindFlag gs2CbindFlag, String authzid, String cbindName) { + checkNotNull(gs2CbindFlag, "gs2CbindFlag"); + if(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED == gs2CbindFlag && null == cbindName) { + throw new IllegalArgumentException("Channel binding name is required if channel binding is specified"); + } + + return new Gs2Header(gs2CbindFlag, cbindName, authzid); + } + + /** + * Constructs a client-first-message for the given parameters. + * Under normal operation, this constructor is intended to be instantiated by a scram client, and not directly. + * However, this constructor is more user- or test-friendly, as the arguments are easier to provide without + * building other indirect object parameters. + * @param gs2CbindFlag The channel-binding flag + * @param authzid The optional authzid + * @param cbindName The optional channel binding name + * @param user The SCRAM user + * @param nonce The nonce for this session + * @throws IllegalArgumentException If the flag, user or nonce are null or empty + */ + public ClientFirstMessage(Gs2CbindFlag gs2CbindFlag, String authzid, String cbindName, String user, String nonce) { + this(gs2Header(gs2CbindFlag, authzid, cbindName), user, nonce); + } + + /** + * Constructs a client-first-message for the given parameters, with no channel binding nor authzid. + * Under normal operation, this constructor is intended to be instantiated by a scram client, and not directly. 
+ * However, this constructor is more user- or test-friendly, as the arguments are easier to provide without + * building other indirect object parameters. + * @param user The SCRAM user + * @param nonce The nonce for this session + * @throws IllegalArgumentException If the user or nonce are null or empty + */ + public ClientFirstMessage(String user, String nonce) { + this(gs2Header(Gs2CbindFlag.CLIENT_NOT, null, null), user, nonce); + } + + public Gs2CbindFlag getChannelBindingFlag() { + return gs2Header.getChannelBindingFlag(); + } + + public boolean isChannelBinding() { + return gs2Header.getChannelBindingFlag() == Gs2CbindFlag.CHANNEL_BINDING_REQUIRED; + } + + public String getChannelBindingName() { + return gs2Header.getChannelBindingName(); + } + + public String getAuthzid() { + return gs2Header.getAuthzid(); + } + + public Gs2Header getGs2Header() { + return gs2Header; + } + + public String getUser() { + return user; + } + + public String getNonce() { + return nonce; + } + + /** + * Limited version of the {@link StringWritableCsv#toString()} method, that doesn't write the GS2 header. + * This method is useful to construct the auth message used as part of the SCRAM algorithm. + * @param sb A StringBuffer where to write the data to. 
+ * @return The same StringBuffer + */ + public StringBuffer writeToWithoutGs2Header(StringBuffer sb) { + return StringWritableCsv.writeTo( + sb, + new ScramAttributeValue(ScramAttributes.USERNAME, ScramStringFormatting.toSaslName(user)), + new ScramAttributeValue(ScramAttributes.NONCE, nonce) + ); + } + + @Override + public StringBuffer writeTo(StringBuffer sb) { + StringWritableCsv.writeTo( + sb, + gs2Header, + null // This marks the position of the rest of the elements, required for the "," + ); + + return writeToWithoutGs2Header(sb); + } + + /** + * Construct a {@link ClientFirstMessage} instance from a message (String) + * @param clientFirstMessage The String representing the client-first-message + * @return The instance + * @throws ScramParseException If the message is not a valid client-first-message + * @throws IllegalArgumentException If the message is null or empty + */ + public static ClientFirstMessage parseFrom(String clientFirstMessage) + throws ScramParseException, IllegalArgumentException { + checkNotEmpty(clientFirstMessage, "clientFirstMessage"); + + Gs2Header gs2Header = Gs2Header.parseFrom(clientFirstMessage); // Takes first two fields + String[] userNonceString; + try { + userNonceString = StringWritableCsv.parseFrom(clientFirstMessage, 2, 2); + } catch (IllegalArgumentException e) { + throw new ScramParseException("Illegal series of attributes in client-first-message", e); + } + + ScramAttributeValue user = ScramAttributeValue.parse(userNonceString[0]); + if(ScramAttributes.USERNAME.getChar() != user.getChar()) { + throw new ScramParseException("user must be the 3rd element of the client-first-message"); + } + + ScramAttributeValue nonce = ScramAttributeValue.parse(userNonceString[1]); + if(ScramAttributes.NONCE.getChar() != nonce.getChar()) { + throw new ScramParseException("nonce must be the 4th element of the client-first-message"); + } + + return new ClientFirstMessage(gs2Header, user.getValue(), nonce.getValue()); + } + + @Override + 
public String toString() { + return writeTo(new StringBuffer()).toString(); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/message/ServerFinalMessage.java b/scram-common/src/main/java/com/ongres/scram/common/message/ServerFinalMessage.java new file mode 100644 index 0000000..0020f8a --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/message/ServerFinalMessage.java @@ -0,0 +1,215 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + + +package com.ongres.scram.common.message; + + +import static com.ongres.scram.common.util.Preconditions.checkNotEmpty; +import static com.ongres.scram.common.util.Preconditions.checkNotNull; + +import java.util.HashMap; +import java.util.Map; + +import com.ongres.scram.common.ScramAttributeValue; +import com.ongres.scram.common.ScramAttributes; +import com.ongres.scram.common.ScramStringFormatting; +import com.ongres.scram.common.exception.ScramParseException; +import com.ongres.scram.common.util.StringWritable; +import com.ongres.scram.common.util.StringWritableCsv; + +/** + * Constructs and parses server-final-messages. Formal syntax is: + * + * {@code + * server-error = "e=" server-error-value + * + * server-error-value = "invalid-encoding" / + * "extensions-not-supported" / ; unrecognized 'm' value + * "invalid-proof" / + * "channel-bindings-dont-match" / + * "server-does-support-channel-binding" / + * ; server does not support channel binding + * "channel-binding-not-supported" / + * "unsupported-channel-binding-type" / + * "unknown-user" / + * "invalid-username-encoding" / + * ; invalid username encoding (invalid UTF-8 or + * ; SASLprep failed) + * "no-resources" / + * "other-error" / + * server-error-value-ext + * ; Unrecognized errors should be treated as "other-error". + * ; In order to prevent information disclosure, the server + * ; may substitute the real reason with "other-error". + * + * server-error-value-ext = value + * ; Additional error reasons added by extensions + * ; to this document. + * + * verifier = "v=" base64 + * ;; base-64 encoded ServerSignature. + * + * server-final-errorMessage = (server-error / verifier) + * ["," extensions] + * } + * + * Note that extensions are not supported (and, consequently, error message extensions). + * + * @see [RFC5802] Section 7 + */ +public class ServerFinalMessage implements StringWritable { + + /** + * Possible error messages sent on a server-final-message. 
+ */ + public enum Error { + INVALID_ENCODING("invalid-encoding"), + EXTENSIONS_NOT_SUPPORTED("extensions-not-supported"), + INVALID_PROOF("invalid-proof"), + CHANNEL_BINDINGS_DONT_MATCH("channel-bindings-dont-match"), + SERVER_DOES_SUPPORT_CHANNEL_BINDING("server-does-support-channel-binding"), + CHANNEL_BINDING_NOT_SUPPORTED("channel-binding-not-supported"), + UNSUPPORTED_CHANNEL_BINDING_TYPE("unsupported-channel-binding-type"), + UNKNOWN_USER("unknown-user"), + INVALID_USERNAME_ENCODING("invalid-username-encoding"), + NO_RESOURCES("no-resources"), + OTHER_ERROR("other-error") + ; + + private static final Map BY_NAME_MAPPING = valuesAsMap(); + + private final String errorMessage; + + Error(String errorMessage) { + this.errorMessage = errorMessage; + } + + public String getErrorMessage() { + return errorMessage; + } + + public static Error getByErrorMessage(String errorMessage) throws IllegalArgumentException { + checkNotEmpty(errorMessage, "errorMessage"); + + if(! BY_NAME_MAPPING.containsKey(errorMessage)) { + throw new IllegalArgumentException("Invalid error message '" + errorMessage + "'"); + } + + return BY_NAME_MAPPING.get(errorMessage); + } + + private static Map valuesAsMap() { + Map map = new HashMap<>(values().length); + for (Error error : values()) { + map.put(error.errorMessage, error); + } + return map; + } + + } + + private final byte[] verifier; + private final Error error; + + /** + * Constructs a server-final-message with no errors, and the provided server verifier + * @param verifier The bytes of the computed signature + * @throws IllegalArgumentException If the verifier is null + */ + public ServerFinalMessage(byte[] verifier) throws IllegalArgumentException { + this.verifier = checkNotNull(verifier, "verifier"); + this.error = null; + } + + /** + * Constructs a server-final-message which represents a SCRAM error. 
+ * @param error The error + * @throws IllegalArgumentException If the error is null + */ + public ServerFinalMessage(Error error) throws IllegalArgumentException { + this.error = checkNotNull(error, "error"); + this.verifier = null; + } + + /** + * Whether this server-final-message contains an error + * @return True if it contains an error, false if it contains a verifier + */ + public boolean isError() { + return null != error; + } + + public byte[] getVerifier() { + return verifier; + } + + public Error getError() { + return error; + } + + @Override + public StringBuffer writeTo(StringBuffer sb) { + return StringWritableCsv.writeTo( + sb, + isError() ? + new ScramAttributeValue(ScramAttributes.ERROR, error.errorMessage) + : new ScramAttributeValue( + ScramAttributes.SERVER_SIGNATURE, ScramStringFormatting.base64Encode(verifier) + ) + ); + } + + /** + * Parses a server-final-message from a String. + * @param serverFinalMessage The message + * @return A constructed server-final-message instance + * @throws ScramParseException If the argument is not a valid server-final-message + * @throws IllegalArgumentException If the message is null or empty + */ + public static ServerFinalMessage parseFrom(String serverFinalMessage) + throws ScramParseException, IllegalArgumentException { + checkNotEmpty(serverFinalMessage, "serverFinalMessage"); + + String[] attributeValues = StringWritableCsv.parseFrom(serverFinalMessage, 1, 0); + if(attributeValues == null || attributeValues.length != 1) { + throw new ScramParseException("Invalid server-final-message"); + } + + ScramAttributeValue attributeValue = ScramAttributeValue.parse(attributeValues[0]); + if(ScramAttributes.SERVER_SIGNATURE.getChar() == attributeValue.getChar()) { + byte[] verifier = ScramStringFormatting.base64Decode(attributeValue.getValue()); + return new ServerFinalMessage(verifier); + } else if(ScramAttributes.ERROR.getChar() == attributeValue.getChar()) { + return new 
ServerFinalMessage(Error.getByErrorMessage(attributeValue.getValue())); + } else { + throw new ScramParseException( + "Invalid server-final-message: it must contain either a verifier or an error attribute" + ); + } + } + + @Override + public String toString() { + return writeTo(new StringBuffer()).toString(); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/message/ServerFirstMessage.java b/scram-common/src/main/java/com/ongres/scram/common/message/ServerFirstMessage.java new file mode 100644 index 0000000..08ba9dd --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/message/ServerFirstMessage.java @@ -0,0 +1,161 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + + +package com.ongres.scram.common.message; + + +import com.ongres.scram.common.ScramAttributeValue; +import com.ongres.scram.common.ScramAttributes; +import com.ongres.scram.common.exception.ScramParseException; +import com.ongres.scram.common.util.StringWritable; +import com.ongres.scram.common.util.StringWritableCsv; + +import static com.ongres.scram.common.util.Preconditions.checkArgument; +import static com.ongres.scram.common.util.Preconditions.checkNotEmpty; + + +/** + * Constructs and parses server-first-messages. Formal syntax is: + * + * {@code + * server-first-message = [reserved-mext ","] nonce "," salt "," + * iteration-count ["," extensions] + * } + * + * Note that extensions are not supported. + * + * @see [RFC5802] Section 7 + */ +public class ServerFirstMessage implements StringWritable { + /** + * Minimum allowed value for the iteration, as per the RFC. + */ + public static final int ITERATION_MIN_VALUE = 4096; + + private final String clientNonce; + private final String serverNonce; + private final String salt; + private final int iteration; + + /** + * Constructs a server-first-message from a client-first-message and the additional required data. 
+ * @param clientNonce String representing the client-first-message + * @param serverNonce Server serverNonce + * @param salt The salt + * @param iteration The iteration count (must be <= 4096) + * @throws IllegalArgumentException If clientFirstMessage, serverNonce or salt are null or empty, + * or iteration < 4096 + */ + public ServerFirstMessage( + String clientNonce, String serverNonce, String salt, int iteration + ) throws IllegalArgumentException { + this.clientNonce = checkNotEmpty(clientNonce, "clientNonce"); + this.serverNonce = checkNotEmpty(serverNonce, "serverNonce"); + this.salt = checkNotEmpty(salt, "salt"); + checkArgument(iteration >= ITERATION_MIN_VALUE, "iteration must be >= " + ITERATION_MIN_VALUE); + this.iteration = iteration; + } + + public String getClientNonce() { + return clientNonce; + } + + public String getServerNonce() { + return serverNonce; + } + + public String getNonce() { + return clientNonce + serverNonce; + } + + public String getSalt() { + return salt; + } + + public int getIteration() { + return iteration; + } + + @Override + public StringBuffer writeTo(StringBuffer sb) { + return StringWritableCsv.writeTo( + sb, + new ScramAttributeValue(ScramAttributes.NONCE, getNonce()), + new ScramAttributeValue(ScramAttributes.SALT, salt), + new ScramAttributeValue(ScramAttributes.ITERATION, iteration + "") + ); + } + + /** + * Parses a server-first-message from a String. 
+ * @param serverFirstMessage The string representing the server-first-message + * @param clientNonce The serverNonce that is present in the client-first-message + * @return The parsed instance + * @throws ScramParseException If the argument is not a valid server-first-message + * @throws IllegalArgumentException If either argument is empty or serverFirstMessage is not a valid message + */ + public static ServerFirstMessage parseFrom(String serverFirstMessage, String clientNonce) + throws ScramParseException, IllegalArgumentException { + checkNotEmpty(serverFirstMessage, "serverFirstMessage"); + checkNotEmpty(clientNonce, "clientNonce"); + + String[] attributeValues = StringWritableCsv.parseFrom(serverFirstMessage, 3, 0); + if(attributeValues.length != 3) { + throw new ScramParseException("Invalid server-first-message"); + } + + ScramAttributeValue nonce = ScramAttributeValue.parse(attributeValues[0]); + if(ScramAttributes.NONCE.getChar() != nonce.getChar()) { + throw new ScramParseException("serverNonce must be the 1st element of the server-first-message"); + } + if(! 
nonce.getValue().startsWith(clientNonce)) { + throw new ScramParseException("parsed serverNonce does not start with client serverNonce"); + } + + ScramAttributeValue salt = ScramAttributeValue.parse(attributeValues[1]); + if(ScramAttributes.SALT.getChar() != salt.getChar()) { + throw new ScramParseException("salt must be the 2nd element of the server-first-message"); + } + + ScramAttributeValue iteration = ScramAttributeValue.parse(attributeValues[2]); + if(ScramAttributes.ITERATION.getChar() != iteration.getChar()) { + throw new ScramParseException("iteration must be the 3rd element of the server-first-message"); + } + + int iterationInt; + try { + iterationInt = Integer.parseInt(iteration.getValue()); + } catch (NumberFormatException e) { + throw new ScramParseException("invalid iteration"); + } + + return new ServerFirstMessage( + clientNonce, nonce.getValue().substring(clientNonce.length()), salt.getValue(), iterationInt + ); + } + + @Override + public String toString() { + return writeTo(new StringBuffer()).toString(); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/stringprep/StringPreparation.java b/scram-common/src/main/java/com/ongres/scram/common/stringprep/StringPreparation.java new file mode 100644 index 0000000..941a0ee --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/stringprep/StringPreparation.java @@ -0,0 +1,38 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. 
/**
 * Interface for all possible String Preparations mechanisms.
 */
public interface StringPreparation {
    /**
     * Normalize a UTF-8 String according to this String Preparation rules.
     *
     * @param value The String to prepare
     * @return The prepared String
     * @throws IllegalArgumentException If the String to prepare is not valid.
     */
    String normalize(String value) throws IllegalArgumentException;
}
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.stringprep; + +import com.ongres.scram.common.util.UsAsciiUtils; +import com.ongres.stringprep.Profile; +import com.ongres.stringprep.Stringprep; + +import static com.ongres.scram.common.util.Preconditions.checkNotEmpty; + +public enum StringPreparations implements StringPreparation { + /** + * Implementation of StringPreparation that performs no preparation. + * Non US-ASCII characters will produce an exception. + * Even though the [RFC5802] is not very clear about it, + * this implementation will normalize non-printable US-ASCII characters similarly to what SaslPrep does + * (i.e., removing them). + */ + NO_PREPARATION { + @Override + protected String doNormalize(String value) throws IllegalArgumentException { + return UsAsciiUtils.toPrintable(value); + } + }, + /** + * Implementation of StringPreparation that performs preparation. + * Non US-ASCII characters will produce an exception. 
+ * Even though the [RFC5802] is not very clear about it, + * this implementation will normalize as SaslPrep does. + */ + SASL_PREPARATION { + @Override + protected String doNormalize(String value) throws IllegalArgumentException { + Profile saslPrep = Stringprep.getProvider("SASLprep"); + return saslPrep.prepareStored(value); + } + } + ; + + protected abstract String doNormalize(String value) throws IllegalArgumentException; + + public String normalize(String value) throws IllegalArgumentException { + checkNotEmpty(value, "value"); + + String normalized = doNormalize(value); + + if(null == normalized || normalized.isEmpty()) { + throw new IllegalArgumentException("null or empty value after normalization"); + } + + return normalized; + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/util/AbstractCharAttributeValue.java b/scram-common/src/main/java/com/ongres/scram/common/util/AbstractCharAttributeValue.java new file mode 100644 index 0000000..b554208 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/util/AbstractCharAttributeValue.java @@ -0,0 +1,67 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.util; + + +import static com.ongres.scram.common.util.Preconditions.checkNotNull; + + +/** + * Construct and write generic CharAttribute-Value pairs. + * + * Concrete sub-classes should also provide a static parse(String) creation method. + */ +public class AbstractCharAttributeValue extends AbstractStringWritable implements CharAttributeValue { + private final CharAttribute charAttribute; + private final String value; + + public AbstractCharAttributeValue(CharAttribute charAttribute, String value) throws IllegalArgumentException { + this.charAttribute = checkNotNull(charAttribute, "attribute"); + if(null != value && value.isEmpty()) { + throw new IllegalArgumentException("Value should be either null or non-empty"); + } + this.value = value; + } + + @Override + public char getChar() { + return charAttribute.getChar(); + } + + @Override + public String getValue() { + return value; + } + + @Override + public StringBuffer writeTo(StringBuffer sb) { + sb.append(charAttribute.getChar()); + + if(null != value) { + sb.append('=').append(value); + } + + return sb; + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/util/AbstractStringWritable.java b/scram-common/src/main/java/com/ongres/scram/common/util/AbstractStringWritable.java new file mode 100644 index 0000000..bec981c --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/util/AbstractStringWritable.java @@ -0,0 
+1,38 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.util; + + +/** + * Basic implementation of the StringWritable interface, that overrides the toString() method. 
+ */ +public abstract class AbstractStringWritable implements StringWritable { + + public AbstractStringWritable() { + } + + public String toString() { + return writeTo(new StringBuffer()).toString(); + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/util/CharAttribute.java b/scram-common/src/main/java/com/ongres/scram/common/util/CharAttribute.java new file mode 100644 index 0000000..e1ccce8 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/util/CharAttribute.java @@ -0,0 +1,36 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
/*
 * Copyright 2017, OnGres.
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
 * following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
 * following disclaimer in the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

package com.ongres.scram.common.util;

/**
 * Represents an attribute (a key name) that is identified by a single char.
 */
public interface CharAttribute {

    /**
     * Returns the char used to represent this attribute.
     *
     * @return the character of the attribute
     */
    char getChar();
}
+ * + */ + + +package com.ongres.scram.common.util; + + +/** + * Augments a {@link CharAttribute} with a String value and the method(s) to write its data to a StringBuffer. + */ +public interface CharAttributeValue extends CharAttribute, StringWritable { + /** + * Returns the value associated with the {@link CharAttribute} + * @return The String value or null if no value is associated + */ + String getValue(); +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/util/CryptoUtil.java b/scram-common/src/main/java/com/ongres/scram/common/util/CryptoUtil.java new file mode 100644 index 0000000..848166c --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/util/CryptoUtil.java @@ -0,0 +1,183 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
/*
 * Copyright 2017, OnGres.
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
 * following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
 * following disclaimer in the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

package com.ongres.scram.common.util;

import javax.crypto.Mac;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.security.InvalidKeyException;
import java.security.SecureRandom;
import java.security.spec.InvalidKeySpecException;

import static com.ongres.scram.common.util.Preconditions.checkArgument;
import static com.ongres.scram.common.util.Preconditions.checkNotNull;

/**
 * Utility static methods for cryptography related tasks.
 */
public class CryptoUtil {
    private static final int MIN_ASCII_PRINTABLE_RANGE = 0x21;
    private static final int MAX_ASCII_PRINTABLE_RANGE = 0x7e;
    private static final int EXCLUDED_CHAR = (int) ','; // 0x2c

    public CryptoUtil() {
    }

    /** Lazy holder idiom: the SecureRandom is created only on first use of {@link #nonce(int)}. */
    private static class SecureRandomHolder {
        private static final SecureRandom INSTANCE = new SecureRandom();
    }

    /**
     * Generates a random string (called a 'nonce'), composed of ASCII printable characters, except comma (',').
     *
     * @param size The length of the nonce, in characters/bytes
     * @param random The SecureRandom to use
     * @return The String representing the nonce
     * @throws IllegalArgumentException If size is not positive
     */
    public static String nonce(int size, SecureRandom random) {
        if (size <= 0) {
            throw new IllegalArgumentException("Size must be positive");
        }

        char[] chars = new char[size];
        int r;
        // Rejection sampling: draw uniformly from the printable range, retry on ','
        // so the distribution over the remaining characters stays uniform.
        for (int i = 0; i < size;) {
            r = random.nextInt(MAX_ASCII_PRINTABLE_RANGE - MIN_ASCII_PRINTABLE_RANGE + 1) + MIN_ASCII_PRINTABLE_RANGE;
            if (r != EXCLUDED_CHAR) {
                chars[i++] = (char) r;
            }
        }

        return new String(chars);
    }

    /**
     * Generates a random string (called a 'nonce'), composed of ASCII printable characters, except comma (',').
     * It uses a default SecureRandom instance.
     *
     * @param size The length of the nonce, in characters/bytes
     * @return The String representing the nonce
     */
    public static String nonce(int size) {
        return nonce(size, SecureRandomHolder.INSTANCE);
    }

    /**
     * Compute the "Hi" function for SCRAM.
     *
     * {@code
     * Hi(str, salt, i):
     *
     * U1 := HMAC(str, salt + INT(1))
     * U2 := HMAC(str, U1)
     * ...
     * Ui-1 := HMAC(str, Ui-2)
     * Ui := HMAC(str, Ui-1)
     *
     * Hi := U1 XOR U2 XOR ... XOR Ui
     *
     * where "i" is the iteration count, "+" is the string concatenation
     * operator, and INT(g) is a 4-octet encoding of the integer g, most
     * significant octet first.
     *
     * Hi() is, essentially, PBKDF2 [RFC2898] with HMAC() as the
     * pseudorandom function (PRF) and with dkLen == output length of
     * HMAC() == output length of H().
     * }
     *
     * @param secretKeyFactory The SecretKeyFactory to generate the SecretKey
     * @param keyLength The length of the key (in bits)
     * @param value The char array to compute the Hi function
     * @param salt The salt
     * @param iterations The number of iterations
     * @return The bytes of the computed Hi value
     */
    public static byte[] hi(
        SecretKeyFactory secretKeyFactory, int keyLength, char[] value, byte[] salt, int iterations
    ) {
        try {
            PBEKeySpec spec = new PBEKeySpec(value, salt, iterations, keyLength);
            SecretKey key = secretKeyFactory.generateSecret(spec);
            return key.getEncoded();
        } catch (InvalidKeySpecException e) {
            // Chain the cause so the underlying provider error is not lost.
            throw new RuntimeException("Platform error: unsupported PBEKeySpec", e);
        }
    }

    /**
     * Computes the HMAC of a given message.
     *
     * {@code
     * HMAC(key, str): Apply the HMAC keyed hash algorithm (defined in
     * [RFC2104]) using the octet string represented by "key" as the key
     * and the octet string "str" as the input string. The size of the
     * result is the hash result size for the hash function in use. For
     * example, it is 20 octets for SHA-1 (see [RFC3174]).
     * }
     *
     * @param secretKeySpec A key of the given algorithm
     * @param mac A MAC instance of the given algorithm
     * @param message The message to compute the HMAC
     * @return The bytes of the computed HMAC value
     */
    public static byte[] hmac(SecretKeySpec secretKeySpec, Mac mac, byte[] message) {
        try {
            mac.init(secretKeySpec);
        } catch (InvalidKeyException e) {
            // Chain the cause so the underlying provider error is not lost.
            throw new RuntimeException("Platform error: unsupported key for HMAC algorithm", e);
        }

        return mac.doFinal(message);
    }

    /**
     * Computes a byte-by-byte xor operation.
     *
     * {@code
     * XOR: Apply the exclusive-or operation to combine the octet string
     * on the left of this operator with the octet string on the right of
     * this operator. The length of the output and each of the two
     * inputs will be the same for this use.
     * }
     *
     * @param value1 The first operand; must not be null
     * @param value2 The second operand; must not be null and must have the same length as value1
     * @return A new array with the byte-wise xor of both operands
     * @throws IllegalArgumentException If either operand is null or the lengths differ
     */
    public static byte[] xor(byte[] value1, byte[] value2) throws IllegalArgumentException {
        checkNotNull(value1, "value1");
        checkNotNull(value2, "value2");
        checkArgument(value1.length == value2.length, "Both values must have the same length");

        byte[] result = new byte[value1.length];
        for (int i = 0; i < value1.length; i++) {
            result[i] = (byte) (value1[i] ^ value2[i]);
        }

        return result;
    }
}
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.util; + + +/** + * Simple methods similar to Precondition class. Avoid importing full library. + */ +public class Preconditions { + + public Preconditions() { + } + + /** + * Checks that the argument is not null. + * @param value The value to be checked + * @param valueName The name of the value that is checked in the method + * @param The type of the value + * @return The same value passed as argument + * @throws IllegalArgumentException If value is null + */ + public static T checkNotNull(T value, String valueName) throws IllegalArgumentException { + if(null == value) { + throw new IllegalArgumentException("Null value for '" + valueName + "'"); + } + + return value; + } + + /** + * Checks that the String is not null and not empty + * @param value The String to check + * @param valueName The name of the value that is checked in the method + * @return The same String passed as argument + * @throws IllegalArgumentException If value is null or empty + */ + public static String checkNotEmpty(String value, String valueName) throws IllegalArgumentException { + if(checkNotNull(value, valueName).isEmpty()) { + throw new IllegalArgumentException("Empty string '" + valueName + "'"); + } + + return value; + } + + /** + * Checks that the argument is valid, based in a check boolean condition. 
+ * @param check The boolean check + * @param valueName The name of the value that is checked in the method + * @throws IllegalArgumentException + */ + public static void checkArgument(boolean check, String valueName) throws IllegalArgumentException { + if(! check) { + throw new IllegalArgumentException("Argument '" + valueName + "' is not valid"); + } + } + + /** + * Checks that the integer argument is positive. + * @param value The value to be checked + * @param valueName The name of the value that is checked in the method + * @return The same value passed as argument + * @throws IllegalArgumentException If value is null + */ + public static int gt0(int value, String valueName) throws IllegalArgumentException { + if(value <= 0) { + throw new IllegalArgumentException("'" + valueName + "' must be positive"); + } + + return value; + } +} diff --git a/scram-common/src/main/java/com/ongres/scram/common/util/StringWritable.java b/scram-common/src/main/java/com/ongres/scram/common/util/StringWritable.java new file mode 100644 index 0000000..25d9428 --- /dev/null +++ b/scram-common/src/main/java/com/ongres/scram/common/util/StringWritable.java @@ -0,0 +1,37 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
/*
 * Copyright 2017, OnGres.
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
 * following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
 * following disclaimer in the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

package com.ongres.scram.common.util;

/**
 * Interface to denote classes which can write their representation to a StringBuffer.
 */
public interface StringWritable {

    /**
     * Writes the class information to the given StringBuffer.
     *
     * @param sb where to write the data
     * @return the same StringBuffer, for chaining
     */
    StringBuffer writeTo(StringBuffer sb);
}
/*
 * Copyright 2017, OnGres.
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
 * following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
 * following disclaimer in the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

package com.ongres.scram.common.util;

import java.util.Arrays;

import static com.ongres.scram.common.util.Preconditions.checkNotNull;

/**
 * Helper class to generate Comma Separated Values of {@link StringWritable}s.
 */
public class StringWritableCsv {

    public StringWritableCsv() {
    }

    /** Writes a single entry, skipping nulls (a null entry produces no output). */
    private static void writeStringWritableToStringBuffer(StringWritable value, StringBuffer sb) {
        if (value != null) {
            value.writeTo(sb);
        }
    }

    /**
     * Write a sequence of {@link StringWritableCsv}s to a StringBuffer.
     * Null {@link StringWritable}s are not printed, but the separator is still used.
     * The separator is a comma (',').
     *
     * @param sb The sb to write to
     * @param values Zero or more attribute-value pairs to write
     * @return The same sb, with data filled in (if any)
     * @throws IllegalArgumentException If sb is null
     */
    public static StringBuffer writeTo(StringBuffer sb, StringWritable... values) throws IllegalArgumentException {
        checkNotNull(sb, "sb");
        if (values == null || values.length == 0) {
            return sb;
        }

        // First entry has no leading separator; every following entry is preceded by ','.
        writeStringWritableToStringBuffer(values[0], sb);
        for (int idx = 1; idx < values.length; idx++) {
            sb.append(',');
            writeStringWritableToStringBuffer(values[idx], sb);
        }

        return sb;
    }

    /**
     * Parse a String with a {@link StringWritableCsv} into its composing Strings
     * represented as Strings. No validation is performed on the individual attribute-values returned.
     *
     * @param value The String with the set of attribute-values
     * @param n Number of entries to return (entries will be null if there were not enough). 0 means unlimited
     * @param offset How many entries to skip before starting to return them
     * @return An array of Strings which represent the individual attribute-values
     * @throws IllegalArgumentException If value is null or either n or offset are negative
     */
    public static String[] parseFrom(String value, int n, int offset) throws IllegalArgumentException {
        checkNotNull(value, "value");
        if (n < 0 || offset < 0) {
            throw new IllegalArgumentException("Limit and offset have to be >= 0");
        }

        if (value.isEmpty()) {
            return new String[0];
        }

        String[] entries = value.split(",");
        if (entries.length < offset) {
            throw new IllegalArgumentException("Not enough items for the given offset");
        }

        // copyOfRange pads with nulls when fewer than n entries are available.
        int end = (n == 0 ? entries.length : n) + offset;
        return Arrays.copyOfRange(entries, offset, end);
    }

    /**
     * Parse a String with a {@link StringWritableCsv} into its composing Strings
     * represented as Strings. No validation is performed on the individual attribute-values returned.
     * Elements are returned starting from the first available attribute-value.
     *
     * @param value The String with the set of attribute-values
     * @param n Number of entries to return (entries will be null if there were not enough). 0 means unlimited
     * @return An array of Strings which represent the individual attribute-values
     * @throws IllegalArgumentException If value is null or n is negative
     */
    public static String[] parseFrom(String value, int n) throws IllegalArgumentException {
        return parseFrom(value, n, 0);
    }

    /**
     * Parse a String with a {@link StringWritableCsv} into its composing Strings
     * represented as Strings. No validation is performed on the individual attribute-values returned.
     * All the available attribute-values will be returned.
     *
     * @param value The String with the set of attribute-values
     * @return An array of Strings which represent the individual attribute-values
     * @throws IllegalArgumentException If value is null
     */
    public static String[] parseFrom(String value) throws IllegalArgumentException {
        return parseFrom(value, 0, 0);
    }
}
/**
 * Utility methods for US-ASCII strings.
 */
public class UsAsciiUtils {

    public UsAsciiUtils() {
    }

    /**
     * Removes non-printable characters from the US-ASCII String.
     *
     * <p>"Printable" here means any character strictly greater than space (0x20):
     * all control characters (0x00-0x1F) AND the space character itself are dropped.
     * NOTE(review): dropping space also matches RFC 5802's "printable" for nonces
     * (%x21-2B / %x2D-7E), though commas are kept here — presumed intentional;
     * confirm against callers.
     *
     * @param value The original String
     * @return The possibly modified String, without non-printable US-ASCII characters.
     *         When every character is already printable, the original instance is
     *         returned unchanged (no copy is made).
     * @throws IllegalArgumentException If the String is null or contains non US-ASCII characters.
     */
    public static String toPrintable(String value) throws IllegalArgumentException {
        if (value == null) {
            throw new IllegalArgumentException("value cannot be null");
        }

        char[] printable = new char[value.length()];
        int i = 0;
        for (char chr : value.toCharArray()) {
            // char is an unsigned 16-bit type, so the original "c < 0" comparison
            // was dead code and has been removed; only the upper bound matters.
            if (chr >= 127) {
                throw new IllegalArgumentException("value contains character '" + chr + "' which is non US-ASCII");
            } else if (chr > 32) {
                printable[i++] = chr;
            }
        }

        return i == value.length() ? value : new String(printable, 0, i);
    }
}
/**
 * Test fixture holding the complete SCRAM-SHA-1 example exchange from
 * RFC 5802, Section 5, as compile-time constants. Later constants are
 * derived from earlier ones exactly as the protocol concatenates them.
 */
public class RfcExampleSha1 {
    // Credentials used throughout the example exchange.
    public static final String USER = "user";
    public static final String PASSWORD = "pencil";

    // client-first-message: gs2-header "n,," followed by the bare message.
    public static final String CLIENT_NONCE = "fyko+d2lbbFgONRv9qkxdawL";
    public static final String CLIENT_FIRST_MESSAGE_WITHOUT_GS2_HEADER = "n=" + USER + ",r=" + CLIENT_NONCE;
    public static final String CLIENT_FIRST_MESSAGE = "n,," + CLIENT_FIRST_MESSAGE_WITHOUT_GS2_HEADER;

    // server-first-message: combined nonce, salt and iteration count.
    public static final String SERVER_SALT = "QSXCR+Q6sek8bf92";
    public static final int SERVER_ITERATIONS = 4096;
    public static final String SERVER_NONCE = "3rfcNHYJY1ZVvWVs7j";
    public static final String FULL_NONCE = CLIENT_NONCE + SERVER_NONCE;
    public static final String SERVER_FIRST_MESSAGE =
            "r=" + FULL_NONCE + ",s=" + SERVER_SALT + ",i=" + SERVER_ITERATIONS;

    // client-final-message: channel binding ("n,," base64-encoded as "biws"),
    // nonce echo, auth message and proof.
    public static final String GS2_HEADER_BASE64 = "biws";
    public static final String CLIENT_FINAL_MESSAGE_WITHOUT_PROOF =
            "c=" + GS2_HEADER_BASE64 + ",r=" + FULL_NONCE;
    public static final String AUTH_MESSAGE =
            CLIENT_FIRST_MESSAGE_WITHOUT_GS2_HEADER + ","
            + SERVER_FIRST_MESSAGE + ","
            + CLIENT_FINAL_MESSAGE_WITHOUT_PROOF;
    public static final String CLIENT_FINAL_MESSAGE_PROOF = "v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=";
    public static final String CLIENT_FINAL_MESSAGE =
            CLIENT_FINAL_MESSAGE_WITHOUT_PROOF + ",p=" + CLIENT_FINAL_MESSAGE_PROOF;

    // server-final-message: the server signature verifier.
    public static final String SERVER_FINAL_MESSAGE = "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=";
}
/**
 * Test fixture holding the complete SCRAM-SHA-256 example exchange from
 * RFC 7677, Section 3, as compile-time constants. Mirrors the structure of
 * {@code RfcExampleSha1}, with SHA-256 salts, proofs and signatures.
 */
public class RfcExampleSha256 {
    // Credentials used throughout the example exchange.
    public static final String USER = "user";
    public static final String PASSWORD = "pencil";

    // client-first-message: gs2-header "n,," followed by the bare message.
    public static final String CLIENT_NONCE = "rOprNGfwEbeRWgbNEkqO";
    public static final String CLIENT_FIRST_MESSAGE_WITHOUT_GS2_HEADER = "n=" + USER + ",r=" + CLIENT_NONCE;
    public static final String CLIENT_FIRST_MESSAGE = "n,," + CLIENT_FIRST_MESSAGE_WITHOUT_GS2_HEADER;

    // server-first-message: combined nonce, salt and iteration count.
    public static final String SERVER_SALT = "W22ZaJ0SNY7soEsUEjb6gQ==";
    public static final int SERVER_ITERATIONS = 4096;
    public static final String SERVER_NONCE = "%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0";
    public static final String FULL_NONCE = CLIENT_NONCE + SERVER_NONCE;
    public static final String SERVER_FIRST_MESSAGE =
            "r=" + FULL_NONCE + ",s=" + SERVER_SALT + ",i=" + SERVER_ITERATIONS;

    // client-final-message: channel binding, nonce echo, auth message and proof.
    public static final String GS2_HEADER_BASE64 = "biws";
    public static final String CLIENT_FINAL_MESSAGE_WITHOUT_PROOF =
            "c=" + GS2_HEADER_BASE64 + ",r=" + FULL_NONCE;
    public static final String AUTH_MESSAGE =
            CLIENT_FIRST_MESSAGE_WITHOUT_GS2_HEADER + ","
            + SERVER_FIRST_MESSAGE + ","
            + CLIENT_FINAL_MESSAGE_WITHOUT_PROOF;
    public static final String CLIENT_FINAL_MESSAGE_PROOF = "dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ=";
    public static final String CLIENT_FINAL_MESSAGE =
            CLIENT_FINAL_MESSAGE_WITHOUT_PROOF + ",p=" + CLIENT_FINAL_MESSAGE_PROOF;

    // server-final-message: the server signature verifier.
    public static final String SERVER_FINAL_MESSAGE = "v=6rriTRBi23WpRR/wtup+mMhUZUn/dB5nLTJRsjl95G4=";
}
+ * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 */


package com.ongres.scram.common;


import com.ongres.scram.common.exception.ScramParseException;
import com.ongres.scram.common.message.ServerFinalMessage;
import org.junit.Test;

import static com.ongres.scram.common.RfcExampleSha1.*;
import static com.ongres.scram.common.ScramAttributes.CLIENT_PROOF;
import static com.ongres.scram.common.ScramAttributes.USERNAME;
import static org.junit.Assert.*;


/**
 * Tests for {@code ScramAttributeValue}: construction and parsing of SCRAM
 * "X=value" attribute-value pairs, using the RFC 5802 example fields as data.
 */
public class ScramAttributeValueTest {
    @Test
    public void constructorDoesNotAllowNullValue() {
        // A null value must be rejected at construction time.
        try {
            assertNotNull(new ScramAttributeValue(USERNAME, null));
        } catch(IllegalArgumentException e) {
            return;
        }

        fail("A null value must throw an IllegalArgumentException");
    }

    @Test
    public void parseIllegalValuesStructure() {
        // Structurally invalid pairs: null, empty, missing '=', bare attribute,
        // empty value, and ',' used where '=' is required.
        String[] values = new String[] {
            null, "", "asdf", "asdf=a", CLIENT_PROOF.getChar() + "=", CLIENT_PROOF.getChar() + ",a"
        };
        int n = 0;
        for(String value : values) {
            try {
                assertNotNull(ScramAttributeValue.parse(value));
            } catch(ScramParseException e) {
                n++;
            }
        }

        // Every invalid input above must have thrown ScramParseException.
        assertEquals("Not every illegal value thrown ScramParseException", values.length, n);
    }

    @Test
    public void parseIllegalValuesInvalidSCRAMAttibute() {
        // SCRAM allows for extensions. If a new attribute is supported and its value has been used below,
        // test will fail and will need to be fixed
        String[] values = new String[] { "z=asdfasdf", "!=value" };

        int n = 0;
        for(String value : values) {
            try {
                assertNotNull(ScramAttributeValue.parse(value));
            } catch(ScramParseException e) {
                n++;
            }
        }

        assertEquals("Not every illegal value thrown ScramParseException", values.length, n);
    }

    @Test
    public void parseLegalValues() throws ScramParseException {
        // Well-formed attribute-values, covering every field used in the
        // RFC 5802 example exchange plus the raw server-final message.
        String[] legalValues = new String[] {
            CLIENT_PROOF.getChar() + "=" + "proof",
            USERNAME.getChar() + "=" + "username",
            "n=" + USER,
            "r=" + CLIENT_NONCE,
            "r=" + FULL_NONCE,
            "s=" + SERVER_SALT,
            "i=" + SERVER_ITERATIONS,
            "c=" + GS2_HEADER_BASE64,
            "p=" + CLIENT_FINAL_MESSAGE_PROOF,
            SERVER_FINAL_MESSAGE,
        };
        for(String value : legalValues) {
            assertNotNull(ScramAttributeValue.parse(value));
        }

        // Test all possible error messages
        for(ServerFinalMessage.Error e : ServerFinalMessage.Error.values()) {
            assertNotNull(ScramAttributeValue.parse("e=" + e.getErrorMessage()));
        }
    }
}
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


package com.ongres.scram.common;


import com.ongres.scram.common.bouncycastle.base64.Base64;
import com.ongres.scram.common.stringprep.StringPreparations;
import org.junit.Test;

import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;


/**
 * Tests for {@code ScramFunctions}, checking every step of the SCRAM key
 * derivation chain (saltedPassword -> clientKey/serverKey -> storedKey ->
 * signatures -> proof) against the expected values for the RFC 5802 (SHA-1)
 * and RFC 7677 (SHA-256) example exchanges.
 */
public class ScramFunctionsTest {
    // Compares a computed byte[] against a base64-encoded expected value.
    private void assertBytesEqualsBase64(String expected, byte[] actual) {
        assertArrayEquals(Base64.decode(expected), actual);
    }

    @Test
    public void hmac() throws UnsupportedEncodingException {
        // HMAC test vectors (RFC 2202-style "quick brown fox" message, key "key").
        String message = "The quick brown fox jumps over the lazy dog";
        byte[] key = "key".getBytes(StandardCharsets.UTF_8);

        assertBytesEqualsBase64(
            "3nybhbi3iqa8ino29wqQcBydtNk=",
            ScramFunctions.hmac(ScramMechanisms.SCRAM_SHA_1, message.getBytes(StandardCharsets.US_ASCII), key)
        );
        assertBytesEqualsBase64(
            "97yD9DBThCSxMpjmqm+xQ+9NWaFJRhdZl0edvC0aPNg=",
            ScramFunctions.hmac(ScramMechanisms.SCRAM_SHA_256, message.getBytes(StandardCharsets.US_ASCII), key)
        );
    }

    // SaltedPassword for the RFC 5802 SHA-1 example ("pencil", example salt, 4096 iterations).
    private byte[] generateSaltedPassword() {
        return ScramFunctions.saltedPassword(
            ScramMechanisms.SCRAM_SHA_1, StringPreparations.NO_PREPARATION, "pencil",
            Base64.decode("QSXCR+Q6sek8bf92"), 4096
        );
    }

    // SaltedPassword for the RFC 7677 SHA-256 example.
    private byte[] generateSaltedPasswordSha256() {
        return ScramFunctions.saltedPassword(
            ScramMechanisms.SCRAM_SHA_256, StringPreparations.NO_PREPARATION, "pencil",
            Base64.decode("W22ZaJ0SNY7soEsUEjb6gQ=="), 4096
        );
    }

    @Test
    public void saltedPassword() {
        assertBytesEqualsBase64("HZbuOlKbWl+eR8AfIposuKbhX30=", generateSaltedPassword());
    }

    @Test
    public void saltedPasswordWithSaslPrep() {
        // SASLprep must normalize equivalent inputs (Roman numeral IX, ideographic
        // space, combining grave, soft hyphen) to the same salted password.
        assertBytesEqualsBase64("YniLes+b8WFMvBhtSACZyyvxeCc=", ScramFunctions.saltedPassword(
            ScramMechanisms.SCRAM_SHA_1, StringPreparations.SASL_PREPARATION, "\u2168\u3000a\u0300",
            Base64.decode("0BojBCBE6P2/N4bQ"), 6400
        ));
        assertBytesEqualsBase64("YniLes+b8WFMvBhtSACZyyvxeCc=", ScramFunctions.saltedPassword(
            ScramMechanisms.SCRAM_SHA_1, StringPreparations.SASL_PREPARATION, "\u00ADIX \u00E0",
            Base64.decode("0BojBCBE6P2/N4bQ"), 6400
        ));
        assertBytesEqualsBase64("YniLes+b8WFMvBhtSACZyyvxeCc=", ScramFunctions.saltedPassword(
            ScramMechanisms.SCRAM_SHA_1, StringPreparations.SASL_PREPARATION, "IX \u00E0",
            Base64.decode("0BojBCBE6P2/N4bQ"), 6400
        ));
        assertBytesEqualsBase64("HZbuOlKbWl+eR8AfIposuKbhX30=", ScramFunctions.saltedPassword(
            ScramMechanisms.SCRAM_SHA_1, StringPreparations.SASL_PREPARATION, "\u0070enc\u1806il",
            Base64.decode("QSXCR+Q6sek8bf92"), 4096
        ));
        // A prohibited character (BEL, U+0007) must be rejected by SASLprep.
        try {
            ScramFunctions.saltedPassword(
                ScramMechanisms.SCRAM_SHA_1, StringPreparations.SASL_PREPARATION, "\u2168\u3000a\u0300\u0007",
                Base64.decode("QSXCR+Q6sek8bf92"), 6400);
            fail();
        } catch (IllegalArgumentException e) {
            assertEquals("Prohibited character \u0007", e.getMessage());
        }
    }

    @Test
    public void saltedPasswordSha256() {
        assertBytesEqualsBase64("xKSVEDI6tPlSysH6mUQZOeeOp01r6B3fcJbodRPcYV0=", generateSaltedPasswordSha256());
    }

    // ClientKey := HMAC(SaltedPassword, "Client Key")
    private byte[] generateClientKey() {
        return ScramFunctions.clientKey(ScramMechanisms.SCRAM_SHA_1, generateSaltedPassword());
    }

    private byte[] generateClientKeySha256() {
        return ScramFunctions.clientKey(ScramMechanisms.SCRAM_SHA_256, generateSaltedPasswordSha256());
    }

    @Test
    public void clientKey() {
        assertBytesEqualsBase64("4jTEe/bDZpbdbYUrmaqiuiZVVyg=", generateClientKey());
    }

    @Test
    public void clientKeySha256() {
        assertBytesEqualsBase64("pg/JI9Z+hkSpLRa5btpe9GVrDHJcSEN0viVTVXaZbos=", generateClientKeySha256());
    }

    // StoredKey := H(ClientKey)
    private byte[] generateStoredKey() {
        return ScramFunctions.storedKey(ScramMechanisms.SCRAM_SHA_1, generateClientKey());
    }

    private byte[] generateStoredKeySha256() {
        return ScramFunctions.storedKey(ScramMechanisms.SCRAM_SHA_256, generateClientKeySha256());
    }

    @Test
    public void storedKey() {
        assertBytesEqualsBase64("6dlGYMOdZcOPutkcNY8U2g7vK9Y=", generateStoredKey());
    }

    @Test
    public void storedKeySha256() {
        assertBytesEqualsBase64("WG5d8oPm3OtcPnkdi4Uo7BkeZkBFzpcXkuLmtbsT4qY=", generateStoredKeySha256());
    }

    // ServerKey := HMAC(SaltedPassword, "Server Key")
    private byte[] generateServerKey() {
        return ScramFunctions.serverKey(ScramMechanisms.SCRAM_SHA_1, generateSaltedPassword());
    }

    private byte[] generateServerKeySha256() {
        return ScramFunctions.serverKey(ScramMechanisms.SCRAM_SHA_256, generateSaltedPasswordSha256());
    }

    @Test
    public void serverKey() {
        assertBytesEqualsBase64("D+CSWLOshSulAsxiupA+qs2/fTE=", generateServerKey());
    }

    @Test
    public void serverKeySha256() {
        assertBytesEqualsBase64("wfPLwcE6nTWhTAmQ7tl2KeoiWGPlZqQxSrmfPwDl2dU=", generateServerKeySha256());
    }

    // ClientSignature := HMAC(StoredKey, AuthMessage)
    private byte[] generateClientSignature() {
        return ScramFunctions.clientSignature(ScramMechanisms.SCRAM_SHA_1, generateStoredKey(), com.ongres.scram.common.RfcExampleSha1.AUTH_MESSAGE);
    }

    private byte[] generateClientSignatureSha256() {
        return ScramFunctions.clientSignature(ScramMechanisms.SCRAM_SHA_256, generateStoredKeySha256(), com.ongres.scram.common.RfcExampleSha256.AUTH_MESSAGE);
    }

    @Test
    public void clientSignature() {
        assertBytesEqualsBase64("XXE4xIawv6vfSePi2ovW5cedthM=", generateClientSignature());
    }

    @Test
    public void clientSignatureSha256() {
        assertBytesEqualsBase64("0nMSRnwopAqKfwXHPA3jPrPL+0qDeDtYFEzxmsa+G98=", generateClientSignatureSha256());
    }

    // ClientProof := ClientKey XOR ClientSignature
    private byte[] generateClientProof() {
        return ScramFunctions.clientProof(generateClientKey(), generateClientSignature());
    }

    private byte[] generateClientProofSha256() {
        return ScramFunctions.clientProof(generateClientKeySha256(), generateClientSignatureSha256());
    }

    @Test
    public void clientProof() {
        assertBytesEqualsBase64("v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=", generateClientProof());
    }

    @Test
    public void clientProofSha256() {
        assertBytesEqualsBase64("dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ=", generateClientProofSha256());
    }

    // ServerSignature := HMAC(ServerKey, AuthMessage)
    private byte[] generateServerSignature() {
        return ScramFunctions.serverSignature(ScramMechanisms.SCRAM_SHA_1, generateServerKey(), com.ongres.scram.common.RfcExampleSha1.AUTH_MESSAGE);
    }

    private byte[] generateServerSignatureSha256() {
        return ScramFunctions.serverSignature(ScramMechanisms.SCRAM_SHA_256, generateServerKeySha256(), com.ongres.scram.common.RfcExampleSha256.AUTH_MESSAGE);
    }

    @Test
    public void serverSignature() {
        assertBytesEqualsBase64("rmF9pqV8S7suAoZWja4dJRkFsKQ=", generateServerSignature());
    }

    @Test
    public void serverSignatureSha256() {
        assertBytesEqualsBase64("6rriTRBi23WpRR/wtup+mMhUZUn/dB5nLTJRsjl95G4=", generateServerSignatureSha256());
    }

    @Test
    public void verifyClientProof() {
        assertTrue(
            ScramFunctions.verifyClientProof(
                ScramMechanisms.SCRAM_SHA_1, generateClientProof(), generateStoredKey(), com.ongres.scram.common.RfcExampleSha1.AUTH_MESSAGE
            )
        );
    }

    @Test
    public void verifyClientProofSha256() {
        assertTrue(
            ScramFunctions.verifyClientProof(
                ScramMechanisms.SCRAM_SHA_256, generateClientProofSha256(), generateStoredKeySha256(), com.ongres.scram.common.RfcExampleSha256.AUTH_MESSAGE
            )
        );
    }

    @Test
    public void verifyServerSignature() {
        assertTrue(
            ScramFunctions.verifyServerSignature(
                ScramMechanisms.SCRAM_SHA_1, generateServerKey(), com.ongres.scram.common.RfcExampleSha1.AUTH_MESSAGE, generateServerSignature()
            )
        );
    }

    @Test
    public void verifyServerSignatureSha256() {
        assertTrue(
            ScramFunctions.verifyServerSignature(
                ScramMechanisms.SCRAM_SHA_256, generateServerKeySha256(), com.ongres.scram.common.RfcExampleSha256.AUTH_MESSAGE, generateServerSignatureSha256()
            )
        );
    }
}
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common; + + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import org.junit.Test; + + +public class ScramMechanismsTest { + @Test + public void TestHashSupportedByJVM() { + byte[] digest; + for(ScramMechanisms scramMechanism : ScramMechanisms.values()) { + digest = scramMechanism.digest(new byte[0]); + assertNotNull("got a null digest", digest); + } + } + + @Test + public void TestHMACSupportedByJVM() { + byte[] hmac; + for(ScramMechanisms scramMechanism : ScramMechanisms.values()) { + hmac = scramMechanism.hmac(new byte[] { 0 }, new byte[0]); + assertNotNull("got a null HMAC", hmac); + } + } + + private interface Predicate { + boolean test(T t); + } + + + private void testNames(String[] names, Predicate predicate) { + int count = 0; + for (String name : names) { + if (predicate.test(ScramMechanisms.byName(name))) { + count++; + } + } + assertEquals( + names.length, + count + ); + } + + @Test + public void byNameValid() { + testNames( + new String[] { "SCRAM-SHA-1", "SCRAM-SHA-1-PLUS", "SCRAM-SHA-256", "SCRAM-SHA-256-PLUS" }, + new Predicate() { + @Override + public boolean test(ScramMechanisms scramMechanisms) { + return scramMechanisms != null; + } + } + ); + } + + @Test + public void byNameInvalid() { + testNames( + new String[] { "SCRAM-SHA", "SHA-1-PLUS", "SCRAM-SHA-256-", "SCRAM-SHA-256-PLUS!" 
}, + new Predicate() { + @Override + public boolean test(ScramMechanisms scramMechanisms) { + return scramMechanisms == null; + } + } + ); + } + + private void selectMatchingMechanismTest(ScramMechanisms scramMechanisms, boolean channelBinding, String... names) { + assertEquals( + scramMechanisms, ScramMechanisms.selectMatchingMechanism(channelBinding, names) + ); + } + + @Test + public void selectMatchingMechanism() { + selectMatchingMechanismTest( + ScramMechanisms.SCRAM_SHA_1, false, + "SCRAM-SHA-1" + ); + selectMatchingMechanismTest( + ScramMechanisms.SCRAM_SHA_256_PLUS, true, + "SCRAM-SHA-256-PLUS" + ); + selectMatchingMechanismTest( + ScramMechanisms.SCRAM_SHA_256, false, + "SCRAM-SHA-1", "SCRAM-SHA-256" + ); + selectMatchingMechanismTest( + ScramMechanisms.SCRAM_SHA_256, false, + "SCRAM-SHA-1", "SCRAM-SHA-256", "SCRAM-SHA-256-PLUS" + ); + selectMatchingMechanismTest( + ScramMechanisms.SCRAM_SHA_1_PLUS, true, + "SCRAM-SHA-1", "SCRAM-SHA-1-PLUS", "SCRAM-SHA-256" + ); + selectMatchingMechanismTest( + ScramMechanisms.SCRAM_SHA_256_PLUS, true, + "SCRAM-SHA-1", "SCRAM-SHA-1-PLUS", "SCRAM-SHA-256", "SCRAM-SHA-256-PLUS" + ); + selectMatchingMechanismTest( + null, true, + "SCRAM-SHA-1", "SCRAM-SHA-256" + ); + } +} diff --git a/scram-common/src/test/java/com/ongres/scram/common/ScramStringFormattingTest.java b/scram-common/src/test/java/com/ongres/scram/common/ScramStringFormattingTest.java new file mode 100644 index 0000000..9b15d3b --- /dev/null +++ b/scram-common/src/test/java/com/ongres/scram/common/ScramStringFormattingTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
 * following disclaimer in the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


package com.ongres.scram.common;


import org.junit.Test;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;


/**
 * Tests for {@code ScramStringFormatting} saslname escaping: ',' is escaped
 * as "=2C" and '=' as "=3D" (RFC 5802), and the reverse un-escaping rejects
 * any other use of '='.
 */
public class ScramStringFormattingTest {
    // Round-trip unchanged: no ',' or '=' present.
    private static final String[] VALUES_NO_CHARS_TO_BE_ESCAPED = new String[] { "asdf", "''--%%21", " ttt???" };
    // Inputs containing ',' and/or '=' ...
    private static final String[] VALUES_TO_BE_ESCAPED = new String[] {
        ",", "=", "a,b", "===", "a=", ",=,", "=2C", "=3D"
    };
    // ... and their expected escaped forms, index-aligned with the above.
    private static final String[] ESCAPED_VALUES = new String[] {
        "=2C", "=3D", "a=2Cb", "=3D=3D=3D", "a=3D", "=2C=3D=2C", "=3D2C", "=3D3D"
    };
    // Strings that are not valid saslnames (bare '=', raw ',', '=' not followed by 2C/3D).
    private static final String[] INVALID_SASL_NAMES = new String[] { "=", "as,df", "a=b", " ttt???=2D" };

    @Test
    public void toSaslNameNoCharactersToBeEscaped() {
        for(String s : VALUES_NO_CHARS_TO_BE_ESCAPED) {
            assertEquals(s, ScramStringFormatting.toSaslName(s));
        }
    }

    @Test
    public void toSaslNameWithCharactersToBeEscaped() {
        for(int i = 0; i < VALUES_TO_BE_ESCAPED.length; i++) {
            assertEquals(ESCAPED_VALUES[i], ScramStringFormatting.toSaslName(VALUES_TO_BE_ESCAPED[i]));
        }
    }

    @Test
    public void fromSaslNameNoCharactersToBeEscaped() {
        for(String s : VALUES_NO_CHARS_TO_BE_ESCAPED) {
            assertEquals(s, ScramStringFormatting.fromSaslName(s));
        }
    }

    @Test
    public void fromSaslNameWithCharactersToBeUnescaped() {
        // Un-escaping is the exact inverse of the escaping table above.
        for(int i = 0; i < ESCAPED_VALUES.length; i++) {
            assertEquals(VALUES_TO_BE_ESCAPED[i], ScramStringFormatting.fromSaslName(ESCAPED_VALUES[i]));
        }
    }

    @Test
    public void fromSaslNameWithInvalidCharacters() {
        // Each invalid saslname must raise IllegalArgumentException.
        int n = 0;
        for(String s : INVALID_SASL_NAMES) {
            try {
                assertEquals(s, ScramStringFormatting.fromSaslName(s));
            } catch (IllegalArgumentException e) {
                n++;
            }
        }

        assertTrue("Not all values produced IllegalArgumentException", n == INVALID_SASL_NAMES.length);
    }
}
 *
 * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
 * following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
 * following disclaimer in the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


package com.ongres.scram.common.gssapi;


import org.junit.Test;

import static org.junit.Assert.*;


/**
 * Tests for {@code Gs2AttributeValue}: GS2 header attribute-value pairs,
 * where (unlike SCRAM attributes) a null value is permitted for flag-style
 * attributes such as channel-binding indicators.
 */
public class Gs2AttributeValueTest {
    @Test
    public void constructorAllowsNullValue() {
        // GS2 flag attributes carry no value, so null must be accepted.
        try {
            assertNotNull(new Gs2AttributeValue(Gs2Attributes.CHANNEL_BINDING_REQUIRED, null));
        } catch(IllegalArgumentException e) {
            fail("A null value is valid and cannot throw an IllegalArgumentException");
        }
    }

    @Test
    public void parseNullValue() {
        // Parsing null yields null rather than throwing.
        assertNull(Gs2AttributeValue.parse(null));
    }

    @Test
    public void parseIllegalValuesStructure() {
        // Structurally invalid: empty, two bare chars, long garbage, dangling '='.
        String[] values = new String[] { "", "as", "asdfjkl", Gs2Attributes.CHANNEL_BINDING_REQUIRED.getChar() + "=" };
        int n = 0;
        for(String value : values) {
            try {
                assertNotNull(Gs2AttributeValue.parse(value));
            } catch(IllegalArgumentException e) {
                n++;
            }
        }

        assertEquals("Not every illegal value thrown IllegalArgumentException", values.length, n);
    }

    @Test
    public void parseIllegalValuesInvalidGS2Attibute() {
        // 'z' and 'i' are not registered GS2 attribute characters.
        String[] values = new String[] { "z=asdfasdf", "i=value" };

        int n = 0;
        for(String value : values) {
            try {
                assertNotNull(Gs2AttributeValue.parse(value));
            } catch(IllegalArgumentException e) {
                n++;
            }
        }

        assertEquals("Not every illegal value thrown IllegalArgumentException", values.length, n);
    }

    @Test
    public void parseLegalValues() {
        // Valid forms: bare flags "n"/"y", and valued "p=..."/"a=..." attributes.
        String[] values = new String[] { "n", "y", "p=value", "a=authzid" };
        for(String value : values) {
            assertNotNull(Gs2AttributeValue.parse(value));
        }
    }
}
+ * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + + +package com.ongres.scram.common.gssapi; + + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + + +public class Gs2HeaderTest { + private static final String[] VALID_GS2HEADER_STRINGS = new String[] { + "n,", "y,", "n,a=blah", "p=cb,", "p=cb,a=b" + }; + private static final Gs2Header[] VALID_GS_2_HEADERS = new Gs2Header[] { + new Gs2Header(Gs2CbindFlag.CLIENT_NOT), + new Gs2Header(Gs2CbindFlag.CLIENT_YES_SERVER_NOT), + new Gs2Header(Gs2CbindFlag.CLIENT_NOT, null, "blah"), + new Gs2Header(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED, "cb"), + new Gs2Header(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED, "cb", "b") + }; + + private void assertGS2Header(String expected, Gs2Header gs2Header) { + assertEquals(expected, gs2Header.writeTo(new StringBuffer()).toString()); + } + + @Test + public void constructorValid() { + for(int i = 0; i < VALID_GS2HEADER_STRINGS.length; i++) { + assertGS2Header(VALID_GS2HEADER_STRINGS[i], VALID_GS_2_HEADERS[i]); + } + } + + @Test(expected = IllegalArgumentException.class) + public void constructorInvalid1() { + new Gs2Header(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED, null); + } + + @Test(expected = IllegalArgumentException.class) + public void constructorInvalid2() { + new Gs2Header(Gs2CbindFlag.CLIENT_NOT, "blah"); + } + + @Test(expected = IllegalArgumentException.class) + public void constructorInvalid3() { + new Gs2Header(Gs2CbindFlag.CLIENT_YES_SERVER_NOT, "blah"); + } + + @Test(expected = IllegalArgumentException.class) + public void constructorInvalid4() { + new Gs2Header(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED, null, "b"); + } + + @Test + public void parseFromInvalid() { + String[] invalids = new String[] { "Z,", "n,Z=blah", "p,", "n=a," }; + int n = 0; + for(String invalid : invalids) { + try { + Gs2Header.parseFrom(invalid); + System.out.println(invalid); + } catch (IllegalArgumentException e) { + n++; + } + } + + assertEquals(invalids.length, n); + } + + @Test + public void parseFromValid() { + for(int i = 0; i < 
VALID_GS2HEADER_STRINGS.length; i++) { + assertGS2Header( + VALID_GS_2_HEADERS[i].writeTo(new StringBuffer()).toString(), + Gs2Header.parseFrom(VALID_GS2HEADER_STRINGS[i]) + ); + } + } +} diff --git a/scram-common/src/test/java/com/ongres/scram/common/message/ClientFinalMessageTest.java b/scram-common/src/test/java/com/ongres/scram/common/message/ClientFinalMessageTest.java new file mode 100644 index 0000000..10271ec --- /dev/null +++ b/scram-common/src/test/java/com/ongres/scram/common/message/ClientFinalMessageTest.java @@ -0,0 +1,44 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + + +package com.ongres.scram.common.message; + + +import com.ongres.scram.common.RfcExampleSha1; +import com.ongres.scram.common.gssapi.Gs2CbindFlag; +import com.ongres.scram.common.gssapi.Gs2Header; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + + +public class ClientFinalMessageTest { + @Test + public void writeToWithoutProofValid() { + StringBuffer sb = ClientFinalMessage.writeToWithoutProof( + new Gs2Header(Gs2CbindFlag.CLIENT_NOT), null, RfcExampleSha1.FULL_NONCE + ); + + assertEquals(RfcExampleSha1.CLIENT_FINAL_MESSAGE_WITHOUT_PROOF, sb.toString()); + } +} diff --git a/scram-common/src/test/java/com/ongres/scram/common/message/ClientFirstMessageTest.java b/scram-common/src/test/java/com/ongres/scram/common/message/ClientFirstMessageTest.java new file mode 100644 index 0000000..7df7c01 --- /dev/null +++ b/scram-common/src/test/java/com/ongres/scram/common/message/ClientFirstMessageTest.java @@ -0,0 +1,137 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.message; + + +import com.ongres.scram.common.exception.ScramParseException; +import com.ongres.scram.common.gssapi.Gs2CbindFlag; +import org.junit.Test; + +import static com.ongres.scram.common.RfcExampleSha1.CLIENT_NONCE; +import static org.junit.Assert.*; + + +public class ClientFirstMessageTest { + + @Test(expected = IllegalArgumentException.class) + public void constructorTestInvalid1() { + assertNotNull(new ClientFirstMessage(null, "a", CLIENT_NONCE)); + } + + @Test(expected = IllegalArgumentException.class) + public void constructorTestInvalid2() { + assertNotNull( + new ClientFirstMessage(Gs2CbindFlag.CLIENT_NOT, null, "cbind", "a", CLIENT_NONCE) + ); + } + + @Test(expected = IllegalArgumentException.class) + public void constructorTestInvalid3() { + assertNotNull( + new ClientFirstMessage(Gs2CbindFlag.CLIENT_YES_SERVER_NOT, null, "cbind", "a", CLIENT_NONCE) + ); + } + + @Test(expected = IllegalArgumentException.class) + public void constructorTestInvalid4() { + assertNotNull(new ClientFirstMessage(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED, null, null, "a", CLIENT_NONCE)); + } + + @Test(expected = IllegalArgumentException.class) + public void constructorTestInvalid5() { + assertNotNull(new ClientFirstMessage(Gs2CbindFlag.CLIENT_NOT, "authzid", null, null, CLIENT_NONCE)); + } + + private void assertClientFirstMessage(String expected, ClientFirstMessage clientFirstMessage) { + 
assertEquals(expected, clientFirstMessage.writeTo(new StringBuffer()).toString()); + } + + @Test + public void writeToValidValues() { + assertClientFirstMessage( + "n,,n=user,r=" + CLIENT_NONCE, + new ClientFirstMessage("user", CLIENT_NONCE) + ); + assertClientFirstMessage( + "y,,n=user,r=" + CLIENT_NONCE, + new ClientFirstMessage(Gs2CbindFlag.CLIENT_YES_SERVER_NOT, null, null, "user", CLIENT_NONCE) + ); + assertClientFirstMessage( + "p=blah,,n=user,r=" + CLIENT_NONCE, + new ClientFirstMessage(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED, null, "blah", "user", CLIENT_NONCE) + ); + assertClientFirstMessage( + "p=blah,a=authzid,n=user,r=" + CLIENT_NONCE, + new ClientFirstMessage(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED, "authzid", "blah", "user", CLIENT_NONCE) + ); + } + + @Test + public void parseFromValidValues() throws ScramParseException { + ClientFirstMessage m1 = ClientFirstMessage.parseFrom("n,,n=user,r=" + CLIENT_NONCE); + assertTrue( + ! m1.isChannelBinding() && m1.getChannelBindingFlag() == Gs2CbindFlag.CLIENT_NOT + && null == m1.getAuthzid() && "user".equals(m1.getUser()) && CLIENT_NONCE.equals(m1.getNonce()) + ); + + ClientFirstMessage m2 = ClientFirstMessage.parseFrom("y,,n=user,r=" + CLIENT_NONCE); + assertTrue( + ! m2.isChannelBinding() && m2.getChannelBindingFlag() == Gs2CbindFlag.CLIENT_YES_SERVER_NOT + && null == m2.getAuthzid() && "user".equals(m2.getUser()) && CLIENT_NONCE.equals(m2.getNonce()) + ); + + ClientFirstMessage m3 = ClientFirstMessage.parseFrom("y,a=user2,n=user,r=" + CLIENT_NONCE); + assertTrue( + ! 
m3.isChannelBinding() && m3.getChannelBindingFlag() == Gs2CbindFlag.CLIENT_YES_SERVER_NOT + && null != m3.getAuthzid() && "user2".equals(m3.getAuthzid()) + && "user".equals(m3.getUser()) && CLIENT_NONCE.equals(m3.getNonce()) + ); + + ClientFirstMessage m4 = ClientFirstMessage.parseFrom("p=channel,a=user2,n=user,r=" + CLIENT_NONCE); + assertTrue( + m4.isChannelBinding() && m4.getChannelBindingFlag() == Gs2CbindFlag.CHANNEL_BINDING_REQUIRED + && null != m4.getChannelBindingName() && "channel".equals(m4.getChannelBindingName()) + && null != m4.getAuthzid() && "user2".equals(m4.getAuthzid()) + && "user".equals(m4.getUser()) && CLIENT_NONCE.equals(m4.getNonce()) + ); + } + + @Test + public void parseFromInvalidValues() { + String[] invalidValues = new String[] { + "n,,r=user,r=" + CLIENT_NONCE, "n,,z=user,r=" + CLIENT_NONCE, "n,,n=user", "n,", "n,,", "n,,n=user,r", "n,,n=user,r=" + }; + + int n = 0; + for(String s : invalidValues) { + try { + assertNotNull(ClientFirstMessage.parseFrom(s)); + } catch (ScramParseException e) { + n++; + } + } + + assertEquals(invalidValues.length, n); + } +} diff --git a/scram-common/src/test/java/com/ongres/scram/common/message/ServerFinalMessageTest.java b/scram-common/src/test/java/com/ongres/scram/common/message/ServerFinalMessageTest.java new file mode 100644 index 0000000..a476b4a --- /dev/null +++ b/scram-common/src/test/java/com/ongres/scram/common/message/ServerFinalMessageTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.message; + + +import com.ongres.scram.common.ScramAttributes; +import com.ongres.scram.common.ScramFunctions; +import com.ongres.scram.common.ScramMechanisms; +import com.ongres.scram.common.bouncycastle.base64.Base64; +import com.ongres.scram.common.exception.ScramParseException; +import com.ongres.scram.common.stringprep.StringPreparations; +import org.junit.Test; + +import static com.ongres.scram.common.RfcExampleSha1.*; +import static junit.framework.TestCase.assertFalse; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + + +public class ServerFinalMessageTest { + @Test + public void validConstructor() { + byte[] serverKey = ScramFunctions.serverKey( + ScramMechanisms.SCRAM_SHA_1, + StringPreparations.NO_PREPARATION, + PASSWORD, + Base64.decode(SERVER_SALT), + SERVER_ITERATIONS + ); + ServerFinalMessage serverFinalMessage1 = new ServerFinalMessage( + 
ScramFunctions.serverSignature(ScramMechanisms.SCRAM_SHA_1, serverKey, AUTH_MESSAGE) + ); + assertEquals(SERVER_FINAL_MESSAGE, serverFinalMessage1.toString()); + assertFalse(serverFinalMessage1.isError()); + + ServerFinalMessage serverFinalMessage2 = new ServerFinalMessage(ServerFinalMessage.Error.UNKNOWN_USER); + assertEquals(ScramAttributes.ERROR.getChar() + "=" + "unknown-user", serverFinalMessage2.toString()); + assertTrue(serverFinalMessage2.isError()); + } + + @Test + public void validParseFrom() throws ScramParseException { + ServerFinalMessage serverFinalMessage1 = ServerFinalMessage.parseFrom(SERVER_FINAL_MESSAGE); + assertEquals(SERVER_FINAL_MESSAGE, serverFinalMessage1.toString()); + assertFalse(serverFinalMessage1.isError()); + + ServerFinalMessage serverFinalMessage2 = ServerFinalMessage.parseFrom("e=channel-binding-not-supported"); + assertEquals("e=channel-binding-not-supported", serverFinalMessage2.toString()); + assertTrue(serverFinalMessage2.isError()); + assertTrue(serverFinalMessage2.getError() == ServerFinalMessage.Error.CHANNEL_BINDING_NOT_SUPPORTED); + } +} diff --git a/scram-common/src/test/java/com/ongres/scram/common/message/ServerFirstMessageTest.java b/scram-common/src/test/java/com/ongres/scram/common/message/ServerFirstMessageTest.java new file mode 100644 index 0000000..1967777 --- /dev/null +++ b/scram-common/src/test/java/com/ongres/scram/common/message/ServerFirstMessageTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.message; + + +import com.ongres.scram.common.exception.ScramParseException; +import org.junit.Test; + +import static com.ongres.scram.common.RfcExampleSha1.CLIENT_NONCE; +import static com.ongres.scram.common.RfcExampleSha1.SERVER_FIRST_MESSAGE; +import static org.junit.Assert.assertEquals; + + +public class ServerFirstMessageTest { + @Test + public void validConstructor() { + ServerFirstMessage serverFirstMessage = new ServerFirstMessage( + CLIENT_NONCE, + "3rfcNHYJY1ZVvWVs7j", + "QSXCR+Q6sek8bf92", + 4096 + ); + + assertEquals(SERVER_FIRST_MESSAGE, serverFirstMessage.toString()); + } + + @Test + public void validParseFrom() throws ScramParseException { + ServerFirstMessage serverFirstMessage = ServerFirstMessage.parseFrom(SERVER_FIRST_MESSAGE, CLIENT_NONCE); + + assertEquals(SERVER_FIRST_MESSAGE, serverFirstMessage.toString()); + } +} diff --git a/scram-common/src/test/java/com/ongres/scram/common/stringprep/SaslPrepTest.java b/scram-common/src/test/java/com/ongres/scram/common/stringprep/SaslPrepTest.java new file mode 100644 index 0000000..4d32f86 --- /dev/null +++ 
b/scram-common/src/test/java/com/ongres/scram/common/stringprep/SaslPrepTest.java @@ -0,0 +1,87 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package com.ongres.scram.common.stringprep; + +import com.ongres.saslprep.SaslPrep; +import com.ongres.stringprep.StringPrep; +import java.io.IOException; + +import org.junit.Assert; +import org.junit.Test; + +public class SaslPrepTest { + + @Test + public void rfc4013Examples() throws IOException { + // Taken from https://tools.ietf.org/html/rfc4013#section-3 + Assert.assertEquals("IX", SaslPrep.saslPrep("I\u00ADX", true)); + Assert.assertEquals("user", SaslPrep.saslPrep("user", true)); + Assert.assertEquals("USER", SaslPrep.saslPrep("USER", true)); + Assert.assertEquals("a", SaslPrep.saslPrep("\u00AA", true)); + Assert.assertEquals("IX", SaslPrep.saslPrep("\u2168", true)); + try { + SaslPrep.saslPrep("\u0007", true); + Assert.fail("Should throw IllegalArgumentException"); + } catch (IllegalArgumentException e) { + Assert.assertEquals("Prohibited character ", e.getMessage()); + } + try { + SaslPrep.saslPrep("\u0627\u0031", true); + Assert.fail("Should throw IllegalArgumentException"); + } catch (IllegalArgumentException e) { + Assert.assertEquals("The string contains any RandALCat character but a RandALCat character " + + "is not the first and the last characters", e.getMessage()); + } + } + + @Test + public void unassigned() throws IOException { + int unassignedCodepoint; + for (unassignedCodepoint = Character.MAX_CODE_POINT; + unassignedCodepoint >= Character.MIN_CODE_POINT; + unassignedCodepoint--) { + if (!Character.isDefined(unassignedCodepoint) && + !StringPrep.prohibitionAsciiControl(unassignedCodepoint) && + !StringPrep.prohibitionAsciiSpace(unassignedCodepoint) && + !StringPrep.prohibitionChangeDisplayProperties(unassignedCodepoint) && + !StringPrep.prohibitionInappropriateCanonicalRepresentation(unassignedCodepoint) && + !StringPrep.prohibitionInappropriatePlainText(unassignedCodepoint) && + !StringPrep.prohibitionNonAsciiControl(unassignedCodepoint) && + !StringPrep.prohibitionNonAsciiSpace(unassignedCodepoint) && 
!StringPrep.prohibitionNonCharacterCodePoints(unassignedCodepoint) && + !StringPrep.prohibitionPrivateUse(unassignedCodepoint) && + !StringPrep.prohibitionSurrogateCodes(unassignedCodepoint) && + !StringPrep.prohibitionTaggingCharacters(unassignedCodepoint)) { + break; + } + } + String withUnassignedChar = "abc"+new String(Character.toChars(unassignedCodepoint)); + //Assert.assertEquals(withUnassignedChar, saslPrepQuery(withUnassignedChar)); + try { + SaslPrep.saslPrep(withUnassignedChar, true); + Assert.fail("Should throw IllegalArgumentException"); + } catch (IllegalArgumentException e) { + Assert.assertEquals("Prohibited character 󯿽", e.getMessage()); + } + } +} \ No newline at end of file diff --git a/scram-common/src/test/java/com/ongres/scram/common/stringprep/StringPreparationTest.java b/scram-common/src/test/java/com/ongres/scram/common/stringprep/StringPreparationTest.java new file mode 100644 index 0000000..ed4c432 --- /dev/null +++ b/scram-common/src/test/java/com/ongres/scram/common/stringprep/StringPreparationTest.java @@ -0,0 +1,155 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package com.ongres.scram.common.stringprep; + + +import org.junit.Test; + +import java.util.Random; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + + +public class StringPreparationTest { + private static final String[] ONLY_NON_PRINTABLE_STRINGS = new String[] { " ", (char) 13 + "", (char) 13 + " " }; + + @Test + public void doNormalizeNullEmpty() { + String[] nullEmpty = new String[] { null, "" }; + int n = 0; + for(StringPreparation stringPreparation : StringPreparations.values()) { + for(String s : nullEmpty) { + try { + stringPreparation.normalize(s); + } catch (IllegalArgumentException e) { + n++; + } + } + } + + assertTrue( + "IllegalArgumentException not thrown for either null or empty input", + n == nullEmpty.length * StringPreparations.values().length + ); + } + + @Test + public void doNormalizeValidAsciiCases() { + // 200 usernames from http://jimpix.co.uk/words/random-username-list.asp + String[] validAsciiUsernames = new String[] { + "toastingxenotime", "infecttolerant", "cobblerjack", "zekedigital", "freshscarisdale", "lamwaylon", + "lagopodousmonkeys", "fanfarecheesy", "willowfinnegan", "canoeamoeba", "stinkeroddball", "terracecomet", + "cakebrazos", "headersidesaddle", "cloudultracrepidarian", "grimegastropub", "stallchilli", + "shawnapentagon", "chapeltarp", "rydbergninja", "differencegym", "europiummuscle", "swilledonce", + "defensivesyntaxis", "desktopredundant", 
"stakingsky", "goofywaiting", "boundsemm", "pipermonstrous", + "faintfrog", "riskinsist", "constantjunkie", "rejectbroth", "ceilbeau", "ponyjaialai", "burnishselfies", + "unamusedglenmore", "parmesanporcupine", "suteconcerto", "ribstony", "sassytwelve", "coursesnasturtium", + "singlecinders", "kinkben", "chiefpussface", "unknownivery", "robterra", "wearycubes", "bearcontent", + "aquifertrip", "insulinlick", "batterypeace", "rubigloo", "fixessnizort", "coalorecheesy", "logodarthvader", + "equipmentbizarre", "charitycolne", "gradecomputer", "incrediblegases", "ingotflyingfish", "abaftmounting", + "kissingfluke", "chesterdinky", "anthropicdip", "portalcairo", "purebredhighjump", "jamaicansteeping", + "skaterscoins", "chondrulelocust", "modespretty", "otisnadrid", "lagoonone", "arrivepayday", "lawfulpatsy", + "customersdeleted", "superiorarod", "abackwarped", "footballcyclic", "sawtshortstop", "waskerleysanidine", + "polythenehead", "carpacciosierra", "gnashgabcheviot", "plunkarnisdale", "surfacebased", "wickedpark", + "capitalistivan", "kinglassmuse", "adultsceiriog", "medrones", "climaxshops", "archeangolfer", "tomfront", + "kobeshift", "nettleaugustus", "bitesizedlion", "crickedbunting", "englishrichard", "dangerousdelmonico", + "sparklemicrosoft", "kneepadsfold", "enunciatesunglasses", "parchmentsteak", "meigpiton", "puttingcitrusy", + "eyehash", "newtonatomiser", "witchesburberry", "positionwobbly", "clipboardamber", "ricolobster", + "calendarpetal", "shinywound", "dealemral", "moonrakerfinnish", "banditliberated", "whippedfanatical", + "jargongreasy", "yumlayla", "dwarfismtransition", "doleriteduce", "sikickball", + "columngymnastics", "draybowmont", "jupitersnorkling", "siderealmolding", "dowdyrosary", "novaskeeter", + "whickerpulley", "rutlandsliders", "categoryflossed", "coiltiedogfish", "brandwaren", "altairlatigo", + "acruxyouthscape", "harmonicdash", "jasperserver", "slicedaggie", "gravityfern", "bitsstorm", + "readymadehobby", "surfeitgrape", 
"pantheonslabs", "ammandecent", "skicrackers", "speyfashions", + "languagedeeno", "pettyconfit", "minutesshimmering", "thinhopeangellist", "sleevelesscadmium", "controlarc", + "robinvolvox", "postboxskylark", "tortepleasing", "lutzdillinger", "amnioteperl", "burntmaximize", + "gamblingearn", "bumsouch", "coronagraphdown", "bodgeelearning", "hackingscraper", "hartterbium", + "mindyurgonian", "leidlebalki", "labelthumbs", "lincolncrisps", "pearhamster", "termsfiona", + "tickingsomber", "hatellynfi", "northumberlandgrotesque", "harpistcaramel", "gentryswiss", "illusionnooks", + "easilyrows", "highgluten", "backedallegiance", "laelsitesearch", "methodfix", "teethminstral", + "chemicalchildish", "likablepace", "alikealeph", "nalasincere", "investbaroque", "conditionenvelope", + "splintsmccue", "carnonprompt", "resultharvey", "acceptsheba", "redditmonsoon", "multiplepostbox", + "invitationchurch", "drinksgaliath", "ordersvivid", "mugsgit", "clumpingfreak" + }; + + for(StringPreparation stringPreparation : StringPreparations.values()) { + for(String s : validAsciiUsernames) { + assertEquals(s, stringPreparation.normalize(s)); + } + } + } + + /* + * Some simple random testing won't hurt. If a test would fail, create new test with the generated word. 
+ */ + @Test + public void doNormalizeValidAsciiRandom() { + int n = 10 * 1000; + int maxLength = 64; + Random random = new Random(); + String[] values = new String[n]; + for(int i = 0; i < n; i++) { + char[] charValue = new char[random.nextInt(maxLength) + 1]; + for(int j = 0; j < charValue.length; j++) { + charValue[j] = (char) (random.nextInt(127 - 33) + 33); + } + values[i] = new String(charValue); + } + + for(StringPreparation stringPreparation : StringPreparations.values()) { + for(String s : values) { + assertEquals( + "'" + s + "' is a printable ASCII string, should not be changed by normalize()", + s, + stringPreparation.normalize(s) + ); + } + } + } + + @Test + public void doNormalizeNoPreparationEmptyAfterNormalization() { + int n = 0; + for(String s : ONLY_NON_PRINTABLE_STRINGS) { + try { + StringPreparations.NO_PREPARATION.normalize(s); + } catch (IllegalArgumentException e) { + n++; + } + } + + assertTrue( + "IllegalArgumentException not thrown for either null or empty output after normalization", + n == ONLY_NON_PRINTABLE_STRINGS.length + ); + } + + @Test + public void doNormalizeNoPreparationNonEmptyAfterNormalization() { + // No exception should be thrown + for(String s : ONLY_NON_PRINTABLE_STRINGS) { + StringPreparations.NO_PREPARATION.normalize(s + "a"); + } + } +} diff --git a/scram-common/src/test/java/com/ongres/scram/common/util/AbstractCharAttributeValueTest.java b/scram-common/src/test/java/com/ongres/scram/common/util/AbstractCharAttributeValueTest.java new file mode 100644 index 0000000..b5b6204 --- /dev/null +++ b/scram-common/src/test/java/com/ongres/scram/common/util/AbstractCharAttributeValueTest.java @@ -0,0 +1,101 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + + +package com.ongres.scram.common.util; + + +import org.junit.Test; + +import static org.junit.Assert.*; + + +public class AbstractCharAttributeValueTest { + private class MockCharAttribute implements CharAttribute { + private final char c; + + public MockCharAttribute(char c) { + this.c = c; + } + + @Override + public char getChar() { + return c; + } + } + + @Test + public void constructorNullAttribute() { + try { + assertNotNull(new AbstractCharAttributeValue((CharAttribute) null, "value")); + } catch(IllegalArgumentException e) { + return; + } + + fail("IllegalArgumentException must be thrown if the CharAttribute is null"); + } + + @Test + public void constructorEmptyValue() { + try { + assertNotNull(new AbstractCharAttributeValue(new MockCharAttribute('c'), "")); + } catch(IllegalArgumentException e) { + return; + } + + fail("IllegalArgumentException must be thrown if the value is empty"); + } + + @Test + public void writeToNonNullValues() { + String[] legalValues = new String[] { "a", "----", "value" }; + char c = 'c'; + for(String s : legalValues) { + assertEquals( + "" + c + '=' + s, + new AbstractCharAttributeValue(new MockCharAttribute(c), s).toString() + ); + } + } + + @Test + public void writeToNullValue() { + char c = 'd'; + assertEquals( + "" + c, + new AbstractCharAttributeValue(new MockCharAttribute(c), null).toString() + ); + } + + @Test + public void writeToEscapedValues() { + char c = 'a'; + MockCharAttribute mockCharAttribute = new MockCharAttribute(c); + String[] values = new String[] { "a=b", "c,a", ",", "=,", "=,,=" }; + for(int i = 0; i < values.length; i++) { + assertEquals( + "" + c + '=' + values[i], + new AbstractCharAttributeValue(mockCharAttribute, values[i]).toString() + ); + } + } +} diff --git a/scram-common/src/test/java/com/ongres/scram/common/util/Base64Test.java b/scram-common/src/test/java/com/ongres/scram/common/util/Base64Test.java new file mode 100644 index 0000000..3431061 --- /dev/null +++ 
b/scram-common/src/test/java/com/ongres/scram/common/util/Base64Test.java @@ -0,0 +1,51 @@ +/* + * Copyright 2019, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package com.ongres.scram.common.util; + +import java.nio.charset.StandardCharsets; + +import org.junit.Assert; +import org.junit.Test; + +import com.ongres.scram.common.bouncycastle.base64.Base64; + +public class Base64Test { + + @Test + public void rfcTest() { + Assert.assertEquals("", new String(Base64.decode(""), StandardCharsets.UTF_8)); + Assert.assertEquals("f", new String(Base64.decode("Zg=="), StandardCharsets.UTF_8)); + Assert.assertEquals("fo", new String(Base64.decode("Zm8="), StandardCharsets.UTF_8)); + Assert.assertEquals("foo", new String(Base64.decode("Zm9v"), StandardCharsets.UTF_8)); + Assert.assertEquals("foob", new String(Base64.decode("Zm9vYg=="), StandardCharsets.UTF_8)); + Assert.assertEquals("fooba", new String(Base64.decode("Zm9vYmE="), StandardCharsets.UTF_8)); + Assert.assertEquals("foobar", new String(Base64.decode("Zm9vYmFy"), StandardCharsets.UTF_8)); + Assert.assertEquals("", Base64.toBase64String("".getBytes(StandardCharsets.UTF_8))); + Assert.assertEquals("Zg==", Base64.toBase64String("f".getBytes(StandardCharsets.UTF_8))); + Assert.assertEquals("Zm8=", Base64.toBase64String("fo".getBytes(StandardCharsets.UTF_8))); + Assert.assertEquals("Zm9v", Base64.toBase64String("foo".getBytes(StandardCharsets.UTF_8))); + Assert.assertEquals("Zm9vYg==", Base64.toBase64String("foob".getBytes(StandardCharsets.UTF_8))); + Assert.assertEquals("Zm9vYmE=", Base64.toBase64String("fooba".getBytes(StandardCharsets.UTF_8))); + Assert.assertEquals("Zm9vYmFy", Base64.toBase64String("foobar".getBytes(StandardCharsets.UTF_8))); + } +} diff --git a/scram-common/src/test/java/com/ongres/scram/common/util/CryptoUtilTest.java b/scram-common/src/test/java/com/ongres/scram/common/util/CryptoUtilTest.java new file mode 100644 index 0000000..b21b0d0 --- /dev/null +++ b/scram-common/src/test/java/com/ongres/scram/common/util/CryptoUtilTest.java @@ -0,0 +1,63 @@ +/* + * Copyright 2017, OnGres. 
+ * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + + +package com.ongres.scram.common.util; + + +import org.junit.Test; + +import java.security.SecureRandom; +import java.util.Random; + +import static org.junit.Assert.fail; + + +public class CryptoUtilTest { + private static final SecureRandom SECURE_RANDOM = new SecureRandom(); + + @Test(expected = IllegalArgumentException.class) + public void nonceInvalidSize1() { + CryptoUtil.nonce(0, SECURE_RANDOM); + } + + @Test(expected = IllegalArgumentException.class) + public void nonceInvalidSize2() { + CryptoUtil.nonce(-1, SECURE_RANDOM); + } + + @Test + public void nonceValid() { + int nNonces = 1000; + int nonceMaxSize = 100; + Random random = new Random(); + + // Some more random testing + for(int i = 0; i < nNonces; i++) { + for(char c : CryptoUtil.nonce(random.nextInt(nonceMaxSize) + 1, SECURE_RANDOM).toCharArray()) { + if(c == ',' || c < (char) 33 || c > (char) 126) { + fail("Character c='" + c + "' is not allowed on a nonce"); + } + } + } + } +} diff --git a/scram-common/src/test/java/com/ongres/scram/common/util/StringWritableCsvTest.java b/scram-common/src/test/java/com/ongres/scram/common/util/StringWritableCsvTest.java new file mode 100644 index 0000000..ed5c051 --- /dev/null +++ b/scram-common/src/test/java/com/ongres/scram/common/util/StringWritableCsvTest.java @@ -0,0 +1,146 @@ +/* + * Copyright 2017, OnGres. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the + * following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other materials provided with the distribution. 
package com.ongres.scram.common.util;

import com.ongres.scram.common.ScramAttributes;
import com.ongres.scram.common.ScramAttributeValue;
import com.ongres.scram.common.gssapi.Gs2AttributeValue;
import com.ongres.scram.common.gssapi.Gs2Attributes;
import org.junit.Test;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

/**
 * Tests for {@link StringWritableCsv}: writing attribute-value pairs to a comma-separated
 * string and parsing them back, including the limit/offset variants of {@code parseFrom}.
 */
public class StringWritableCsvTest {
    // Single attribute=value tokens covering SCRAM (c=, i=, a=) and the bare "n" GS2 token.
    private static final String[] ONE_ARG_VALUES = new String[] { "c=channel", "i=4096", "a=authzid", "n" };
    // A realistic client-first message; note the empty field between the two commas.
    private static final String SEVERAL_VALUES_STRING = "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL";

    /** Writing no values (or an empty array) must append nothing to the buffer. */
    @Test
    public void writeToNullOrEmpty() {
        assertTrue(StringWritableCsv.writeTo(new StringBuffer()).length() == 0);
        assertTrue(StringWritableCsv.writeTo(new StringBuffer(), new CharAttributeValue[]{}).length() == 0);
    }

    /** Writing a single pair must produce exactly that pair's textual form, no separator. */
    @Test
    public void writeToOneArg() {
        CharAttributeValue[] pairs = new CharAttributeValue[] {
            new ScramAttributeValue(ScramAttributes.CHANNEL_BINDING, "channel"),
            new ScramAttributeValue(ScramAttributes.ITERATION, "" + 4096),
            new Gs2AttributeValue(Gs2Attributes.AUTHZID, "authzid"),
            new Gs2AttributeValue(Gs2Attributes.CLIENT_NOT, null)
        };

        for (int i = 0; i < pairs.length; i++) {
            assertEquals(ONE_ARG_VALUES[i], StringWritableCsv.writeTo(new StringBuffer(), pairs[i]).toString());
        }
    }

    /** A null entry in the varargs must render as an empty field (the ",," in the expected string). */
    @Test
    public void writeToSeveralArgs() {
        assertEquals(
            SEVERAL_VALUES_STRING,
            StringWritableCsv.writeTo(
                new StringBuffer(),
                new Gs2AttributeValue(Gs2Attributes.CLIENT_NOT, null),
                null,
                new ScramAttributeValue(ScramAttributes.USERNAME, "user"),
                new ScramAttributeValue(ScramAttributes.NONCE, "fyko+d2lbbFgONRv9qkxdawL")

            ).toString()
        );
    }

    /** Parsing the empty string yields an empty array, not a one-element array of "". */
    @Test
    public void parseFromEmpty() {
        assertArrayEquals(new String[]{}, StringWritableCsv.parseFrom(""));
    }

    /**
     * Single-token input, exercising the n/offset overloads:
     * n == 0 means "no limit"; an n larger than the number of fields pads with nulls;
     * an offset past the last field produces only nulls.
     */
    @Test
    public void parseFromOneArgWithLimitsOffsets() {
        for (String s : ONE_ARG_VALUES) {
            assertArrayEquals(new String[] {s}, StringWritableCsv.parseFrom(s));
        }

        int[] numberEntries = new int[] { 0, 1 };
        for (int n : numberEntries) {
            for (String s : ONE_ARG_VALUES) {
                assertArrayEquals(new String[] {s}, StringWritableCsv.parseFrom(s, n));
            }
        }
        for (String s : ONE_ARG_VALUES) {
            assertArrayEquals(new String[] {s, null, null}, StringWritableCsv.parseFrom(s, 3));
        }

        for (int n : numberEntries) {
            for (String s : ONE_ARG_VALUES) {
                assertArrayEquals(new String[] {s}, StringWritableCsv.parseFrom(s, n, 0));
            }
        }
        for (String s : ONE_ARG_VALUES) {
            assertArrayEquals(new String[] {s, null, null}, StringWritableCsv.parseFrom(s, 3, 0));
        }

        for (int n : numberEntries) {
            for (String s : ONE_ARG_VALUES) {
                assertArrayEquals(new String[] { null }, StringWritableCsv.parseFrom(s, n, 1));
            }
        }
    }

    /**
     * Multi-field input with limits and offsets; the empty field between commas must be
     * preserved as "" while missing fields (beyond the input) come back as null.
     */
    @Test
    public void parseFromSeveralArgsWithLimitsOffsets() {
        assertArrayEquals(
            new String[] { "n", "", "n=user", "r=fyko+d2lbbFgONRv9qkxdawL" },
            StringWritableCsv.parseFrom(SEVERAL_VALUES_STRING)
        );

        assertArrayEquals(
            new String[] { "n", "" },
            StringWritableCsv.parseFrom(SEVERAL_VALUES_STRING, 2)
        );

        assertArrayEquals(
            new String[] { "", "n=user" },
            StringWritableCsv.parseFrom(SEVERAL_VALUES_STRING, 2, 1)
        );

        assertArrayEquals(
            new String[] { "r=fyko+d2lbbFgONRv9qkxdawL", null },
            StringWritableCsv.parseFrom(SEVERAL_VALUES_STRING, 2, 3)
        );

        assertArrayEquals(
            new String[] { null, null },
            StringWritableCsv.parseFrom(SEVERAL_VALUES_STRING, 2, 4)
        );

        assertArrayEquals(
            new String[] { "n", "", "n=user", "r=fyko+d2lbbFgONRv9qkxdawL", null },
            StringWritableCsv.parseFrom(SEVERAL_VALUES_STRING, 5)
        );
    }
}
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + + +package com.ongres.scram.common.util; + + +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.*; + + +public class UsAsciiUtilsTest { + @Test + public void toPrintableNull() { + try { + UsAsciiUtils.toPrintable(null); + } catch(IllegalArgumentException ex) { + return; + } + + fail("Calling with null value must throw IllegalArgumentException"); + } + + @Test + public void toPrintableNonASCII() { + String[] nonASCIIStrings = new String[] { "abcdé", "ñ", "€", "Наташа", (char) 127 + "" }; + int n = 0; + for(String s : nonASCIIStrings) { + try { + UsAsciiUtils.toPrintable(s); + } catch(IllegalArgumentException ex) { + n++; + } + } + + assertTrue( + "String(s) with non-ASCII characters not throwing IllegalArgumentException", + n == nonASCIIStrings.length + ); + } + + @Test + public void toPrintableNonPrintable() { + String[] original = new String[] { " u ", "a" + (char) 12, (char) 0 + "ttt" + (char) 31 }; + String[] expected = new String[] { "u", "a", "ttt" }; + + for(int i = 0; i < original.length; i++) { + assertEquals("", expected[i], UsAsciiUtils.toPrintable(original[i])); + } + } + + @Test + public void toPrintableAllPrintable() { + List values = new ArrayList(); + values.addAll(Arrays.asList( + new String[] { (char) 33 + "", "user", "!", "-,.=?", (char) 126 + "" }) + ); + for(int c = 33; c < 127; c++) { + values.add("---" + (char) c + "---"); + 
} + + for(String s : values) { + assertEquals( + "All printable String '" + s + "' not returning the same value", + s, + UsAsciiUtils.toPrintable(s) + ); + } + } +} diff --git a/settings.gradle b/settings.gradle new file mode 100644 index 0000000..a7f7bd3 --- /dev/null +++ b/settings.gradle @@ -0,0 +1,35 @@ +pluginManagement { + repositories { + mavenLocal() + mavenCentral { + metadataSources { + mavenPom() + artifact() + ignoreGradleMetadataRedirection() + } + } + gradlePluginPortal() + } +} + +dependencyResolutionManagement { + versionCatalogs { + libs { + version('gradle', '8.5') + } + testLibs { + version('junit', '5.10.2') + library('junit-jupiter-api', 'org.junit.jupiter', 'junit-jupiter-api').versionRef('junit') + library('junit-jupiter-params', 'org.junit.jupiter', 'junit-jupiter-params').versionRef('junit') + library('junit-jupiter-engine', 'org.junit.jupiter', 'junit-jupiter-engine').versionRef('junit') + library('junit-jupiter-platform-launcher', 'org.junit.platform', 'junit-platform-launcher').version('1.10.1') + library('hamcrest', 'org.hamcrest', 'hamcrest-library').version('2.2') + } + } +} + +include 'pgjdbc' +include 'scram-common' +include 'scram-client' +include 'saslprep' +include 'stringprep' diff --git a/stringprep/src/main/java/com/ongres/stringprep/Option.java b/stringprep/src/main/java/com/ongres/stringprep/Option.java new file mode 100644 index 0000000..b14b215 --- /dev/null +++ b/stringprep/src/main/java/com/ongres/stringprep/Option.java @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2021 OnGres, Inc. + * SPDX-License-Identifier: BSD-2-Clause + */ + +package com.ongres.stringprep; + +/** + * Options to define a StringPrep profile. + * + * @since 2.0 + */ +public enum Option { + + /** + * B.1 Commonly mapped to nothing + */ + MAP_TO_NOTHING, + /** + * Any additional mapping tables specific to the profile. + */ + ADDITIONAL_MAPPING, + /** + * B.2 Mapping for case-folding used with NFKC. 
+ */ + CASE_FOLD_NFKC, + /** + * B.3 Mapping for case-folding used with no normalization. + */ + CASE_FOLD_NO_NORMALIZATION, + /** + * Unicode normalization with form KC. + */ + NORMALIZE_KC, + /** + * Bidirectional tables. + */ + CHECK_BIDI, + /** + * Any additional characters that are prohibited as output specific to the profile. + */ + FORBID_ADDITIONAL_CHARACTERS, + /** + * C.1.1 ASCII space characters + */ + FORBID_ASCII_SPACES, + /** + * C.1.2 Non-ASCII space characters + */ + FORBID_NON_ASCII_SPACES, + /** + * C.2.1 ASCII control characters + */ + FORBID_ASCII_CONTROL, + /** + * C.2.2 Non-ASCII control characters + */ + FORBID_NON_ASCII_CONTROL, + /** + * C.3 Private use + */ + FORBID_PRIVATE_USE, + /** + * C.4 Non-character code points. + */ + FORBID_NON_CHARACTER, + /** + * C.5 Surrogate codes. + */ + FORBID_SURROGATE, + /** + * C.6 Inappropriate for plain text. + */ + FORBID_INAPPROPRIATE_FOR_PLAIN_TEXT, + /** + * C.7 Inappropriate for canonical representation + */ + FORBID_INAPPROPRIATE_FOR_CANON_REP, + /** + * C.8 Change display properties or are deprecated + */ + FORBID_CHANGE_DISPLAY_AND_DEPRECATED, + /** + * C.9 Tagging characters + */ + FORBID_TAGGING; + +} diff --git a/stringprep/src/main/java/com/ongres/stringprep/Profile.java b/stringprep/src/main/java/com/ongres/stringprep/Profile.java new file mode 100644 index 0000000..a443a52 --- /dev/null +++ b/stringprep/src/main/java/com/ongres/stringprep/Profile.java @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2021 OnGres, Inc. + * SPDX-License-Identifier: BSD-2-Clause + */ + +package com.ongres.stringprep; + +import java.util.Set; + +/** + * The {@code Profile} interface is used to define stringprep profiles in order to fully specify the + * processing options. + * + *

Stringprep profiles can also exclude characters that should not normally appear in text that + * is used in the protocol. The profile can prevent such characters by changing the characters to be + * excluded to other characters, by removing those characters, or by causing an error if the + * characters would appear in the output. + * + * @since 2.0 + */ +public interface Profile { + + /** + * Set of options used by the profile. + * + * @return profile options. + */ + Set

/**
 * Names a stringprep {@code Profile} implementation so it can be located at runtime.
 *
 * <p>A profile annotated with {@code @ProfileName("SASLprep")} can be obtained by calling
 * {@code Stringprep.getProvider("SASLprep")}.
 */
@Documented
@Retention(RUNTIME)
@Target(TYPE)
public @interface ProfileName {

    /**
     * Define the Stringprep profile name as defined by the RFC.
     *
     * @return profile name
     */
    String value();
}
Profile} {@link Option}s and the two different + * types of strings in typical protocols where internationalized strings are used: "stored + * strings" and "queries". + * + * @param profile includes the options to implement a Stringprep profile. + * @param storedStrings "stored strings" MUST NOT contain unassigned code points, "queries" MAY + * include them. + * @throws NullPointerException if {@code profile} is {@code null}. + */ + Stringprep(Profile profile, boolean storedStrings) { + Objects.requireNonNull(profile); + EnumSet

  /**
   * Runs the full RFC 3454 preparation pipeline over the input.
   *
   * <p>The steps for preparing strings are:
   *
   * <p>1. Map -- For each character in the input, check if it has a mapping and, if so, replace
   * it with its mapping. This is described in section 3.
   *
   * <p>2. Normalize -- Possibly normalize the result of step 1 using Unicode normalization. This is
   * described in section 4.
   *
   * <p>3. Prohibit -- Check for any characters that are not allowed in the output. If any are found,
   * return an error. This is described in section 5.
   *
   * <p>4. Check bidi -- Possibly check for right-to-left characters, and if any are found, make sure
   * that the whole string satisfies the requirements for bidirectional strings. If the string does
   * not satisfy the requirements for bidirectional strings, return an error. This is described in
   * section 6.
   *
   * @param string to prepare.
   *
   * @return The prepared string following the options of the profile.
   * @throws IllegalArgumentException if there are prohibited or bidi characters depending on the
   *         profile used.
   * @throws NullPointerException if {@code string} is {@code null}.
   */
  char[] prepare(final char[] string) {
    Objects.requireNonNull(string, "The string to prepare must not be null");
    if (string.length == 0) {
      return string;
    }

    // Work on a copy so the caller's array is never mutated.
    char[] value = string.clone();

    // 1) Map -- For each character in the input, check if it has a mapping
    // and, if so, replace it with its mapping.
    value = map(value);

    // 2) Normalize -- Possibly normalize the result of step 1 using Unicode
    // normalization.
    if (normalizeKc) {
      value = Normalizer.normalize(CharBuffer.wrap(value), Normalizer.Form.NFKC).toCharArray();
    }

    // Bidi properties of the first and last code point, sampled after mapping/normalization.
    // NOTE(review): codePointAt(value, value.length - 1) reads the LOW surrogate when the
    // string ends with a supplementary character -- confirm Tables treats lone surrogates
    // as intended (they are also caught by the surrogate prohibition table).
    boolean firstRandAlCat = Tables.bidirectionalPropertyRorAL(Character.codePointAt(value, 0));
    boolean lastRandAlCat =
        Tables.bidirectionalPropertyRorAL(Character.codePointAt(value, value.length - 1));
    boolean containsRandAlCat = false;
    boolean containsLcat = false;
    int codePoint;
    // Iterate by code point (charCount handles surrogate pairs).
    for (int i = 0; i < value.length; i += Character.charCount(codePoint)) {
      codePoint = Character.codePointAt(value, i);

      // 3) Prohibit -- Check for any characters that are not allowed in the
      // output. If any are found, return an error.
      prohibitedOutput(codePoint);

      // 4) Check bidi -- Possibly check for right-to-left characters, and if
      // any are found, make sure that the whole string satisfies the
      // requirements for bidirectional strings. If the string does not
      // satisfy the requirements for bidirectional strings, return an
      // error.
      if (checkBidi) {
        // 1) The characters in section 5.8 MUST be prohibited.
        checkProhibited(true, Tables::prohibitionChangeDisplayProperties,
            codePoint, "Prohibited control character");

        if (Tables.bidirectionalPropertyRorAL(codePoint)) {
          containsRandAlCat = true;
        }
        if (Tables.bidirectionalPropertyL(codePoint)) {
          containsLcat = true;
        }
        // 2) If a string contains any RandALCat character,
        // the string MUST NOT contain any LCat character.
        if (containsRandAlCat && containsLcat) {
          throw new IllegalArgumentException("Prohibited string with RandALCat and LCat");
        }
        // 3) If a string contains any RandALCat character, a RandALCat
        // character MUST be the first character of the string, and a
        // RandALCat character MUST be the last character of the string.
        if (containsRandAlCat && !(firstRandAlCat && lastRandAlCat)) {
          throw new IllegalArgumentException(
              "RandALCat character is not the first and the last character");
        }
      }
    }

    return value;
  }

  /**
   * Step 1 (mapping): rebuilds the string applying, per code point, the first applicable
   * mapping of: map-to-nothing (B.1), case-fold with NFKC (B.2), case-fold without
   * normalization (B.3), or the profile's additional mapping table.
   *
   * <p>NOTE(review): the branches are mutually exclusive -- when a case-fold branch is
   * active, the profile's additional mapping table is never consulted for the same code
   * point; confirm this precedence matches the intended profiles.
   */
  private char[] map(char[] string) {
    final StringBuilder mapping = new StringBuilder(string.length);
    for (int codePoint, i = 0; i < string.length; i += Character.charCount(codePoint)) {
      codePoint = Character.codePointAt(string, i);

      if (mapToNothing && Tables.mapToNothing(codePoint)) { // NOPMD
        // The following characters are simply deleted from the input (that is,
        // they are mapped to nothing) because their presence or absence in
        // protocol identifiers should not make two strings different. They are
        // listed in Table B.1.
      } else if (normalizeKc && caseFoldNfkc) {
        // appendix B.2 is for profiles that also use Unicode
        // normalization form KC
        for (int cp : Tables.mapWithNfkc(codePoint)) {
          mapping.appendCodePoint(cp);
        }
      } else if (!normalizeKc && caseFoldNoNormalization) {
        // while appendix B.3 is for profiles that do
        // not use Unicode normalization
        for (int cp : Tables.mapWithoutNormalization(codePoint)) {
          mapping.appendCodePoint(cp);
        }
      } else if (additionalMapping) {
        // - Any additional mapping tables specific to the profile
        for (int cp : profile.additionalMappingTable(codePoint)) {
          mapping.appendCodePoint(cp);
        }
      } else {
        // No mapping applies: keep the code point unchanged.
        mapping.appendCodePoint(codePoint);
      }
    }

    char[] arr = new char[mapping.length()];
    mapping.getChars(0, mapping.length(), arr, 0);
    return arr;
  }

  /**
   * Step 3 (prohibition): checks one code point against every prohibition table the
   * profile enables, throwing IllegalArgumentException on the first match.
   */
  private void prohibitedOutput(int codePoint) {
    // - Any additional characters that are prohibited as output specific to
    // the profile
    checkProhibited(forbidAdditionalCharacters, profile::prohibitedAdditionalCharacters,
        codePoint, "Prohibited code point");

    checkProhibited(forbidAsciiSpaces, Tables::prohibitionAsciiSpace,
        codePoint, "Prohibited ASCII space");
    checkProhibited(forbidNonAsciiSpaces, Tables::prohibitionNonAsciiSpace,
        codePoint, "Prohibited non-ASCII space");
    checkProhibited(forbidAsciiControl, Tables::prohibitionAsciiControl,
        codePoint, "Prohibited ASCII control");
    checkProhibited(forbidNonAsciiControl, Tables::prohibitionNonAsciiControl,
        codePoint, "Prohibited non-ASCII control");
    checkProhibited(forbidPrivateUse, Tables::prohibitionPrivateUse,
        codePoint, "Prohibited private use character");
    checkProhibited(forbidNonCharacter, Tables::prohibitionNonCharacterCodePoints,
        codePoint, "Prohibited non-character code point");
    checkProhibited(forbidSurrogate, Tables::prohibitionSurrogateCodes,
        codePoint, "Prohibited surrogate code point");
    checkProhibited(forbidInappropriatePlainText, Tables::prohibitionInappropriatePlainText,
        codePoint, "Prohibited plain text code point");
    checkProhibited(forbidInappropriateCanonRep,
        Tables::prohibitionInappropriateCanonicalRepresentation,
        codePoint, "Prohibited non-canonical code point");
    checkProhibited(forbidChangeDisplayDeprecated, Tables::prohibitionChangeDisplayProperties,
        codePoint, "Prohibited control character");
    checkProhibited(forbidTagging, Tables::prohibitionTaggingCharacters,
        codePoint, "Prohibited tagging character");

    // All code points not assigned in the character repertoire named in a
    // stringprep profile are called "unassigned code points". Stored
    // strings using the profile MUST NOT contain any unassigned code
    // points. Queries for matching strings MAY contain unassigned code
    // points.
    checkProhibited(forbidUnassigned, Tables::unassignedCodePoints,
        codePoint, "Unassigned code point");
  }

  /**
   * Throws IllegalArgumentException with {@code msg} and the offending code point (hex)
   * when the check is enabled and the table matches; otherwise does nothing.
   */
  private static void checkProhibited(boolean forbid, IntPredicate tableCheck, int codePoint,
      String msg) {
    if (forbid && tableCheck.test(codePoint)) {
      throw new IllegalArgumentException(
          String.format(Locale.ROOT, "%s \"0x%04X\"", msg, codePoint));
    }
  }

}
+ * SPDX-License-Identifier: BSD-2-Clause + */ + +package com.ongres.stringprep; + +import java.util.ServiceLoader; + +final class StringprepLocator { + + private StringprepLocator() { + // Internal utility class + } + + static Profile getProfile(String profileName) { + for (Profile profile : ServiceLoader.load(Profile.class)) { + ProfileName annotation = profile.getClass().getDeclaredAnnotation(ProfileName.class); + if (annotation != null && annotation.value().equals(profileName)) { + return profile; + } + } + return null; + } + +} diff --git a/stringprep/src/main/java/com/ongres/stringprep/Tables.java b/stringprep/src/main/java/com/ongres/stringprep/Tables.java new file mode 100644 index 0000000..2f071f4 --- /dev/null +++ b/stringprep/src/main/java/com/ongres/stringprep/Tables.java @@ -0,0 +1,5549 @@ +/* + * Copyright (C) 2019 OnGres, Inc. + * SPDX-License-Identifier: BSD-2-Clause + */ + +package com.ongres.stringprep; + +// AUTO-GENERATED FILE - DO NOT EDIT! + +/** + * Expose tables parsed from the StringPrep RFC-3454. + */ +public final class Tables { + + private Tables() { + throw new AssertionError(); + } + + /** + * Unassigned code points in Unicode 3.2. + * + * @param codePoint the character (Unicode code point) to be tested. + * @return {@code true} if the given {@code codePoint} is + * "Unassigned code points in Unicode 3.2". 
+ * @see RFC 3454, Appendix A.1 + */ + public static boolean unassignedCodePoints(int codePoint) { + return codePoint == 0x0221 + || (codePoint >= 0x0234 && codePoint <= 0x024F) + || (codePoint >= 0x02AE && codePoint <= 0x02AF) + || (codePoint >= 0x02EF && codePoint <= 0x02FF) + || (codePoint >= 0x0350 && codePoint <= 0x035F) + || (codePoint >= 0x0370 && codePoint <= 0x0373) + || (codePoint >= 0x0376 && codePoint <= 0x0379) + || (codePoint >= 0x037B && codePoint <= 0x037D) + || (codePoint >= 0x037F && codePoint <= 0x0383) + || codePoint == 0x038B + || codePoint == 0x038D + || codePoint == 0x03A2 + || codePoint == 0x03CF + || (codePoint >= 0x03F7 && codePoint <= 0x03FF) + || codePoint == 0x0487 + || codePoint == 0x04CF + || (codePoint >= 0x04F6 && codePoint <= 0x04F7) + || (codePoint >= 0x04FA && codePoint <= 0x04FF) + || (codePoint >= 0x0510 && codePoint <= 0x0530) + || (codePoint >= 0x0557 && codePoint <= 0x0558) + || codePoint == 0x0560 + || codePoint == 0x0588 + || (codePoint >= 0x058B && codePoint <= 0x0590) + || codePoint == 0x05A2 + || codePoint == 0x05BA + || (codePoint >= 0x05C5 && codePoint <= 0x05CF) + || (codePoint >= 0x05EB && codePoint <= 0x05EF) + || (codePoint >= 0x05F5 && codePoint <= 0x060B) + || (codePoint >= 0x060D && codePoint <= 0x061A) + || (codePoint >= 0x061C && codePoint <= 0x061E) + || codePoint == 0x0620 + || (codePoint >= 0x063B && codePoint <= 0x063F) + || (codePoint >= 0x0656 && codePoint <= 0x065F) + || (codePoint >= 0x06EE && codePoint <= 0x06EF) + || codePoint == 0x06FF + || codePoint == 0x070E + || (codePoint >= 0x072D && codePoint <= 0x072F) + || (codePoint >= 0x074B && codePoint <= 0x077F) + || (codePoint >= 0x07B2 && codePoint <= 0x0900) + || codePoint == 0x0904 + || (codePoint >= 0x093A && codePoint <= 0x093B) + || (codePoint >= 0x094E && codePoint <= 0x094F) + || (codePoint >= 0x0955 && codePoint <= 0x0957) + || (codePoint >= 0x0971 && codePoint <= 0x0980) + || codePoint == 0x0984 + || (codePoint >= 0x098D && codePoint <= 
0x098E) + || (codePoint >= 0x0991 && codePoint <= 0x0992) + || codePoint == 0x09A9 + || codePoint == 0x09B1 + || (codePoint >= 0x09B3 && codePoint <= 0x09B5) + || (codePoint >= 0x09BA && codePoint <= 0x09BB) + || codePoint == 0x09BD + || (codePoint >= 0x09C5 && codePoint <= 0x09C6) + || (codePoint >= 0x09C9 && codePoint <= 0x09CA) + || (codePoint >= 0x09CE && codePoint <= 0x09D6) + || (codePoint >= 0x09D8 && codePoint <= 0x09DB) + || codePoint == 0x09DE + || (codePoint >= 0x09E4 && codePoint <= 0x09E5) + || (codePoint >= 0x09FB && codePoint <= 0x0A01) + || (codePoint >= 0x0A03 && codePoint <= 0x0A04) + || (codePoint >= 0x0A0B && codePoint <= 0x0A0E) + || (codePoint >= 0x0A11 && codePoint <= 0x0A12) + || codePoint == 0x0A29 + || codePoint == 0x0A31 + || codePoint == 0x0A34 + || codePoint == 0x0A37 + || (codePoint >= 0x0A3A && codePoint <= 0x0A3B) + || codePoint == 0x0A3D + || (codePoint >= 0x0A43 && codePoint <= 0x0A46) + || (codePoint >= 0x0A49 && codePoint <= 0x0A4A) + || (codePoint >= 0x0A4E && codePoint <= 0x0A58) + || codePoint == 0x0A5D + || (codePoint >= 0x0A5F && codePoint <= 0x0A65) + || (codePoint >= 0x0A75 && codePoint <= 0x0A80) + || codePoint == 0x0A84 + || codePoint == 0x0A8C + || codePoint == 0x0A8E + || codePoint == 0x0A92 + || codePoint == 0x0AA9 + || codePoint == 0x0AB1 + || codePoint == 0x0AB4 + || (codePoint >= 0x0ABA && codePoint <= 0x0ABB) + || codePoint == 0x0AC6 + || codePoint == 0x0ACA + || (codePoint >= 0x0ACE && codePoint <= 0x0ACF) + || (codePoint >= 0x0AD1 && codePoint <= 0x0ADF) + || (codePoint >= 0x0AE1 && codePoint <= 0x0AE5) + || (codePoint >= 0x0AF0 && codePoint <= 0x0B00) + || codePoint == 0x0B04 + || (codePoint >= 0x0B0D && codePoint <= 0x0B0E) + || (codePoint >= 0x0B11 && codePoint <= 0x0B12) + || codePoint == 0x0B29 + || codePoint == 0x0B31 + || (codePoint >= 0x0B34 && codePoint <= 0x0B35) + || (codePoint >= 0x0B3A && codePoint <= 0x0B3B) + || (codePoint >= 0x0B44 && codePoint <= 0x0B46) + || (codePoint >= 0x0B49 && codePoint <= 
0x0B4A) + || (codePoint >= 0x0B4E && codePoint <= 0x0B55) + || (codePoint >= 0x0B58 && codePoint <= 0x0B5B) + || codePoint == 0x0B5E + || (codePoint >= 0x0B62 && codePoint <= 0x0B65) + || (codePoint >= 0x0B71 && codePoint <= 0x0B81) + || codePoint == 0x0B84 + || (codePoint >= 0x0B8B && codePoint <= 0x0B8D) + || codePoint == 0x0B91 + || (codePoint >= 0x0B96 && codePoint <= 0x0B98) + || codePoint == 0x0B9B + || codePoint == 0x0B9D + || (codePoint >= 0x0BA0 && codePoint <= 0x0BA2) + || (codePoint >= 0x0BA5 && codePoint <= 0x0BA7) + || (codePoint >= 0x0BAB && codePoint <= 0x0BAD) + || codePoint == 0x0BB6 + || (codePoint >= 0x0BBA && codePoint <= 0x0BBD) + || (codePoint >= 0x0BC3 && codePoint <= 0x0BC5) + || codePoint == 0x0BC9 + || (codePoint >= 0x0BCE && codePoint <= 0x0BD6) + || (codePoint >= 0x0BD8 && codePoint <= 0x0BE6) + || (codePoint >= 0x0BF3 && codePoint <= 0x0C00) + || codePoint == 0x0C04 + || codePoint == 0x0C0D + || codePoint == 0x0C11 + || codePoint == 0x0C29 + || codePoint == 0x0C34 + || (codePoint >= 0x0C3A && codePoint <= 0x0C3D) + || codePoint == 0x0C45 + || codePoint == 0x0C49 + || (codePoint >= 0x0C4E && codePoint <= 0x0C54) + || (codePoint >= 0x0C57 && codePoint <= 0x0C5F) + || (codePoint >= 0x0C62 && codePoint <= 0x0C65) + || (codePoint >= 0x0C70 && codePoint <= 0x0C81) + || codePoint == 0x0C84 + || codePoint == 0x0C8D + || codePoint == 0x0C91 + || codePoint == 0x0CA9 + || codePoint == 0x0CB4 + || (codePoint >= 0x0CBA && codePoint <= 0x0CBD) + || codePoint == 0x0CC5 + || codePoint == 0x0CC9 + || (codePoint >= 0x0CCE && codePoint <= 0x0CD4) + || (codePoint >= 0x0CD7 && codePoint <= 0x0CDD) + || codePoint == 0x0CDF + || (codePoint >= 0x0CE2 && codePoint <= 0x0CE5) + || (codePoint >= 0x0CF0 && codePoint <= 0x0D01) + || codePoint == 0x0D04 + || codePoint == 0x0D0D + || codePoint == 0x0D11 + || codePoint == 0x0D29 + || (codePoint >= 0x0D3A && codePoint <= 0x0D3D) + || (codePoint >= 0x0D44 && codePoint <= 0x0D45) + || codePoint == 0x0D49 + || (codePoint 
>= 0x0D4E && codePoint <= 0x0D56) + || (codePoint >= 0x0D58 && codePoint <= 0x0D5F) + || (codePoint >= 0x0D62 && codePoint <= 0x0D65) + || (codePoint >= 0x0D70 && codePoint <= 0x0D81) + || codePoint == 0x0D84 + || (codePoint >= 0x0D97 && codePoint <= 0x0D99) + || codePoint == 0x0DB2 + || codePoint == 0x0DBC + || (codePoint >= 0x0DBE && codePoint <= 0x0DBF) + || (codePoint >= 0x0DC7 && codePoint <= 0x0DC9) + || (codePoint >= 0x0DCB && codePoint <= 0x0DCE) + || codePoint == 0x0DD5 + || codePoint == 0x0DD7 + || (codePoint >= 0x0DE0 && codePoint <= 0x0DF1) + || (codePoint >= 0x0DF5 && codePoint <= 0x0E00) + || (codePoint >= 0x0E3B && codePoint <= 0x0E3E) + || (codePoint >= 0x0E5C && codePoint <= 0x0E80) + || codePoint == 0x0E83 + || (codePoint >= 0x0E85 && codePoint <= 0x0E86) + || codePoint == 0x0E89 + || (codePoint >= 0x0E8B && codePoint <= 0x0E8C) + || (codePoint >= 0x0E8E && codePoint <= 0x0E93) + || codePoint == 0x0E98 + || codePoint == 0x0EA0 + || codePoint == 0x0EA4 + || codePoint == 0x0EA6 + || (codePoint >= 0x0EA8 && codePoint <= 0x0EA9) + || codePoint == 0x0EAC + || codePoint == 0x0EBA + || (codePoint >= 0x0EBE && codePoint <= 0x0EBF) + || codePoint == 0x0EC5 + || codePoint == 0x0EC7 + || (codePoint >= 0x0ECE && codePoint <= 0x0ECF) + || (codePoint >= 0x0EDA && codePoint <= 0x0EDB) + || (codePoint >= 0x0EDE && codePoint <= 0x0EFF) + || codePoint == 0x0F48 + || (codePoint >= 0x0F6B && codePoint <= 0x0F70) + || (codePoint >= 0x0F8C && codePoint <= 0x0F8F) + || codePoint == 0x0F98 + || codePoint == 0x0FBD + || (codePoint >= 0x0FCD && codePoint <= 0x0FCE) + || (codePoint >= 0x0FD0 && codePoint <= 0x0FFF) + || codePoint == 0x1022 + || codePoint == 0x1028 + || codePoint == 0x102B + || (codePoint >= 0x1033 && codePoint <= 0x1035) + || (codePoint >= 0x103A && codePoint <= 0x103F) + || (codePoint >= 0x105A && codePoint <= 0x109F) + || (codePoint >= 0x10C6 && codePoint <= 0x10CF) + || (codePoint >= 0x10F9 && codePoint <= 0x10FA) + || (codePoint >= 0x10FC && codePoint 
<= 0x10FF) + || (codePoint >= 0x115A && codePoint <= 0x115E) + || (codePoint >= 0x11A3 && codePoint <= 0x11A7) + || (codePoint >= 0x11FA && codePoint <= 0x11FF) + || codePoint == 0x1207 + || codePoint == 0x1247 + || codePoint == 0x1249 + || (codePoint >= 0x124E && codePoint <= 0x124F) + || codePoint == 0x1257 + || codePoint == 0x1259 + || (codePoint >= 0x125E && codePoint <= 0x125F) + || codePoint == 0x1287 + || codePoint == 0x1289 + || (codePoint >= 0x128E && codePoint <= 0x128F) + || codePoint == 0x12AF + || codePoint == 0x12B1 + || (codePoint >= 0x12B6 && codePoint <= 0x12B7) + || codePoint == 0x12BF + || codePoint == 0x12C1 + || (codePoint >= 0x12C6 && codePoint <= 0x12C7) + || codePoint == 0x12CF + || codePoint == 0x12D7 + || codePoint == 0x12EF + || codePoint == 0x130F + || codePoint == 0x1311 + || (codePoint >= 0x1316 && codePoint <= 0x1317) + || codePoint == 0x131F + || codePoint == 0x1347 + || (codePoint >= 0x135B && codePoint <= 0x1360) + || (codePoint >= 0x137D && codePoint <= 0x139F) + || (codePoint >= 0x13F5 && codePoint <= 0x1400) + || (codePoint >= 0x1677 && codePoint <= 0x167F) + || (codePoint >= 0x169D && codePoint <= 0x169F) + || (codePoint >= 0x16F1 && codePoint <= 0x16FF) + || codePoint == 0x170D + || (codePoint >= 0x1715 && codePoint <= 0x171F) + || (codePoint >= 0x1737 && codePoint <= 0x173F) + || (codePoint >= 0x1754 && codePoint <= 0x175F) + || codePoint == 0x176D + || codePoint == 0x1771 + || (codePoint >= 0x1774 && codePoint <= 0x177F) + || (codePoint >= 0x17DD && codePoint <= 0x17DF) + || (codePoint >= 0x17EA && codePoint <= 0x17FF) + || codePoint == 0x180F + || (codePoint >= 0x181A && codePoint <= 0x181F) + || (codePoint >= 0x1878 && codePoint <= 0x187F) + || (codePoint >= 0x18AA && codePoint <= 0x1DFF) + || (codePoint >= 0x1E9C && codePoint <= 0x1E9F) + || (codePoint >= 0x1EFA && codePoint <= 0x1EFF) + || (codePoint >= 0x1F16 && codePoint <= 0x1F17) + || (codePoint >= 0x1F1E && codePoint <= 0x1F1F) + || (codePoint >= 0x1F46 && codePoint 
<= 0x1F47) + || (codePoint >= 0x1F4E && codePoint <= 0x1F4F) + || codePoint == 0x1F58 + || codePoint == 0x1F5A + || codePoint == 0x1F5C + || codePoint == 0x1F5E + || (codePoint >= 0x1F7E && codePoint <= 0x1F7F) + || codePoint == 0x1FB5 + || codePoint == 0x1FC5 + || (codePoint >= 0x1FD4 && codePoint <= 0x1FD5) + || codePoint == 0x1FDC + || (codePoint >= 0x1FF0 && codePoint <= 0x1FF1) + || codePoint == 0x1FF5 + || codePoint == 0x1FFF + || (codePoint >= 0x2053 && codePoint <= 0x2056) + || (codePoint >= 0x2058 && codePoint <= 0x205E) + || (codePoint >= 0x2064 && codePoint <= 0x2069) + || (codePoint >= 0x2072 && codePoint <= 0x2073) + || (codePoint >= 0x208F && codePoint <= 0x209F) + || (codePoint >= 0x20B2 && codePoint <= 0x20CF) + || (codePoint >= 0x20EB && codePoint <= 0x20FF) + || (codePoint >= 0x213B && codePoint <= 0x213C) + || (codePoint >= 0x214C && codePoint <= 0x2152) + || (codePoint >= 0x2184 && codePoint <= 0x218F) + || (codePoint >= 0x23CF && codePoint <= 0x23FF) + || (codePoint >= 0x2427 && codePoint <= 0x243F) + || (codePoint >= 0x244B && codePoint <= 0x245F) + || codePoint == 0x24FF + || (codePoint >= 0x2614 && codePoint <= 0x2615) + || codePoint == 0x2618 + || (codePoint >= 0x267E && codePoint <= 0x267F) + || (codePoint >= 0x268A && codePoint <= 0x2700) + || codePoint == 0x2705 + || (codePoint >= 0x270A && codePoint <= 0x270B) + || codePoint == 0x2728 + || codePoint == 0x274C + || codePoint == 0x274E + || (codePoint >= 0x2753 && codePoint <= 0x2755) + || codePoint == 0x2757 + || (codePoint >= 0x275F && codePoint <= 0x2760) + || (codePoint >= 0x2795 && codePoint <= 0x2797) + || codePoint == 0x27B0 + || (codePoint >= 0x27BF && codePoint <= 0x27CF) + || (codePoint >= 0x27EC && codePoint <= 0x27EF) + || (codePoint >= 0x2B00 && codePoint <= 0x2E7F) + || codePoint == 0x2E9A + || (codePoint >= 0x2EF4 && codePoint <= 0x2EFF) + || (codePoint >= 0x2FD6 && codePoint <= 0x2FEF) + || (codePoint >= 0x2FFC && codePoint <= 0x2FFF) + || codePoint == 0x3040 + || 
(codePoint >= 0x3097 && codePoint <= 0x3098) + || (codePoint >= 0x3100 && codePoint <= 0x3104) + || (codePoint >= 0x312D && codePoint <= 0x3130) + || codePoint == 0x318F + || (codePoint >= 0x31B8 && codePoint <= 0x31EF) + || (codePoint >= 0x321D && codePoint <= 0x321F) + || (codePoint >= 0x3244 && codePoint <= 0x3250) + || (codePoint >= 0x327C && codePoint <= 0x327E) + || (codePoint >= 0x32CC && codePoint <= 0x32CF) + || codePoint == 0x32FF + || (codePoint >= 0x3377 && codePoint <= 0x337A) + || (codePoint >= 0x33DE && codePoint <= 0x33DF) + || codePoint == 0x33FF + || (codePoint >= 0x4DB6 && codePoint <= 0x4DFF) + || (codePoint >= 0x9FA6 && codePoint <= 0x9FFF) + || (codePoint >= 0xA48D && codePoint <= 0xA48F) + || (codePoint >= 0xA4C7 && codePoint <= 0xABFF) + || (codePoint >= 0xD7A4 && codePoint <= 0xD7FF) + || (codePoint >= 0xFA2E && codePoint <= 0xFA2F) + || (codePoint >= 0xFA6B && codePoint <= 0xFAFF) + || (codePoint >= 0xFB07 && codePoint <= 0xFB12) + || (codePoint >= 0xFB18 && codePoint <= 0xFB1C) + || codePoint == 0xFB37 + || codePoint == 0xFB3D + || codePoint == 0xFB3F + || codePoint == 0xFB42 + || codePoint == 0xFB45 + || (codePoint >= 0xFBB2 && codePoint <= 0xFBD2) + || (codePoint >= 0xFD40 && codePoint <= 0xFD4F) + || (codePoint >= 0xFD90 && codePoint <= 0xFD91) + || (codePoint >= 0xFDC8 && codePoint <= 0xFDCF) + || (codePoint >= 0xFDFD && codePoint <= 0xFDFF) + || (codePoint >= 0xFE10 && codePoint <= 0xFE1F) + || (codePoint >= 0xFE24 && codePoint <= 0xFE2F) + || (codePoint >= 0xFE47 && codePoint <= 0xFE48) + || codePoint == 0xFE53 + || codePoint == 0xFE67 + || (codePoint >= 0xFE6C && codePoint <= 0xFE6F) + || codePoint == 0xFE75 + || (codePoint >= 0xFEFD && codePoint <= 0xFEFE) + || codePoint == 0xFF00 + || (codePoint >= 0xFFBF && codePoint <= 0xFFC1) + || (codePoint >= 0xFFC8 && codePoint <= 0xFFC9) + || (codePoint >= 0xFFD0 && codePoint <= 0xFFD1) + || (codePoint >= 0xFFD8 && codePoint <= 0xFFD9) + || (codePoint >= 0xFFDD && codePoint <= 0xFFDF) + || 
codePoint == 0xFFE7 + || (codePoint >= 0xFFEF && codePoint <= 0xFFF8) + || (codePoint >= 0x10000 && codePoint <= 0x102FF) + || codePoint == 0x1031F + || (codePoint >= 0x10324 && codePoint <= 0x1032F) + || (codePoint >= 0x1034B && codePoint <= 0x103FF) + || (codePoint >= 0x10426 && codePoint <= 0x10427) + || (codePoint >= 0x1044E && codePoint <= 0x1CFFF) + || (codePoint >= 0x1D0F6 && codePoint <= 0x1D0FF) + || (codePoint >= 0x1D127 && codePoint <= 0x1D129) + || (codePoint >= 0x1D1DE && codePoint <= 0x1D3FF) + || codePoint == 0x1D455 + || codePoint == 0x1D49D + || (codePoint >= 0x1D4A0 && codePoint <= 0x1D4A1) + || (codePoint >= 0x1D4A3 && codePoint <= 0x1D4A4) + || (codePoint >= 0x1D4A7 && codePoint <= 0x1D4A8) + || codePoint == 0x1D4AD + || codePoint == 0x1D4BA + || codePoint == 0x1D4BC + || codePoint == 0x1D4C1 + || codePoint == 0x1D4C4 + || codePoint == 0x1D506 + || (codePoint >= 0x1D50B && codePoint <= 0x1D50C) + || codePoint == 0x1D515 + || codePoint == 0x1D51D + || codePoint == 0x1D53A + || codePoint == 0x1D53F + || codePoint == 0x1D545 + || (codePoint >= 0x1D547 && codePoint <= 0x1D549) + || codePoint == 0x1D551 + || (codePoint >= 0x1D6A4 && codePoint <= 0x1D6A7) + || (codePoint >= 0x1D7CA && codePoint <= 0x1D7CD) + || (codePoint >= 0x1D800 && codePoint <= 0x1FFFD) + || (codePoint >= 0x2A6D7 && codePoint <= 0x2F7FF) + || (codePoint >= 0x2FA1E && codePoint <= 0x2FFFD) + || (codePoint >= 0x30000 && codePoint <= 0x3FFFD) + || (codePoint >= 0x40000 && codePoint <= 0x4FFFD) + || (codePoint >= 0x50000 && codePoint <= 0x5FFFD) + || (codePoint >= 0x60000 && codePoint <= 0x6FFFD) + || (codePoint >= 0x70000 && codePoint <= 0x7FFFD) + || (codePoint >= 0x80000 && codePoint <= 0x8FFFD) + || (codePoint >= 0x90000 && codePoint <= 0x9FFFD) + || (codePoint >= 0xA0000 && codePoint <= 0xAFFFD) + || (codePoint >= 0xB0000 && codePoint <= 0xBFFFD) + || (codePoint >= 0xC0000 && codePoint <= 0xCFFFD) + || (codePoint >= 0xD0000 && codePoint <= 0xDFFFD) + || codePoint == 0xE0000 + || 
(codePoint >= 0xE0002 && codePoint <= 0xE001F) + || (codePoint >= 0xE0080 && codePoint <= 0xEFFFD) + + ; + } + + /** + * Commonly mapped to nothing. + * + * @param codePoint the character (Unicode code point) to be tested. + * @return {@code true} if the given {@code codePoint} is "Commonly mapped to nothing". + * @see RFC 3454, Appendix B.1 + */ + public static boolean mapToNothing(int codePoint) { + return codePoint == 0x00AD + || codePoint == 0x034F + || codePoint == 0x1806 + || codePoint == 0x180B + || codePoint == 0x180C + || codePoint == 0x180D + || codePoint == 0x200B + || codePoint == 0x200C + || codePoint == 0x200D + || codePoint == 0x2060 + || codePoint == 0xFE00 + || codePoint == 0xFE01 + || codePoint == 0xFE02 + || codePoint == 0xFE03 + || codePoint == 0xFE04 + || codePoint == 0xFE05 + || codePoint == 0xFE06 + || codePoint == 0xFE07 + || codePoint == 0xFE08 + || codePoint == 0xFE09 + || codePoint == 0xFE0A + || codePoint == 0xFE0B + || codePoint == 0xFE0C + || codePoint == 0xFE0D + || codePoint == 0xFE0E + || codePoint == 0xFE0F + || codePoint == 0xFEFF + + ; + } + + /** + * Mapping for case-folding used with NFKC. + * + * @param codePoint the character (Unicode code point) to be mapped. + * @return Case-folding used with NFKC for the given {@code codePoint}. 
+ * @see RFC 3454, Appendix B.2 + */ + public static int[] mapWithNfkc(int codePoint) { + switch (codePoint) { + case 0x00C5: + return new int[] {0x00E5}; + case 0x00C6: + return new int[] {0x00E6}; + case 0x00C3: + return new int[] {0x00E3}; + case 0x00C4: + return new int[] {0x00E4}; + case 0x00C9: + return new int[] {0x00E9}; + case 0x00C7: + return new int[] {0x00E7}; + case 0x00C8: + return new int[] {0x00E8}; + case 0x1D6D3: + return new int[] {0x03C3}; + case 0x00C1: + return new int[] {0x00E1}; + case 0x00C2: + return new int[] {0x00E2}; + case 0x00C0: + return new int[] {0x00E0}; + case 0x00D6: + return new int[] {0x00F6}; + case 0x00D4: + return new int[] {0x00F4}; + case 0x00D5: + return new int[] {0x00F5}; + case 0x00D8: + return new int[] {0x00F8}; + case 0x00D9: + return new int[] {0x00F9}; + case 0x1D4A2: + return new int[] {0x0067}; + case 0x1D6E4: + return new int[] {0x03B3}; + case 0x1D6E5: + return new int[] {0x03B4}; + case 0x1D6E2: + return new int[] {0x03B1}; + case 0x1D6E3: + return new int[] {0x03B2}; + case 0x00D2: + return new int[] {0x00F2}; + case 0x1D4A6: + return new int[] {0x006B}; + case 0x1D6E8: + return new int[] {0x03B7}; + case 0x00D3: + return new int[] {0x00F3}; + case 0x1D6E9: + return new int[] {0x03B8}; + case 0x00D0: + return new int[] {0x00F0}; + case 0x1D6E6: + return new int[] {0x03B5}; + case 0x00D1: + return new int[] {0x00F1}; + case 0x1D4A5: + return new int[] {0x006A}; + case 0x1D6E7: + return new int[] {0x03B6}; + case 0x00CE: + return new int[] {0x00EE}; + case 0x00CF: + return new int[] {0x00EF}; + case 0x00CC: + return new int[] {0x00EC}; + case 0x00CD: + return new int[] {0x00ED}; + case 0x1D4A9: + return new int[] {0x006E}; + case 0x1D6EA: + return new int[] {0x03B9}; + case 0x1D4AB: + return new int[] {0x0070}; + case 0x1D6ED: + return new int[] {0x03BC}; + case 0x1D4AC: + return new int[] {0x0071}; + case 0x1D6EE: + return new int[] {0x03BD}; + case 0x1D6EB: + return new int[] {0x03BA}; + case 0x1D4AA: + 
return new int[] {0x006F}; + case 0x1D6EC: + return new int[] {0x03BB}; + case 0x00CA: + return new int[] {0x00EA}; + case 0x1D4AF: + return new int[] {0x0074}; + case 0x00CB: + return new int[] {0x00EB}; + case 0x1D6EF: + return new int[] {0x03BE}; + case 0x1D4AE: + return new int[] {0x0073}; + case 0x33B5: + return new int[] {0x006E, 0x0076}; + case 0x33B4: + return new int[] {0x0070, 0x0076}; + case 0x1D6AF: + return new int[] {0x03B8}; + case 0x33B9: + return new int[] {0x006D, 0x0076}; + case 0x33B8: + return new int[] {0x006B, 0x0076}; + case 0x33B7: + return new int[] {0x006D, 0x0076}; + case 0x33B6: + return new int[] {0x03BC, 0x0076}; + case 0x1D6B1: + return new int[] {0x03BA}; + case 0x1D6B2: + return new int[] {0x03BB}; + case 0x1D6B0: + return new int[] {0x03B9}; + case 0x1D6B5: + return new int[] {0x03BE}; + case 0x1D6B6: + return new int[] {0x03BF}; + case 0x1D6B3: + return new int[] {0x03BC}; + case 0x1D6B4: + return new int[] {0x03BD}; + case 0x00DF: + return new int[] {0x0073, 0x0073}; + case 0x1D6B9: + return new int[] {0x03B8}; + case 0x33AC: + return new int[] {0x0067, 0x0070, 0x0061}; + case 0x00DD: + return new int[] {0x00FD}; + case 0x33AB: + return new int[] {0x006D, 0x0070, 0x0061}; + case 0x1D6B7: + return new int[] {0x03C0}; + case 0x00DE: + return new int[] {0x00FE}; + case 0x33AA: + return new int[] {0x006B, 0x0070, 0x0061}; + case 0x1D6B8: + return new int[] {0x03C1}; + case 0x1D6BA: + return new int[] {0x03C3}; + case 0x1D6BB: + return new int[] {0x03C4}; + case 0x33A9: + return new int[] {0x0070, 0x0061}; + case 0x00DB: + return new int[] {0x00FB}; + case 0x1D6BE: + return new int[] {0x03C7}; + case 0x00DC: + return new int[] {0x00FC}; + case 0x1D6BF: + return new int[] {0x03C8}; + case 0x1D6BC: + return new int[] {0x03C5}; + case 0x00DA: + return new int[] {0x00FA}; + case 0x1D6BD: + return new int[] {0x03C6}; + case 0x33C6: + return new int[] {0x0063, 0x2215, 0x006B, 0x0067}; + case 0x33C3: + return new int[] {0x0062, 0x0071}; + 
case 0x33C9: + return new int[] {0x0067, 0x0079}; + case 0x33C8: + return new int[] {0x0064, 0x0062}; + case 0x33C7: + return new int[] {0x0063, 0x006F, 0x002E}; + case 0x1D6C0: + return new int[] {0x03C9}; + case 0x33C1: + return new int[] {0x006D, 0x03C9}; + case 0x33C0: + return new int[] {0x006B, 0x03C9}; + case 0x33BE: + return new int[] {0x006B, 0x0077}; + case 0x33BD: + return new int[] {0x006D, 0x0077}; + case 0x33BC: + return new int[] {0x03BC, 0x0077}; + case 0x33BB: + return new int[] {0x006E, 0x0077}; + case 0x33BF: + return new int[] {0x006D, 0x0077}; + case 0x33BA: + return new int[] {0x0070, 0x0077}; + case 0x24B7: + return new int[] {0x24D1}; + case 0x24B6: + return new int[] {0x24D0}; + case 0x1D6A8: + return new int[] {0x03B1}; + case 0x1D6A9: + return new int[] {0x03B2}; + case 0x1D6AA: + return new int[] {0x03B3}; + case 0x1D6AD: + return new int[] {0x03B6}; + case 0x1D6AE: + return new int[] {0x03B7}; + case 0x1D6AB: + return new int[] {0x03B4}; + case 0x1D6AC: + return new int[] {0x03B5}; + case 0x24C4: + return new int[] {0x24DE}; + case 0x24C3: + return new int[] {0x24DD}; + case 0x1D49C: + return new int[] {0x0061}; + case 0x24C2: + return new int[] {0x24DC}; + case 0x24C1: + return new int[] {0x24DB}; + case 0x24C8: + return new int[] {0x24E2}; + case 0x1D49F: + return new int[] {0x0064}; + case 0x24C7: + return new int[] {0x24E1}; + case 0x24C6: + return new int[] {0x24E0}; + case 0x24C5: + return new int[] {0x24DF}; + case 0x1D49E: + return new int[] {0x0063}; + case 0x24C0: + return new int[] {0x24DA}; + case 0x24BC: + return new int[] {0x24D6}; + case 0x24BB: + return new int[] {0x24D5}; + case 0x24BA: + return new int[] {0x24D4}; + case 0x24BF: + return new int[] {0x24D9}; + case 0x24BE: + return new int[] {0x24D8}; + case 0x24BD: + return new int[] {0x24D7}; + case 0x24B9: + return new int[] {0x24D3}; + case 0x24B8: + return new int[] {0x24D2}; + case 0x00B5: + return new int[] {0x03BC}; + case 0x24CD: + return new int[] {0x24E7}; + 
case 0x24CC: + return new int[] {0x24E6}; + case 0x24CB: + return new int[] {0x24E5}; + case 0x24CA: + return new int[] {0x24E4}; + case 0x24CF: + return new int[] {0x24E9}; + case 0x24CE: + return new int[] {0x24E8}; + case 0x24C9: + return new int[] {0x24E3}; + case 0x054A: + return new int[] {0x057A}; + case 0x054B: + return new int[] {0x057B}; + case 0x1E10: + return new int[] {0x1E11}; + case 0x054E: + return new int[] {0x057E}; + case 0x054F: + return new int[] {0x057F}; + case 0x1E14: + return new int[] {0x1E15}; + case 0x054C: + return new int[] {0x057C}; + case 0x054D: + return new int[] {0x057D}; + case 0x1E12: + return new int[] {0x1E13}; + case 0x0549: + return new int[] {0x0579}; + case 0x0547: + return new int[] {0x0577}; + case 0x0548: + return new int[] {0x0578}; + case 0x1E0E: + return new int[] {0x1E0F}; + case 0x0541: + return new int[] {0x0571}; + case 0x0542: + return new int[] {0x0572}; + case 0x0540: + return new int[] {0x0570}; + case 0x0545: + return new int[] {0x0575}; + case 0x0546: + return new int[] {0x0576}; + case 0x1E0C: + return new int[] {0x1E0D}; + case 0x0543: + return new int[] {0x0573}; + case 0x0544: + return new int[] {0x0574}; + case 0x1E0A: + return new int[] {0x1E0B}; + case 0x1E06: + return new int[] {0x1E07}; + case 0x053F: + return new int[] {0x056F}; + case 0x1E04: + return new int[] {0x1E05}; + case 0x1E08: + return new int[] {0x1E09}; + case 0x1E20: + return new int[] {0x1E21}; + case 0x1E24: + return new int[] {0x1E25}; + case 0x1E22: + return new int[] {0x1E23}; + case 0x1E1E: + return new int[] {0x1E1F}; + case 0x0552: + return new int[] {0x0582}; + case 0x0553: + return new int[] {0x0583}; + case 0x0550: + return new int[] {0x0580}; + case 0x0551: + return new int[] {0x0581}; + case 0x0556: + return new int[] {0x0586}; + case 0x1E1C: + return new int[] {0x1E1D}; + case 0x0554: + return new int[] {0x0584}; + case 0x1E1A: + return new int[] {0x1E1B}; + case 0x0555: + return new int[] {0x0585}; + case 0x1E18: + 
return new int[] {0x1E19}; + case 0x1E16: + return new int[] {0x1E17}; + case 0x1E32: + return new int[] {0x1E33}; + case 0x1E30: + return new int[] {0x1E31}; + case 0x1E36: + return new int[] {0x1E37}; + case 0x1E34: + return new int[] {0x1E35}; + case 0x1E2A: + return new int[] {0x1E2B}; + case 0x1E2E: + return new int[] {0x1E2F}; + case 0x1E2C: + return new int[] {0x1E2D}; + case 0x1E28: + return new int[] {0x1E29}; + case 0x1E26: + return new int[] {0x1E27}; + case 0x1E42: + return new int[] {0x1E43}; + case 0x1E40: + return new int[] {0x1E41}; + case 0x1E46: + return new int[] {0x1E47}; + case 0x1E44: + return new int[] {0x1E45}; + case 0x1E3A: + return new int[] {0x1E3B}; + case 0x1E3E: + return new int[] {0x1E3F}; + case 0x1E3C: + return new int[] {0x1E3D}; + case 0x1E38: + return new int[] {0x1E39}; + case 0x33D7: + return new int[] {0x0070, 0x0068}; + case 0x050A: + return new int[] {0x050B}; + case 0x1D4D1: + return new int[] {0x0062}; + case 0x1D4D2: + return new int[] {0x0063}; + case 0x20A8: + return new int[] {0x0072, 0x0073}; + case 0x33D9: + return new int[] {0x0070, 0x0070, 0x006D}; + case 0x1D4D0: + return new int[] {0x0061}; + case 0x1D4D5: + return new int[] {0x0066}; + case 0x0506: + return new int[] {0x0507}; + case 0x1D4D6: + return new int[] {0x0067}; + case 0x1D4D3: + return new int[] {0x0064}; + case 0x0504: + return new int[] {0x0505}; + case 0x1D4D4: + return new int[] {0x0065}; + case 0x1D4D9: + return new int[] {0x006A}; + case 0x1D4D7: + return new int[] {0x0068}; + case 0x0508: + return new int[] {0x0509}; + case 0x1D4D8: + return new int[] {0x0069}; + case 0x33CE: + return new int[] {0x006B, 0x006D}; + case 0x33CD: + return new int[] {0x006B, 0x006B}; + case 0x1D4DA: + return new int[] {0x006B}; + case 0x0502: + return new int[] {0x0503}; + case 0x1D4DB: + return new int[] {0x006C}; + case 0x0500: + return new int[] {0x0501}; + case 0x1D4DE: + return new int[] {0x006F}; + case 0x1D4DF: + return new int[] {0x0070}; + case 0x1D4DC: + 
return new int[] {0x006D}; + case 0x1D4DD: + return new int[] {0x006E}; + case 0x33CB: + return new int[] {0x0068, 0x0070}; + case 0x1D4E2: + return new int[] {0x0073}; + case 0x1D4E3: + return new int[] {0x0074}; + case 0x1D4E0: + return new int[] {0x0071}; + case 0x1D4E1: + return new int[] {0x0072}; + case 0x1D4E6: + return new int[] {0x0077}; + case 0x1D4E7: + return new int[] {0x0078}; + case 0x1D4E4: + return new int[] {0x0075}; + case 0x1D4E5: + return new int[] {0x0076}; + case 0x1D4E8: + return new int[] {0x0079}; + case 0x1D4E9: + return new int[] {0x007A}; + case 0x33DD: + return new int[] {0x0077, 0x0062}; + case 0x050E: + return new int[] {0x050F}; + case 0x050C: + return new int[] {0x050D}; + case 0x33DC: + return new int[] {0x0073, 0x0076}; + case 0x33DA: + return new int[] {0x0070, 0x0072}; + case 0xFF30: + return new int[] {0xFF50}; + case 0xFF31: + return new int[] {0xFF51}; + case 0xFF32: + return new int[] {0xFF52}; + case 0x1D6F1: + return new int[] {0x03C0}; + case 0x1D4B0: + return new int[] {0x0075}; + case 0x1D6F2: + return new int[] {0x03C1}; + case 0x1D6F0: + return new int[] {0x03BF}; + case 0xFF37: + return new int[] {0xFF57}; + case 0x1D4B3: + return new int[] {0x0078}; + case 0x1D6F5: + return new int[] {0x03C4}; + case 0xFF38: + return new int[] {0xFF58}; + case 0x1D4B4: + return new int[] {0x0079}; + case 0x1D6F6: + return new int[] {0x03C5}; + case 0xFF39: + return new int[] {0xFF59}; + case 0x1D4B1: + return new int[] {0x0076}; + case 0x1D6F3: + return new int[] {0x03B8}; + case 0x1D4B2: + return new int[] {0x0077}; + case 0x1D6F4: + return new int[] {0x03C3}; + case 0xFF33: + return new int[] {0xFF53}; + case 0x1D6F9: + return new int[] {0x03C8}; + case 0xFF34: + return new int[] {0xFF54}; + case 0xFF35: + return new int[] {0xFF55}; + case 0x1D4B5: + return new int[] {0x007A}; + case 0x1D6F7: + return new int[] {0x03C6}; + case 0xFF36: + return new int[] {0xFF56}; + case 0x1D6F8: + return new int[] {0x03C7}; + case 0xFF3A: + 
return new int[] {0xFF5A}; + case 0x1D6FA: + return new int[] {0x03C9}; + case 0x053A: + return new int[] {0x056A}; + case 0xFF21: + return new int[] {0xFF41}; + case 0x053D: + return new int[] {0x056D}; + case 0x1E02: + return new int[] {0x1E03}; + case 0x053E: + return new int[] {0x056E}; + case 0x053B: + return new int[] {0x056B}; + case 0x1E00: + return new int[] {0x1E01}; + case 0x053C: + return new int[] {0x056C}; + case 0x0538: + return new int[] {0x0568}; + case 0xFF26: + return new int[] {0xFF46}; + case 0x0539: + return new int[] {0x0569}; + case 0xFF27: + return new int[] {0xFF47}; + case 0x0536: + return new int[] {0x0566}; + case 0xFF28: + return new int[] {0xFF48}; + case 0x0537: + return new int[] {0x0567}; + case 0xFF29: + return new int[] {0xFF49}; + case 0xFF22: + return new int[] {0xFF42}; + case 0xFF23: + return new int[] {0xFF43}; + case 0xFF24: + return new int[] {0xFF44}; + case 0xFF25: + return new int[] {0xFF45}; + case 0x0531: + return new int[] {0x0561}; + case 0xFF2A: + return new int[] {0xFF4A}; + case 0x0534: + return new int[] {0x0564}; + case 0x0535: + return new int[] {0x0565}; + case 0x0532: + return new int[] {0x0562}; + case 0x0533: + return new int[] {0x0563}; + case 0xFF2F: + return new int[] {0xFF4F}; + case 0xFF2B: + return new int[] {0xFF4B}; + case 0xFF2C: + return new int[] {0xFF4C}; + case 0xFF2D: + return new int[] {0xFF4D}; + case 0xFF2E: + return new int[] {0xFF4E}; + case 0x014E: + return new int[] {0x014F}; + case 0x1E97: + return new int[] {0x0074, 0x0308}; + case 0x1E98: + return new int[] {0x0077, 0x030A}; + case 0x014C: + return new int[] {0x014D}; + case 0x038E: + return new int[] {0x03CD}; + case 0x038F: + return new int[] {0x03CE}; + case 0x1E96: + return new int[] {0x0068, 0x0331}; + case 0x1E99: + return new int[] {0x0079, 0x030A}; + case 0x1E90: + return new int[] {0x1E91}; + case 0x1D63C: + return new int[] {0x0061}; + case 0x014A: + return new int[] {0x014B}; + case 0x038C: + return new int[] {0x03CC}; + 
case 0x1D63F: + return new int[] {0x0064}; + case 0x1E94: + return new int[] {0x1E95}; + case 0x038A: + return new int[] {0x03AF}; + case 0x1D63D: + return new int[] {0x0062}; + case 0x1E92: + return new int[] {0x1E93}; + case 0x1D63E: + return new int[] {0x0063}; + case 0x0145: + return new int[] {0x0146}; + case 0x0388: + return new int[] {0x03AD}; + case 0x0143: + return new int[] {0x0144}; + case 0x0386: + return new int[] {0x03AC}; + case 0x1E8E: + return new int[] {0x1E8F}; + case 0x0149: + return new int[] {0x02BC, 0x006E}; + case 0x1D640: + return new int[] {0x0065}; + case 0x0147: + return new int[] {0x0148}; + case 0x0389: + return new int[] {0x03AE}; + case 0x1D401: + return new int[] {0x0062}; + case 0x1D643: + return new int[] {0x0068}; + case 0x1D402: + return new int[] {0x0063}; + case 0x1D644: + return new int[] {0x0069}; + case 0x1D641: + return new int[] {0x0066}; + case 0x1D400: + return new int[] {0x0061}; + case 0x1D642: + return new int[] {0x0067}; + case 0x0141: + return new int[] {0x0142}; + case 0x1D405: + return new int[] {0x0066}; + case 0x1D647: + return new int[] {0x006C}; + case 0x1E8C: + return new int[] {0x1E8D}; + case 0x1D406: + return new int[] {0x0067}; + case 0x1D648: + return new int[] {0x006D}; + case 0x1D403: + return new int[] {0x0064}; + case 0x1D645: + return new int[] {0x006A}; + case 0x1E8A: + return new int[] {0x1E8B}; + case 0x1D404: + return new int[] {0x0065}; + case 0x1D646: + return new int[] {0x006B}; + case 0x1D409: + return new int[] {0x006A}; + case 0x039F: + return new int[] {0x03BF}; + case 0x1D407: + return new int[] {0x0068}; + case 0x1D649: + return new int[] {0x006E}; + case 0x015E: + return new int[] {0x015F}; + case 0x1D408: + return new int[] {0x0069}; + case 0x1D40A: + return new int[] {0x006B}; + case 0x1D64C: + return new int[] {0x0071}; + case 0x039A: + return new int[] {0x03BA}; + case 0x1D40B: + return new int[] {0x006C}; + case 0x1D64D: + return new int[] {0x0072}; + case 0x1D64A: + return new 
int[] {0x006F}; + case 0x1D64B: + return new int[] {0x0070}; + case 0x039D: + return new int[] {0x03BD}; + case 0x1D40E: + return new int[] {0x006F}; + case 0x015C: + return new int[] {0x015D}; + case 0x039E: + return new int[] {0x03BE}; + case 0x1D40F: + return new int[] {0x0070}; + case 0x039B: + return new int[] {0x03BB}; + case 0x1D40C: + return new int[] {0x006D}; + case 0x1D64E: + return new int[] {0x0073}; + case 0x015A: + return new int[] {0x015B}; + case 0x039C: + return new int[] {0x03BC}; + case 0x1D40D: + return new int[] {0x006E}; + case 0x1D64F: + return new int[] {0x0074}; + case 0x0156: + return new int[] {0x0157}; + case 0x0398: + return new int[] {0x03B8}; + case 0x0399: + return new int[] {0x03B9}; + case 0x0154: + return new int[] {0x0155}; + case 0x0396: + return new int[] {0x03B6}; + case 0x0397: + return new int[] {0x03B7}; + case 0x1D650: + return new int[] {0x0075}; + case 0x1D651: + return new int[] {0x0076}; + case 0x0158: + return new int[] {0x0159}; + case 0x0390: + return new int[] {0x03B9, 0x0308, 0x0301}; + case 0x1D412: + return new int[] {0x0073}; + case 0x1D654: + return new int[] {0x0079}; + case 0x0391: + return new int[] {0x03B1}; + case 0x1D413: + return new int[] {0x0074}; + case 0x1D655: + return new int[] {0x007A}; + case 0x1D410: + return new int[] {0x0071}; + case 0x1D652: + return new int[] {0x0077}; + case 0x1D411: + return new int[] {0x0072}; + case 0x1D653: + return new int[] {0x0078}; + case 0x0152: + return new int[] {0x0153}; + case 0x0394: + return new int[] {0x03B4}; + case 0x1D416: + return new int[] {0x0077}; + case 0x0395: + return new int[] {0x03B5}; + case 0x1D417: + return new int[] {0x0078}; + case 0x0150: + return new int[] {0x0151}; + case 0x0392: + return new int[] {0x03B2}; + case 0x1E9A: + return new int[] {0x0061, 0x02BE}; + case 0x1D414: + return new int[] {0x0075}; + case 0x0393: + return new int[] {0x03B3}; + case 0x1E9B: + return new int[] {0x1E61}; + case 0x1D415: + return new int[] {0x0076}; + 
case 0x210D: + return new int[] {0x0068}; + case 0x1D618: + return new int[] {0x0071}; + case 0x210C: + return new int[] {0x0068}; + case 0x1D619: + return new int[] {0x0072}; + case 0x016E: + return new int[] {0x016F}; + case 0x210B: + return new int[] {0x0068}; + case 0x1D616: + return new int[] {0x006F}; + case 0x1D617: + return new int[] {0x0070}; + case 0x1D61A: + return new int[] {0x0073}; + case 0x2109: + return new int[] {0x00B0, 0x0066}; + case 0x016C: + return new int[] {0x016D}; + case 0x1D61D: + return new int[] {0x0076}; + case 0x1D61E: + return new int[] {0x0077}; + case 0x016A: + return new int[] {0x016B}; + case 0x1D61B: + return new int[] {0x0074}; + case 0x1D61C: + return new int[] {0x0075}; + case 0x0168: + return new int[] {0x0169}; + case 0x2103: + return new int[] {0x00B0, 0x0063}; + case 0x2102: + return new int[] {0x0063}; + case 0x1D61F: + return new int[] {0x0078}; + case 0x0166: + return new int[] {0x0167}; + case 0x2107: + return new int[] {0x025B}; + case 0x1D621: + return new int[] {0x007A}; + case 0x0160: + return new int[] {0x0161}; + case 0x1D620: + return new int[] {0x0079}; + case 0x0164: + return new int[] {0x0165}; + case 0x0162: + return new int[] {0x0163}; + case 0x211D: + return new int[] {0x0072}; + case 0x017F: + return new int[] {0x0073}; + case 0x211C: + return new int[] {0x0072}; + case 0x211B: + return new int[] {0x0072}; + case 0x017D: + return new int[] {0x017E}; + case 0x211A: + return new int[] {0x0071}; + case 0x017B: + return new int[] {0x017C}; + case 0x0178: + return new int[] {0x00FF}; + case 0x2115: + return new int[] {0x006E}; + case 0x0179: + return new int[] {0x017A}; + case 0x0176: + return new int[] {0x0177}; + case 0x2112: + return new int[] {0x006C}; + case 0x2119: + return new int[] {0x0070}; + case 0x2116: + return new int[] {0x006E, 0x006F}; + case 0x0170: + return new int[] {0x0171}; + case 0x0174: + return new int[] {0x0175}; + case 0x2111: + return new int[] {0x0069}; + case 0x2110: + return new 
int[] {0x0069}; + case 0x0172: + return new int[] {0x0173}; + case 0x010A: + return new int[] {0x010B}; + case 0x1E54: + return new int[] {0x1E55}; + case 0xFB13: + return new int[] {0x0574, 0x0576}; + case 0x1E52: + return new int[] {0x1E53}; + case 0xFB14: + return new int[] {0x0574, 0x0565}; + case 0x010E: + return new int[] {0x010F}; + case 0x1E58: + return new int[] {0x1E59}; + case 0x010C: + return new int[] {0x010D}; + case 0x1E56: + return new int[] {0x1E57}; + case 0x0108: + return new int[] {0x0109}; + case 0xFB15: + return new int[] {0x0574, 0x056B}; + case 0x1E50: + return new int[] {0x1E51}; + case 0xFB16: + return new int[] {0x057E, 0x0576}; + case 0xFB17: + return new int[] {0x0574, 0x056D}; + case 0x0102: + return new int[] {0x0103}; + case 0x1E4C: + return new int[] {0x1E4D}; + case 0x0100: + return new int[] {0x0101}; + case 0x1E4A: + return new int[] {0x1E4B}; + case 0x0106: + return new int[] {0x0107}; + case 0x0345: + return new int[] {0x03B9}; + case 0x0587: + return new int[] {0x0565, 0x0582}; + case 0x0104: + return new int[] {0x0105}; + case 0x1E4E: + return new int[] {0x1E4F}; + case 0x1E48: + return new int[] {0x1E49}; + case 0x1E64: + return new int[] {0x1E65}; + case 0xFB00: + return new int[] {0x0066, 0x0066}; + case 0x011C: + return new int[] {0x011D}; + case 0xFB01: + return new int[] {0x0066, 0x0069}; + case 0x1D608: + return new int[] {0x0061}; + case 0x1E62: + return new int[] {0x1E63}; + case 0xFB02: + return new int[] {0x0066, 0x006C}; + case 0x011A: + return new int[] {0x011B}; + case 0xFB03: + return new int[] {0x0066, 0x0066, 0x0069}; + case 0x1E68: + return new int[] {0x1E69}; + case 0x1E66: + return new int[] {0x1E67}; + case 0x1D609: + return new int[] {0x0062}; + case 0x011E: + return new int[] {0x011F}; + case 0x0118: + return new int[] {0x0119}; + case 0x1E60: + return new int[] {0x1E61}; + case 0xFB04: + return new int[] {0x0066, 0x0066, 0x006C}; + case 0x1D60C: + return new int[] {0x0065}; + case 0xFB05: + return new 
int[] {0x0073, 0x0074}; + case 0x1D60D: + return new int[] {0x0066}; + case 0xFB06: + return new int[] {0x0073, 0x0074}; + case 0x1D60A: + return new int[] {0x0063}; + case 0x1D60B: + return new int[] {0x0064}; + case 0x0112: + return new int[] {0x0113}; + case 0x1E5C: + return new int[] {0x1E5D}; + case 0x0110: + return new int[] {0x0111}; + case 0x1E5A: + return new int[] {0x1E5B}; + case 0x1D60E: + return new int[] {0x0067}; + case 0x1D60F: + return new int[] {0x0068}; + case 0x0116: + return new int[] {0x0117}; + case 0x0114: + return new int[] {0x0115}; + case 0x1E5E: + return new int[] {0x1E5F}; + case 0x1D610: + return new int[] {0x0069}; + case 0x1D611: + return new int[] {0x006A}; + case 0x1D614: + return new int[] {0x006D}; + case 0x1D615: + return new int[] {0x006E}; + case 0x1D612: + return new int[] {0x006B}; + case 0x1D613: + return new int[] {0x006C}; + case 0x012C: + return new int[] {0x012D}; + case 0x1E76: + return new int[] {0x1E77}; + case 0x012A: + return new int[] {0x012B}; + case 0x1E74: + return new int[] {0x1E75}; + case 0x012E: + return new int[] {0x012F}; + case 0x1E78: + return new int[] {0x1E79}; + case 0x1E72: + return new int[] {0x1E73}; + case 0x1E70: + return new int[] {0x1E71}; + case 0x0124: + return new int[] {0x0125}; + case 0x1E6E: + return new int[] {0x1E6F}; + case 0x0122: + return new int[] {0x0123}; + case 0x1E6C: + return new int[] {0x1E6D}; + case 0x0128: + return new int[] {0x0129}; + case 0x0126: + return new int[] {0x0127}; + case 0x0120: + return new int[] {0x0121}; + case 0x1E6A: + return new int[] {0x1E6B}; + case 0x013D: + return new int[] {0x013E}; + case 0x1E86: + return new int[] {0x1E87}; + case 0x013B: + return new int[] {0x013C}; + case 0x1E84: + return new int[] {0x1E85}; + case 0x013F: + return new int[] {0x0140}; + case 0x1E88: + return new int[] {0x1E89}; + case 0x1E82: + return new int[] {0x1E83}; + case 0x1E80: + return new int[] {0x1E81}; + case 0x037A: + return new int[] {0x0020, 0x03B9}; + case 
0x0134: + return new int[] {0x0135}; + case 0x1E7E: + return new int[] {0x1E7F}; + case 0x0132: + return new int[] {0x0133}; + case 0x1E7C: + return new int[] {0x1E7D}; + case 0x0139: + return new int[] {0x013A}; + case 0x0136: + return new int[] {0x0137}; + case 0x0130: + return new int[] {0x0069, 0x0307}; + case 0x1E7A: + return new int[] {0x1E7B}; + case 0x03A0: + return new int[] {0x03C0}; + case 0x03A1: + return new int[] {0x03C1}; + case 0x1D47A: + return new int[] {0x0073}; + case 0x03A4: + return new int[] {0x03C4}; + case 0x1EAC: + return new int[] {0x1EAD}; + case 0x1D47D: + return new int[] {0x0076}; + case 0x03A5: + return new int[] {0x03C5}; + case 0x1D47E: + return new int[] {0x0077}; + case 0x1EAA: + return new int[] {0x1EAB}; + case 0x1D47B: + return new int[] {0x0074}; + case 0x03A3: + return new int[] {0x03C3}; + case 0x1D47C: + return new int[] {0x0075}; + case 0x216B: + return new int[] {0x217B}; + case 0x1EA8: + return new int[] {0x1EA9}; + case 0x216A: + return new int[] {0x217A}; + case 0x10425: + return new int[] {0x1044D}; + case 0x1D47F: + return new int[] {0x0078}; + case 0x1EA6: + return new int[] {0x1EA7}; + case 0x216F: + return new int[] {0x217F}; + case 0x216E: + return new int[] {0x217E}; + case 0x216D: + return new int[] {0x217D}; + case 0x216C: + return new int[] {0x217C}; + case 0x1D481: + return new int[] {0x007A}; + case 0x1EA0: + return new int[] {0x1EA1}; + case 0x2169: + return new int[] {0x2179}; + case 0x2168: + return new int[] {0x2178}; + case 0x10420: + return new int[] {0x10448}; + case 0x2167: + return new int[] {0x2177}; + case 0x1D480: + return new int[] {0x0079}; + case 0x10422: + return new int[] {0x1044A}; + case 0x1EA4: + return new int[] {0x1EA5}; + case 0x10421: + return new int[] {0x10449}; + case 0x10424: + return new int[] {0x1044C}; + case 0x1EA2: + return new int[] {0x1EA3}; + case 0x10423: + return new int[] {0x1044B}; + case 0x2162: + return new int[] {0x2172}; + case 0x1041E: + return new int[] 
{0x10446}; + case 0x2161: + return new int[] {0x2171}; + case 0x1041D: + return new int[] {0x10445}; + case 0x2160: + return new int[] {0x2170}; + case 0x1041F: + return new int[] {0x10447}; + case 0x2166: + return new int[] {0x2176}; + case 0x2165: + return new int[] {0x2175}; + case 0x2164: + return new int[] {0x2174}; + case 0x2163: + return new int[] {0x2173}; + case 0x1EBA: + return new int[] {0x1EBB}; + case 0x03B0: + return new int[] {0x03C5, 0x0308, 0x0301}; + case 0x1041A: + return new int[] {0x10442}; + case 0x1EBE: + return new int[] {0x1EBF}; + case 0x1041C: + return new int[] {0x10444}; + case 0x1EBC: + return new int[] {0x1EBD}; + case 0x1041B: + return new int[] {0x10443}; + case 0x1EB8: + return new int[] {0x1EB9}; + case 0x10415: + return new int[] {0x1043D}; + case 0x10414: + return new int[] {0x1043C}; + case 0x1EB6: + return new int[] {0x1EB7}; + case 0x10417: + return new int[] {0x1043F}; + case 0x10416: + return new int[] {0x1043E}; + case 0x10419: + return new int[] {0x10441}; + case 0x10418: + return new int[] {0x10440}; + case 0x1EB0: + return new int[] {0x1EB1}; + case 0x03AA: + return new int[] {0x03CA}; + case 0x1EB4: + return new int[] {0x1EB5}; + case 0x10411: + return new int[] {0x10439}; + case 0x10410: + return new int[] {0x10438}; + case 0x03AB: + return new int[] {0x03CB}; + case 0x1EB2: + return new int[] {0x1EB3}; + case 0x10413: + return new int[] {0x1043B}; + case 0x10412: + return new int[] {0x1043A}; + case 0x03A8: + return new int[] {0x03C8}; + case 0x1040D: + return new int[] {0x10435}; + case 0x03A9: + return new int[] {0x03C9}; + case 0x1040C: + return new int[] {0x10434}; + case 0x03A6: + return new int[] {0x03C6}; + case 0x1EAE: + return new int[] {0x1EAF}; + case 0x1040F: + return new int[] {0x10437}; + case 0x03A7: + return new int[] {0x03C7}; + case 0x1040E: + return new int[] {0x10436}; + case 0x03C2: + return new int[] {0x03C3}; + case 0x1ECA: + return new int[] {0x1ECB}; + case 0x1ECE: + return new int[] 
{0x1ECF}; + case 0x1ECC: + return new int[] {0x1ECD}; + case 0x1EC8: + return new int[] {0x1EC9}; + case 0x1EC2: + return new int[] {0x1EC3}; + case 0x1EC0: + return new int[] {0x1EC1}; + case 0x1EC6: + return new int[] {0x1EC7}; + case 0x1EC4: + return new int[] {0x1EC5}; + case 0x1D468: + return new int[] {0x0061}; + case 0x1D469: + return new int[] {0x0062}; + case 0x03D3: + return new int[] {0x03CD}; + case 0x03D4: + return new int[] {0x03CB}; + case 0x1EDC: + return new int[] {0x1EDD}; + case 0x03D1: + return new int[] {0x03B8}; + case 0x03D2: + return new int[] {0x03C5}; + case 0x1EDA: + return new int[] {0x1EDB}; + case 0x1D46C: + return new int[] {0x0065}; + case 0x03D8: + return new int[] {0x03D9}; + case 0x1D46D: + return new int[] {0x0066}; + case 0x03D5: + return new int[] {0x03C6}; + case 0x1D46A: + return new int[] {0x0063}; + case 0x03D6: + return new int[] {0x03C0}; + case 0x1EDE: + return new int[] {0x1EDF}; + case 0x1D46B: + return new int[] {0x0064}; + case 0x1ED8: + return new int[] {0x1ED9}; + case 0x1D46E: + return new int[] {0x0067}; + case 0x1D46F: + return new int[] {0x0068}; + case 0x03D0: + return new int[] {0x03B2}; + case 0x1ED2: + return new int[] {0x1ED3}; + case 0x1D470: + return new int[] {0x0069}; + case 0x1D471: + return new int[] {0x006A}; + case 0x1ED0: + return new int[] {0x1ED1}; + case 0x1ED6: + return new int[] {0x1ED7}; + case 0x1D474: + return new int[] {0x006D}; + case 0x1D475: + return new int[] {0x006E}; + case 0x1ED4: + return new int[] {0x1ED5}; + case 0x1D472: + return new int[] {0x006B}; + case 0x1D473: + return new int[] {0x006C}; + case 0x1D478: + return new int[] {0x0071}; + case 0x1D479: + return new int[] {0x0072}; + case 0x1D476: + return new int[] {0x006F}; + case 0x1D477: + return new int[] {0x0070}; + case 0x212D: + return new int[] {0x0063}; + case 0x212C: + return new int[] {0x0062}; + case 0x1D67B: + return new int[] {0x006C}; + case 0x1D43A: + return new int[] {0x0067}; + case 0x1D67C: + return new 
int[] {0x006D}; + case 0x1D67A: + return new int[] {0x006B}; + case 0x018A: + return new int[] {0x0257}; + case 0x1D43D: + return new int[] {0x006A}; + case 0x1D67F: + return new int[] {0x0070}; + case 0x018B: + return new int[] {0x018C}; + case 0x1D43E: + return new int[] {0x006B}; + case 0x1D43B: + return new int[] {0x0068}; + case 0x1D67D: + return new int[] {0x006E}; + case 0x1D43C: + return new int[] {0x0069}; + case 0x1D67E: + return new int[] {0x006F}; + case 0x018E: + return new int[] {0x01DD}; + case 0x212B: + return new int[] {0x00E5}; + case 0x018F: + return new int[] {0x0259}; + case 0x212A: + return new int[] {0x006B}; + case 0x1D43F: + return new int[] {0x006C}; + case 0x0189: + return new int[] {0x0256}; + case 0x2126: + return new int[] {0x03C9}; + case 0x1D680: + return new int[] {0x0071}; + case 0x0187: + return new int[] {0x0188}; + case 0x2124: + return new int[] {0x007A}; + case 0x1D441: + return new int[] {0x006E}; + case 0x1D683: + return new int[] {0x0074}; + case 0x1D442: + return new int[] {0x006F}; + case 0x1D684: + return new int[] {0x0075}; + case 0x2128: + return new int[] {0x007A}; + case 0x1D681: + return new int[] {0x0072}; + case 0x1D440: + return new int[] {0x006D}; + case 0x1D682: + return new int[] {0x0073}; + case 0x0181: + return new int[] {0x0253}; + case 0x1D445: + return new int[] {0x0072}; + case 0x1D687: + return new int[] {0x0078}; + case 0x0182: + return new int[] {0x0183}; + case 0x1D446: + return new int[] {0x0073}; + case 0x1D688: + return new int[] {0x0079}; + case 0x1D443: + return new int[] {0x0070}; + case 0x1D685: + return new int[] {0x0076}; + case 0x1D444: + return new int[] {0x0071}; + case 0x1D686: + return new int[] {0x0077}; + case 0x2122: + return new int[] {0x0074, 0x006D}; + case 0x1D449: + return new int[] {0x0076}; + case 0x0186: + return new int[] {0x0254}; + case 0x2121: + return new int[] {0x0074, 0x0065, 0x006C}; + case 0x2120: + return new int[] {0x0073, 0x006D}; + case 0x1D447: + return new 
int[] {0x0074}; + case 0x1D689: + return new int[] {0x007A}; + case 0x0184: + return new int[] {0x0185}; + case 0x1D448: + return new int[] {0x0075}; + case 0x213F: + return new int[] {0x03C0}; + case 0x213E: + return new int[] {0x03B3}; + case 0x1D44A: + return new int[] {0x0077}; + case 0x1D44B: + return new int[] {0x0078}; + case 0x019C: + return new int[] {0x026F}; + case 0x1D44C: + return new int[] {0x0079}; + case 0x1D44D: + return new int[] {0x007A}; + case 0x019F: + return new int[] {0x0275}; + case 0x019D: + return new int[] {0x0272}; + case 0x0198: + return new int[] {0x0199}; + case 0x0193: + return new int[] {0x0260}; + case 0x0190: + return new int[] {0x025B}; + case 0x0191: + return new int[] {0x0192}; + case 0x0196: + return new int[] {0x0269}; + case 0x2133: + return new int[] {0x006D}; + case 0x0197: + return new int[] {0x0268}; + case 0x0194: + return new int[] {0x0263}; + case 0x2131: + return new int[] {0x0066}; + case 0x2130: + return new int[] {0x0065}; + case 0x1D418: + return new int[] {0x0079}; + case 0x1D419: + return new int[] {0x007A}; + case 0x1040B: + return new int[] {0x10433}; + case 0x1040A: + return new int[] {0x10432}; + case 0x10404: + return new int[] {0x1042C}; + case 0x10403: + return new int[] {0x1042B}; + case 0x10406: + return new int[] {0x1042E}; + case 0x10405: + return new int[] {0x1042D}; + case 0x10408: + return new int[] {0x10430}; + case 0x10407: + return new int[] {0x1042F}; + case 0x10409: + return new int[] {0x10431}; + case 0x2145: + return new int[] {0x0064}; + case 0x10400: + return new int[] {0x10428}; + case 0x10402: + return new int[] {0x1042A}; + case 0x10401: + return new int[] {0x10429}; + case 0x1D672: + return new int[] {0x0063}; + case 0x1D673: + return new int[] {0x0064}; + case 0x1D670: + return new int[] {0x0061}; + case 0x1D671: + return new int[] {0x0062}; + case 0x1D434: + return new int[] {0x0061}; + case 0x1D676: + return new int[] {0x0067}; + case 0x1D435: + return new int[] {0x0062}; + case 
0x1D677: + return new int[] {0x0068}; + case 0x1D674: + return new int[] {0x0065}; + case 0x1D675: + return new int[] {0x0066}; + case 0x1D438: + return new int[] {0x0065}; + case 0x1D439: + return new int[] {0x0066}; + case 0x1D436: + return new int[] {0x0063}; + case 0x1D678: + return new int[] {0x0069}; + case 0x1D437: + return new int[] {0x0064}; + case 0x1D679: + return new int[] {0x006A}; + case 0x01E6: + return new int[] {0x01E7}; + case 0x01E4: + return new int[] {0x01E5}; + case 0x01E8: + return new int[] {0x01E9}; + case 0x1D5B2: + return new int[] {0x0073}; + case 0x1D5B3: + return new int[] {0x0074}; + case 0x1D5B0: + return new int[] {0x0071}; + case 0x1D5B1: + return new int[] {0x0072}; + case 0x01E2: + return new int[] {0x01E3}; + case 0x1D5B6: + return new int[] {0x0077}; + case 0x1D5B7: + return new int[] {0x0078}; + case 0x01E0: + return new int[] {0x01E1}; + case 0x1D5B4: + return new int[] {0x0075}; + case 0x1D5B5: + return new int[] {0x0076}; + case 0x01DE: + return new int[] {0x01DF}; + case 0x1D5B8: + return new int[] {0x0079}; + case 0x1D5B9: + return new int[] {0x007A}; + case 0x01DB: + return new int[] {0x01DC}; + case 0x01F7: + return new int[] {0x01BF}; + case 0x01F8: + return new int[] {0x01F9}; + case 0x01F6: + return new int[] {0x0195}; + case 0x01F0: + return new int[] {0x006A, 0x030C}; + case 0x01F4: + return new int[] {0x01F5}; + case 0x01F1: + return new int[] {0x01F3}; + case 0x01F2: + return new int[] {0x01F3}; + case 0x01EE: + return new int[] {0x01EF}; + case 0x01EC: + return new int[] {0x01ED}; + case 0x01EA: + return new int[] {0x01EB}; + case 0x01FE: + return new int[] {0x01FF}; + case 0x01FC: + return new int[] {0x01FD}; + case 0x01FA: + return new int[] {0x01FB}; + case 0x1D5A1: + return new int[] {0x0062}; + case 0x1D5A2: + return new int[] {0x0063}; + case 0x1D5A0: + return new int[] {0x0061}; + case 0x1D5A5: + return new int[] {0x0066}; + case 0x1D5A6: + return new int[] {0x0067}; + case 0x1D5A3: + return new int[] 
{0x0064}; + case 0x1D5A4: + return new int[] {0x0065}; + case 0x1D5A9: + return new int[] {0x006A}; + case 0x1D5A7: + return new int[] {0x0068}; + case 0x1D5A8: + return new int[] {0x0069}; + case 0x1D5AA: + return new int[] {0x006B}; + case 0x1D5AB: + return new int[] {0x006C}; + case 0x1D5AE: + return new int[] {0x006F}; + case 0x1D5AF: + return new int[] {0x0070}; + case 0x1D5AC: + return new int[] {0x006D}; + case 0x1D5AD: + return new int[] {0x006E}; + case 0x01A2: + return new int[] {0x01A3}; + case 0x03E4: + return new int[] {0x03E5}; + case 0x1EEC: + return new int[] {0x1EED}; + case 0x01A0: + return new int[] {0x01A1}; + case 0x03E2: + return new int[] {0x03E3}; + case 0x1EEA: + return new int[] {0x1EEB}; + case 0x01A6: + return new int[] {0x0280}; + case 0x03E8: + return new int[] {0x03E9}; + case 0x01A7: + return new int[] {0x01A8}; + case 0x01A4: + return new int[] {0x01A5}; + case 0x03E6: + return new int[] {0x03E7}; + case 0x1EEE: + return new int[] {0x1EEF}; + case 0x03E0: + return new int[] {0x03E1}; + case 0x03DC: + return new int[] {0x03DD}; + case 0x1EE4: + return new int[] {0x1EE5}; + case 0x03DA: + return new int[] {0x03DB}; + case 0x1EE2: + return new int[] {0x1EE3}; + case 0x1EE8: + return new int[] {0x1EE9}; + case 0x03DE: + return new int[] {0x03DF}; + case 0x1EE6: + return new int[] {0x1EE7}; + case 0x1EE0: + return new int[] {0x1EE1}; + case 0x1D7BB: + return new int[] {0x03C3}; + case 0x01B3: + return new int[] {0x01B4}; + case 0x03F5: + return new int[] {0x03B5}; + case 0x01B1: + return new int[] {0x028A}; + case 0x01B2: + return new int[] {0x028B}; + case 0x03F4: + return new int[] {0x03B8}; + case 0x01B7: + return new int[] {0x0292}; + case 0x01B8: + return new int[] {0x01B9}; + case 0x01B5: + return new int[] {0x01B6}; + case 0x03F1: + return new int[] {0x03C1}; + case 0x03F2: + return new int[] {0x03C3}; + case 0x03F0: + return new int[] {0x03BA}; + case 0x1EF4: + return new int[] {0x1EF5}; + case 0x01AC: + return new int[] 
{0x01AD}; + case 0x03EE: + return new int[] {0x03EF}; + case 0x1EF2: + return new int[] {0x1EF3}; + case 0x03EC: + return new int[] {0x03ED}; + case 0x01AF: + return new int[] {0x01B0}; + case 0x1EF8: + return new int[] {0x1EF9}; + case 0x1EF6: + return new int[] {0x1EF7}; + case 0x01AE: + return new int[] {0x0288}; + case 0x01A9: + return new int[] {0x0283}; + case 0x1EF0: + return new int[] {0x1EF1}; + case 0x03EA: + return new int[] {0x03EB}; + case 0x01C4: + return new int[] {0x01C6}; + case 0x01C5: + return new int[] {0x01C6}; + case 0x01C8: + return new int[] {0x01C9}; + case 0x01C7: + return new int[] {0x01C9}; + case 0x01BC: + return new int[] {0x01BD}; + case 0x01D5: + return new int[] {0x01D6}; + case 0x01D3: + return new int[] {0x01D4}; + case 0x01D9: + return new int[] {0x01DA}; + case 0x01D7: + return new int[] {0x01D8}; + case 0x1D7A0: + return new int[] {0x03C1}; + case 0x01D1: + return new int[] {0x01D2}; + case 0x1D7A3: + return new int[] {0x03C4}; + case 0x1D7A4: + return new int[] {0x03C5}; + case 0x1D7A1: + return new int[] {0x03B8}; + case 0x1D7A2: + return new int[] {0x03C3}; + case 0x01CD: + return new int[] {0x01CE}; + case 0x1D7A7: + return new int[] {0x03C8}; + case 0x1D7A8: + return new int[] {0x03C9}; + case 0x01CB: + return new int[] {0x01CC}; + case 0x1D7A5: + return new int[] {0x03C6}; + case 0x1D7A6: + return new int[] {0x03C7}; + case 0x01CF: + return new int[] {0x01D0}; + case 0x01CA: + return new int[] {0x01CC}; + case 0x042A: + return new int[] {0x044A}; + case 0x042D: + return new int[] {0x044D}; + case 0x042E: + return new int[] {0x044E}; + case 0x042B: + return new int[] {0x044B}; + case 0x042C: + return new int[] {0x044C}; + case 0x0428: + return new int[] {0x0448}; + case 0x0429: + return new int[] {0x0449}; + case 0x0426: + return new int[] {0x0446}; + case 0x1F2E: + return new int[] {0x1F26}; + case 0x0427: + return new int[] {0x0447}; + case 0x1F2F: + return new int[] {0x1F27}; + case 0x0420: + return new int[] {0x0440}; 
+ case 0x0421: + return new int[] {0x0441}; + case 0x0424: + return new int[] {0x0444}; + case 0x1F2C: + return new int[] {0x1F24}; + case 0x0425: + return new int[] {0x0445}; + case 0x1F2D: + return new int[] {0x1F25}; + case 0x0422: + return new int[] {0x0442}; + case 0x1F2A: + return new int[] {0x1F22}; + case 0x0423: + return new int[] {0x0443}; + case 0x1F2B: + return new int[] {0x1F23}; + case 0x1F28: + return new int[] {0x1F20}; + case 0x041E: + return new int[] {0x043E}; + case 0x041F: + return new int[] {0x043F}; + case 0x1F29: + return new int[] {0x1F21}; + case 0x1F3F: + return new int[] {0x1F37}; + case 0x1F3A: + return new int[] {0x1F32}; + case 0x1F3D: + return new int[] {0x1F35}; + case 0x1F3E: + return new int[] {0x1F36}; + case 0x1F3B: + return new int[] {0x1F33}; + case 0x1F3C: + return new int[] {0x1F34}; + case 0x1F38: + return new int[] {0x1F30}; + case 0x1F39: + return new int[] {0x1F31}; + case 0x042F: + return new int[] {0x044F}; + case 0x1F52: + return new int[] {0x03C5, 0x0313, 0x0300}; + case 0x020A: + return new int[] {0x020B}; + case 0x1F50: + return new int[] {0x03C5, 0x0313}; + case 0x1F56: + return new int[] {0x03C5, 0x0313, 0x0342}; + case 0x020E: + return new int[] {0x020F}; + case 0x1F54: + return new int[] {0x03C5, 0x0313, 0x0301}; + case 0x020C: + return new int[] {0x020D}; + case 0x0208: + return new int[] {0x0209}; + case 0x0206: + return new int[] {0x0207}; + case 0x0200: + return new int[] {0x0201}; + case 0x1F4A: + return new int[] {0x1F42}; + case 0x1F4B: + return new int[] {0x1F43}; + case 0x0204: + return new int[] {0x0205}; + case 0x0202: + return new int[] {0x0203}; + case 0x1F4C: + return new int[] {0x1F44}; + case 0x1F4D: + return new int[] {0x1F45}; + case 0x1F49: + return new int[] {0x1F41}; + case 0x1F48: + return new int[] {0x1F40}; + case 0x021A: + return new int[] {0x021B}; + case 0x021E: + return new int[] {0x021F}; + case 0x1F68: + return new int[] {0x1F60}; + case 0x021C: + return new int[] {0x021D}; + case 
0x0218: + return new int[] {0x0219}; + case 0x1F5B: + return new int[] {0x1F53}; + case 0x0212: + return new int[] {0x0213}; + case 0x0210: + return new int[] {0x0211}; + case 0x1F5F: + return new int[] {0x1F57}; + case 0x0216: + return new int[] {0x0217}; + case 0x1F5D: + return new int[] {0x1F55}; + case 0x0214: + return new int[] {0x0215}; + case 0x1F59: + return new int[] {0x1F51}; + case 0x040B: + return new int[] {0x045B}; + case 0x040C: + return new int[] {0x045C}; + case 0x040A: + return new int[] {0x045A}; + case 0x0406: + return new int[] {0x0456}; + case 0x1F0E: + return new int[] {0x1F06}; + case 0x1D5D4: + return new int[] {0x0061}; + case 0x0407: + return new int[] {0x0457}; + case 0x1F0F: + return new int[] {0x1F07}; + case 0x1D5D5: + return new int[] {0x0062}; + case 0x0404: + return new int[] {0x0454}; + case 0x1F0C: + return new int[] {0x1F04}; + case 0x0405: + return new int[] {0x0455}; + case 0x1F0D: + return new int[] {0x1F05}; + case 0x1D5D8: + return new int[] {0x0065}; + case 0x1D5D9: + return new int[] {0x0066}; + case 0x0408: + return new int[] {0x0458}; + case 0x1D5D6: + return new int[] {0x0063}; + case 0x0409: + return new int[] {0x0459}; + case 0x1D5D7: + return new int[] {0x0064}; + case 0x0402: + return new int[] {0x0452}; + case 0x1F0A: + return new int[] {0x1F02}; + case 0x0403: + return new int[] {0x0453}; + case 0x1F0B: + return new int[] {0x1F03}; + case 0x1D5DA: + return new int[] {0x0067}; + case 0x0400: + return new int[] {0x0450}; + case 0x0401: + return new int[] {0x0451}; + case 0x1D5DD: + return new int[] {0x006A}; + case 0x1D5DE: + return new int[] {0x006B}; + case 0x1D5DB: + return new int[] {0x0068}; + case 0x1D5DC: + return new int[] {0x0069}; + case 0x1F09: + return new int[] {0x1F01}; + case 0x1D5DF: + return new int[] {0x006C}; + case 0x1F08: + return new int[] {0x1F00}; + case 0x041C: + return new int[] {0x043C}; + case 0x1D5E1: + return new int[] {0x006E}; + case 0x041D: + return new int[] {0x043D}; + case 
0x1D5E2: + return new int[] {0x006F}; + case 0x041A: + return new int[] {0x043A}; + case 0x041B: + return new int[] {0x043B}; + case 0x1D5E0: + return new int[] {0x006D}; + case 0x0417: + return new int[] {0x0437}; + case 0x1D5E5: + return new int[] {0x0072}; + case 0x0418: + return new int[] {0x0438}; + case 0x1D5E6: + return new int[] {0x0073}; + case 0x0415: + return new int[] {0x0435}; + case 0x1F1D: + return new int[] {0x1F15}; + case 0x1D5E3: + return new int[] {0x0070}; + case 0x0416: + return new int[] {0x0436}; + case 0x1D5E4: + return new int[] {0x0071}; + case 0x1D5E9: + return new int[] {0x0076}; + case 0x0419: + return new int[] {0x0439}; + case 0x1D5E7: + return new int[] {0x0074}; + case 0x1D5E8: + return new int[] {0x0075}; + case 0x0410: + return new int[] {0x0430}; + case 0x0413: + return new int[] {0x0433}; + case 0x1F1B: + return new int[] {0x1F13}; + case 0x1D5EA: + return new int[] {0x0077}; + case 0x0414: + return new int[] {0x0434}; + case 0x1F1C: + return new int[] {0x1F14}; + case 0x1D5EB: + return new int[] {0x0078}; + case 0x0411: + return new int[] {0x0431}; + case 0x0412: + return new int[] {0x0432}; + case 0x1F1A: + return new int[] {0x1F12}; + case 0x040F: + return new int[] {0x045F}; + case 0x040D: + return new int[] {0x045D}; + case 0x1D5EC: + return new int[] {0x0079}; + case 0x040E: + return new int[] {0x045E}; + case 0x1D5ED: + return new int[] {0x007A}; + case 0x1F18: + return new int[] {0x1F10}; + case 0x1F19: + return new int[] {0x1F11}; + case 0x1D519: + return new int[] {0x0076}; + case 0x1D517: + return new int[] {0x0074}; + case 0x1D759: + return new int[] {0x03B4}; + case 0x1D518: + return new int[] {0x0075}; + case 0x1D51A: + return new int[] {0x0077}; + case 0x1D75C: + return new int[] {0x03B7}; + case 0x1D51B: + return new int[] {0x0078}; + case 0x1D75D: + return new int[] {0x03B8}; + case 0x1D75A: + return new int[] {0x03B5}; + case 0x1D75B: + return new int[] {0x03B6}; + case 0x1D51C: + return new int[] {0x0079}; + 
case 0x1D75E: + return new int[] {0x03B9}; + case 0x1D75F: + return new int[] {0x03BA}; + case 0x1D760: + return new int[] {0x03BB}; + case 0x1D761: + return new int[] {0x03BC}; + case 0x1D764: + return new int[] {0x03BF}; + case 0x1D765: + return new int[] {0x03C0}; + case 0x1D762: + return new int[] {0x03BD}; + case 0x1D763: + return new int[] {0x03BE}; + case 0x1D768: + return new int[] {0x03C3}; + case 0x1D769: + return new int[] {0x03C4}; + case 0x1D766: + return new int[] {0x03C1}; + case 0x1D767: + return new int[] {0x03B8}; + case 0x1D76A: + return new int[] {0x03C5}; + case 0x1D76D: + return new int[] {0x03C8}; + case 0x1D76E: + return new int[] {0x03C9}; + case 0x1D76B: + return new int[] {0x03C6}; + case 0x1D76C: + return new int[] {0x03C7}; + case 0x1D538: + return new int[] {0x0061}; + case 0x004F: + return new int[] {0x006F}; + case 0x004D: + return new int[] {0x006D}; + case 0x004E: + return new int[] {0x006E}; + case 0x004B: + return new int[] {0x006B}; + case 0x004C: + return new int[] {0x006C}; + case 0x004A: + return new int[] {0x006A}; + case 0x0046: + return new int[] {0x0066}; + case 0x0047: + return new int[] {0x0067}; + case 0x0044: + return new int[] {0x0064}; + case 0x0045: + return new int[] {0x0065}; + case 0x0048: + return new int[] {0x0068}; + case 0x0049: + return new int[] {0x0069}; + case 0x0042: + return new int[] {0x0062}; + case 0x1D504: + return new int[] {0x0061}; + case 0x0043: + return new int[] {0x0063}; + case 0x1D505: + return new int[] {0x0062}; + case 0x1D747: + return new int[] {0x03C3}; + case 0x0041: + return new int[] {0x0061}; + case 0x1D508: + return new int[] {0x0065}; + case 0x1D509: + return new int[] {0x0066}; + case 0x1D507: + return new int[] {0x0064}; + case 0x1D50A: + return new int[] {0x0067}; + case 0x1D50D: + return new int[] {0x006A}; + case 0x1D50E: + return new int[] {0x006B}; + case 0x005A: + return new int[] {0x007A}; + case 0x0057: + return new int[] {0x0077}; + case 0x0058: + return new int[] 
{0x0078}; + case 0x0055: + return new int[] {0x0075}; + case 0x1D50F: + return new int[] {0x006C}; + case 0x0056: + return new int[] {0x0076}; + case 0x0059: + return new int[] {0x0079}; + case 0x1D511: + return new int[] {0x006E}; + case 0x0050: + return new int[] {0x0070}; + case 0x1D512: + return new int[] {0x006F}; + case 0x1D510: + return new int[] {0x006D}; + case 0x0053: + return new int[] {0x0073}; + case 0x1D757: + return new int[] {0x03B2}; + case 0x0054: + return new int[] {0x0074}; + case 0x1D516: + return new int[] {0x0073}; + case 0x1D758: + return new int[] {0x03B3}; + case 0x0051: + return new int[] {0x0071}; + case 0x1D513: + return new int[] {0x0070}; + case 0x0052: + return new int[] {0x0072}; + case 0x1D514: + return new int[] {0x0071}; + case 0x1D756: + return new int[] {0x03B1}; + case 0x022C: + return new int[] {0x022D}; + case 0x046E: + return new int[] {0x046F}; + case 0x022A: + return new int[] {0x022B}; + case 0x046C: + return new int[] {0x046D}; + case 0x022E: + return new int[] {0x022F}; + case 0x0228: + return new int[] {0x0229}; + case 0x1D71C: + return new int[] {0x03B1}; + case 0x046A: + return new int[] {0x046B}; + case 0x1D71D: + return new int[] {0x03B2}; + case 0x0222: + return new int[] {0x0223}; + case 0x0464: + return new int[] {0x0465}; + case 0x1F6C: + return new int[] {0x1F64}; + case 0x1F6D: + return new int[] {0x1F65}; + case 0x0220: + return new int[] {0x019E}; + case 0x0462: + return new int[] {0x0463}; + case 0x1F6A: + return new int[] {0x1F62}; + case 0x1D71E: + return new int[] {0x03B3}; + case 0x1F6B: + return new int[] {0x1F63}; + case 0x1D71F: + return new int[] {0x03B4}; + case 0x0226: + return new int[] {0x0227}; + case 0x0468: + return new int[] {0x0469}; + case 0x0224: + return new int[] {0x0225}; + case 0x0466: + return new int[] {0x0467}; + case 0x1F6E: + return new int[] {0x1F66}; + case 0x1F6F: + return new int[] {0x1F67}; + case 0x1D720: + return new int[] {0x03B5}; + case 0x1D721: + return new int[] 
{0x03B6}; + case 0x1F69: + return new int[] {0x1F61}; + case 0x0460: + return new int[] {0x0461}; + case 0x1D724: + return new int[] {0x03B9}; + case 0x1D725: + return new int[] {0x03BA}; + case 0x1D722: + return new int[] {0x03B7}; + case 0x1D723: + return new int[] {0x03B8}; + case 0x047E: + return new int[] {0x047F}; + case 0x1F85: + return new int[] {0x1F05, 0x03B9}; + case 0x1D728: + return new int[] {0x03BD}; + case 0x1F86: + return new int[] {0x1F06, 0x03B9}; + case 0x1D729: + return new int[] {0x03BE}; + case 0x047C: + return new int[] {0x047D}; + case 0x1F83: + return new int[] {0x1F03, 0x03B9}; + case 0x1D726: + return new int[] {0x03BB}; + case 0x1F84: + return new int[] {0x1F04, 0x03B9}; + case 0x1D727: + return new int[] {0x03BC}; + case 0x1F89: + return new int[] {0x1F01, 0x03B9}; + case 0x1F87: + return new int[] {0x1F07, 0x03B9}; + case 0x1F88: + return new int[] {0x1F00, 0x03B9}; + case 0x1D72A: + return new int[] {0x03BF}; + case 0x047A: + return new int[] {0x047B}; + case 0x1F81: + return new int[] {0x1F01, 0x03B9}; + case 0x1D72D: + return new int[] {0x03B8}; + case 0x1F82: + return new int[] {0x1F02, 0x03B9}; + case 0x1D72E: + return new int[] {0x03C3}; + case 0x1D72B: + return new int[] {0x03C0}; + case 0x1F80: + return new int[] {0x1F00, 0x03B9}; + case 0x1D72C: + return new int[] {0x03C1}; + case 0x0476: + return new int[] {0x0477}; + case 0x1D72F: + return new int[] {0x03C4}; + case 0x0232: + return new int[] {0x0233}; + case 0x0474: + return new int[] {0x0475}; + case 0x0478: + return new int[] {0x0479}; + case 0x1D731: + return new int[] {0x03C6}; + case 0x1D732: + return new int[] {0x03C7}; + case 0x1D730: + return new int[] {0x03C5}; + case 0x0230: + return new int[] {0x0231}; + case 0x0472: + return new int[] {0x0473}; + case 0x1D733: + return new int[] {0x03C8}; + case 0x0470: + return new int[] {0x0471}; + case 0x1D734: + return new int[] {0x03C9}; + case 0x1F96: + return new int[] {0x1F26, 0x03B9}; + case 0x1F97: + return new int[] 
{0x1F27, 0x03B9}; + case 0x1F94: + return new int[] {0x1F24, 0x03B9}; + case 0x048E: + return new int[] {0x048F}; + case 0x1F95: + return new int[] {0x1F25, 0x03B9}; + case 0x1F98: + return new int[] {0x1F20, 0x03B9}; + case 0x1F99: + return new int[] {0x1F21, 0x03B9}; + case 0x1F92: + return new int[] {0x1F22, 0x03B9}; + case 0x048C: + return new int[] {0x048D}; + case 0x1F93: + return new int[] {0x1F23, 0x03B9}; + case 0x1F90: + return new int[] {0x1F20, 0x03B9}; + case 0x048A: + return new int[] {0x048B}; + case 0x1F91: + return new int[] {0x1F21, 0x03B9}; + case 0x1F8E: + return new int[] {0x1F06, 0x03B9}; + case 0x1F8F: + return new int[] {0x1F07, 0x03B9}; + case 0x1F8C: + return new int[] {0x1F04, 0x03B9}; + case 0x1F8D: + return new int[] {0x1F05, 0x03B9}; + case 0x1F8A: + return new int[] {0x1F02, 0x03B9}; + case 0x1F8B: + return new int[] {0x1F03, 0x03B9}; + case 0x0480: + return new int[] {0x0481}; + case 0x049E: + return new int[] {0x049F}; + case 0x049C: + return new int[] {0x049D}; + case 0x049A: + return new int[] {0x049B}; + case 0x1F9F: + return new int[] {0x1F27, 0x03B9}; + case 0x0498: + return new int[] {0x0499}; + case 0x1F9D: + return new int[] {0x1F25, 0x03B9}; + case 0x1D70D: + return new int[] {0x03C3}; + case 0x0496: + return new int[] {0x0497}; + case 0x1F9E: + return new int[] {0x1F26, 0x03B9}; + case 0x0490: + return new int[] {0x0491}; + case 0x1F9B: + return new int[] {0x1F23, 0x03B9}; + case 0x0494: + return new int[] {0x0495}; + case 0x1F9C: + return new int[] {0x1F24, 0x03B9}; + case 0x0492: + return new int[] {0x0493}; + case 0x1F9A: + return new int[] {0x1F22, 0x03B9}; + case 0x04C1: + return new int[] {0x04C2}; + case 0x1FCA: + return new int[] {0x1F74}; + case 0x04C5: + return new int[] {0x04C6}; + case 0x04C3: + return new int[] {0x04C4}; + case 0x1FCB: + return new int[] {0x1F75}; + case 0x1FCC: + return new int[] {0x03B7, 0x03B9}; + case 0x1FC8: + return new int[] {0x1F72}; + case 0x1FC9: + return new int[] {0x1F73}; + case 
0x1FC6: + return new int[] {0x03B7, 0x0342}; + case 0x1FC7: + return new int[] {0x03B7, 0x0342, 0x03B9}; + case 0x04BA: + return new int[] {0x04BB}; + case 0x1FC4: + return new int[] {0x03AE, 0x03B9}; + case 0x04BE: + return new int[] {0x04BF}; + case 0x1FC2: + return new int[] {0x1F74, 0x03B9}; + case 0x04BC: + return new int[] {0x04BD}; + case 0x1FC3: + return new int[] {0x03B7, 0x03B9}; + case 0x04B8: + return new int[] {0x04B9}; + case 0x3371: + return new int[] {0x0068, 0x0070, 0x0061}; + case 0x04B6: + return new int[] {0x04B7}; + case 0x1FBE: + return new int[] {0x03B9}; + case 0x3375: + return new int[] {0x006F, 0x0076}; + case 0x3373: + return new int[] {0x0061, 0x0075}; + case 0x04D2: + return new int[] {0x04D3}; + case 0x1FDA: + return new int[] {0x1F76}; + case 0x1FDB: + return new int[] {0x1F77}; + case 0x04D0: + return new int[] {0x04D1}; + case 0x04D6: + return new int[] {0x04D7}; + case 0x3390: + return new int[] {0x0068, 0x007A}; + case 0x04D4: + return new int[] {0x04D5}; + case 0x1FD9: + return new int[] {0x1FD1}; + case 0x338C: + return new int[] {0x03BC, 0x0066}; + case 0x338B: + return new int[] {0x006E, 0x0066}; + case 0x1FD7: + return new int[] {0x03B9, 0x0308, 0x0342}; + case 0x338A: + return new int[] {0x0070, 0x0066}; + case 0x1FD8: + return new int[] {0x1FD0}; + case 0x04CB: + return new int[] {0x04CC}; + case 0x1FD2: + return new int[] {0x03B9, 0x0308, 0x0300}; + case 0x1FD6: + return new int[] {0x03B9, 0x0342}; + case 0x1FD3: + return new int[] {0x03B9, 0x0308, 0x0301}; + case 0x04CD: + return new int[] {0x04CE}; + case 0x04C9: + return new int[] {0x04CA}; + case 0x3383: + return new int[] {0x006D, 0x0061}; + case 0x3382: + return new int[] {0x03BC, 0x0061}; + case 0x04C7: + return new int[] {0x04C8}; + case 0x3381: + return new int[] {0x006E, 0x0061}; + case 0x3380: + return new int[] {0x0070, 0x0061}; + case 0x3387: + return new int[] {0x0067, 0x0062}; + case 0x3386: + return new int[] {0x006D, 0x0062}; + case 0x3385: + return new 
int[] {0x006B, 0x0062}; + case 0x3384: + return new int[] {0x006B, 0x0061}; + case 0x1FEB: + return new int[] {0x1F7B}; + case 0x04E4: + return new int[] {0x04E5}; + case 0x1FEC: + return new int[] {0x1FE5}; + case 0x04E2: + return new int[] {0x04E3}; + case 0x1FEA: + return new int[] {0x1F7A}; + case 0x1D57C: + return new int[] {0x0071}; + case 0x04E8: + return new int[] {0x04E9}; + case 0x1D57D: + return new int[] {0x0072}; + case 0x1D57A: + return new int[] {0x006F}; + case 0x04E6: + return new int[] {0x04E7}; + case 0x1D57B: + return new int[] {0x0070}; + case 0x1FE8: + return new int[] {0x1FE0}; + case 0x1D57E: + return new int[] {0x0073}; + case 0x1FE9: + return new int[] {0x1FE1}; + case 0x1D57F: + return new int[] {0x0074}; + case 0x04E0: + return new int[] {0x04E1}; + case 0x1FE2: + return new int[] {0x03C5, 0x0308, 0x0300}; + case 0x1D580: + return new int[] {0x0075}; + case 0x04DC: + return new int[] {0x04DD}; + case 0x1FE3: + return new int[] {0x03C5, 0x0308, 0x0301}; + case 0x1D581: + return new int[] {0x0076}; + case 0x04DA: + return new int[] {0x04DB}; + case 0x1FE6: + return new int[] {0x03C5, 0x0342}; + case 0x1D584: + return new int[] {0x0079}; + case 0x1FE7: + return new int[] {0x03C5, 0x0308, 0x0342}; + case 0x1D585: + return new int[] {0x007A}; + case 0x1FE4: + return new int[] {0x03C1, 0x0313}; + case 0x1D582: + return new int[] {0x0077}; + case 0x04DE: + return new int[] {0x04DF}; + case 0x1D583: + return new int[] {0x0078}; + case 0x3394: + return new int[] {0x0074, 0x0068, 0x007A}; + case 0x3393: + return new int[] {0x0067, 0x0068, 0x007A}; + case 0x04D8: + return new int[] {0x04D9}; + case 0x3392: + return new int[] {0x006D, 0x0068, 0x007A}; + case 0x3391: + return new int[] {0x006B, 0x0068, 0x007A}; + case 0x04F4: + return new int[] {0x04F5}; + case 0x1FFC: + return new int[] {0x03C9, 0x03B9}; + case 0x04F2: + return new int[] {0x04F3}; + case 0x1FFA: + return new int[] {0x1F7C}; + case 0x1FFB: + return new int[] {0x1F7D}; + case 0x04F8: 
+ return new int[] {0x04F9}; + case 0x1FF9: + return new int[] {0x1F79}; + case 0x04F0: + return new int[] {0x04F1}; + case 0x04EC: + return new int[] {0x04ED}; + case 0x1FF3: + return new int[] {0x03C9, 0x03B9}; + case 0x1FF4: + return new int[] {0x03CE, 0x03B9}; + case 0x04EA: + return new int[] {0x04EB}; + case 0x1FF2: + return new int[] {0x1F7C, 0x03B9}; + case 0x1FF7: + return new int[] {0x03C9, 0x0342, 0x03B9}; + case 0x1FF8: + return new int[] {0x1F78}; + case 0x04EE: + return new int[] {0x04EF}; + case 0x1FF6: + return new int[] {0x03C9, 0x0342}; + case 0x1D79C: + return new int[] {0x03BD}; + case 0x1D79D: + return new int[] {0x03BE}; + case 0x1D79A: + return new int[] {0x03BB}; + case 0x1D79B: + return new int[] {0x03BC}; + case 0x1D79E: + return new int[] {0x03BF}; + case 0x1D79F: + return new int[] {0x03C0}; + case 0x1D56C: + return new int[] {0x0061}; + case 0x1D56F: + return new int[] {0x0064}; + case 0x1D56D: + return new int[] {0x0062}; + case 0x1D56E: + return new int[] {0x0063}; + case 0x1D570: + return new int[] {0x0065}; + case 0x1D573: + return new int[] {0x0068}; + case 0x1D574: + return new int[] {0x0069}; + case 0x1D571: + return new int[] {0x0066}; + case 0x1D572: + return new int[] {0x0067}; + case 0x1D577: + return new int[] {0x006C}; + case 0x1D578: + return new int[] {0x006D}; + case 0x1D575: + return new int[] {0x006A}; + case 0x1D576: + return new int[] {0x006B}; + case 0x1D579: + return new int[] {0x006E}; + case 0x04A0: + return new int[] {0x04A1}; + case 0x1D539: + return new int[] {0x0062}; + case 0x1FAB: + return new int[] {0x1F63, 0x03B9}; + case 0x04A4: + return new int[] {0x04A5}; + case 0x1FAC: + return new int[] {0x1F64, 0x03B9}; + case 0x04A2: + return new int[] {0x04A3}; + case 0x1FAA: + return new int[] {0x1F62, 0x03B9}; + case 0x1FA6: + return new int[] {0x1F66, 0x03B9}; + case 0x1D53C: + return new int[] {0x0065}; + case 0x1FA7: + return new int[] {0x1F67, 0x03B9}; + case 0x1D53D: + return new int[] {0x0066}; + case 
0x1FA4: + return new int[] {0x1F64, 0x03B9}; + case 0x1FA5: + return new int[] {0x1F65, 0x03B9}; + case 0x1D53B: + return new int[] {0x0064}; + case 0x1FA8: + return new int[] {0x1F60, 0x03B9}; + case 0x1D53E: + return new int[] {0x0067}; + case 0x1FA9: + return new int[] {0x1F61, 0x03B9}; + case 0x1FA2: + return new int[] {0x1F62, 0x03B9}; + case 0x1D540: + return new int[] {0x0069}; + case 0x1FA3: + return new int[] {0x1F63, 0x03B9}; + case 0x1D541: + return new int[] {0x006A}; + case 0x1FA0: + return new int[] {0x1F60, 0x03B9}; + case 0x1FA1: + return new int[] {0x1F61, 0x03B9}; + case 0x1D781: + return new int[] {0x03C3}; + case 0x1D544: + return new int[] {0x006D}; + case 0x1D542: + return new int[] {0x006B}; + case 0x1D543: + return new int[] {0x006C}; + case 0x1D546: + return new int[] {0x006F}; + case 0x04B0: + return new int[] {0x04B1}; + case 0x04B4: + return new int[] {0x04B5}; + case 0x1FBC: + return new int[] {0x03B1, 0x03B9}; + case 0x1D54A: + return new int[] {0x0073}; + case 0x04B2: + return new int[] {0x04B3}; + case 0x1FBA: + return new int[] {0x1F70}; + case 0x1FBB: + return new int[] {0x1F71}; + case 0x1FB7: + return new int[] {0x03B1, 0x0342, 0x03B9}; + case 0x1D54D: + return new int[] {0x0076}; + case 0x1FB8: + return new int[] {0x1FB0}; + case 0x1D54E: + return new int[] {0x0077}; + case 0x04AE: + return new int[] {0x04AF}; + case 0x1D54B: + return new int[] {0x0074}; + case 0x1FB6: + return new int[] {0x03B1, 0x0342}; + case 0x1D54C: + return new int[] {0x0075}; + case 0x1FB9: + return new int[] {0x1FB1}; + case 0x1D54F: + return new int[] {0x0078}; + case 0x1D790: + return new int[] {0x03B1}; + case 0x04AC: + return new int[] {0x04AD}; + case 0x1FB3: + return new int[] {0x03B1, 0x03B9}; + case 0x1D793: + return new int[] {0x03B4}; + case 0x1FB4: + return new int[] {0x03AC, 0x03B9}; + case 0x1D794: + return new int[] {0x03B5}; + case 0x04AA: + return new int[] {0x04AB}; + case 0x1D791: + return new int[] {0x03B2}; + case 0x1FB2: + return new 
int[] {0x1F70, 0x03B9}; + case 0x1D550: + return new int[] {0x0079}; + case 0x1D792: + return new int[] {0x03B3}; + case 0x1FAF: + return new int[] {0x1F67, 0x03B9}; + case 0x1D797: + return new int[] {0x03B8}; + case 0x04A8: + return new int[] {0x04A9}; + case 0x1D798: + return new int[] {0x03B9}; + case 0x1FAD: + return new int[] {0x1F65, 0x03B9}; + case 0x1D795: + return new int[] {0x03B6}; + case 0x04A6: + return new int[] {0x04A7}; + case 0x1FAE: + return new int[] {0x1F66, 0x03B9}; + case 0x1D796: + return new int[] {0x03B7}; + case 0x1D799: + return new int[] {0x03BA}; + default: + return new int[] {codePoint}; + } + } + + /** + * Mapping for case-folding used with no normalization. + * + * @param codePoint the character (Unicode code point) to be mapped. + * @return Case-folding used with no normalization for the given {@code codePoint}. + * @see RFC 3454, Appendix B.3 + */ + public static int[] mapWithoutNormalization(int codePoint) { + switch (codePoint) { + case 0x00C5: + return new int[] {0x00E5}; + case 0x00C6: + return new int[] {0x00E6}; + case 0x00C3: + return new int[] {0x00E3}; + case 0x00C4: + return new int[] {0x00E4}; + case 0x00C9: + return new int[] {0x00E9}; + case 0x00C7: + return new int[] {0x00E7}; + case 0x00C8: + return new int[] {0x00E8}; + case 0x00C1: + return new int[] {0x00E1}; + case 0x00C2: + return new int[] {0x00E2}; + case 0x00C0: + return new int[] {0x00E0}; + case 0x00D6: + return new int[] {0x00F6}; + case 0x00D4: + return new int[] {0x00F4}; + case 0x00D5: + return new int[] {0x00F5}; + case 0x00D8: + return new int[] {0x00F8}; + case 0x00D9: + return new int[] {0x00F9}; + case 0x00D2: + return new int[] {0x00F2}; + case 0x00D3: + return new int[] {0x00F3}; + case 0x00D0: + return new int[] {0x00F0}; + case 0x00D1: + return new int[] {0x00F1}; + case 0x00CE: + return new int[] {0x00EE}; + case 0x00CF: + return new int[] {0x00EF}; + case 0x00CC: + return new int[] {0x00EC}; + case 0x00CD: + return new int[] {0x00ED}; + case 
0x00CA: + return new int[] {0x00EA}; + case 0x00CB: + return new int[] {0x00EB}; + case 0x00DF: + return new int[] {0x0073, 0x0073}; + case 0x00DD: + return new int[] {0x00FD}; + case 0x00DE: + return new int[] {0x00FE}; + case 0x00DB: + return new int[] {0x00FB}; + case 0x00DC: + return new int[] {0x00FC}; + case 0x00DA: + return new int[] {0x00FA}; + case 0x24B7: + return new int[] {0x24D1}; + case 0x24B6: + return new int[] {0x24D0}; + case 0x24C4: + return new int[] {0x24DE}; + case 0x24C3: + return new int[] {0x24DD}; + case 0x24C2: + return new int[] {0x24DC}; + case 0x24C1: + return new int[] {0x24DB}; + case 0x24C8: + return new int[] {0x24E2}; + case 0x24C7: + return new int[] {0x24E1}; + case 0x24C6: + return new int[] {0x24E0}; + case 0x24C5: + return new int[] {0x24DF}; + case 0x24C0: + return new int[] {0x24DA}; + case 0x24BC: + return new int[] {0x24D6}; + case 0x24BB: + return new int[] {0x24D5}; + case 0x24BA: + return new int[] {0x24D4}; + case 0x24BF: + return new int[] {0x24D9}; + case 0x24BE: + return new int[] {0x24D8}; + case 0x24BD: + return new int[] {0x24D7}; + case 0x24B9: + return new int[] {0x24D3}; + case 0x24B8: + return new int[] {0x24D2}; + case 0x00B5: + return new int[] {0x03BC}; + case 0x24CD: + return new int[] {0x24E7}; + case 0x24CC: + return new int[] {0x24E6}; + case 0x24CB: + return new int[] {0x24E5}; + case 0x24CA: + return new int[] {0x24E4}; + case 0x24CF: + return new int[] {0x24E9}; + case 0x24CE: + return new int[] {0x24E8}; + case 0x24C9: + return new int[] {0x24E3}; + case 0x054A: + return new int[] {0x057A}; + case 0x054B: + return new int[] {0x057B}; + case 0x1E10: + return new int[] {0x1E11}; + case 0x054E: + return new int[] {0x057E}; + case 0x054F: + return new int[] {0x057F}; + case 0x1E14: + return new int[] {0x1E15}; + case 0x054C: + return new int[] {0x057C}; + case 0x054D: + return new int[] {0x057D}; + case 0x1E12: + return new int[] {0x1E13}; + case 0x0549: + return new int[] {0x0579}; + case 0x0547: + 
return new int[] {0x0577}; + case 0x0548: + return new int[] {0x0578}; + case 0x1E0E: + return new int[] {0x1E0F}; + case 0x0541: + return new int[] {0x0571}; + case 0x0542: + return new int[] {0x0572}; + case 0x0540: + return new int[] {0x0570}; + case 0x0545: + return new int[] {0x0575}; + case 0x0546: + return new int[] {0x0576}; + case 0x1E0C: + return new int[] {0x1E0D}; + case 0x0543: + return new int[] {0x0573}; + case 0x0544: + return new int[] {0x0574}; + case 0x1E0A: + return new int[] {0x1E0B}; + case 0x1E06: + return new int[] {0x1E07}; + case 0x053F: + return new int[] {0x056F}; + case 0x1E04: + return new int[] {0x1E05}; + case 0x1E08: + return new int[] {0x1E09}; + case 0x1E20: + return new int[] {0x1E21}; + case 0x1E24: + return new int[] {0x1E25}; + case 0x1E22: + return new int[] {0x1E23}; + case 0x1E1E: + return new int[] {0x1E1F}; + case 0x0552: + return new int[] {0x0582}; + case 0x0553: + return new int[] {0x0583}; + case 0x0550: + return new int[] {0x0580}; + case 0x0551: + return new int[] {0x0581}; + case 0x0556: + return new int[] {0x0586}; + case 0x1E1C: + return new int[] {0x1E1D}; + case 0x0554: + return new int[] {0x0584}; + case 0x1E1A: + return new int[] {0x1E1B}; + case 0x0555: + return new int[] {0x0585}; + case 0x1E18: + return new int[] {0x1E19}; + case 0x1E16: + return new int[] {0x1E17}; + case 0x1E32: + return new int[] {0x1E33}; + case 0x1E30: + return new int[] {0x1E31}; + case 0x1E36: + return new int[] {0x1E37}; + case 0x1E34: + return new int[] {0x1E35}; + case 0x1E2A: + return new int[] {0x1E2B}; + case 0x1E2E: + return new int[] {0x1E2F}; + case 0x1E2C: + return new int[] {0x1E2D}; + case 0x1E28: + return new int[] {0x1E29}; + case 0x1E26: + return new int[] {0x1E27}; + case 0x1E42: + return new int[] {0x1E43}; + case 0x1E40: + return new int[] {0x1E41}; + case 0x1E46: + return new int[] {0x1E47}; + case 0x1E44: + return new int[] {0x1E45}; + case 0x1E3A: + return new int[] {0x1E3B}; + case 0x1E3E: + return new int[] 
{0x1E3F}; + case 0x1E3C: + return new int[] {0x1E3D}; + case 0x1E38: + return new int[] {0x1E39}; + case 0x050A: + return new int[] {0x050B}; + case 0x0506: + return new int[] {0x0507}; + case 0x0504: + return new int[] {0x0505}; + case 0x0508: + return new int[] {0x0509}; + case 0x0502: + return new int[] {0x0503}; + case 0x0500: + return new int[] {0x0501}; + case 0x050E: + return new int[] {0x050F}; + case 0x050C: + return new int[] {0x050D}; + case 0xFF30: + return new int[] {0xFF50}; + case 0xFF31: + return new int[] {0xFF51}; + case 0xFF32: + return new int[] {0xFF52}; + case 0xFF37: + return new int[] {0xFF57}; + case 0xFF38: + return new int[] {0xFF58}; + case 0xFF39: + return new int[] {0xFF59}; + case 0xFF33: + return new int[] {0xFF53}; + case 0xFF34: + return new int[] {0xFF54}; + case 0xFF35: + return new int[] {0xFF55}; + case 0xFF36: + return new int[] {0xFF56}; + case 0xFF3A: + return new int[] {0xFF5A}; + case 0x053A: + return new int[] {0x056A}; + case 0xFF21: + return new int[] {0xFF41}; + case 0x053D: + return new int[] {0x056D}; + case 0x1E02: + return new int[] {0x1E03}; + case 0x053E: + return new int[] {0x056E}; + case 0x053B: + return new int[] {0x056B}; + case 0x1E00: + return new int[] {0x1E01}; + case 0x053C: + return new int[] {0x056C}; + case 0x0538: + return new int[] {0x0568}; + case 0xFF26: + return new int[] {0xFF46}; + case 0x0539: + return new int[] {0x0569}; + case 0xFF27: + return new int[] {0xFF47}; + case 0x0536: + return new int[] {0x0566}; + case 0xFF28: + return new int[] {0xFF48}; + case 0x0537: + return new int[] {0x0567}; + case 0xFF29: + return new int[] {0xFF49}; + case 0xFF22: + return new int[] {0xFF42}; + case 0xFF23: + return new int[] {0xFF43}; + case 0xFF24: + return new int[] {0xFF44}; + case 0xFF25: + return new int[] {0xFF45}; + case 0x0531: + return new int[] {0x0561}; + case 0xFF2A: + return new int[] {0xFF4A}; + case 0x0534: + return new int[] {0x0564}; + case 0x0535: + return new int[] {0x0565}; + case 
0x0532: + return new int[] {0x0562}; + case 0x0533: + return new int[] {0x0563}; + case 0xFF2F: + return new int[] {0xFF4F}; + case 0xFF2B: + return new int[] {0xFF4B}; + case 0xFF2C: + return new int[] {0xFF4C}; + case 0xFF2D: + return new int[] {0xFF4D}; + case 0xFF2E: + return new int[] {0xFF4E}; + case 0x014E: + return new int[] {0x014F}; + case 0x1E97: + return new int[] {0x0074, 0x0308}; + case 0x1E98: + return new int[] {0x0077, 0x030A}; + case 0x014C: + return new int[] {0x014D}; + case 0x038E: + return new int[] {0x03CD}; + case 0x038F: + return new int[] {0x03CE}; + case 0x1E96: + return new int[] {0x0068, 0x0331}; + case 0x1E99: + return new int[] {0x0079, 0x030A}; + case 0x1E90: + return new int[] {0x1E91}; + case 0x014A: + return new int[] {0x014B}; + case 0x038C: + return new int[] {0x03CC}; + case 0x1E94: + return new int[] {0x1E95}; + case 0x038A: + return new int[] {0x03AF}; + case 0x1E92: + return new int[] {0x1E93}; + case 0x0145: + return new int[] {0x0146}; + case 0x0388: + return new int[] {0x03AD}; + case 0x0143: + return new int[] {0x0144}; + case 0x0386: + return new int[] {0x03AC}; + case 0x1E8E: + return new int[] {0x1E8F}; + case 0x0149: + return new int[] {0x02BC, 0x006E}; + case 0x0147: + return new int[] {0x0148}; + case 0x0389: + return new int[] {0x03AE}; + case 0x0141: + return new int[] {0x0142}; + case 0x1E8C: + return new int[] {0x1E8D}; + case 0x1E8A: + return new int[] {0x1E8B}; + case 0x039F: + return new int[] {0x03BF}; + case 0x015E: + return new int[] {0x015F}; + case 0x039A: + return new int[] {0x03BA}; + case 0x039D: + return new int[] {0x03BD}; + case 0x015C: + return new int[] {0x015D}; + case 0x039E: + return new int[] {0x03BE}; + case 0x039B: + return new int[] {0x03BB}; + case 0x015A: + return new int[] {0x015B}; + case 0x039C: + return new int[] {0x03BC}; + case 0x0156: + return new int[] {0x0157}; + case 0x0398: + return new int[] {0x03B8}; + case 0x0399: + return new int[] {0x03B9}; + case 0x0154: + return new 
int[] {0x0155}; + case 0x0396: + return new int[] {0x03B6}; + case 0x0397: + return new int[] {0x03B7}; + case 0x0158: + return new int[] {0x0159}; + case 0x0390: + return new int[] {0x03B9, 0x0308, 0x0301}; + case 0x0391: + return new int[] {0x03B1}; + case 0x0152: + return new int[] {0x0153}; + case 0x0394: + return new int[] {0x03B4}; + case 0x0395: + return new int[] {0x03B5}; + case 0x0150: + return new int[] {0x0151}; + case 0x0392: + return new int[] {0x03B2}; + case 0x1E9A: + return new int[] {0x0061, 0x02BE}; + case 0x0393: + return new int[] {0x03B3}; + case 0x1E9B: + return new int[] {0x1E61}; + case 0x016E: + return new int[] {0x016F}; + case 0x016C: + return new int[] {0x016D}; + case 0x016A: + return new int[] {0x016B}; + case 0x0168: + return new int[] {0x0169}; + case 0x0166: + return new int[] {0x0167}; + case 0x0160: + return new int[] {0x0161}; + case 0x0164: + return new int[] {0x0165}; + case 0x0162: + return new int[] {0x0163}; + case 0x017F: + return new int[] {0x0073}; + case 0x017D: + return new int[] {0x017E}; + case 0x017B: + return new int[] {0x017C}; + case 0x0178: + return new int[] {0x00FF}; + case 0x0179: + return new int[] {0x017A}; + case 0x0176: + return new int[] {0x0177}; + case 0x0170: + return new int[] {0x0171}; + case 0x0174: + return new int[] {0x0175}; + case 0x0172: + return new int[] {0x0173}; + case 0x010A: + return new int[] {0x010B}; + case 0x1E54: + return new int[] {0x1E55}; + case 0xFB13: + return new int[] {0x0574, 0x0576}; + case 0x1E52: + return new int[] {0x1E53}; + case 0xFB14: + return new int[] {0x0574, 0x0565}; + case 0x010E: + return new int[] {0x010F}; + case 0x1E58: + return new int[] {0x1E59}; + case 0x010C: + return new int[] {0x010D}; + case 0x1E56: + return new int[] {0x1E57}; + case 0x0108: + return new int[] {0x0109}; + case 0xFB15: + return new int[] {0x0574, 0x056B}; + case 0x1E50: + return new int[] {0x1E51}; + case 0xFB16: + return new int[] {0x057E, 0x0576}; + case 0xFB17: + return new int[] 
{0x0574, 0x056D}; + case 0x0102: + return new int[] {0x0103}; + case 0x1E4C: + return new int[] {0x1E4D}; + case 0x0100: + return new int[] {0x0101}; + case 0x1E4A: + return new int[] {0x1E4B}; + case 0x0106: + return new int[] {0x0107}; + case 0x0345: + return new int[] {0x03B9}; + case 0x0587: + return new int[] {0x0565, 0x0582}; + case 0x0104: + return new int[] {0x0105}; + case 0x1E4E: + return new int[] {0x1E4F}; + case 0x1E48: + return new int[] {0x1E49}; + case 0x1E64: + return new int[] {0x1E65}; + case 0xFB00: + return new int[] {0x0066, 0x0066}; + case 0x011C: + return new int[] {0x011D}; + case 0xFB01: + return new int[] {0x0066, 0x0069}; + case 0x1E62: + return new int[] {0x1E63}; + case 0xFB02: + return new int[] {0x0066, 0x006C}; + case 0x011A: + return new int[] {0x011B}; + case 0xFB03: + return new int[] {0x0066, 0x0066, 0x0069}; + case 0x1E68: + return new int[] {0x1E69}; + case 0x1E66: + return new int[] {0x1E67}; + case 0x011E: + return new int[] {0x011F}; + case 0x0118: + return new int[] {0x0119}; + case 0x1E60: + return new int[] {0x1E61}; + case 0xFB04: + return new int[] {0x0066, 0x0066, 0x006C}; + case 0xFB05: + return new int[] {0x0073, 0x0074}; + case 0xFB06: + return new int[] {0x0073, 0x0074}; + case 0x0112: + return new int[] {0x0113}; + case 0x1E5C: + return new int[] {0x1E5D}; + case 0x0110: + return new int[] {0x0111}; + case 0x1E5A: + return new int[] {0x1E5B}; + case 0x0116: + return new int[] {0x0117}; + case 0x0114: + return new int[] {0x0115}; + case 0x1E5E: + return new int[] {0x1E5F}; + case 0x012C: + return new int[] {0x012D}; + case 0x1E76: + return new int[] {0x1E77}; + case 0x012A: + return new int[] {0x012B}; + case 0x1E74: + return new int[] {0x1E75}; + case 0x012E: + return new int[] {0x012F}; + case 0x1E78: + return new int[] {0x1E79}; + case 0x1E72: + return new int[] {0x1E73}; + case 0x1E70: + return new int[] {0x1E71}; + case 0x0124: + return new int[] {0x0125}; + case 0x1E6E: + return new int[] {0x1E6F}; + case 
0x0122: + return new int[] {0x0123}; + case 0x1E6C: + return new int[] {0x1E6D}; + case 0x0128: + return new int[] {0x0129}; + case 0x0126: + return new int[] {0x0127}; + case 0x0120: + return new int[] {0x0121}; + case 0x1E6A: + return new int[] {0x1E6B}; + case 0x013D: + return new int[] {0x013E}; + case 0x1E86: + return new int[] {0x1E87}; + case 0x013B: + return new int[] {0x013C}; + case 0x1E84: + return new int[] {0x1E85}; + case 0x013F: + return new int[] {0x0140}; + case 0x1E88: + return new int[] {0x1E89}; + case 0x1E82: + return new int[] {0x1E83}; + case 0x1E80: + return new int[] {0x1E81}; + case 0x0134: + return new int[] {0x0135}; + case 0x1E7E: + return new int[] {0x1E7F}; + case 0x0132: + return new int[] {0x0133}; + case 0x1E7C: + return new int[] {0x1E7D}; + case 0x0139: + return new int[] {0x013A}; + case 0x0136: + return new int[] {0x0137}; + case 0x0130: + return new int[] {0x0069, 0x0307}; + case 0x1E7A: + return new int[] {0x1E7B}; + case 0x03A0: + return new int[] {0x03C0}; + case 0x03A1: + return new int[] {0x03C1}; + case 0x03A4: + return new int[] {0x03C4}; + case 0x1EAC: + return new int[] {0x1EAD}; + case 0x03A5: + return new int[] {0x03C5}; + case 0x1EAA: + return new int[] {0x1EAB}; + case 0x03A3: + return new int[] {0x03C3}; + case 0x216B: + return new int[] {0x217B}; + case 0x1EA8: + return new int[] {0x1EA9}; + case 0x216A: + return new int[] {0x217A}; + case 0x10425: + return new int[] {0x1044D}; + case 0x1EA6: + return new int[] {0x1EA7}; + case 0x216F: + return new int[] {0x217F}; + case 0x216E: + return new int[] {0x217E}; + case 0x216D: + return new int[] {0x217D}; + case 0x216C: + return new int[] {0x217C}; + case 0x1EA0: + return new int[] {0x1EA1}; + case 0x2169: + return new int[] {0x2179}; + case 0x2168: + return new int[] {0x2178}; + case 0x10420: + return new int[] {0x10448}; + case 0x2167: + return new int[] {0x2177}; + case 0x10422: + return new int[] {0x1044A}; + case 0x1EA4: + return new int[] {0x1EA5}; + case 
0x10421: + return new int[] {0x10449}; + case 0x10424: + return new int[] {0x1044C}; + case 0x1EA2: + return new int[] {0x1EA3}; + case 0x10423: + return new int[] {0x1044B}; + case 0x2162: + return new int[] {0x2172}; + case 0x1041E: + return new int[] {0x10446}; + case 0x2161: + return new int[] {0x2171}; + case 0x1041D: + return new int[] {0x10445}; + case 0x2160: + return new int[] {0x2170}; + case 0x1041F: + return new int[] {0x10447}; + case 0x2166: + return new int[] {0x2176}; + case 0x2165: + return new int[] {0x2175}; + case 0x2164: + return new int[] {0x2174}; + case 0x2163: + return new int[] {0x2173}; + case 0x1EBA: + return new int[] {0x1EBB}; + case 0x03B0: + return new int[] {0x03C5, 0x0308, 0x0301}; + case 0x1041A: + return new int[] {0x10442}; + case 0x1EBE: + return new int[] {0x1EBF}; + case 0x1041C: + return new int[] {0x10444}; + case 0x1EBC: + return new int[] {0x1EBD}; + case 0x1041B: + return new int[] {0x10443}; + case 0x1EB8: + return new int[] {0x1EB9}; + case 0x10415: + return new int[] {0x1043D}; + case 0x10414: + return new int[] {0x1043C}; + case 0x1EB6: + return new int[] {0x1EB7}; + case 0x10417: + return new int[] {0x1043F}; + case 0x10416: + return new int[] {0x1043E}; + case 0x10419: + return new int[] {0x10441}; + case 0x10418: + return new int[] {0x10440}; + case 0x1EB0: + return new int[] {0x1EB1}; + case 0x03AA: + return new int[] {0x03CA}; + case 0x1EB4: + return new int[] {0x1EB5}; + case 0x10411: + return new int[] {0x10439}; + case 0x10410: + return new int[] {0x10438}; + case 0x03AB: + return new int[] {0x03CB}; + case 0x1EB2: + return new int[] {0x1EB3}; + case 0x10413: + return new int[] {0x1043B}; + case 0x10412: + return new int[] {0x1043A}; + case 0x03A8: + return new int[] {0x03C8}; + case 0x1040D: + return new int[] {0x10435}; + case 0x03A9: + return new int[] {0x03C9}; + case 0x1040C: + return new int[] {0x10434}; + case 0x03A6: + return new int[] {0x03C6}; + case 0x1EAE: + return new int[] {0x1EAF}; + case 
0x1040F: + return new int[] {0x10437}; + case 0x03A7: + return new int[] {0x03C7}; + case 0x1040E: + return new int[] {0x10436}; + case 0x03C2: + return new int[] {0x03C3}; + case 0x1ECA: + return new int[] {0x1ECB}; + case 0x1ECE: + return new int[] {0x1ECF}; + case 0x1ECC: + return new int[] {0x1ECD}; + case 0x1EC8: + return new int[] {0x1EC9}; + case 0x1EC2: + return new int[] {0x1EC3}; + case 0x1EC0: + return new int[] {0x1EC1}; + case 0x1EC6: + return new int[] {0x1EC7}; + case 0x1EC4: + return new int[] {0x1EC5}; + case 0x1EDC: + return new int[] {0x1EDD}; + case 0x03D1: + return new int[] {0x03B8}; + case 0x1EDA: + return new int[] {0x1EDB}; + case 0x03D8: + return new int[] {0x03D9}; + case 0x03D5: + return new int[] {0x03C6}; + case 0x03D6: + return new int[] {0x03C0}; + case 0x1EDE: + return new int[] {0x1EDF}; + case 0x1ED8: + return new int[] {0x1ED9}; + case 0x03D0: + return new int[] {0x03B2}; + case 0x1ED2: + return new int[] {0x1ED3}; + case 0x1ED0: + return new int[] {0x1ED1}; + case 0x1ED6: + return new int[] {0x1ED7}; + case 0x1ED4: + return new int[] {0x1ED5}; + case 0x018A: + return new int[] {0x0257}; + case 0x018B: + return new int[] {0x018C}; + case 0x018E: + return new int[] {0x01DD}; + case 0x212B: + return new int[] {0x00E5}; + case 0x018F: + return new int[] {0x0259}; + case 0x212A: + return new int[] {0x006B}; + case 0x0189: + return new int[] {0x0256}; + case 0x2126: + return new int[] {0x03C9}; + case 0x0187: + return new int[] {0x0188}; + case 0x0181: + return new int[] {0x0253}; + case 0x0182: + return new int[] {0x0183}; + case 0x0186: + return new int[] {0x0254}; + case 0x0184: + return new int[] {0x0185}; + case 0x019C: + return new int[] {0x026F}; + case 0x019F: + return new int[] {0x0275}; + case 0x019D: + return new int[] {0x0272}; + case 0x0198: + return new int[] {0x0199}; + case 0x0193: + return new int[] {0x0260}; + case 0x0190: + return new int[] {0x025B}; + case 0x0191: + return new int[] {0x0192}; + case 0x0196: + 
return new int[] {0x0269}; + case 0x0197: + return new int[] {0x0268}; + case 0x0194: + return new int[] {0x0263}; + case 0x1040B: + return new int[] {0x10433}; + case 0x1040A: + return new int[] {0x10432}; + case 0x10404: + return new int[] {0x1042C}; + case 0x10403: + return new int[] {0x1042B}; + case 0x10406: + return new int[] {0x1042E}; + case 0x10405: + return new int[] {0x1042D}; + case 0x10408: + return new int[] {0x10430}; + case 0x10407: + return new int[] {0x1042F}; + case 0x10409: + return new int[] {0x10431}; + case 0x10400: + return new int[] {0x10428}; + case 0x10402: + return new int[] {0x1042A}; + case 0x10401: + return new int[] {0x10429}; + case 0x01E6: + return new int[] {0x01E7}; + case 0x01E4: + return new int[] {0x01E5}; + case 0x01E8: + return new int[] {0x01E9}; + case 0x01E2: + return new int[] {0x01E3}; + case 0x01E0: + return new int[] {0x01E1}; + case 0x01DE: + return new int[] {0x01DF}; + case 0x01DB: + return new int[] {0x01DC}; + case 0x01F7: + return new int[] {0x01BF}; + case 0x01F8: + return new int[] {0x01F9}; + case 0x01F6: + return new int[] {0x0195}; + case 0x01F0: + return new int[] {0x006A, 0x030C}; + case 0x01F4: + return new int[] {0x01F5}; + case 0x01F1: + return new int[] {0x01F3}; + case 0x01F2: + return new int[] {0x01F3}; + case 0x01EE: + return new int[] {0x01EF}; + case 0x01EC: + return new int[] {0x01ED}; + case 0x01EA: + return new int[] {0x01EB}; + case 0x01FE: + return new int[] {0x01FF}; + case 0x01FC: + return new int[] {0x01FD}; + case 0x01FA: + return new int[] {0x01FB}; + case 0x01A2: + return new int[] {0x01A3}; + case 0x03E4: + return new int[] {0x03E5}; + case 0x1EEC: + return new int[] {0x1EED}; + case 0x01A0: + return new int[] {0x01A1}; + case 0x03E2: + return new int[] {0x03E3}; + case 0x1EEA: + return new int[] {0x1EEB}; + case 0x01A6: + return new int[] {0x0280}; + case 0x03E8: + return new int[] {0x03E9}; + case 0x01A7: + return new int[] {0x01A8}; + case 0x01A4: + return new int[] {0x01A5}; + 
case 0x03E6: + return new int[] {0x03E7}; + case 0x1EEE: + return new int[] {0x1EEF}; + case 0x03E0: + return new int[] {0x03E1}; + case 0x03DC: + return new int[] {0x03DD}; + case 0x1EE4: + return new int[] {0x1EE5}; + case 0x03DA: + return new int[] {0x03DB}; + case 0x1EE2: + return new int[] {0x1EE3}; + case 0x1EE8: + return new int[] {0x1EE9}; + case 0x03DE: + return new int[] {0x03DF}; + case 0x1EE6: + return new int[] {0x1EE7}; + case 0x1EE0: + return new int[] {0x1EE1}; + case 0x01B3: + return new int[] {0x01B4}; + case 0x03F5: + return new int[] {0x03B5}; + case 0x01B1: + return new int[] {0x028A}; + case 0x01B2: + return new int[] {0x028B}; + case 0x03F4: + return new int[] {0x03B8}; + case 0x01B7: + return new int[] {0x0292}; + case 0x01B8: + return new int[] {0x01B9}; + case 0x01B5: + return new int[] {0x01B6}; + case 0x03F1: + return new int[] {0x03C1}; + case 0x03F2: + return new int[] {0x03C3}; + case 0x03F0: + return new int[] {0x03BA}; + case 0x1EF4: + return new int[] {0x1EF5}; + case 0x01AC: + return new int[] {0x01AD}; + case 0x03EE: + return new int[] {0x03EF}; + case 0x1EF2: + return new int[] {0x1EF3}; + case 0x03EC: + return new int[] {0x03ED}; + case 0x01AF: + return new int[] {0x01B0}; + case 0x1EF8: + return new int[] {0x1EF9}; + case 0x1EF6: + return new int[] {0x1EF7}; + case 0x01AE: + return new int[] {0x0288}; + case 0x01A9: + return new int[] {0x0283}; + case 0x1EF0: + return new int[] {0x1EF1}; + case 0x03EA: + return new int[] {0x03EB}; + case 0x01C4: + return new int[] {0x01C6}; + case 0x01C5: + return new int[] {0x01C6}; + case 0x01C8: + return new int[] {0x01C9}; + case 0x01C7: + return new int[] {0x01C9}; + case 0x01BC: + return new int[] {0x01BD}; + case 0x01D5: + return new int[] {0x01D6}; + case 0x01D3: + return new int[] {0x01D4}; + case 0x01D9: + return new int[] {0x01DA}; + case 0x01D7: + return new int[] {0x01D8}; + case 0x01D1: + return new int[] {0x01D2}; + case 0x01CD: + return new int[] {0x01CE}; + case 0x01CB: + 
return new int[] {0x01CC}; + case 0x01CF: + return new int[] {0x01D0}; + case 0x01CA: + return new int[] {0x01CC}; + case 0x042A: + return new int[] {0x044A}; + case 0x042D: + return new int[] {0x044D}; + case 0x042E: + return new int[] {0x044E}; + case 0x042B: + return new int[] {0x044B}; + case 0x042C: + return new int[] {0x044C}; + case 0x0428: + return new int[] {0x0448}; + case 0x0429: + return new int[] {0x0449}; + case 0x0426: + return new int[] {0x0446}; + case 0x1F2E: + return new int[] {0x1F26}; + case 0x0427: + return new int[] {0x0447}; + case 0x1F2F: + return new int[] {0x1F27}; + case 0x0420: + return new int[] {0x0440}; + case 0x0421: + return new int[] {0x0441}; + case 0x0424: + return new int[] {0x0444}; + case 0x1F2C: + return new int[] {0x1F24}; + case 0x0425: + return new int[] {0x0445}; + case 0x1F2D: + return new int[] {0x1F25}; + case 0x0422: + return new int[] {0x0442}; + case 0x1F2A: + return new int[] {0x1F22}; + case 0x0423: + return new int[] {0x0443}; + case 0x1F2B: + return new int[] {0x1F23}; + case 0x1F28: + return new int[] {0x1F20}; + case 0x041E: + return new int[] {0x043E}; + case 0x041F: + return new int[] {0x043F}; + case 0x1F29: + return new int[] {0x1F21}; + case 0x1F3F: + return new int[] {0x1F37}; + case 0x1F3A: + return new int[] {0x1F32}; + case 0x1F3D: + return new int[] {0x1F35}; + case 0x1F3E: + return new int[] {0x1F36}; + case 0x1F3B: + return new int[] {0x1F33}; + case 0x1F3C: + return new int[] {0x1F34}; + case 0x1F38: + return new int[] {0x1F30}; + case 0x1F39: + return new int[] {0x1F31}; + case 0x042F: + return new int[] {0x044F}; + case 0x1F52: + return new int[] {0x03C5, 0x0313, 0x0300}; + case 0x020A: + return new int[] {0x020B}; + case 0x1F50: + return new int[] {0x03C5, 0x0313}; + case 0x1F56: + return new int[] {0x03C5, 0x0313, 0x0342}; + case 0x020E: + return new int[] {0x020F}; + case 0x1F54: + return new int[] {0x03C5, 0x0313, 0x0301}; + case 0x020C: + return new int[] {0x020D}; + case 0x0208: + return 
new int[] {0x0209}; + case 0x0206: + return new int[] {0x0207}; + case 0x0200: + return new int[] {0x0201}; + case 0x1F4A: + return new int[] {0x1F42}; + case 0x1F4B: + return new int[] {0x1F43}; + case 0x0204: + return new int[] {0x0205}; + case 0x0202: + return new int[] {0x0203}; + case 0x1F4C: + return new int[] {0x1F44}; + case 0x1F4D: + return new int[] {0x1F45}; + case 0x1F49: + return new int[] {0x1F41}; + case 0x1F48: + return new int[] {0x1F40}; + case 0x021A: + return new int[] {0x021B}; + case 0x021E: + return new int[] {0x021F}; + case 0x1F68: + return new int[] {0x1F60}; + case 0x021C: + return new int[] {0x021D}; + case 0x0218: + return new int[] {0x0219}; + case 0x1F5B: + return new int[] {0x1F53}; + case 0x0212: + return new int[] {0x0213}; + case 0x0210: + return new int[] {0x0211}; + case 0x1F5F: + return new int[] {0x1F57}; + case 0x0216: + return new int[] {0x0217}; + case 0x1F5D: + return new int[] {0x1F55}; + case 0x0214: + return new int[] {0x0215}; + case 0x1F59: + return new int[] {0x1F51}; + case 0x040B: + return new int[] {0x045B}; + case 0x040C: + return new int[] {0x045C}; + case 0x040A: + return new int[] {0x045A}; + case 0x0406: + return new int[] {0x0456}; + case 0x1F0E: + return new int[] {0x1F06}; + case 0x0407: + return new int[] {0x0457}; + case 0x1F0F: + return new int[] {0x1F07}; + case 0x0404: + return new int[] {0x0454}; + case 0x1F0C: + return new int[] {0x1F04}; + case 0x0405: + return new int[] {0x0455}; + case 0x1F0D: + return new int[] {0x1F05}; + case 0x0408: + return new int[] {0x0458}; + case 0x0409: + return new int[] {0x0459}; + case 0x0402: + return new int[] {0x0452}; + case 0x1F0A: + return new int[] {0x1F02}; + case 0x0403: + return new int[] {0x0453}; + case 0x1F0B: + return new int[] {0x1F03}; + case 0x0400: + return new int[] {0x0450}; + case 0x0401: + return new int[] {0x0451}; + case 0x1F09: + return new int[] {0x1F01}; + case 0x1F08: + return new int[] {0x1F00}; + case 0x041C: + return new int[] {0x043C}; 
+ case 0x041D: + return new int[] {0x043D}; + case 0x041A: + return new int[] {0x043A}; + case 0x041B: + return new int[] {0x043B}; + case 0x0417: + return new int[] {0x0437}; + case 0x0418: + return new int[] {0x0438}; + case 0x0415: + return new int[] {0x0435}; + case 0x1F1D: + return new int[] {0x1F15}; + case 0x0416: + return new int[] {0x0436}; + case 0x0419: + return new int[] {0x0439}; + case 0x0410: + return new int[] {0x0430}; + case 0x0413: + return new int[] {0x0433}; + case 0x1F1B: + return new int[] {0x1F13}; + case 0x0414: + return new int[] {0x0434}; + case 0x1F1C: + return new int[] {0x1F14}; + case 0x0411: + return new int[] {0x0431}; + case 0x0412: + return new int[] {0x0432}; + case 0x1F1A: + return new int[] {0x1F12}; + case 0x040F: + return new int[] {0x045F}; + case 0x040D: + return new int[] {0x045D}; + case 0x040E: + return new int[] {0x045E}; + case 0x1F18: + return new int[] {0x1F10}; + case 0x1F19: + return new int[] {0x1F11}; + case 0x004F: + return new int[] {0x006F}; + case 0x004D: + return new int[] {0x006D}; + case 0x004E: + return new int[] {0x006E}; + case 0x004B: + return new int[] {0x006B}; + case 0x004C: + return new int[] {0x006C}; + case 0x004A: + return new int[] {0x006A}; + case 0x0046: + return new int[] {0x0066}; + case 0x0047: + return new int[] {0x0067}; + case 0x0044: + return new int[] {0x0064}; + case 0x0045: + return new int[] {0x0065}; + case 0x0048: + return new int[] {0x0068}; + case 0x0049: + return new int[] {0x0069}; + case 0x0042: + return new int[] {0x0062}; + case 0x0043: + return new int[] {0x0063}; + case 0x0041: + return new int[] {0x0061}; + case 0x005A: + return new int[] {0x007A}; + case 0x0057: + return new int[] {0x0077}; + case 0x0058: + return new int[] {0x0078}; + case 0x0055: + return new int[] {0x0075}; + case 0x0056: + return new int[] {0x0076}; + case 0x0059: + return new int[] {0x0079}; + case 0x0050: + return new int[] {0x0070}; + case 0x0053: + return new int[] {0x0073}; + case 0x0054: + 
return new int[] {0x0074}; + case 0x0051: + return new int[] {0x0071}; + case 0x0052: + return new int[] {0x0072}; + case 0x022C: + return new int[] {0x022D}; + case 0x046E: + return new int[] {0x046F}; + case 0x022A: + return new int[] {0x022B}; + case 0x046C: + return new int[] {0x046D}; + case 0x022E: + return new int[] {0x022F}; + case 0x0228: + return new int[] {0x0229}; + case 0x046A: + return new int[] {0x046B}; + case 0x0222: + return new int[] {0x0223}; + case 0x0464: + return new int[] {0x0465}; + case 0x1F6C: + return new int[] {0x1F64}; + case 0x1F6D: + return new int[] {0x1F65}; + case 0x0220: + return new int[] {0x019E}; + case 0x0462: + return new int[] {0x0463}; + case 0x1F6A: + return new int[] {0x1F62}; + case 0x1F6B: + return new int[] {0x1F63}; + case 0x0226: + return new int[] {0x0227}; + case 0x0468: + return new int[] {0x0469}; + case 0x0224: + return new int[] {0x0225}; + case 0x0466: + return new int[] {0x0467}; + case 0x1F6E: + return new int[] {0x1F66}; + case 0x1F6F: + return new int[] {0x1F67}; + case 0x1F69: + return new int[] {0x1F61}; + case 0x0460: + return new int[] {0x0461}; + case 0x047E: + return new int[] {0x047F}; + case 0x1F85: + return new int[] {0x1F05, 0x03B9}; + case 0x1F86: + return new int[] {0x1F06, 0x03B9}; + case 0x047C: + return new int[] {0x047D}; + case 0x1F83: + return new int[] {0x1F03, 0x03B9}; + case 0x1F84: + return new int[] {0x1F04, 0x03B9}; + case 0x1F89: + return new int[] {0x1F01, 0x03B9}; + case 0x1F87: + return new int[] {0x1F07, 0x03B9}; + case 0x1F88: + return new int[] {0x1F00, 0x03B9}; + case 0x047A: + return new int[] {0x047B}; + case 0x1F81: + return new int[] {0x1F01, 0x03B9}; + case 0x1F82: + return new int[] {0x1F02, 0x03B9}; + case 0x1F80: + return new int[] {0x1F00, 0x03B9}; + case 0x0476: + return new int[] {0x0477}; + case 0x0232: + return new int[] {0x0233}; + case 0x0474: + return new int[] {0x0475}; + case 0x0478: + return new int[] {0x0479}; + case 0x0230: + return new int[] {0x0231}; 
+ case 0x0472: + return new int[] {0x0473}; + case 0x0470: + return new int[] {0x0471}; + case 0x1F96: + return new int[] {0x1F26, 0x03B9}; + case 0x1F97: + return new int[] {0x1F27, 0x03B9}; + case 0x1F94: + return new int[] {0x1F24, 0x03B9}; + case 0x048E: + return new int[] {0x048F}; + case 0x1F95: + return new int[] {0x1F25, 0x03B9}; + case 0x1F98: + return new int[] {0x1F20, 0x03B9}; + case 0x1F99: + return new int[] {0x1F21, 0x03B9}; + case 0x1F92: + return new int[] {0x1F22, 0x03B9}; + case 0x048C: + return new int[] {0x048D}; + case 0x1F93: + return new int[] {0x1F23, 0x03B9}; + case 0x1F90: + return new int[] {0x1F20, 0x03B9}; + case 0x048A: + return new int[] {0x048B}; + case 0x1F91: + return new int[] {0x1F21, 0x03B9}; + case 0x1F8E: + return new int[] {0x1F06, 0x03B9}; + case 0x1F8F: + return new int[] {0x1F07, 0x03B9}; + case 0x1F8C: + return new int[] {0x1F04, 0x03B9}; + case 0x1F8D: + return new int[] {0x1F05, 0x03B9}; + case 0x1F8A: + return new int[] {0x1F02, 0x03B9}; + case 0x1F8B: + return new int[] {0x1F03, 0x03B9}; + case 0x0480: + return new int[] {0x0481}; + case 0x049E: + return new int[] {0x049F}; + case 0x049C: + return new int[] {0x049D}; + case 0x049A: + return new int[] {0x049B}; + case 0x1F9F: + return new int[] {0x1F27, 0x03B9}; + case 0x0498: + return new int[] {0x0499}; + case 0x1F9D: + return new int[] {0x1F25, 0x03B9}; + case 0x0496: + return new int[] {0x0497}; + case 0x1F9E: + return new int[] {0x1F26, 0x03B9}; + case 0x0490: + return new int[] {0x0491}; + case 0x1F9B: + return new int[] {0x1F23, 0x03B9}; + case 0x0494: + return new int[] {0x0495}; + case 0x1F9C: + return new int[] {0x1F24, 0x03B9}; + case 0x0492: + return new int[] {0x0493}; + case 0x1F9A: + return new int[] {0x1F22, 0x03B9}; + case 0x04C1: + return new int[] {0x04C2}; + case 0x1FCA: + return new int[] {0x1F74}; + case 0x04C5: + return new int[] {0x04C6}; + case 0x04C3: + return new int[] {0x04C4}; + case 0x1FCB: + return new int[] {0x1F75}; + case 0x1FCC: + 
return new int[] {0x03B7, 0x03B9}; + case 0x1FC8: + return new int[] {0x1F72}; + case 0x1FC9: + return new int[] {0x1F73}; + case 0x1FC6: + return new int[] {0x03B7, 0x0342}; + case 0x1FC7: + return new int[] {0x03B7, 0x0342, 0x03B9}; + case 0x04BA: + return new int[] {0x04BB}; + case 0x1FC4: + return new int[] {0x03AE, 0x03B9}; + case 0x04BE: + return new int[] {0x04BF}; + case 0x1FC2: + return new int[] {0x1F74, 0x03B9}; + case 0x04BC: + return new int[] {0x04BD}; + case 0x1FC3: + return new int[] {0x03B7, 0x03B9}; + case 0x04B8: + return new int[] {0x04B9}; + case 0x04B6: + return new int[] {0x04B7}; + case 0x1FBE: + return new int[] {0x03B9}; + case 0x04D2: + return new int[] {0x04D3}; + case 0x1FDA: + return new int[] {0x1F76}; + case 0x1FDB: + return new int[] {0x1F77}; + case 0x04D0: + return new int[] {0x04D1}; + case 0x04D6: + return new int[] {0x04D7}; + case 0x04D4: + return new int[] {0x04D5}; + case 0x1FD9: + return new int[] {0x1FD1}; + case 0x1FD7: + return new int[] {0x03B9, 0x0308, 0x0342}; + case 0x1FD8: + return new int[] {0x1FD0}; + case 0x04CB: + return new int[] {0x04CC}; + case 0x1FD2: + return new int[] {0x03B9, 0x0308, 0x0300}; + case 0x1FD6: + return new int[] {0x03B9, 0x0342}; + case 0x1FD3: + return new int[] {0x03B9, 0x0308, 0x0301}; + case 0x04CD: + return new int[] {0x04CE}; + case 0x04C9: + return new int[] {0x04CA}; + case 0x04C7: + return new int[] {0x04C8}; + case 0x1FEB: + return new int[] {0x1F7B}; + case 0x04E4: + return new int[] {0x04E5}; + case 0x1FEC: + return new int[] {0x1FE5}; + case 0x04E2: + return new int[] {0x04E3}; + case 0x1FEA: + return new int[] {0x1F7A}; + case 0x04E8: + return new int[] {0x04E9}; + case 0x04E6: + return new int[] {0x04E7}; + case 0x1FE8: + return new int[] {0x1FE0}; + case 0x1FE9: + return new int[] {0x1FE1}; + case 0x04E0: + return new int[] {0x04E1}; + case 0x1FE2: + return new int[] {0x03C5, 0x0308, 0x0300}; + case 0x04DC: + return new int[] {0x04DD}; + case 0x1FE3: + return new int[] 
{0x03C5, 0x0308, 0x0301}; + case 0x04DA: + return new int[] {0x04DB}; + case 0x1FE6: + return new int[] {0x03C5, 0x0342}; + case 0x1FE7: + return new int[] {0x03C5, 0x0308, 0x0342}; + case 0x1FE4: + return new int[] {0x03C1, 0x0313}; + case 0x04DE: + return new int[] {0x04DF}; + case 0x04D8: + return new int[] {0x04D9}; + case 0x04F4: + return new int[] {0x04F5}; + case 0x1FFC: + return new int[] {0x03C9, 0x03B9}; + case 0x04F2: + return new int[] {0x04F3}; + case 0x1FFA: + return new int[] {0x1F7C}; + case 0x1FFB: + return new int[] {0x1F7D}; + case 0x04F8: + return new int[] {0x04F9}; + case 0x1FF9: + return new int[] {0x1F79}; + case 0x04F0: + return new int[] {0x04F1}; + case 0x04EC: + return new int[] {0x04ED}; + case 0x1FF3: + return new int[] {0x03C9, 0x03B9}; + case 0x1FF4: + return new int[] {0x03CE, 0x03B9}; + case 0x04EA: + return new int[] {0x04EB}; + case 0x1FF2: + return new int[] {0x1F7C, 0x03B9}; + case 0x1FF7: + return new int[] {0x03C9, 0x0342, 0x03B9}; + case 0x1FF8: + return new int[] {0x1F78}; + case 0x04EE: + return new int[] {0x04EF}; + case 0x1FF6: + return new int[] {0x03C9, 0x0342}; + case 0x04A0: + return new int[] {0x04A1}; + case 0x1FAB: + return new int[] {0x1F63, 0x03B9}; + case 0x04A4: + return new int[] {0x04A5}; + case 0x1FAC: + return new int[] {0x1F64, 0x03B9}; + case 0x04A2: + return new int[] {0x04A3}; + case 0x1FAA: + return new int[] {0x1F62, 0x03B9}; + case 0x1FA6: + return new int[] {0x1F66, 0x03B9}; + case 0x1FA7: + return new int[] {0x1F67, 0x03B9}; + case 0x1FA4: + return new int[] {0x1F64, 0x03B9}; + case 0x1FA5: + return new int[] {0x1F65, 0x03B9}; + case 0x1FA8: + return new int[] {0x1F60, 0x03B9}; + case 0x1FA9: + return new int[] {0x1F61, 0x03B9}; + case 0x1FA2: + return new int[] {0x1F62, 0x03B9}; + case 0x1FA3: + return new int[] {0x1F63, 0x03B9}; + case 0x1FA0: + return new int[] {0x1F60, 0x03B9}; + case 0x1FA1: + return new int[] {0x1F61, 0x03B9}; + case 0x04B0: + return new int[] {0x04B1}; + case 0x04B4: + 
          return new int[] {0x04B5};
        case 0x1FBC:
          return new int[] {0x03B1, 0x03B9};
        case 0x04B2:
          return new int[] {0x04B3};
        case 0x1FBA:
          return new int[] {0x1F70};
        case 0x1FBB:
          return new int[] {0x1F71};
        case 0x1FB7:
          return new int[] {0x03B1, 0x0342, 0x03B9};
        case 0x1FB8:
          return new int[] {0x1FB0};
        case 0x04AE:
          return new int[] {0x04AF};
        case 0x1FB6:
          return new int[] {0x03B1, 0x0342};
        case 0x1FB9:
          return new int[] {0x1FB1};
        case 0x04AC:
          return new int[] {0x04AD};
        case 0x1FB3:
          return new int[] {0x03B1, 0x03B9};
        case 0x1FB4:
          return new int[] {0x03AC, 0x03B9};
        case 0x04AA:
          return new int[] {0x04AB};
        case 0x1FB2:
          return new int[] {0x1F70, 0x03B9};
        case 0x1FAF:
          return new int[] {0x1F67, 0x03B9};
        case 0x04A8:
          return new int[] {0x04A9};
        case 0x1FAD:
          return new int[] {0x1F65, 0x03B9};
        case 0x04A6:
          return new int[] {0x04A7};
        case 0x1FAE:
          return new int[] {0x1F66, 0x03B9};
        default:
          // Code points with no entry in the mapping table are returned
          // unchanged as a single-element array (identity mapping).
          return new int[] {codePoint};
      }
  }

  /**
   * ASCII space characters.
   *
   * @param codePoint the character (Unicode code point) to be tested.
   * @return {@code true} if the given {@code codePoint} is "ASCII space characters".
   * @see RFC 3454, Appendix C.1.1
   */
  public static boolean prohibitionAsciiSpace(int codePoint) {
    // U+0020 SPACE is the only entry in table C.1.1.
    return codePoint == 0x0020
        ;
  }

  /**
   * Non-ASCII space characters.
   *
   * @param codePoint the character (Unicode code point) to be tested.
   * @return {@code true} if the given {@code codePoint} is "Non-ASCII space characters".
+ * @see RFC 3454, Appendix C.1.2 + */ + public static boolean prohibitionNonAsciiSpace(int codePoint) { + return codePoint == 0x00A0 + || codePoint == 0x1680 + || codePoint == 0x2000 + || codePoint == 0x2001 + || codePoint == 0x2002 + || codePoint == 0x2003 + || codePoint == 0x2004 + || codePoint == 0x2005 + || codePoint == 0x2006 + || codePoint == 0x2007 + || codePoint == 0x2008 + || codePoint == 0x2009 + || codePoint == 0x200A + || codePoint == 0x200B + || codePoint == 0x202F + || codePoint == 0x205F + || codePoint == 0x3000 + + ; + } + + /** + * ASCII control characters. + * + * @param codePoint the character (Unicode code point) to be tested. + * @return {@code true} if the given {@code codePoint} is "ASCII control characters". + * @see RFC 3454, Appendix C.2.1 + */ + public static boolean prohibitionAsciiControl(int codePoint) { + return (codePoint >= 0x0000 && codePoint <= 0x001F) + || codePoint == 0x007F + + ; + } + + /** + * Non-ASCII control characters. + * + * @param codePoint the character (Unicode code point) to be tested. + * @return {@code true} if the given {@code codePoint} is "Non-ASCII control characters". + * @see RFC 3454, Appendix C.2.2 + */ + public static boolean prohibitionNonAsciiControl(int codePoint) { + return (codePoint >= 0x0080 && codePoint <= 0x009F) + || codePoint == 0x06DD + || codePoint == 0x070F + || codePoint == 0x180E + || codePoint == 0x200C + || codePoint == 0x200D + || codePoint == 0x2028 + || codePoint == 0x2029 + || codePoint == 0x2060 + || codePoint == 0x2061 + || codePoint == 0x2062 + || codePoint == 0x2063 + || (codePoint >= 0x206A && codePoint <= 0x206F) + || codePoint == 0xFEFF + || (codePoint >= 0xFFF9 && codePoint <= 0xFFFC) + || (codePoint >= 0x1D173 && codePoint <= 0x1D17A) + + ; + } + + /** + * Private use. + * + * @param codePoint the character (Unicode code point) to be tested. + * @return {@code true} if the given {@code codePoint} is "Private use". 
+ * @see RFC 3454, Appendix C.3 + */ + public static boolean prohibitionPrivateUse(int codePoint) { + return (codePoint >= 0xE000 && codePoint <= 0xF8FF) + || (codePoint >= 0xF0000 && codePoint <= 0xFFFFD) + || (codePoint >= 0x100000 && codePoint <= 0x10FFFD) + + ; + } + + /** + * Non-character code points. + * + * @param codePoint the character (Unicode code point) to be tested. + * @return {@code true} if the given {@code codePoint} is "Non-character code points". + * @see RFC 3454, Appendix C.4 + */ + public static boolean prohibitionNonCharacterCodePoints(int codePoint) { + return (codePoint >= 0xFDD0 && codePoint <= 0xFDEF) + || (codePoint >= 0xFFFE && codePoint <= 0xFFFF) + || (codePoint >= 0x1FFFE && codePoint <= 0x1FFFF) + || (codePoint >= 0x2FFFE && codePoint <= 0x2FFFF) + || (codePoint >= 0x3FFFE && codePoint <= 0x3FFFF) + || (codePoint >= 0x4FFFE && codePoint <= 0x4FFFF) + || (codePoint >= 0x5FFFE && codePoint <= 0x5FFFF) + || (codePoint >= 0x6FFFE && codePoint <= 0x6FFFF) + || (codePoint >= 0x7FFFE && codePoint <= 0x7FFFF) + || (codePoint >= 0x8FFFE && codePoint <= 0x8FFFF) + || (codePoint >= 0x9FFFE && codePoint <= 0x9FFFF) + || (codePoint >= 0xAFFFE && codePoint <= 0xAFFFF) + || (codePoint >= 0xBFFFE && codePoint <= 0xBFFFF) + || (codePoint >= 0xCFFFE && codePoint <= 0xCFFFF) + || (codePoint >= 0xDFFFE && codePoint <= 0xDFFFF) + || (codePoint >= 0xEFFFE && codePoint <= 0xEFFFF) + || (codePoint >= 0xFFFFE && codePoint <= 0xFFFFF) + || (codePoint >= 0x10FFFE && codePoint <= 0x10FFFF) + + ; + } + + /** + * Surrogate codes. + * + * @param codePoint the character (Unicode code point) to be tested. + * @return {@code true} if the given {@code codePoint} is "Surrogate codes". + * @see RFC 3454, Appendix C.5 + */ + public static boolean prohibitionSurrogateCodes(int codePoint) { + return (codePoint >= 0xD800 && codePoint <= 0xDFFF) + + ; + } + + /** + * Inappropriate for plain text. + * + * @param codePoint the character (Unicode code point) to be tested. 
+ * @return {@code true} if the given {@code codePoint} is + * "Inappropriate for plain text". + * @see RFC 3454, Appendix C.6 + */ + public static boolean prohibitionInappropriatePlainText(int codePoint) { + return codePoint == 0xFFF9 + || codePoint == 0xFFFA + || codePoint == 0xFFFB + || codePoint == 0xFFFC + || codePoint == 0xFFFD + + ; + } + + /** + * Inappropriate for canonical representation. + * + * @param codePoint the character (Unicode code point) to be tested. + * @return {@code true} if the given {@code codePoint} is + * "Inappropriate for canonical representation". + * @see RFC 3454, Appendix C.7 + */ + public static boolean prohibitionInappropriateCanonicalRepresentation(int codePoint) { + return (codePoint >= 0x2FF0 && codePoint <= 0x2FFB) + + ; + } + + /** + * Change display properties or are deprecated. + * + * @param codePoint the character (Unicode code point) to be tested. + * @return {@code true} if the given {@code codePoint} is + * "Change display properties or are deprecated". + * @see RFC 3454, Appendix C.8 + */ + public static boolean prohibitionChangeDisplayProperties(int codePoint) { + return codePoint == 0x0340 + || codePoint == 0x0341 + || codePoint == 0x200E + || codePoint == 0x200F + || codePoint == 0x202A + || codePoint == 0x202B + || codePoint == 0x202C + || codePoint == 0x202D + || codePoint == 0x202E + || codePoint == 0x206A + || codePoint == 0x206B + || codePoint == 0x206C + || codePoint == 0x206D + || codePoint == 0x206E + || codePoint == 0x206F + + ; + } + + /** + * Tagging characters. + * + * @param codePoint the character (Unicode code point) to be tested. + * @return {@code true} if the given {@code codePoint} is "Tagging characters". + * @see RFC 3454, Appendix C.9 + */ + public static boolean prohibitionTaggingCharacters(int codePoint) { + return codePoint == 0xE0001 + || (codePoint >= 0xE0020 && codePoint <= 0xE007F) + + ; + } + + /** + * Characters with bidirectional property "R" or "AL". 
   *
   * @param codePoint the character (Unicode code point) to be tested.
   * @return {@code true} if the given {@code codePoint} is
   *         "Characters with bidirectional property R or AL".
   * @see RFC 3454, Appendix D.1
   */
  public static boolean bidirectionalPropertyRorAL(int codePoint) {
    // Verbatim transcription of RFC 3454 table D.1 (right-to-left scripts
    // and their presentation forms) — keep in sync with the RFC table.
    return codePoint == 0x05BE
        || codePoint == 0x05C0
        || codePoint == 0x05C3
        || (codePoint >= 0x05D0 && codePoint <= 0x05EA)
        || (codePoint >= 0x05F0 && codePoint <= 0x05F4)
        || codePoint == 0x061B
        || codePoint == 0x061F
        || (codePoint >= 0x0621 && codePoint <= 0x063A)
        || (codePoint >= 0x0640 && codePoint <= 0x064A)
        || (codePoint >= 0x066D && codePoint <= 0x066F)
        || (codePoint >= 0x0671 && codePoint <= 0x06D5)
        || codePoint == 0x06DD
        || (codePoint >= 0x06E5 && codePoint <= 0x06E6)
        || (codePoint >= 0x06FA && codePoint <= 0x06FE)
        || (codePoint >= 0x0700 && codePoint <= 0x070D)
        || codePoint == 0x0710
        || (codePoint >= 0x0712 && codePoint <= 0x072C)
        || (codePoint >= 0x0780 && codePoint <= 0x07A5)
        || codePoint == 0x07B1
        || codePoint == 0x200F
        || codePoint == 0xFB1D
        || (codePoint >= 0xFB1F && codePoint <= 0xFB28)
        || (codePoint >= 0xFB2A && codePoint <= 0xFB36)
        || (codePoint >= 0xFB38 && codePoint <= 0xFB3C)
        || codePoint == 0xFB3E
        || (codePoint >= 0xFB40 && codePoint <= 0xFB41)
        || (codePoint >= 0xFB43 && codePoint <= 0xFB44)
        || (codePoint >= 0xFB46 && codePoint <= 0xFBB1)
        || (codePoint >= 0xFBD3 && codePoint <= 0xFD3D)
        || (codePoint >= 0xFD50 && codePoint <= 0xFD8F)
        || (codePoint >= 0xFD92 && codePoint <= 0xFDC7)
        || (codePoint >= 0xFDF0 && codePoint <= 0xFDFC)
        || (codePoint >= 0xFE70 && codePoint <= 0xFE74)
        || (codePoint >= 0xFE76 && codePoint <= 0xFEFC)
        ;
  }

  /**
   * Characters with bidirectional property "L".
   *
   * @param codePoint the character (Unicode code point) to be tested.
   * @return {@code true} if the given {@code codePoint} is
   *         "Characters with bidirectional property L".
+ * @see RFC 3454, Appendix D.2 + */ + public static boolean bidirectionalPropertyL(int codePoint) { + return (codePoint >= 0x0041 && codePoint <= 0x005A) + || (codePoint >= 0x0061 && codePoint <= 0x007A) + || codePoint == 0x00AA + || codePoint == 0x00B5 + || codePoint == 0x00BA + || (codePoint >= 0x00C0 && codePoint <= 0x00D6) + || (codePoint >= 0x00D8 && codePoint <= 0x00F6) + || (codePoint >= 0x00F8 && codePoint <= 0x0220) + || (codePoint >= 0x0222 && codePoint <= 0x0233) + || (codePoint >= 0x0250 && codePoint <= 0x02AD) + || (codePoint >= 0x02B0 && codePoint <= 0x02B8) + || (codePoint >= 0x02BB && codePoint <= 0x02C1) + || (codePoint >= 0x02D0 && codePoint <= 0x02D1) + || (codePoint >= 0x02E0 && codePoint <= 0x02E4) + || codePoint == 0x02EE + || codePoint == 0x037A + || codePoint == 0x0386 + || (codePoint >= 0x0388 && codePoint <= 0x038A) + || codePoint == 0x038C + || (codePoint >= 0x038E && codePoint <= 0x03A1) + || (codePoint >= 0x03A3 && codePoint <= 0x03CE) + || (codePoint >= 0x03D0 && codePoint <= 0x03F5) + || (codePoint >= 0x0400 && codePoint <= 0x0482) + || (codePoint >= 0x048A && codePoint <= 0x04CE) + || (codePoint >= 0x04D0 && codePoint <= 0x04F5) + || (codePoint >= 0x04F8 && codePoint <= 0x04F9) + || (codePoint >= 0x0500 && codePoint <= 0x050F) + || (codePoint >= 0x0531 && codePoint <= 0x0556) + || (codePoint >= 0x0559 && codePoint <= 0x055F) + || (codePoint >= 0x0561 && codePoint <= 0x0587) + || codePoint == 0x0589 + || codePoint == 0x0903 + || (codePoint >= 0x0905 && codePoint <= 0x0939) + || (codePoint >= 0x093D && codePoint <= 0x0940) + || (codePoint >= 0x0949 && codePoint <= 0x094C) + || codePoint == 0x0950 + || (codePoint >= 0x0958 && codePoint <= 0x0961) + || (codePoint >= 0x0964 && codePoint <= 0x0970) + || (codePoint >= 0x0982 && codePoint <= 0x0983) + || (codePoint >= 0x0985 && codePoint <= 0x098C) + || (codePoint >= 0x098F && codePoint <= 0x0990) + || (codePoint >= 0x0993 && codePoint <= 0x09A8) + || (codePoint >= 0x09AA && codePoint <= 
0x09B0) + || codePoint == 0x09B2 + || (codePoint >= 0x09B6 && codePoint <= 0x09B9) + || (codePoint >= 0x09BE && codePoint <= 0x09C0) + || (codePoint >= 0x09C7 && codePoint <= 0x09C8) + || (codePoint >= 0x09CB && codePoint <= 0x09CC) + || codePoint == 0x09D7 + || (codePoint >= 0x09DC && codePoint <= 0x09DD) + || (codePoint >= 0x09DF && codePoint <= 0x09E1) + || (codePoint >= 0x09E6 && codePoint <= 0x09F1) + || (codePoint >= 0x09F4 && codePoint <= 0x09FA) + || (codePoint >= 0x0A05 && codePoint <= 0x0A0A) + || (codePoint >= 0x0A0F && codePoint <= 0x0A10) + || (codePoint >= 0x0A13 && codePoint <= 0x0A28) + || (codePoint >= 0x0A2A && codePoint <= 0x0A30) + || (codePoint >= 0x0A32 && codePoint <= 0x0A33) + || (codePoint >= 0x0A35 && codePoint <= 0x0A36) + || (codePoint >= 0x0A38 && codePoint <= 0x0A39) + || (codePoint >= 0x0A3E && codePoint <= 0x0A40) + || (codePoint >= 0x0A59 && codePoint <= 0x0A5C) + || codePoint == 0x0A5E + || (codePoint >= 0x0A66 && codePoint <= 0x0A6F) + || (codePoint >= 0x0A72 && codePoint <= 0x0A74) + || codePoint == 0x0A83 + || (codePoint >= 0x0A85 && codePoint <= 0x0A8B) + || codePoint == 0x0A8D + || (codePoint >= 0x0A8F && codePoint <= 0x0A91) + || (codePoint >= 0x0A93 && codePoint <= 0x0AA8) + || (codePoint >= 0x0AAA && codePoint <= 0x0AB0) + || (codePoint >= 0x0AB2 && codePoint <= 0x0AB3) + || (codePoint >= 0x0AB5 && codePoint <= 0x0AB9) + || (codePoint >= 0x0ABD && codePoint <= 0x0AC0) + || codePoint == 0x0AC9 + || (codePoint >= 0x0ACB && codePoint <= 0x0ACC) + || codePoint == 0x0AD0 + || codePoint == 0x0AE0 + || (codePoint >= 0x0AE6 && codePoint <= 0x0AEF) + || (codePoint >= 0x0B02 && codePoint <= 0x0B03) + || (codePoint >= 0x0B05 && codePoint <= 0x0B0C) + || (codePoint >= 0x0B0F && codePoint <= 0x0B10) + || (codePoint >= 0x0B13 && codePoint <= 0x0B28) + || (codePoint >= 0x0B2A && codePoint <= 0x0B30) + || (codePoint >= 0x0B32 && codePoint <= 0x0B33) + || (codePoint >= 0x0B36 && codePoint <= 0x0B39) + || (codePoint >= 0x0B3D && codePoint <= 
0x0B3E) + || codePoint == 0x0B40 + || (codePoint >= 0x0B47 && codePoint <= 0x0B48) + || (codePoint >= 0x0B4B && codePoint <= 0x0B4C) + || codePoint == 0x0B57 + || (codePoint >= 0x0B5C && codePoint <= 0x0B5D) + || (codePoint >= 0x0B5F && codePoint <= 0x0B61) + || (codePoint >= 0x0B66 && codePoint <= 0x0B70) + || codePoint == 0x0B83 + || (codePoint >= 0x0B85 && codePoint <= 0x0B8A) + || (codePoint >= 0x0B8E && codePoint <= 0x0B90) + || (codePoint >= 0x0B92 && codePoint <= 0x0B95) + || (codePoint >= 0x0B99 && codePoint <= 0x0B9A) + || codePoint == 0x0B9C + || (codePoint >= 0x0B9E && codePoint <= 0x0B9F) + || (codePoint >= 0x0BA3 && codePoint <= 0x0BA4) + || (codePoint >= 0x0BA8 && codePoint <= 0x0BAA) + || (codePoint >= 0x0BAE && codePoint <= 0x0BB5) + || (codePoint >= 0x0BB7 && codePoint <= 0x0BB9) + || (codePoint >= 0x0BBE && codePoint <= 0x0BBF) + || (codePoint >= 0x0BC1 && codePoint <= 0x0BC2) + || (codePoint >= 0x0BC6 && codePoint <= 0x0BC8) + || (codePoint >= 0x0BCA && codePoint <= 0x0BCC) + || codePoint == 0x0BD7 + || (codePoint >= 0x0BE7 && codePoint <= 0x0BF2) + || (codePoint >= 0x0C01 && codePoint <= 0x0C03) + || (codePoint >= 0x0C05 && codePoint <= 0x0C0C) + || (codePoint >= 0x0C0E && codePoint <= 0x0C10) + || (codePoint >= 0x0C12 && codePoint <= 0x0C28) + || (codePoint >= 0x0C2A && codePoint <= 0x0C33) + || (codePoint >= 0x0C35 && codePoint <= 0x0C39) + || (codePoint >= 0x0C41 && codePoint <= 0x0C44) + || (codePoint >= 0x0C60 && codePoint <= 0x0C61) + || (codePoint >= 0x0C66 && codePoint <= 0x0C6F) + || (codePoint >= 0x0C82 && codePoint <= 0x0C83) + || (codePoint >= 0x0C85 && codePoint <= 0x0C8C) + || (codePoint >= 0x0C8E && codePoint <= 0x0C90) + || (codePoint >= 0x0C92 && codePoint <= 0x0CA8) + || (codePoint >= 0x0CAA && codePoint <= 0x0CB3) + || (codePoint >= 0x0CB5 && codePoint <= 0x0CB9) + || codePoint == 0x0CBE + || (codePoint >= 0x0CC0 && codePoint <= 0x0CC4) + || (codePoint >= 0x0CC7 && codePoint <= 0x0CC8) + || (codePoint >= 0x0CCA && codePoint <= 
0x0CCB) + || (codePoint >= 0x0CD5 && codePoint <= 0x0CD6) + || codePoint == 0x0CDE + || (codePoint >= 0x0CE0 && codePoint <= 0x0CE1) + || (codePoint >= 0x0CE6 && codePoint <= 0x0CEF) + || (codePoint >= 0x0D02 && codePoint <= 0x0D03) + || (codePoint >= 0x0D05 && codePoint <= 0x0D0C) + || (codePoint >= 0x0D0E && codePoint <= 0x0D10) + || (codePoint >= 0x0D12 && codePoint <= 0x0D28) + || (codePoint >= 0x0D2A && codePoint <= 0x0D39) + || (codePoint >= 0x0D3E && codePoint <= 0x0D40) + || (codePoint >= 0x0D46 && codePoint <= 0x0D48) + || (codePoint >= 0x0D4A && codePoint <= 0x0D4C) + || codePoint == 0x0D57 + || (codePoint >= 0x0D60 && codePoint <= 0x0D61) + || (codePoint >= 0x0D66 && codePoint <= 0x0D6F) + || (codePoint >= 0x0D82 && codePoint <= 0x0D83) + || (codePoint >= 0x0D85 && codePoint <= 0x0D96) + || (codePoint >= 0x0D9A && codePoint <= 0x0DB1) + || (codePoint >= 0x0DB3 && codePoint <= 0x0DBB) + || codePoint == 0x0DBD + || (codePoint >= 0x0DC0 && codePoint <= 0x0DC6) + || (codePoint >= 0x0DCF && codePoint <= 0x0DD1) + || (codePoint >= 0x0DD8 && codePoint <= 0x0DDF) + || (codePoint >= 0x0DF2 && codePoint <= 0x0DF4) + || (codePoint >= 0x0E01 && codePoint <= 0x0E30) + || (codePoint >= 0x0E32 && codePoint <= 0x0E33) + || (codePoint >= 0x0E40 && codePoint <= 0x0E46) + || (codePoint >= 0x0E4F && codePoint <= 0x0E5B) + || (codePoint >= 0x0E81 && codePoint <= 0x0E82) + || codePoint == 0x0E84 + || (codePoint >= 0x0E87 && codePoint <= 0x0E88) + || codePoint == 0x0E8A + || codePoint == 0x0E8D + || (codePoint >= 0x0E94 && codePoint <= 0x0E97) + || (codePoint >= 0x0E99 && codePoint <= 0x0E9F) + || (codePoint >= 0x0EA1 && codePoint <= 0x0EA3) + || codePoint == 0x0EA5 + || codePoint == 0x0EA7 + || (codePoint >= 0x0EAA && codePoint <= 0x0EAB) + || (codePoint >= 0x0EAD && codePoint <= 0x0EB0) + || (codePoint >= 0x0EB2 && codePoint <= 0x0EB3) + || codePoint == 0x0EBD + || (codePoint >= 0x0EC0 && codePoint <= 0x0EC4) + || codePoint == 0x0EC6 + || (codePoint >= 0x0ED0 && codePoint <= 
0x0ED9) + || (codePoint >= 0x0EDC && codePoint <= 0x0EDD) + || (codePoint >= 0x0F00 && codePoint <= 0x0F17) + || (codePoint >= 0x0F1A && codePoint <= 0x0F34) + || codePoint == 0x0F36 + || codePoint == 0x0F38 + || (codePoint >= 0x0F3E && codePoint <= 0x0F47) + || (codePoint >= 0x0F49 && codePoint <= 0x0F6A) + || codePoint == 0x0F7F + || codePoint == 0x0F85 + || (codePoint >= 0x0F88 && codePoint <= 0x0F8B) + || (codePoint >= 0x0FBE && codePoint <= 0x0FC5) + || (codePoint >= 0x0FC7 && codePoint <= 0x0FCC) + || codePoint == 0x0FCF + || (codePoint >= 0x1000 && codePoint <= 0x1021) + || (codePoint >= 0x1023 && codePoint <= 0x1027) + || (codePoint >= 0x1029 && codePoint <= 0x102A) + || codePoint == 0x102C + || codePoint == 0x1031 + || codePoint == 0x1038 + || (codePoint >= 0x1040 && codePoint <= 0x1057) + || (codePoint >= 0x10A0 && codePoint <= 0x10C5) + || (codePoint >= 0x10D0 && codePoint <= 0x10F8) + || codePoint == 0x10FB + || (codePoint >= 0x1100 && codePoint <= 0x1159) + || (codePoint >= 0x115F && codePoint <= 0x11A2) + || (codePoint >= 0x11A8 && codePoint <= 0x11F9) + || (codePoint >= 0x1200 && codePoint <= 0x1206) + || (codePoint >= 0x1208 && codePoint <= 0x1246) + || codePoint == 0x1248 + || (codePoint >= 0x124A && codePoint <= 0x124D) + || (codePoint >= 0x1250 && codePoint <= 0x1256) + || codePoint == 0x1258 + || (codePoint >= 0x125A && codePoint <= 0x125D) + || (codePoint >= 0x1260 && codePoint <= 0x1286) + || codePoint == 0x1288 + || (codePoint >= 0x128A && codePoint <= 0x128D) + || (codePoint >= 0x1290 && codePoint <= 0x12AE) + || codePoint == 0x12B0 + || (codePoint >= 0x12B2 && codePoint <= 0x12B5) + || (codePoint >= 0x12B8 && codePoint <= 0x12BE) + || codePoint == 0x12C0 + || (codePoint >= 0x12C2 && codePoint <= 0x12C5) + || (codePoint >= 0x12C8 && codePoint <= 0x12CE) + || (codePoint >= 0x12D0 && codePoint <= 0x12D6) + || (codePoint >= 0x12D8 && codePoint <= 0x12EE) + || (codePoint >= 0x12F0 && codePoint <= 0x130E) + || codePoint == 0x1310 + || (codePoint 
>= 0x1312 && codePoint <= 0x1315) + || (codePoint >= 0x1318 && codePoint <= 0x131E) + || (codePoint >= 0x1320 && codePoint <= 0x1346) + || (codePoint >= 0x1348 && codePoint <= 0x135A) + || (codePoint >= 0x1361 && codePoint <= 0x137C) + || (codePoint >= 0x13A0 && codePoint <= 0x13F4) + || (codePoint >= 0x1401 && codePoint <= 0x1676) + || (codePoint >= 0x1681 && codePoint <= 0x169A) + || (codePoint >= 0x16A0 && codePoint <= 0x16F0) + || (codePoint >= 0x1700 && codePoint <= 0x170C) + || (codePoint >= 0x170E && codePoint <= 0x1711) + || (codePoint >= 0x1720 && codePoint <= 0x1731) + || (codePoint >= 0x1735 && codePoint <= 0x1736) + || (codePoint >= 0x1740 && codePoint <= 0x1751) + || (codePoint >= 0x1760 && codePoint <= 0x176C) + || (codePoint >= 0x176E && codePoint <= 0x1770) + || (codePoint >= 0x1780 && codePoint <= 0x17B6) + || (codePoint >= 0x17BE && codePoint <= 0x17C5) + || (codePoint >= 0x17C7 && codePoint <= 0x17C8) + || (codePoint >= 0x17D4 && codePoint <= 0x17DA) + || codePoint == 0x17DC + || (codePoint >= 0x17E0 && codePoint <= 0x17E9) + || (codePoint >= 0x1810 && codePoint <= 0x1819) + || (codePoint >= 0x1820 && codePoint <= 0x1877) + || (codePoint >= 0x1880 && codePoint <= 0x18A8) + || (codePoint >= 0x1E00 && codePoint <= 0x1E9B) + || (codePoint >= 0x1EA0 && codePoint <= 0x1EF9) + || (codePoint >= 0x1F00 && codePoint <= 0x1F15) + || (codePoint >= 0x1F18 && codePoint <= 0x1F1D) + || (codePoint >= 0x1F20 && codePoint <= 0x1F45) + || (codePoint >= 0x1F48 && codePoint <= 0x1F4D) + || (codePoint >= 0x1F50 && codePoint <= 0x1F57) + || codePoint == 0x1F59 + || codePoint == 0x1F5B + || codePoint == 0x1F5D + || (codePoint >= 0x1F5F && codePoint <= 0x1F7D) + || (codePoint >= 0x1F80 && codePoint <= 0x1FB4) + || (codePoint >= 0x1FB6 && codePoint <= 0x1FBC) + || codePoint == 0x1FBE + || (codePoint >= 0x1FC2 && codePoint <= 0x1FC4) + || (codePoint >= 0x1FC6 && codePoint <= 0x1FCC) + || (codePoint >= 0x1FD0 && codePoint <= 0x1FD3) + || (codePoint >= 0x1FD6 && codePoint 
<= 0x1FDB) + || (codePoint >= 0x1FE0 && codePoint <= 0x1FEC) + || (codePoint >= 0x1FF2 && codePoint <= 0x1FF4) + || (codePoint >= 0x1FF6 && codePoint <= 0x1FFC) + || codePoint == 0x200E + || codePoint == 0x2071 + || codePoint == 0x207F + || codePoint == 0x2102 + || codePoint == 0x2107 + || (codePoint >= 0x210A && codePoint <= 0x2113) + || codePoint == 0x2115 + || (codePoint >= 0x2119 && codePoint <= 0x211D) + || codePoint == 0x2124 + || codePoint == 0x2126 + || codePoint == 0x2128 + || (codePoint >= 0x212A && codePoint <= 0x212D) + || (codePoint >= 0x212F && codePoint <= 0x2131) + || (codePoint >= 0x2133 && codePoint <= 0x2139) + || (codePoint >= 0x213D && codePoint <= 0x213F) + || (codePoint >= 0x2145 && codePoint <= 0x2149) + || (codePoint >= 0x2160 && codePoint <= 0x2183) + || (codePoint >= 0x2336 && codePoint <= 0x237A) + || codePoint == 0x2395 + || (codePoint >= 0x249C && codePoint <= 0x24E9) + || (codePoint >= 0x3005 && codePoint <= 0x3007) + || (codePoint >= 0x3021 && codePoint <= 0x3029) + || (codePoint >= 0x3031 && codePoint <= 0x3035) + || (codePoint >= 0x3038 && codePoint <= 0x303C) + || (codePoint >= 0x3041 && codePoint <= 0x3096) + || (codePoint >= 0x309D && codePoint <= 0x309F) + || (codePoint >= 0x30A1 && codePoint <= 0x30FA) + || (codePoint >= 0x30FC && codePoint <= 0x30FF) + || (codePoint >= 0x3105 && codePoint <= 0x312C) + || (codePoint >= 0x3131 && codePoint <= 0x318E) + || (codePoint >= 0x3190 && codePoint <= 0x31B7) + || (codePoint >= 0x31F0 && codePoint <= 0x321C) + || (codePoint >= 0x3220 && codePoint <= 0x3243) + || (codePoint >= 0x3260 && codePoint <= 0x327B) + || (codePoint >= 0x327F && codePoint <= 0x32B0) + || (codePoint >= 0x32C0 && codePoint <= 0x32CB) + || (codePoint >= 0x32D0 && codePoint <= 0x32FE) + || (codePoint >= 0x3300 && codePoint <= 0x3376) + || (codePoint >= 0x337B && codePoint <= 0x33DD) + || (codePoint >= 0x33E0 && codePoint <= 0x33FE) + || (codePoint >= 0x3400 && codePoint <= 0x4DB5) + || (codePoint >= 0x4E00 && codePoint 
<= 0x9FA5) + || (codePoint >= 0xA000 && codePoint <= 0xA48C) + || (codePoint >= 0xAC00 && codePoint <= 0xD7A3) + || (codePoint >= 0xD800 && codePoint <= 0xFA2D) + || (codePoint >= 0xFA30 && codePoint <= 0xFA6A) + || (codePoint >= 0xFB00 && codePoint <= 0xFB06) + || (codePoint >= 0xFB13 && codePoint <= 0xFB17) + || (codePoint >= 0xFF21 && codePoint <= 0xFF3A) + || (codePoint >= 0xFF41 && codePoint <= 0xFF5A) + || (codePoint >= 0xFF66 && codePoint <= 0xFFBE) + || (codePoint >= 0xFFC2 && codePoint <= 0xFFC7) + || (codePoint >= 0xFFCA && codePoint <= 0xFFCF) + || (codePoint >= 0xFFD2 && codePoint <= 0xFFD7) + || (codePoint >= 0xFFDA && codePoint <= 0xFFDC) + || (codePoint >= 0x10300 && codePoint <= 0x1031E) + || (codePoint >= 0x10320 && codePoint <= 0x10323) + || (codePoint >= 0x10330 && codePoint <= 0x1034A) + || (codePoint >= 0x10400 && codePoint <= 0x10425) + || (codePoint >= 0x10428 && codePoint <= 0x1044D) + || (codePoint >= 0x1D000 && codePoint <= 0x1D0F5) + || (codePoint >= 0x1D100 && codePoint <= 0x1D126) + || (codePoint >= 0x1D12A && codePoint <= 0x1D166) + || (codePoint >= 0x1D16A && codePoint <= 0x1D172) + || (codePoint >= 0x1D183 && codePoint <= 0x1D184) + || (codePoint >= 0x1D18C && codePoint <= 0x1D1A9) + || (codePoint >= 0x1D1AE && codePoint <= 0x1D1DD) + || (codePoint >= 0x1D400 && codePoint <= 0x1D454) + || (codePoint >= 0x1D456 && codePoint <= 0x1D49C) + || (codePoint >= 0x1D49E && codePoint <= 0x1D49F) + || codePoint == 0x1D4A2 + || (codePoint >= 0x1D4A5 && codePoint <= 0x1D4A6) + || (codePoint >= 0x1D4A9 && codePoint <= 0x1D4AC) + || (codePoint >= 0x1D4AE && codePoint <= 0x1D4B9) + || codePoint == 0x1D4BB + || (codePoint >= 0x1D4BD && codePoint <= 0x1D4C0) + || (codePoint >= 0x1D4C2 && codePoint <= 0x1D4C3) + || (codePoint >= 0x1D4C5 && codePoint <= 0x1D505) + || (codePoint >= 0x1D507 && codePoint <= 0x1D50A) + || (codePoint >= 0x1D50D && codePoint <= 0x1D514) + || (codePoint >= 0x1D516 && codePoint <= 0x1D51C) + || (codePoint >= 0x1D51E && 
codePoint <= 0x1D539) + || (codePoint >= 0x1D53B && codePoint <= 0x1D53E) + || (codePoint >= 0x1D540 && codePoint <= 0x1D544) + || codePoint == 0x1D546 + || (codePoint >= 0x1D54A && codePoint <= 0x1D550) + || (codePoint >= 0x1D552 && codePoint <= 0x1D6A3) + || (codePoint >= 0x1D6A8 && codePoint <= 0x1D7C9) + || (codePoint >= 0x20000 && codePoint <= 0x2A6D6) + || (codePoint >= 0x2F800 && codePoint <= 0x2FA1D) + || (codePoint >= 0xF0000 && codePoint <= 0xFFFFD) + || (codePoint >= 0x100000 && codePoint <= 0x10FFFD) + + ; + } + +} diff --git a/stringprep/src/main/java/module-info.java b/stringprep/src/main/java/module-info.java new file mode 100644 index 0000000..3884d55 --- /dev/null +++ b/stringprep/src/main/java/module-info.java @@ -0,0 +1,4 @@ +module org.xbib.stringprep { + exports com.ongres.stringprep; + uses com.ongres.stringprep.Profile; +} \ No newline at end of file diff --git a/stringprep/src/test/java/test/stringprep/ProfileTest.java b/stringprep/src/test/java/test/stringprep/ProfileTest.java new file mode 100644 index 0000000..bf22678 --- /dev/null +++ b/stringprep/src/test/java/test/stringprep/ProfileTest.java @@ -0,0 +1,239 @@ +/* + * Copyright (C) 2021 OnGres, Inc. 
+ * SPDX-License-Identifier: BSD-2-Clause + */ + +package test.stringprep; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import java.util.EnumSet; +import java.util.Locale; +import java.util.Set; +import java.util.stream.IntStream; + +import com.ongres.stringprep.Option; +import com.ongres.stringprep.Profile; +import com.ongres.stringprep.Stringprep; +import com.ongres.stringprep.Tables; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; + +class ProfileTest { + + @Test + void nullProvider() { + assertThrows(NullPointerException.class, () -> Stringprep.getProvider(null)); + assertThrows(IllegalArgumentException.class, () -> Stringprep.getProvider("")); + } + + @Test + void testAllOptions() { + Profile profile = () -> EnumSet.allOf(Option.class); + char[] example1 = "A\u00ADDⅨ".toCharArray(); + assertArrayEquals("adix".toCharArray(), profile.prepareStored(example1)); + assertArrayEquals("adix".toCharArray(), profile.prepareQuery(example1)); + } + + @Test + void testNoneOptions() { + Profile profile = () -> EnumSet.noneOf(Option.class); + char[] example1 = "A\u00AD\u200A\u0BBCZ".toCharArray(); + assertArrayEquals(example1, profile.prepareQuery(example1)); + IllegalArgumentException storedIllegal = + assertThrows(IllegalArgumentException.class, () -> profile.prepareStored(example1)); + assertEquals("Unassigned code point \"0x0BBC\"", storedIllegal.getMessage()); + } + + @ParameterizedTest + @CsvSource(value = {"℻,FAX", "⑳,20", "㎓,GHz", "A\u00ADD ⑳,AD 20"}) + void testNormKcOptions(String value, String expected) { + Profile profile = () -> EnumSet.of(Option.NORMALIZE_KC, Option.MAP_TO_NOTHING); + char[] 
valueChars = value.toCharArray(); + char[] expectedChars = expected.toCharArray(); + assertArrayEquals(expectedChars, profile.prepareStored(valueChars)); + assertArrayEquals(expectedChars, profile.prepareQuery(valueChars)); + } + + @Test + void testAdditionalProhibitionsOptions() { + Profile profile = new Profile() { + @Override + public Set