initial commit

Jörg Prante 2016-11-01 17:28:05 +01:00
commit 7069e31fe0
62 changed files with 6705 additions and 0 deletions

.gitignore vendored Normal file
@@ -0,0 +1,13 @@
/data
/work
/logs
/.idea
/target
.DS_Store
*.iml
/.settings
/.classpath
/.project
/.gradle
/build
/plugins

.travis.yml Normal file
@@ -0,0 +1,12 @@
language: java
sudo: required
jdk:
  - oraclejdk8
cache:
  directories:
    - $HOME/.m2
after_success:
  - ./gradlew sonarqube -Dsonar.host.url=https://sonarqube.com -Dsonar.login=$SONAR_TOKEN
env:
  global:
    secure: n1Ai4q/yMLn/Pg5pA4lTavoJoe7mQYB1PSKnZAqwbgyla94ySzK6iyBCBiNs/foMPisB/x+DHvmUXTsjvquw9Ay48ZITCV3xhcWzD0eZM2TMoG19CpRAEe8L8LNuYiti9k89ijDdUGZ5ifsvQNTGNHksouayAuApC3PrTUejJfR6SYrp1ZsQTbsMlr+4XU3p7QknK5rGgOwATIMP28F+bVnB05WJtlJA3b0SeucCurn3wJ4FGBQXRYmdlT7bQhNE4QgZM1VzcUFD/K0TBxzzq/otb/lNRSifyoekktDmJwQnaT9uQ4R8R6KdQ2Kb38Rvgjur+TKm5i1G8qS2+6LnIxQJG1aw3JvKK6W0wWCgnAVVRrXaCLday9NuY59tuh1mfjQ10UcsMNKcTdcKEMrLow506wSETcXc7L/LEnneWQyJJeV4vhPqR7KJfsBbeqgz3yIfsCn1GZVWFlfegzYCN52YTl0Y0uRD2Z+TnzQu+Bf4DzaWXLge1rz31xkhyeNNspub4h024+XqBjcMm6M9mlMzmmK8t2DIwPy/BlQbFBUyhrxziuR/5/2NEDPyHltvWkRb4AUIa25WJqkV0gTBegbMadZ9DyOo6Ea7aoVFBae2WGR08F1kzABsWrd1S7UJmWxW35iyMEtoAIayXphIK98qO5aCutwZ+3iOQazxbAs=

LICENSE.txt Normal file
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md Normal file
@@ -0,0 +1,82 @@
![Helper](https://github.com/jprante/elasticsearch-helper/raw/master/src/site/resources/helper.jpg)
# Elasticsearch helper plugin
This plugin offers some Java helper classes for easier use of the Elasticsearch API.
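For orientation, here is a minimal sketch, not part of this plugin, of the stock Elasticsearch 2.x transport client that the helper classes are meant to simplify; the cluster name, address, and index names are placeholders:

import java.net.InetAddress;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;

public class PlainClientSketch {
    public static void main(String[] args) throws Exception {
        Settings settings = Settings.settingsBuilder()
                .put("cluster.name", "elasticsearch") // placeholder cluster name
                .build();
        // Connect to a single node and index one document.
        TransportClient client = TransportClient.builder().settings(settings).build()
                .addTransportAddress(new InetSocketTransportAddress(
                        InetAddress.getByName("localhost"), 9300));
        client.prepareIndex("test", "doc", "1")
                .setSource("{\"field\":\"value\"}")
                .execute().actionGet();
        client.close();
    }
}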
## Compatibility matrix
| Elasticsearch | Plugin | Release date |
| ----------------- | -------------- | -------------|
| 2.4.1 | 2.4.1.0 | Oct 4, 2016 |
| 2.4.0 | 2.4.0.0 | Oct 4, 2016 |
| 2.3.5 | 2.3.5.0 | Aug 4, 2016 |
| 2.3.3 | 2.3.3.1 | Jul 6, 2016 |
| 2.3.3 | 2.3.3.0 | May 23, 2016 |
| 2.3.2 | 2.3.2.0 | May 9, 2016 |
| 2.3.1 | 2.3.1.1 | May 9, 2016 |
| 2.3.1 | 2.3.1.0 | Apr 9, 2016 |
| 2.3.0 | 2.3.0.0 | Apr 9, 2016 |
| 2.2.1 | 2.2.1.1 | Mar 30, 2016 |
| 2.2.0 | 2.2.0.5 | Mar 15, 2016 |
| 2.2.0 | 2.2.0.4 | Mar 10, 2016 |
| 2.2.0 | 2.2.0.3 | Feb 16, 2016 |
| 2.2.0 | 2.2.0.2 | Feb 12, 2016 |
| 2.2.0 | 2.2.0.0 | Feb 3, 2016 |
| 2.1.1 | 2.1.1.0 | Dec 21, 2015 |
| 2.1.0 | 2.1.0.0 | Nov 29, 2015 |
| 2.0.0 | 2.0.0.2 | Nov 3, 2015 |
| 2.0.0 | 2.0.0.1 | Oct 29, 2015 |
| 2.0.0 | 2.0.0.0 | Oct 28, 2015 |
| 1.6.0 | 1.6.0.0 | Jul 1, 2015 |
| 1.5.2 | 1.5.2.2 | May 11, 2015 |
| 1.5.2 | 1.5.2.1 | May 3, 2015 |
| 1.5.1 | 1.5.1.0 | Apr 23, 2015 |
| 1.3.1 | 1.3.0.3 | Aug 8, 2014 |
| 1.3.1 | 1.3.0.1 | Aug 4, 2014 |
| 1.3.0 | 1.3.0.0 | Jul 23, 2014 |
| 1.2.2 | 1.2.2.0 | Jul 19, 2014 |
| 1.2.1 | 1.2.1.0 | Jun 4, 2014 |
| 1.2.0 | 1.2.0.1 | May 28, 2014 |
| 1.2.0 | 1.2.0.0 | May 22, 2014 |
| 1.1.0 | 1.1.0.7 | May 11, 2014 |
| 1.0.0.RC2 | 1.0.0.RC2.1 | Feb 3, 2014 |
| 0.90.7 | 0.90.7.1 | Dec 3, 2013 |
| 0.20.6 | 0.20.6.1 | Feb 4, 2014 |
| 0.19.11.2 | 0.19.11.2 | Feb 1, 2013 |
## Installation 2.x
./bin/plugin install http://xbib.org/repository/org/xbib/elasticsearch/plugin/elasticsearch-helper/2.3.3.1/elasticsearch-helper-2.3.3.1-plugin.zip
## Installation 1.x
./bin/plugin -install helper -url http://xbib.org/repository/org/xbib/elasticsearch/plugin/elasticsearch-helper/1.6.0.0/elasticsearch-helper-1.6.0.0.zip
Do not forget to restart the node after installing.
## Project docs
The Maven project site is available at [GitHub](http://jprante.github.io/elasticsearch-helper)
## Issues
All feedback is welcome! If you find issues, please post them at [GitHub](https://github.com/jprante/elasticsearch-helper/issues)
# License
Elasticsearch Helper Plugin (formerly Elasticsearch Support Plugin)
Copyright (C) 2013 Jörg Prante
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

build.gradle Normal file
@@ -0,0 +1,117 @@
plugins {
id "org.sonarqube" version "2.2"
id "org.ajoberstar.github-pages" version "1.6.0-rc.1"
id "org.xbib.gradle.plugin.jbake" version "1.1.0"
}
group = 'org.xbib'
version = '2.2.1.0'
printf "Host: %s\nOS: %s %s %s\nJVM: %s %s %s %s\nGroovy: %s\nGradle: %s\n" +
"Build: group: ${project.group} name: ${project.name} version: ${project.version}\n",
InetAddress.getLocalHost(),
System.getProperty("os.name"),
System.getProperty("os.arch"),
System.getProperty("os.version"),
System.getProperty("java.version"),
System.getProperty("java.vm.version"),
System.getProperty("java.vm.vendor"),
System.getProperty("java.vm.name"),
GroovySystem.getVersion(),
gradle.gradleVersion
apply plugin: 'java'
apply plugin: 'maven'
apply plugin: 'signing'
apply plugin: 'findbugs'
apply plugin: 'pmd'
apply plugin: 'checkstyle'
apply plugin: "jacoco"
apply plugin: 'org.ajoberstar.github-pages'
apply from: 'gradle/ext.gradle'
sourceSets {
integrationTest {
java {
srcDir file('src/integration-test/java')
compileClasspath += main.output
compileClasspath += test.output
}
resources {
srcDir file('src/integration-test/resources')
}
}
}
sourceCompatibility = 1.8
targetCompatibility = 1.8
configurations {
wagon
integrationTestCompile.extendsFrom testCompile
integrationTestRuntime.extendsFrom testRuntime
}
dependencies {
compile "org.xbib:metrics:1.0.0"
compile "org.elasticsearch:elasticsearch:2.2.1"
testCompile "net.java.dev.jna:jna:4.1.0"
testCompile "junit:junit:4.12"
testCompile "org.apache.logging.log4j:log4j-core:2.7"
testCompile "org.apache.logging.log4j:log4j-slf4j-impl:2.7"
wagon 'org.apache.maven.wagon:wagon-ssh-external:2.10'
}
tasks.withType(JavaCompile) {
options.compilerArgs << "-Xlint:all" << "-profile" << "compact3"
}
task integrationTest(type: Test) {
include '**/MiscTestSuite.class'
include '**/BulkNodeTestSuite.class'
include '**/BulkTransportTestSuite.class'
testClassesDir = sourceSets.integrationTest.output.classesDir
classpath = configurations.integrationTestCompile
classpath += configurations.integrationTestRuntime
classpath += sourceSets.main.output
classpath += sourceSets.test.output
classpath += sourceSets.integrationTest.output
outputs.upToDateWhen { false }
systemProperty 'path.home', projectDir.absolutePath
testLogging.showStandardStreams = true
}
integrationTest.mustRunAfter test
check.dependsOn integrationTest
clean {
delete "plugins"
delete "logs"
}
task javadocJar(type: Jar, dependsOn: classes) {
from javadoc
into "build/tmp"
classifier 'javadoc'
}
task sourcesJar(type: Jar, dependsOn: classes) {
from sourceSets.main.allSource
into "build/tmp"
classifier 'sources'
}
artifacts {
archives javadocJar, sourcesJar
}
if (project.hasProperty('signing.keyId')) {
signing {
sign configurations.archives
}
}
apply from: 'gradle/publish.gradle'
apply from: 'gradle/sonarqube.gradle'
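The integrationTest task above selects JUnit 4 suite classes by name (MiscTestSuite, BulkNodeTestSuite, BulkTransportTestSuite). As a sketch of the expected shape of such a suite, with member classes assumed for illustration from the test classes in this commit:

import org.junit.runner.RunWith;
import org.junit.runners.Suite;

// Hypothetical shape of a class matched by the '**/MiscTestSuite.class' include.
@RunWith(Suite.class)
@Suite.SuiteClasses({
        org.xbib.elasticsearch.AliasTest.class,
        org.xbib.elasticsearch.SimpleTest.class,
        org.xbib.elasticsearch.WildcardTest.class
})
public class MiscTestSuite {
}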

@@ -0,0 +1,323 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE module PUBLIC
"-//Puppy Crawl//DTD Check Configuration 1.3//EN"
"http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
<!-- This is a checkstyle configuration file. For descriptions of
what the following rules do, please see the checkstyle configuration
page at http://checkstyle.sourceforge.net/config.html -->
<module name="Checker">
<module name="FileTabCharacter">
<!-- Checks that there are no tab characters in the file.
-->
</module>
<module name="NewlineAtEndOfFile">
<property name="lineSeparator" value="lf"/>
</module>
<module name="RegexpSingleline">
<!-- Checks that FIXME is not used in comments. TODO is preferred.
-->
<property name="format" value="((//.*)|(\*.*))FIXME" />
<property name="message" value='TODO is preferred to FIXME. e.g. "TODO(johndoe): Refactor when v2 is released."' />
</module>
<module name="RegexpSingleline">
<!-- Checks that TODOs are named. (Actually, just that they are followed
by an open paren.)
-->
<property name="format" value="((//.*)|(\*.*))TODO[^(]" />
<property name="message" value='All TODOs should be named. e.g. "TODO(johndoe): Refactor when v2 is released."' />
</module>
<module name="JavadocPackage">
<!-- Checks that each Java package has a Javadoc file used for commenting.
Only allows a package-info.java, not package.html. -->
</module>
<!-- All Java AST specific tests live under TreeWalker module. -->
<module name="TreeWalker">
<!--
IMPORT CHECKS
-->
<module name="RedundantImport">
<!-- Checks for redundant import statements. -->
<property name="severity" value="error"/>
</module>
<module name="ImportOrder">
<!-- Checks for out of order import statements. -->
<property name="severity" value="warning"/>
<property name="groups" value="com,junit,net,org,java,javax"/>
<!-- This ensures that static imports go first. -->
<property name="option" value="top"/>
<property name="tokens" value="STATIC_IMPORT, IMPORT"/>
</module>
<!--
JAVADOC CHECKS
-->
<!-- Checks for Javadoc comments. -->
<!-- See http://checkstyle.sf.net/config_javadoc.html -->
<module name="JavadocMethod">
<property name="scope" value="protected"/>
<property name="severity" value="warning"/>
<property name="allowMissingJavadoc" value="true"/>
<property name="allowMissingParamTags" value="true"/>
<property name="allowMissingReturnTag" value="true"/>
<property name="allowMissingThrowsTags" value="true"/>
<property name="allowThrowsTagsForSubclasses" value="true"/>
<property name="allowUndeclaredRTE" value="true"/>
</module>
<module name="JavadocType">
<property name="scope" value="protected"/>
<property name="severity" value="error"/>
</module>
<module name="JavadocStyle">
<property name="severity" value="warning"/>
</module>
<!--
NAMING CHECKS
-->
<!-- Item 38 - Adhere to generally accepted naming conventions -->
<module name="PackageName">
<!-- Validates identifiers for package names against the
supplied expression. -->
<!-- Here the default checkstyle rule restricts package name parts to
seven characters, this is not in line with common practice at Google.
-->
<property name="format" value="^[a-z]+(\.[a-z][a-z0-9]{1,})*$"/>
<property name="severity" value="warning"/>
</module>
<module name="TypeNameCheck">
<!-- Validates static, final fields against the
expression "^[A-Z][a-zA-Z0-9]*$". -->
<metadata name="altname" value="TypeName"/>
<property name="severity" value="warning"/>
</module>
<module name="ConstantNameCheck">
<!-- Validates non-private, static, final fields against the supplied
public/package final fields "^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$". -->
<metadata name="altname" value="ConstantName"/>
<property name="applyToPublic" value="true"/>
<property name="applyToProtected" value="true"/>
<property name="applyToPackage" value="true"/>
<property name="applyToPrivate" value="false"/>
<property name="format" value="^([A-Z][A-Z0-9]*(_[A-Z0-9]+)*|FLAG_.*)$"/>
<message key="name.invalidPattern"
value="Variable ''{0}'' should be in ALL_CAPS (if it is a constant) or be private (otherwise)."/>
<property name="severity" value="warning"/>
</module>
<module name="StaticVariableNameCheck">
<!-- Validates static, non-final fields against the supplied
expression "^[a-z][a-zA-Z0-9]*_?$". -->
<metadata name="altname" value="StaticVariableName"/>
<property name="applyToPublic" value="true"/>
<property name="applyToProtected" value="true"/>
<property name="applyToPackage" value="true"/>
<property name="applyToPrivate" value="true"/>
<property name="format" value="^[a-z][a-zA-Z0-9]*_?$"/>
<property name="severity" value="warning"/>
</module>
<module name="MemberNameCheck">
<!-- Validates non-static members against the supplied expression. -->
<metadata name="altname" value="MemberName"/>
<property name="applyToPublic" value="true"/>
<property name="applyToProtected" value="true"/>
<property name="applyToPackage" value="true"/>
<property name="applyToPrivate" value="true"/>
<property name="format" value="^[a-z][a-zA-Z0-9]*$"/>
<property name="severity" value="warning"/>
</module>
<module name="MethodNameCheck">
<!-- Validates identifiers for method names. -->
<metadata name="altname" value="MethodName"/>
<property name="format" value="^[a-z][a-zA-Z0-9]*(_[a-zA-Z0-9]+)*$"/>
<property name="severity" value="warning"/>
</module>
<module name="ParameterName">
<!-- Validates identifiers for method parameters against the
expression "^[a-z][a-zA-Z0-9]*$". -->
<property name="severity" value="warning"/>
</module>
<module name="LocalFinalVariableName">
<!-- Validates identifiers for local final variables against the
expression "^[a-z][a-zA-Z0-9]*$". -->
<property name="severity" value="warning"/>
</module>
<module name="LocalVariableName">
<!-- Validates identifiers for local variables against the
expression "^[a-z][a-zA-Z0-9]*$". -->
<property name="severity" value="warning"/>
</module>
<!--
LENGTH and CODING CHECKS
-->
<module name="LineLength">
<!-- Checks if a line is too long. -->
<property name="max" value="${com.puppycrawl.tools.checkstyle.checks.sizes.LineLength.max}" default="128"/>
<property name="severity" value="error"/>
<!--
The default ignore pattern exempts the following elements:
- import statements
- long URLs inside comments
-->
<property name="ignorePattern"
value="${com.puppycrawl.tools.checkstyle.checks.sizes.LineLength.ignorePattern}"
default="^(package .*;\s*)|(import .*;\s*)|( *(\*|//).*https?://.*)$"/>
</module>
<module name="LeftCurly">
<!-- Checks for placement of the left curly brace ('{'). -->
<property name="severity" value="warning"/>
</module>
<module name="RightCurly">
<!-- Checks right curlies on CATCH, ELSE, and TRY blocks are on
the same line. e.g., the following example is fine:
<pre>
if {
...
} else
</pre>
-->
<!-- This next example is not fine:
<pre>
if {
...
}
else
</pre>
-->
<property name="option" value="same"/>
<property name="severity" value="warning"/>
</module>
<!-- Checks for braces around if and else blocks -->
<module name="NeedBraces">
<property name="severity" value="warning"/>
<property name="tokens" value="LITERAL_IF, LITERAL_ELSE, LITERAL_FOR, LITERAL_WHILE, LITERAL_DO"/>
</module>
<module name="UpperEll">
<!-- Checks that long constants are defined with an upper ell.-->
<property name="severity" value="error"/>
</module>
<module name="FallThrough">
<!-- Warn about falling through to the next case statement. Similar to
javac -Xlint:fallthrough, but the check is suppressed if a single-line comment
on the last non-blank line preceding the fallen-into case contains 'fall through' (or
some other variants which we don't publicize, to promote consistency).
-->
<property name="reliefPattern"
value="fall through|Fall through|fallthru|Fallthru|falls through|Falls through|fallthrough|Fallthrough|No break|NO break|no break|continue on"/>
<property name="severity" value="error"/>
</module>
<!--
MODIFIERS CHECKS
-->
<module name="ModifierOrder">
<!-- Warn if modifier order is inconsistent with JLS3 8.1.1, 8.3.1, and
8.4.3. The prescribed order is:
public, protected, private, abstract, static, final, transient, volatile,
synchronized, native, strictfp
-->
</module>
<!--
WHITESPACE CHECKS
-->
<module name="WhitespaceAround">
<!-- Checks that various tokens are surrounded by whitespace.
This includes most binary operators and keywords followed
by regular or curly braces.
-->
<property name="tokens" value="ASSIGN, BAND, BAND_ASSIGN, BOR,
BOR_ASSIGN, BSR, BSR_ASSIGN, BXOR, BXOR_ASSIGN, COLON, DIV, DIV_ASSIGN,
EQUAL, GE, GT, LAND, LE, LITERAL_CATCH, LITERAL_DO, LITERAL_ELSE,
LITERAL_FINALLY, LITERAL_FOR, LITERAL_IF, LITERAL_RETURN,
LITERAL_SYNCHRONIZED, LITERAL_TRY, LITERAL_WHILE, LOR, LT, MINUS,
MINUS_ASSIGN, MOD, MOD_ASSIGN, NOT_EQUAL, PLUS, PLUS_ASSIGN, QUESTION,
SL, SL_ASSIGN, SR_ASSIGN, STAR, STAR_ASSIGN"/>
<property name="severity" value="error"/>
</module>
<module name="WhitespaceAfter">
<!-- Checks that commas, semicolons and typecasts are followed by
whitespace.
-->
<property name="tokens" value="COMMA, SEMI, TYPECAST"/>
</module>
<module name="NoWhitespaceAfter">
<!-- Checks that there is no whitespace after various unary operators.
Linebreaks are allowed.
-->
<property name="tokens" value="BNOT, DEC, DOT, INC, LNOT, UNARY_MINUS,
UNARY_PLUS"/>
<property name="allowLineBreaks" value="true"/>
<property name="severity" value="error"/>
</module>
<module name="NoWhitespaceBefore">
<!-- Checks that there is no whitespace before various unary operators.
Linebreaks are allowed.
-->
<property name="tokens" value="SEMI, DOT, POST_DEC, POST_INC"/>
<property name="allowLineBreaks" value="true"/>
<property name="severity" value="error"/>
</module>
<module name="ParenPad">
<!-- Checks that there is no whitespace before close parens or after
open parens.
-->
<property name="severity" value="warning"/>
</module>
</module>
</module>
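To make two of the checks above concrete, here is a small illustrative Java fragment (class and names are hypothetical) that satisfies both the named-TODO rule and the FallThrough relief pattern:

public class StyleExample {
    // TODO(johndoe): a named TODO satisfies the RegexpSingleline rule above.
    static int categorize(int value) {
        int result;
        switch (value) {
            case 0:
                result = 1;
                // fall through -- matches the FallThrough reliefPattern
            case 1:
                result = 2;
                break;
            default:
                result = -1;
        }
        return result;
    }
}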

gradle/ext.gradle Normal file
@@ -0,0 +1,8 @@
ext {
user = 'jprante'
name = 'elasticsearch-extras-client'
description = 'Some extras implemented for using Elasticsearch clients (node and transport)'
scmUrl = 'https://github.com/' + user + '/' + name
scmConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git'
scmDeveloperConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git'
}

gradle/publish.gradle Normal file
@@ -0,0 +1,63 @@
task xbibUpload(type: Upload) {
configuration = configurations.archives
uploadDescriptor = true
repositories {
if (project.hasProperty("xbibUsername")) {
mavenDeployer {
configuration = configurations.wagon
repository(url: 'scpexe://xbib.org/repository') {
authentication(userName: xbibUsername, privateKey: xbibPrivateKey)
}
}
}
}
}
task sonaTypeUpload(type: Upload) {
configuration = configurations.archives
uploadDescriptor = true
repositories {
if (project.hasProperty('ossrhUsername')) {
mavenDeployer {
beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) }
repository(url: 'https://oss.sonatype.org/service/local/staging/deploy/maven2') {
authentication(userName: ossrhUsername, password: ossrhPassword)
}
snapshotRepository(url: 'https://oss.sonatype.org/content/repositories/snapshots') {
authentication(userName: ossrhUsername, password: ossrhPassword)
}
pom.project {
name name
description description
packaging 'jar'
inceptionYear '2012'
url scmUrl
organization {
name 'xbib'
url 'http://xbib.org'
}
developers {
developer {
id user
name 'Jörg Prante'
email 'joergprante@gmail.com'
url 'https://github.com/jprante'
}
}
scm {
url scmUrl
connection scmConnection
developerConnection scmDeveloperConnection
}
licenses {
license {
name 'The Apache License, Version 2.0'
url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
}
}
}
}
}
}
}

gradle/publish.gradle~ Normal file
@@ -0,0 +1,104 @@
task xbibUpload(type: Upload) {
configuration = configurations.archives
uploadDescriptor = true
repositories {
if (project.hasProperty("xbibUsername")) {
mavenDeployer {
configuration = configurations.wagon
repository(url: 'scpexe://xbib.org/repository') {
authentication(userName: xbibUsername, privateKey: xbibPrivateKey)
}
}
}
}
}
task sonaTypeUpload(type: Upload) {
configuration = configurations.archives
uploadDescriptor = true
repositories {
if (project.hasProperty('ossrhUsername')) {
mavenDeployer {
beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) }
repository(url: 'https://oss.sonatype.org/service/local/staging/deploy/maven2') {
authentication(userName: ossrhUsername, password: ossrhPassword)
}
snapshotRepository(url: 'https://oss.sonatype.org/content/repositories/snapshots') {
authentication(userName: ossrhUsername, password: ossrhPassword)
}
pom.project {
name name
description description
packaging 'jar'
inceptionYear '2012'
url scmUrl
organization {
name 'xbib'
url 'http://xbib.org'
}
developers {
developer {
id user
name 'Jörg Prante'
email 'joergprante@gmail.com'
url 'https://github.com/jprante'
}
}
scm {
url scmUrl
connection scmConnection
developerConnection scmDeveloperConnection
}
licenses {
license {
name 'The Apache License, Version 2.0'
url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
}
}
}
}
}
}
}
task hbzUpload(type: Upload) {
configuration = configurations.archives
uploadDescriptor = true
repositories {
if (project.hasProperty('hbzUserName')) {
mavenDeployer {
configuration = configurations.wagon
beforeDeployment { MavenDeployment deployment ->
signing.signPom(deployment)
}
repository(url: uri(hbzUrl)) {
authentication(userName: hbzUserName, privateKey: hbzPrivateKey)
}
pom.project {
developers {
developer {
id 'jprante'
name 'Jörg Prante'
email 'joergprante@gmail.com'
url 'https://github.com/jprante'
}
}
scm {
url 'https://github.com/xbib/elasticsearch-webapp-libraryservice'
connection 'scm:git:git://github.com/xbib/elasticsearch-webapp-libraryservice.git'
developerConnection 'scm:git:git://github.com/xbib/elasticsearch-webapp-libraryservice.git'
}
inceptionYear '2016'
licenses {
license {
name 'The Apache License, Version 2.0'
url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
}
}
}
}
}
}
}

gradle/sonarqube.gradle Normal file
@@ -0,0 +1,41 @@
tasks.withType(FindBugs) {
ignoreFailures = true
reports {
xml.enabled = true
html.enabled = false
}
}
tasks.withType(Pmd) {
ignoreFailures = true
reports {
xml.enabled = true
html.enabled = true
}
}
tasks.withType(Checkstyle) {
ignoreFailures = true
reports {
xml.enabled = true
html.enabled = true
}
}
jacocoTestReport {
reports {
xml.enabled true
csv.enabled false
xml.destination "${buildDir}/reports/jacoco-xml"
html.destination "${buildDir}/reports/jacoco-html"
}
}
sonarqube {
properties {
property "sonar.projectName", "${project.group} ${project.name}"
property "sonar.sourceEncoding", "UTF-8"
property "sonar.tests", "src/integration-test/java"
property "sonar.scm.provider", "git"
property "sonar.java.coveragePlugin", "jacoco"
property "sonar.junit.reportsPath", "build/test-results/test/"
}
}

gradle/wrapper/gradle-wrapper.jar vendored Normal file

Binary file not shown.

gradle/wrapper/gradle-wrapper.properties vendored Normal file
@@ -0,0 +1,6 @@
#Tue Nov 01 14:46:00 CET 2016
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-3.1-bin.zip

gradlew vendored Executable file
@@ -0,0 +1,169 @@
#!/usr/bin/env bash
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
warn ( ) {
echo "$*"
}
die ( ) {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
function splitJvmOpts() {
JVM_OPTS=("$@")
}
eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [[ "$(uname)" == "Darwin" ]] && [[ "$HOME" == "$PWD" ]]; then
cd "$(dirname "$0")"
fi
exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"

gradlew.bat vendored Normal file
@@ -0,0 +1,84 @@
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega

settings.gradle Normal file
@@ -0,0 +1 @@
rootProject.name = 'elasticsearch-extras-client'

@@ -0,0 +1,38 @@
package org.elasticsearch.node;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.Plugin;
import java.util.ArrayList;
import java.util.Collection;
/**
 * Node subclass for tests, with constructors that accept classpath plugins.
 */
public class MockNode extends Node {
public MockNode() {
super(Settings.EMPTY);
}
public MockNode(Settings settings) {
super(settings);
}
public MockNode(Settings settings, Collection<Class<? extends Plugin>> classpathPlugins) {
super(InternalSettingsPreparer.prepareEnvironment(settings, null), Version.CURRENT, classpathPlugins);
}
public MockNode(Settings settings, Class<? extends Plugin> classpathPlugin) {
this(settings, list(classpathPlugin));
}
private static Collection<Class<? extends Plugin>> list(Class<? extends Plugin> classpathPlugin) {
Collection<Class<? extends Plugin>> list = new ArrayList<>();
list.add(classpathPlugin);
return list;
}
}
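A minimal usage sketch for this class; the settings values and the optional plugin class are assumptions for illustration:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;

public class MockNodeSketch {
    public static void main(String[] args) throws Exception {
        Settings settings = Settings.settingsBuilder()
                .put("path.home", System.getProperty("path.home", "."))
                .put("cluster.name", "sketch-cluster") // placeholder
                .build();
        // Pass a plugin class via the two-argument constructor if needed,
        // e.g. new MockNode(settings, SomePlugin.class) where SomePlugin is hypothetical.
        Node node = new MockNode(settings).start();
        try {
            // interact with node.client() here
        } finally {
            node.close();
        }
    }
}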

@@ -0,0 +1,4 @@
/**
* Classes to support Elasticsearch node creation.
*/
package org.elasticsearch.node;

@@ -0,0 +1,92 @@
package org.xbib.elasticsearch;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.junit.Test;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
 * Tests for adding index aliases and resolving the most recent aliased index.
 */
public class AliasTest extends NodeTestUtils {
private static final ESLogger logger = ESLoggerFactory.getLogger(AliasTest.class.getName());
@Test
public void testAlias() throws IOException {
CreateIndexRequest indexRequest = new CreateIndexRequest("test");
client("1").admin().indices().create(indexRequest).actionGet();
// put alias
IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
String[] indices = new String[]{"test"};
String[] aliases = new String[]{"test_alias"};
IndicesAliasesRequest.AliasActions aliasAction =
new IndicesAliasesRequest.AliasActions(AliasAction.Type.ADD, indices, aliases);
indicesAliasesRequest.addAliasAction(aliasAction);
client("1").admin().indices().aliases(indicesAliasesRequest).actionGet();
// get alias
GetAliasesRequest getAliasesRequest = new GetAliasesRequest(Strings.EMPTY_ARRAY);
long t0 = System.nanoTime();
GetAliasesResponse getAliasesResponse = client("1").admin().indices().getAliases(getAliasesRequest).actionGet();
long t1 = (System.nanoTime() - t0) / 1000000;
logger.info("{} time(ms) = {}", getAliasesResponse.getAliases(), t1);
assertTrue(t1 >= 0);
}
@Test
public void testMostRecentIndex() throws IOException {
String alias = "test";
CreateIndexRequest indexRequest = new CreateIndexRequest("test20160101");
client("1").admin().indices().create(indexRequest).actionGet();
indexRequest = new CreateIndexRequest("test20160102");
client("1").admin().indices().create(indexRequest).actionGet();
indexRequest = new CreateIndexRequest("test20160103");
client("1").admin().indices().create(indexRequest).actionGet();
IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
String[] indices = new String[]{"test20160101", "test20160102", "test20160103"};
String[] aliases = new String[]{alias};
IndicesAliasesRequest.AliasActions aliasAction =
new IndicesAliasesRequest.AliasActions(AliasAction.Type.ADD, indices, aliases);
indicesAliasesRequest.addAliasAction(aliasAction);
client("1").admin().indices().aliases(indicesAliasesRequest).actionGet();
GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client("1"),
GetAliasesAction.INSTANCE);
GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet();
Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
Set<String> result = new TreeSet<>(Collections.reverseOrder());
for (ObjectCursor<String> indexName : getAliasesResponse.getAliases().keys()) {
Matcher m = pattern.matcher(indexName.value);
if (m.matches()) {
if (alias.equals(m.group(1))) {
result.add(indexName.value);
}
}
}
Iterator<String> it = result.iterator();
assertEquals("test20160103", it.next());
assertEquals("test20160102", it.next());
assertEquals("test20160101", it.next());
logger.info("result={}", result);
}
}

@@ -0,0 +1,204 @@
package org.xbib.elasticsearch;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.client.support.AbstractClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
import org.junit.After;
import org.junit.Before;
import org.xbib.elasticsearch.extras.client.NetworkUtils;
import java.io.IOException;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
 * Base class that starts a local test node before each test and tears it down afterwards.
 */
public class NodeTestUtils {
protected static final ESLogger logger = ESLoggerFactory.getLogger("test");
private static Random random = new Random();
private static char[] numbersAndLetters = ("0123456789abcdefghijklmnopqrstuvwxyz").toCharArray();
private Map<String, Node> nodes = new HashMap<>();
private Map<String, AbstractClient> clients = new HashMap<>();
private AtomicInteger counter = new AtomicInteger();
private String cluster;
private String host;
private int port;
private static void deleteFiles() throws IOException {
Path directory = Paths.get(System.getProperty("path.home") + "/data");
Files.walkFileTree(directory, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
Files.delete(file);
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
Files.delete(dir);
return FileVisitResult.CONTINUE;
}
});
}
@Before
public void startNodes() {
try {
logger.info("starting");
setClusterName();
startNode("1");
findNodeAddress();
try {
ClusterHealthResponse healthResponse = client("1").execute(ClusterHealthAction.INSTANCE,
new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN).timeout(TimeValue.timeValueSeconds(30))).actionGet();
if (healthResponse != null && healthResponse.isTimedOut()) {
throw new IOException("cluster state is " + healthResponse.getStatus().name()
+ ", from here on, everything will fail!");
}
} catch (ElasticsearchTimeoutException e) {
throw new IOException("timeout, cluster does not respond to health request, cowardly refusing to continue with operations");
}
} catch (Throwable t) {
logger.error("startNodes failed", t);
}
}
@After
public void stopNodes() {
try {
closeNodes();
} catch (Exception e) {
logger.error("can not close nodes", e);
} finally {
try {
deleteFiles();
logger.info("data files wiped");
Thread.sleep(2000L);
} catch (IOException e) {
logger.error(e.getMessage(), e);
} catch (InterruptedException e) {
// ignore
}
}
}
protected void setClusterName() {
this.cluster = "test-helper-cluster-"
+ NetworkUtils.getLocalAddress().getHostName()
+ "-" + System.getProperty("user.name")
+ "-" + counter.incrementAndGet();
}
protected String getClusterName() {
return cluster;
}
protected Settings getSettings() {
return settingsBuilder()
.put("host", host)
.put("port", port)
.put("cluster.name", cluster)
.put("path.home", getHome())
.build();
}
protected Settings getNodeSettings() {
return settingsBuilder()
.put("cluster.name", cluster)
.put("cluster.routing.schedule", "50ms")
.put("cluster.routing.allocation.disk.threshold_enabled", false)
.put("discovery.zen.multicast.enabled", true)
.put("discovery.zen.multicast.ping_timeout", "5s")
.put("http.enabled", true)
.put("threadpool.bulk.size", Runtime.getRuntime().availableProcessors())
.put("threadpool.bulk.queue_size", 16 * Runtime.getRuntime().availableProcessors()) // default is 50, too low
.put("index.number_of_replicas", 0)
.put("path.home", getHome())
.build();
}
protected String getHome() {
return System.getProperty("path.home");
}
public void startNode(String id) throws IOException {
buildNode(id).start();
}
public AbstractClient client(String id) {
return clients.get(id);
}
private void closeNodes() throws IOException {
logger.info("closing all clients");
for (AbstractClient client : clients.values()) {
client.close();
}
clients.clear();
logger.info("closing all nodes");
for (Node node : nodes.values()) {
if (node != null) {
node.close();
}
}
nodes.clear();
logger.info("all nodes closed");
}
protected void findNodeAddress() {
NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().transport(true);
NodesInfoResponse response = client("1").admin().cluster().nodesInfo(nodesInfoRequest).actionGet();
Object obj = response.iterator().next().getTransport().getAddress()
.publishAddress();
if (obj instanceof InetSocketTransportAddress) {
InetSocketTransportAddress address = (InetSocketTransportAddress) obj;
host = address.address().getHostName();
port = address.address().getPort();
}
}
private Node buildNode(String id) throws IOException {
Settings nodeSettings = settingsBuilder()
.put(getNodeSettings())
.put("name", id)
.build();
logger.info("settings={}", nodeSettings.getAsMap());
Node node = new MockNode(nodeSettings);
AbstractClient client = (AbstractClient) node.client();
nodes.put(id, node);
clients.put(id, client);
logger.info("clients={}", clients);
return node;
}
protected String randomString(int len) {
final char[] buf = new char[len];
final int n = numbersAndLetters.length - 1;
for (int i = 0; i < buf.length; i++) {
buf[i] = numbersAndLetters[random.nextInt(n)];
}
return new String(buf);
}
}

@@ -0,0 +1,66 @@
package org.xbib.elasticsearch;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.sort.SortOrder;
import org.junit.Test;
import static org.elasticsearch.client.Requests.indexRequest;
import static org.elasticsearch.client.Requests.refreshRequest;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
/**
 * Bulk-indexes documents and measures simple paged searches against them.
 */
public class SearchTest extends NodeTestUtils {
@Test
public void testSearch() throws Exception {
Client client = client("1");
long t0 = System.currentTimeMillis();
BulkRequestBuilder builder = new BulkRequestBuilder(client, BulkAction.INSTANCE);
for (int i = 0; i < 1000; i++) {
builder.add(indexRequest()
.index("pages").type("row")
.source(jsonBuilder()
.startObject()
.field("user1", "kimchy")
.field("user2", "kimchy")
.field("user3", "kimchy")
.field("user4", "kimchy")
.field("user5", "kimchy")
.field("user6", "kimchy")
.field("user7", "kimchy")
.field("user8", "kimchy")
.field("user9", "kimchy")
.field("rowcount", i)
.field("rs", 1234)));
}
client.bulk(builder.request()).actionGet();
client.admin().indices().refresh(refreshRequest()).actionGet();
long t1 = System.currentTimeMillis();
logger.info("t1-t0 = {}", t1 - t0);
for (int i = 0; i < 100; i++) {
t1 = System.currentTimeMillis();
QueryBuilder queryStringBuilder =
QueryBuilders.queryStringQuery("rs:" + 1234);
SearchRequestBuilder requestBuilder = client.prepareSearch()
.setIndices("pages")
.setTypes("row")
.setQuery(queryStringBuilder)
.addSort("rowcount", SortOrder.DESC)
.setFrom(i * 10).setSize(10);
SearchResponse response = requestBuilder.execute().actionGet();
long t2 = System.currentTimeMillis();
logger.info("t2-t1 = {}", t2 - t1);
}
}
}

@@ -0,0 +1,59 @@
package org.xbib.elasticsearch;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.common.settings.Settings;
import org.junit.Test;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.junit.Assert.assertEquals;
/**
 * Indexes and retrieves a document with a keyword-analyzed field.
 */
public class SimpleTest extends NodeTestUtils {
protected Settings getNodeSettings() {
return settingsBuilder()
.put("path.home", System.getProperty("path.home"))
.put("index.analysis.analyzer.default.filter.0", "lowercase")
.put("index.analysis.analyzer.default.filter.1", "trim")
.put("index.analysis.analyzer.default.tokenizer", "keyword")
.build();
}
@Test
public void test() throws Exception {
try {
DeleteIndexRequestBuilder deleteIndexRequestBuilder =
new DeleteIndexRequestBuilder(client("1"), DeleteIndexAction.INSTANCE, "test");
deleteIndexRequestBuilder.execute().actionGet();
} catch (Exception e) {
// ignore
}
IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(client("1"), IndexAction.INSTANCE);
indexRequestBuilder
.setIndex("test")
.setType("test")
.setId("1")
.setSource(jsonBuilder().startObject().field("field",
"1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8").endObject())
.setRefresh(true)
.execute()
.actionGet();
String doc = client("1").prepareSearch("test")
.setTypes("test")
.setQuery(matchQuery("field",
"1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8"))
.execute()
.actionGet()
.getHits().getAt(0).getSourceAsString();
assertEquals(doc,
"{\"field\":\"1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8\"}");
}
}

@@ -0,0 +1,70 @@
package org.xbib.elasticsearch;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilder;
import org.junit.Test;
import java.io.IOException;
import static org.elasticsearch.client.Requests.indexRequest;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
/**
 * Explores query_string wildcard matching and escaping behavior.
 */
public class WildcardTest extends NodeTestUtils {
protected Settings getNodeSettings() {
return settingsBuilder()
.put("cluster.name", getClusterName())
.put("cluster.routing.allocation.disk.threshold_enabled", false)
.put("discovery.zen.multicast.enabled", false)
.put("http.enabled", false)
.put("path.home", System.getProperty("path.home"))
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 0)
.build();
}
@Test
public void testWildcard() throws Exception {
index(client("1"), "1", "010");
index(client("1"), "2", "0*0");
// exact
validateCount(client("1"), queryStringQuery("010").defaultField("field"), 1);
validateCount(client("1"), queryStringQuery("0\\*0").defaultField("field"), 1);
// pattern
validateCount(client("1"), queryStringQuery("0*0").defaultField("field"), 1); // 2?
validateCount(client("1"), queryStringQuery("0?0").defaultField("field"), 1); // 2?
validateCount(client("1"), queryStringQuery("0**0").defaultField("field"), 1); // 2?
validateCount(client("1"), queryStringQuery("0??0").defaultField("field"), 0);
validateCount(client("1"), queryStringQuery("*10").defaultField("field"), 1);
validateCount(client("1"), queryStringQuery("*1*").defaultField("field"), 1);
validateCount(client("1"), queryStringQuery("*\\*0").defaultField("field"), 0); // 1?
validateCount(client("1"), queryStringQuery("*\\**").defaultField("field"), 0); // 1?
}
private void index(Client client, String id, String fieldValue) throws IOException {
client.index(indexRequest()
.index("index").type("type").id(id)
.source(jsonBuilder().startObject().field("field", fieldValue).endObject())
.refresh(true)).actionGet();
}
private long count(Client client, QueryBuilder queryBuilder) {
return client.prepareSearch("index").setTypes("type")
.setQuery(queryBuilder)
.execute().actionGet().getHits().getTotalHits();
}
private void validateCount(Client client, QueryBuilder queryBuilder, long expectedHits) {
final long actualHits = count(client, queryBuilder);
if (actualHits != expectedHits) {
throw new RuntimeException("actualHits=" + actualHits + ", expectedHits=" + expectedHits);
}
}
}

@@ -0,0 +1,44 @@
package org.xbib.elasticsearch.extras.client;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.Test;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.util.Collections;
import java.util.Enumeration;
/**
 * Logs all local network interfaces and the properties of their addresses.
 */
public class NetworkTest {
private static final Logger logger = LogManager.getLogger(NetworkTest.class);
@Test
public void testNetwork() throws Exception {
Enumeration<NetworkInterface> nets = NetworkInterface.getNetworkInterfaces();
for (NetworkInterface netint : Collections.list(nets)) {
System.out.println("checking network interface = " + netint.getName());
Enumeration<InetAddress> inetAddresses = netint.getInetAddresses();
for (InetAddress addr : Collections.list(inetAddresses)) {
logger.info("found address = " + addr.getHostAddress()
+ " name = " + addr.getHostName()
+ " canicalhostname = " + addr.getCanonicalHostName()
+ " loopback = " + addr.isLoopbackAddress()
+ " sitelocal = " + addr.isSiteLocalAddress()
+ " linklocal = " + addr.isLinkLocalAddress()
+ " anylocal = " + addr.isAnyLocalAddress()
+ " multicast = " + addr.isMulticastAddress()
+ " mcglobal = " + addr.isMCGlobal()
+ " mclinklocal = " + addr.isMCLinkLocal()
+ " mcnodelocal = " + addr.isMCNodeLocal()
+ " mcorglocal = " + addr.isMCOrgLocal()
+ " mcsitelocal = " + addr.isMCSiteLocal()
+ " mcsitelocal = " + addr.isReachable(1000));
}
}
}
}

@@ -0,0 +1,208 @@
package org.xbib.elasticsearch.extras.client.node;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.junit.Before;
import org.junit.Test;
import org.xbib.elasticsearch.NodeTestUtils;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
/**
 * Tests for {@link BulkNodeClient}: index creation, mappings, and single, random,
 * and threaded bulk document ingestion.
 */
public class BulkNodeClientTest extends NodeTestUtils {
private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeClientTest.class.getSimpleName());
private static final Long MAX_ACTIONS = 1000L;
private static final Long NUM_ACTIONS = 1234L;
@Before
public void startNodes() {
try {
super.startNodes();
startNode("2");
} catch (Throwable t) {
logger.error("startNodes failed", t);
}
}
@Test
public void testNewIndexNodeClient() throws Exception {
final BulkNodeClient client = ClientBuilder.builder()
.put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5))
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkNodeClient(client("1"));
client.newIndex("test");
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
client.shutdown();
}
@Test
public void testMappingNodeClient() throws Exception {
final BulkNodeClient client = ClientBuilder.builder()
.put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5))
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkNodeClient(client("1"));
XContentBuilder builder = jsonBuilder()
.startObject()
.startObject("test")
.startObject("properties")
.startObject("location")
.field("type", "geo_point")
.endObject()
.endObject()
.endObject()
.endObject();
client.mapping("test", builder.string());
client.newIndex("test");
GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test");
GetMappingsResponse getMappingsResponse =
client.client().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet();
logger.info("mappings={}", getMappingsResponse.getMappings());
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
client.shutdown();
}
@Test
public void testSingleDocNodeClient() {
final BulkNodeClient client = ClientBuilder.builder()
.put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS)
.put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(30))
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkNodeClient(client("1"));
try {
client.newIndex("test");
client.index("test", "test", "1", "{ \"name\" : \"Hello World\"}"); // single doc ingest
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(30));
} catch (InterruptedException e) {
// ignore
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} catch (ExecutionException e) {
logger.error(e.getMessage(), e);
} finally {
assertEquals(1, client.getMetric().getSucceeded().getCount());
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
client.shutdown();
}
}
@Test
public void testRandomDocsNodeClient() throws Exception {
long numactions = NUM_ACTIONS;
final BulkNodeClient client = ClientBuilder.builder()
.put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS)
.put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkNodeClient(client("1"));
try {
client.newIndex("test");
for (int i = 0; i < NUM_ACTIONS; i++) {
client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
}
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(30));
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
assertEquals(numactions, client.getMetric().getSucceeded().getCount());
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
client.shutdown();
}
}
@Test
public void testThreadedRandomDocsNodeClient() throws Exception {
int maxthreads = Runtime.getRuntime().availableProcessors();
Long maxactions = MAX_ACTIONS;
final Long maxloop = NUM_ACTIONS;
logger.info("NodeClient max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop);
final BulkNodeClient client = ClientBuilder.builder()
.put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions)
                .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) // disable auto flush for this test
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkNodeClient(client("1"));
try {
client.newIndex("test")
.startBulk("test", -1, 1000);
ThreadPoolExecutor pool = EsExecutors.newFixed("bulk-nodeclient-test", maxthreads, 30,
EsExecutors.daemonThreadFactory("bulk-nodeclient-test"));
final CountDownLatch latch = new CountDownLatch(maxthreads);
for (int i = 0; i < maxthreads; i++) {
pool.execute(new Runnable() {
public void run() {
for (int i = 0; i < maxloop; i++) {
client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
}
latch.countDown();
}
});
}
logger.info("waiting for max 30 seconds...");
latch.await(30, TimeUnit.SECONDS);
logger.info("flush...");
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(30));
logger.info("got all responses, thread pool shutdown...");
pool.shutdown();
logger.info("pool is shut down");
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
client.stopBulk("test");
assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount());
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
client.refreshIndex("test");
SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE)
.setQuery(QueryBuilders.matchAllQuery()).setSize(0);
assertEquals(maxthreads * maxloop,
searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
client.shutdown();
}
}
}

@@ -0,0 +1,49 @@
package org.xbib.elasticsearch.extras.client.node;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.junit.Before;
import org.junit.Test;
import org.xbib.elasticsearch.NodeTestUtils;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
/**
 * Tests that bulk requests are rejected with a {@link ClusterBlockException} while the
 * cluster is blocked waiting for a second master-eligible node.
 */
public class BulkNodeClusterBlockTest extends NodeTestUtils {
@Before
public void startNodes() {
try {
setClusterName();
startNode("1");
findNodeAddress();
// do not wait for green health state
logger.info("ready");
} catch (Throwable t) {
logger.error("startNodes failed", t);
}
}
protected Settings getNodeSettings() {
return Settings.settingsBuilder()
.put(super.getNodeSettings())
.put("discovery.zen.minimum_master_nodes", 2) // block until we have two nodes
.build();
}
@Test(expected = ClusterBlockException.class)
public void testClusterBlock() throws Exception {
BulkRequestBuilder brb = client("1").prepareBulk();
XContentBuilder builder = jsonBuilder().startObject().field("field1", "value1").endObject();
String jsonString = builder.string();
IndexRequestBuilder irb = client("1").prepareIndex("test", "test", "1").setSource(jsonString);
brb.add(irb);
brb.execute().actionGet();
}
}

@@ -0,0 +1,60 @@
package org.xbib.elasticsearch.extras.client.node;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
import org.junit.Test;
import org.xbib.elasticsearch.NodeTestUtils;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.junit.Assert.*;
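/**
 * Tests that bulk-indexing documents under randomly colliding IDs yields fewer hits than
 * actions, while all actions are still counted as succeeded.
 */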
public class BulkNodeDuplicateIDTest extends NodeTestUtils {
private final static ESLogger logger = ESLoggerFactory.getLogger(BulkNodeDuplicateIDTest.class.getSimpleName());
private final static Long MAX_ACTIONS = 1000L;
private final static Long NUM_ACTIONS = 12345L;
@Test
public void testDuplicateDocIDs() throws Exception {
long numactions = NUM_ACTIONS;
final BulkNodeClient client = ClientBuilder.builder()
.put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS)
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkNodeClient(client("1"));
try {
client.newIndex("test");
for (int i = 0; i < NUM_ACTIONS; i++) {
client.index("test", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}");
}
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(30));
client.refreshIndex("test");
SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE)
.setIndices("test")
.setTypes("test")
.setQuery(matchAllQuery());
long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits();
logger.info("hits = {}", hits);
assertTrue(hits < NUM_ACTIONS);
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
client.shutdown();
assertEquals(numactions, client.getMetric().getSucceeded().getCount());
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
}
}
}

@@ -0,0 +1,77 @@
package org.xbib.elasticsearch.extras.client.node;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.junit.Test;
import org.xbib.elasticsearch.NodeTestUtils;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.IndexAliasAdder;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertFalse;
/**
 * Tests switching index aliases, both plain and with an {@link IndexAliasAdder} that
 * attaches filters to the new aliases.
 */
public class BulkNodeIndexAliasTest extends NodeTestUtils {
private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeIndexAliasTest.class.getSimpleName());
@Test
public void testIndexAlias() throws Exception {
final BulkNodeClient client = ClientBuilder.builder()
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkNodeClient(client("1"));
try {
client.newIndex("test1234");
for (int i = 0; i < 1; i++) {
client.index("test1234", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}");
}
client.flushIngest();
client.refreshIndex("test1234");
List<String> simpleAliases = Arrays.asList("a", "b", "c");
client.switchAliases("test", "test1234", simpleAliases);
client.newIndex("test5678");
for (int i = 0; i < 1; i++) {
client.index("test5678", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}");
}
client.flushIngest();
client.refreshIndex("test5678");
simpleAliases = Arrays.asList("d", "e", "f");
client.switchAliases("test", "test5678", simpleAliases, new IndexAliasAdder() {
@Override
public void addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias) {
builder.addAlias(index, alias, QueryBuilders.termQuery("my_key", alias));
}
});
Map<String, String> aliases = client.getIndexFilters("test5678");
logger.info("aliases of index test5678 = {}", aliases);
aliases = client.getAliasFilters("test");
logger.info("aliases of alias test = {}", aliases);
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
client.waitForResponses(TimeValue.timeValueSeconds(30));
client.shutdown();
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
}
}
}

@@ -0,0 +1,105 @@
package org.xbib.elasticsearch.extras.client.node;
import org.elasticsearch.action.admin.indices.stats.*;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.indexing.IndexingStats;
import org.junit.Test;
import org.xbib.elasticsearch.NodeTestUtils;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import java.util.Map;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
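/**
 * Tests bulk indexing into two indices with different replica levels, using the node client.
 */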
public class BulkNodeReplicaTest extends NodeTestUtils {
private final static ESLogger logger = ESLoggerFactory.getLogger(BulkNodeReplicaTest.class.getSimpleName());
@Test
public void testReplicaLevel() throws Exception {
// we need nodes for replica levels
startNode("2");
startNode("3");
startNode("4");
Settings settingsTest1 = Settings.settingsBuilder()
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 3)
.build();
Settings settingsTest2 = Settings.settingsBuilder()
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 1)
.build();
final BulkNodeClient client = ClientBuilder.builder()
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkNodeClient(client("1"));
try {
client.newIndex("test1", settingsTest1, null)
.newIndex("test2", settingsTest2, null);
client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30));
for (int i = 0; i < 1234; i++) {
client.index("test1", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
}
for (int i = 0; i < 1234; i++) {
client.index("test2", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
}
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(60));
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
logger.info("refreshing");
client.refreshIndex("test1");
client.refreshIndex("test2");
SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE)
.setIndices("test1", "test2")
.setQuery(matchAllQuery());
long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits();
logger.info("query total hits={}", hits);
assertEquals(2468, hits);
IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(), IndicesStatsAction.INSTANCE)
.all();
IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet();
for (Map.Entry<String, IndexStats> m : response.getIndices().entrySet()) {
IndexStats indexStats = m.getValue();
CommonStats commonStats = indexStats.getTotal();
IndexingStats indexingStats = commonStats.getIndexing();
IndexingStats.Stats stats = indexingStats.getTotal();
logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount());
for (Map.Entry<Integer, IndexShardStats> me : indexStats.getIndexShards().entrySet()) {
IndexShardStats indexShardStats = me.getValue();
CommonStats commonShardStats = indexShardStats.getTotal();
logger.info("shard {} count = {}", me.getKey(),
commonShardStats.getIndexing().getTotal().getIndexCount());
}
}
try {
client.deleteIndex("test1")
.deleteIndex("test2");
} catch (Exception e) {
logger.error("delete index failed, ignored. Reason:", e);
}
client.shutdown();
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
}
}
}

@@ -0,0 +1,67 @@
package org.xbib.elasticsearch.extras.client.node;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.junit.Test;
import org.xbib.elasticsearch.NodeTestUtils;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
/**
 * Tests raising the replica level of an index after bulk indexing, using the node client.
 */
public class BulkNodeUpdateReplicaLevelTest extends NodeTestUtils {
private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeUpdateReplicaLevelTest.class.getSimpleName());
@Test
public void testUpdateReplicaLevel() throws Exception {
int numberOfShards = 2;
int replicaLevel = 3;
// we need 3 nodes for replica level 3
startNode("2");
startNode("3");
int shardsAfterReplica;
Settings settings = Settings.settingsBuilder()
.put("index.number_of_shards", numberOfShards)
.put("index.number_of_replicas", 0)
.build();
final BulkNodeClient client = ClientBuilder.builder()
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkNodeClient(client("1"));
try {
client.newIndex("replicatest", settings, null);
client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30));
for (int i = 0; i < 12345; i++) {
client.index("replicatest", "replicatest", null, "{ \"name\" : \"" + randomString(32) + "\"}");
}
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(30));
shardsAfterReplica = client.updateReplicaLevel("replicatest", replicaLevel);
            assertEquals(numberOfShards * (replicaLevel + 1), shardsAfterReplica);
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
client.shutdown();
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
}
}
}

@@ -0,0 +1,4 @@
/**
* Classes for testing Elasticsearch node client extras.
*/
package org.xbib.elasticsearch.extras.client.node;

@@ -0,0 +1,4 @@
/**
* Classes to test Elasticsearch clients.
*/
package org.xbib.elasticsearch.extras.client;

@@ -0,0 +1,201 @@
package org.xbib.elasticsearch.extras.client.transport;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.index.query.QueryBuilders;
import org.junit.Before;
import org.junit.Test;
import org.xbib.elasticsearch.NodeTestUtils;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
/**
 * Tests for {@link BulkTransportClient}: index lifecycle and single, random, and
 * threaded bulk document ingestion.
 */
public class BulkTransportClientTest extends NodeTestUtils {
private static final ESLogger logger = ESLoggerFactory.getLogger(BulkTransportClientTest.class.getSimpleName());
private static final Long MAX_ACTIONS = 1000L;
private static final Long NUM_ACTIONS = 1234L;
@Before
public void startNodes() {
try {
super.startNodes();
startNode("2");
} catch (Throwable t) {
logger.error("startNodes failed", t);
}
}
@Test
public void testBulkClient() throws IOException {
final BulkTransportClient client = ClientBuilder.builder()
.put(getSettings())
.put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkTransportClient();
client.newIndex("test");
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
try {
client.deleteIndex("test")
.newIndex("test")
.deleteIndex("test");
} catch (NoNodeAvailableException e) {
logger.error("no node available");
} finally {
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
client.shutdown();
}
}
@Test
public void testSingleDocBulkClient() throws IOException {
final BulkTransportClient client = ClientBuilder.builder()
.put(getSettings())
.put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS)
.put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkTransportClient();
try {
client.newIndex("test");
client.index("test", "test", "1", "{ \"name\" : \"Hello World\"}"); // single doc ingest
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(30));
} catch (InterruptedException e) {
// ignore
} catch (ExecutionException e) {
logger.error(e.getMessage(), e);
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
assertEquals(1, client.getMetric().getSucceeded().getCount());
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
client.shutdown();
}
}
@Test
public void testRandomDocsBulkClient() throws IOException {
long numactions = NUM_ACTIONS;
final BulkTransportClient client = ClientBuilder.builder()
.put(getSettings())
.put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS)
.put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkTransportClient();
try {
client.newIndex("test");
for (int i = 0; i < NUM_ACTIONS; i++) {
client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
}
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(30));
} catch (InterruptedException e) {
// ignore
} catch (ExecutionException e) {
logger.error(e.getMessage(), e);
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
assertEquals(numactions, client.getMetric().getSucceeded().getCount());
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
client.shutdown();
}
}
@Test
public void testThreadedRandomDocsBulkClient() throws Exception {
int maxthreads = Runtime.getRuntime().availableProcessors();
long maxactions = MAX_ACTIONS;
final long maxloop = NUM_ACTIONS;
Settings settingsForIndex = Settings.settingsBuilder()
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 1)
.build();
final BulkTransportClient client = ClientBuilder.builder()
.put(getSettings())
.put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions)
.put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) // = disable autoflush for this test
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkTransportClient();
try {
client.newIndex("test", settingsForIndex, null)
.startBulk("test", -1, 1000);
ThreadPoolExecutor pool =
EsExecutors.newFixed("bulkclient-test", maxthreads, 30, EsExecutors.daemonThreadFactory("bulkclient-test"));
final CountDownLatch latch = new CountDownLatch(maxthreads);
for (int i = 0; i < maxthreads; i++) {
pool.execute(() -> {
for (int i1 = 0; i1 < maxloop; i1++) {
client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
}
latch.countDown();
});
}
logger.info("waiting for max 30 seconds...");
latch.await(30, TimeUnit.SECONDS);
logger.info("client flush ...");
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(30));
logger.info("thread pool to be shut down ...");
pool.shutdown();
logger.info("poot shut down");
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
client.stopBulk("test");
assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount());
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
client.refreshIndex("test");
SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE)
// to avoid NPE at org.elasticsearch.action.search.SearchRequest.writeTo(SearchRequest.java:580)
.setIndices("_all")
.setQuery(QueryBuilders.matchAllQuery())
.setSize(0);
assertEquals(maxthreads * maxloop,
searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
client.shutdown();
}
}
}

@@ -0,0 +1,61 @@
package org.xbib.elasticsearch.extras.client.transport;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
import org.junit.Test;
import org.xbib.elasticsearch.NodeTestUtils;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.junit.Assert.*;
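/**
 * Tests that bulk-indexing documents under randomly colliding IDs yields fewer hits than
 * actions, using the transport client.
 */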
public class BulkTransportDuplicateIDTest extends NodeTestUtils {
private final static ESLogger logger = ESLoggerFactory.getLogger(BulkTransportDuplicateIDTest.class.getSimpleName());
private final static Long MAX_ACTIONS = 1000L;
private final static Long NUM_ACTIONS = 12345L;
@Test
public void testDuplicateDocIDs() throws Exception {
long numactions = NUM_ACTIONS;
final BulkTransportClient client = ClientBuilder.builder()
.put(getSettings())
.put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS)
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkTransportClient();
try {
client.newIndex("test");
for (int i = 0; i < NUM_ACTIONS; i++) {
client.index("test", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}");
}
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(30));
client.refreshIndex("test");
SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE)
.setIndices("test")
.setTypes("test")
.setQuery(matchAllQuery());
long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits();
logger.info("hits = {}", hits);
assertTrue(hits < NUM_ACTIONS);
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
client.shutdown();
assertEquals(numactions, client.getMetric().getSucceeded().getCount());
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
}
}
}

@@ -0,0 +1,108 @@
package org.xbib.elasticsearch.extras.client.transport;
import org.elasticsearch.action.admin.indices.stats.*;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.indexing.IndexingStats;
import org.junit.Test;
import org.xbib.elasticsearch.NodeTestUtils;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import java.util.Map;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
/**
 * Tests bulk indexing into two indices with different replica levels, using the transport client.
 */
public class BulkTransportReplicaTest extends NodeTestUtils {
private static final ESLogger logger = ESLoggerFactory.getLogger(BulkTransportReplicaTest.class.getSimpleName());
@Test
public void testReplicaLevel() throws Exception {
// we need nodes for replica levels
startNode("2");
startNode("3");
startNode("4");
Settings settingsTest1 = Settings.settingsBuilder()
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 3)
.build();
Settings settingsTest2 = Settings.settingsBuilder()
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 1)
.build();
final BulkTransportClient client = ClientBuilder.builder()
.put(getSettings())
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkTransportClient();
try {
client.newIndex("test1", settingsTest1, null)
.newIndex("test2", settingsTest2, null);
client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30));
for (int i = 0; i < 1234; i++) {
client.index("test1", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
}
for (int i = 0; i < 1234; i++) {
client.index("test2", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
}
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(60));
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
logger.info("refreshing");
client.refreshIndex("test1");
client.refreshIndex("test2");
SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE)
.setIndices("test1", "test2")
.setQuery(matchAllQuery());
long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits();
logger.info("query total hits={}", hits);
assertEquals(2468, hits);
IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(),
IndicesStatsAction.INSTANCE).all();
IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet();
for (Map.Entry<String, IndexStats> m : response.getIndices().entrySet()) {
IndexStats indexStats = m.getValue();
CommonStats commonStats = indexStats.getTotal();
IndexingStats indexingStats = commonStats.getIndexing();
IndexingStats.Stats stats = indexingStats.getTotal();
logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount());
for (Map.Entry<Integer, IndexShardStats> me : indexStats.getIndexShards().entrySet()) {
IndexShardStats indexShardStats = me.getValue();
CommonStats commonShardStats = indexShardStats.getTotal();
logger.info("shard {} count = {}", me.getKey(),
commonShardStats.getIndexing().getTotal().getIndexCount());
}
}
try {
client.deleteIndex("test1")
.deleteIndex("test2");
} catch (Exception e) {
logger.error("delete index failed, ignored. Reason:", e);
}
client.shutdown();
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
}
}
}

@@ -0,0 +1,69 @@
package org.xbib.elasticsearch.extras.client.transport;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.junit.Test;
import org.xbib.elasticsearch.NodeTestUtils;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
/**
 * Tests raising the replica level of an index after bulk indexing, using the transport client.
 */
public class BulkTransportUpdateReplicaLevelTest extends NodeTestUtils {
private static final ESLogger logger =
ESLoggerFactory.getLogger(BulkTransportUpdateReplicaLevelTest.class.getSimpleName());
@Test
public void testUpdateReplicaLevel() throws Exception {
int numberOfShards = 2;
int replicaLevel = 3;
// we need 3 nodes for replica level 3
startNode("2");
startNode("3");
int shardsAfterReplica;
Settings settings = Settings.settingsBuilder()
.put("index.number_of_shards", numberOfShards)
.put("index.number_of_replicas", 0)
.build();
final BulkTransportClient client = ClientBuilder.builder()
.put(getSettings())
.setMetric(new SimpleBulkMetric())
.setControl(new SimpleBulkControl())
.toBulkTransportClient();
try {
client.newIndex("replicatest", settings, null);
client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30));
for (int i = 0; i < 12345; i++) {
client.index("replicatest", "replicatest", null, "{ \"name\" : \"" + randomString(32) + "\"}");
}
client.flushIngest();
client.waitForResponses(TimeValue.timeValueSeconds(30));
shardsAfterReplica = client.updateReplicaLevel("replicatest", replicaLevel);
            assertEquals(numberOfShards * (replicaLevel + 1), shardsAfterReplica);
} catch (NoNodeAvailableException e) {
logger.warn("skipping, no node available");
} finally {
client.shutdown();
if (client.hasThrowable()) {
logger.error("error", client.getThrowable());
}
assertFalse(client.hasThrowable());
}
}
}

@@ -0,0 +1,4 @@
/**
* Test classes for testing Elasticsearch.
*/
package org.xbib.elasticsearch;

@@ -0,0 +1,23 @@
package suites;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.xbib.elasticsearch.extras.client.node.BulkNodeClientTest;
import org.xbib.elasticsearch.extras.client.node.BulkNodeDuplicateIDTest;
import org.xbib.elasticsearch.extras.client.node.BulkNodeIndexAliasTest;
import org.xbib.elasticsearch.extras.client.node.BulkNodeReplicaTest;
import org.xbib.elasticsearch.extras.client.node.BulkNodeUpdateReplicaLevelTest;
/**
 * Test suite for the bulk node client tests.
 */
@RunWith(ListenerSuite.class)
@Suite.SuiteClasses({
BulkNodeClientTest.class,
BulkNodeDuplicateIDTest.class,
BulkNodeReplicaTest.class,
BulkNodeUpdateReplicaLevelTest.class,
BulkNodeIndexAliasTest.class
})
public class BulkNodeTestSuite {
}

@@ -0,0 +1,22 @@
package suites;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.xbib.elasticsearch.extras.client.transport.BulkTransportClientTest;
import org.xbib.elasticsearch.extras.client.transport.BulkTransportDuplicateIDTest;
import org.xbib.elasticsearch.extras.client.transport.BulkTransportReplicaTest;
import org.xbib.elasticsearch.extras.client.transport.BulkTransportUpdateReplicaLevelTest;
/**
 * Test suite for the bulk transport client tests.
 */
@RunWith(ListenerSuite.class)
@Suite.SuiteClasses({
BulkTransportClientTest.class,
BulkTransportDuplicateIDTest.class,
BulkTransportReplicaTest.class,
BulkTransportUpdateReplicaLevelTest.class
})
public class BulkTransportTestSuite {
}

View file

@ -0,0 +1,23 @@
package suites;
import org.junit.runner.Runner;
import org.junit.runner.notification.RunNotifier;
import org.junit.runners.Suite;
import org.junit.runners.model.InitializationError;
import org.junit.runners.model.RunnerBuilder;
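/**
 * A JUnit {@link Suite} that attaches a {@link TestListener} to the notifier while each child runs.
 */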
public class ListenerSuite extends Suite {
private final TestListener listener = new TestListener();
public ListenerSuite(Class<?> klass, RunnerBuilder builder) throws InitializationError {
super(klass, builder);
}
@Override
protected void runChild(Runner runner, RunNotifier notifier) {
notifier.addListener(listener);
runner.run(notifier);
notifier.removeListener(listener);
}
}

@@ -0,0 +1,21 @@
package suites;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.xbib.elasticsearch.AliasTest;
import org.xbib.elasticsearch.SearchTest;
import org.xbib.elasticsearch.SimpleTest;
import org.xbib.elasticsearch.WildcardTest;
/**
 * Test suite for miscellaneous Elasticsearch tests.
 */
@RunWith(ListenerSuite.class)
@Suite.SuiteClasses({
SimpleTest.class,
AliasTest.class,
SearchTest.class,
WildcardTest.class
})
public class MiscTestSuite {
}

@@ -0,0 +1,44 @@
package suites;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.runner.Description;
import org.junit.runner.Result;
import org.junit.runner.notification.Failure;
import org.junit.runner.notification.RunListener;
/**
 * A JUnit {@link RunListener} that logs test run lifecycle events.
 */
public class TestListener extends RunListener {
private static final Logger logger = LogManager.getLogger("test.listener");
public void testRunStarted(Description description) throws java.lang.Exception {
logger.info("number of tests to execute: {}", description.testCount());
}
public void testRunFinished(Result result) throws java.lang.Exception {
logger.info("number of tests executed: {}", result.getRunCount());
}
public void testStarted(Description description) throws java.lang.Exception {
logger.info("starting execution of {} {}",
description.getClassName(), description.getMethodName());
}
public void testFinished(Description description) throws java.lang.Exception {
logger.info("finished execution of {} {}",
description.getClassName(), description.getMethodName());
}
public void testFailure(Failure failure) throws java.lang.Exception {
logger.info("failed execution of tests: {}",
failure.getMessage());
}
public void testIgnored(Description description) throws java.lang.Exception {
logger.info("execution of test ignored: {}",
description.getClassName(), description.getMethodName());
}
}

@@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration status="OFF">
<appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="[%d{ABSOLUTE}][%-5p][%-25c][%t] %m%n"/>
</Console>
</appenders>
<Loggers>
<Root level="info">
<AppenderRef ref="Console" />
</Root>
</Loggers>
</configuration>

@@ -0,0 +1,3 @@
{
"index.analysis.analyzer.default.type" : "keyword"
}

@@ -0,0 +1,495 @@
package org.xbib.elasticsearch.extras.client;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.flush.FlushAction;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexAction;
import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Abstract base class for Elasticsearch clients, providing settings and mapping management
 * plus index, alias, and retention administration.
 */
public abstract class AbstractClient {
private static final ESLogger logger = ESLoggerFactory.getLogger(AbstractClient.class.getName());
private Settings.Builder settingsBuilder;
private Settings settings;
private Map<String, String> mappings = new HashMap<>();
public abstract ElasticsearchClient client();
protected abstract void createClient(Settings settings) throws IOException;
public abstract void shutdown();
public Settings.Builder getSettingsBuilder() {
return settingsBuilder();
}
public void resetSettings() {
settingsBuilder = Settings.settingsBuilder();
settings = null;
mappings = new HashMap<>();
}
public void setSettings(Settings settings) {
this.settings = settings;
}
public void setting(String key, String value) {
if (settingsBuilder == null) {
settingsBuilder = Settings.settingsBuilder();
}
settingsBuilder.put(key, value);
}
public void setting(String key, Boolean value) {
if (settingsBuilder == null) {
settingsBuilder = Settings.settingsBuilder();
}
settingsBuilder.put(key, value);
}
public void setting(String key, Integer value) {
if (settingsBuilder == null) {
settingsBuilder = Settings.settingsBuilder();
}
settingsBuilder.put(key, value);
}
public void setting(InputStream in) throws IOException {
settingsBuilder = Settings.settingsBuilder().loadFromStream(".json", in);
}
public Settings.Builder settingsBuilder() {
return settingsBuilder != null ? settingsBuilder : Settings.settingsBuilder();
}
public Settings settings() {
if (settings != null) {
return settings;
}
if (settingsBuilder == null) {
settingsBuilder = Settings.settingsBuilder();
}
return settingsBuilder.build();
}
public void mapping(String type, String mapping) throws IOException {
mappings.put(type, mapping);
}
public void mapping(String type, InputStream in) throws IOException {
if (type == null) {
return;
}
StringWriter sw = new StringWriter();
Streams.copy(new InputStreamReader(in), sw);
mappings.put(type, sw.toString());
}
public Map<String, String> mappings() {
return mappings.isEmpty() ? null : mappings;
}
public void updateIndexSetting(String index, String key, Object value) throws IOException {
if (client() == null) {
return;
}
if (index == null) {
throw new IOException("no index name given");
}
if (key == null) {
throw new IOException("no key given");
}
if (value == null) {
throw new IOException("no value given");
}
Settings.Builder settingsBuilder = Settings.settingsBuilder();
settingsBuilder.put(key, value.toString());
UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(index)
.settings(settingsBuilder);
client().execute(UpdateSettingsAction.INSTANCE, updateSettingsRequest).actionGet();
}
public void waitForRecovery() throws IOException {
if (client() == null) {
return;
}
client().execute(RecoveryAction.INSTANCE, new RecoveryRequest()).actionGet();
}
public int waitForRecovery(String index) throws IOException {
if (client() == null) {
return -1;
}
if (index == null) {
            throw new IOException("unable to wait for recovery, index not set");
}
RecoveryResponse response = client().execute(RecoveryAction.INSTANCE, new RecoveryRequest(index)).actionGet();
int shards = response.getTotalShards();
client().execute(ClusterHealthAction.INSTANCE, new ClusterHealthRequest(index)
.waitForActiveShards(shards)).actionGet();
return shards;
}
public void waitForCluster(String statusString, TimeValue timeout)
throws IOException, ElasticsearchTimeoutException {
if (client() == null) {
return;
}
ClusterHealthStatus status = ClusterHealthStatus.fromString(statusString);
ClusterHealthResponse healthResponse =
client().execute(ClusterHealthAction.INSTANCE, new ClusterHealthRequest()
.waitForStatus(status).timeout(timeout)).actionGet();
if (healthResponse != null && healthResponse.isTimedOut()) {
throw new IOException("cluster state is " + healthResponse.getStatus().name()
+ " and not " + status.name()
+ ", from here on, everything will fail!");
}
}
public String fetchClusterName() {
if (client() == null) {
return null;
}
try {
ClusterStateRequestBuilder clusterStateRequestBuilder =
new ClusterStateRequestBuilder(client(), ClusterStateAction.INSTANCE).all();
ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet();
String name = clusterStateResponse.getClusterName().value();
int nodeCount = clusterStateResponse.getState().getNodes().size();
return name + " (" + nodeCount + " nodes connected)";
} catch (ElasticsearchTimeoutException e) {
return "TIMEOUT";
} catch (NoNodeAvailableException e) {
return "DISCONNECTED";
} catch (Throwable t) {
return "[" + t.getMessage() + "]";
}
}
public String healthColor() {
if (client() == null) {
return null;
}
try {
ClusterHealthResponse healthResponse =
client().execute(ClusterHealthAction.INSTANCE,
new ClusterHealthRequest().timeout(TimeValue.timeValueSeconds(30))).actionGet();
ClusterHealthStatus status = healthResponse.getStatus();
return status.name();
} catch (ElasticsearchTimeoutException e) {
return "TIMEOUT";
} catch (NoNodeAvailableException e) {
return "DISCONNECTED";
} catch (Throwable t) {
return "[" + t.getMessage() + "]";
}
}
public int updateReplicaLevel(String index, int level) throws IOException {
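        // wait for YELLOW, raise number_of_replicas to the given level, then wait for
        // recovery; returns the total number of shards after the change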
waitForCluster("YELLOW", TimeValue.timeValueSeconds(30));
updateIndexSetting(index, "number_of_replicas", level);
return waitForRecovery(index);
}
public void flushIndex(String index) {
if (client() == null) {
return;
}
if (index != null) {
client().execute(FlushAction.INSTANCE, new FlushRequest(index)).actionGet();
}
}
public void refreshIndex(String index) {
if (client() == null) {
return;
}
if (index != null) {
client().execute(RefreshAction.INSTANCE, new RefreshRequest(index)).actionGet();
}
}
public void putMapping(String index) {
if (client() == null) {
return;
}
        if (!mappings.isEmpty()) {
            for (Map.Entry<String, String> me : mappings.entrySet()) {
client().execute(PutMappingAction.INSTANCE,
new PutMappingRequest(index).type(me.getKey()).source(me.getValue())).actionGet();
}
}
}
public String resolveAlias(String alias) {
if (client() == null) {
return alias;
}
GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE);
GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet();
if (!getAliasesResponse.getAliases().isEmpty()) {
return getAliasesResponse.getAliases().keys().iterator().next().value;
}
return alias;
}
public String resolveMostRecentIndex(String alias) {
if (client() == null) {
return alias;
}
if (alias == null) {
return null;
}
GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE);
GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet();
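        // index names are assumed to follow the pattern <alias><digits>, e.g. "test20161101";
        // with reverse ordering, the lexicographically greatest (most recent) suffix wins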
Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
Set<String> indices = new TreeSet<>(Collections.reverseOrder());
for (ObjectCursor<String> indexName : getAliasesResponse.getAliases().keys()) {
Matcher m = pattern.matcher(indexName.value);
if (m.matches()) {
if (alias.equals(m.group(1))) {
indices.add(indexName.value);
}
}
}
return indices.isEmpty() ? alias : indices.iterator().next();
}
public Map<String, String> getAliasFilters(String alias) {
GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE);
return getFilters(getAliasesRequestBuilder.setIndices(resolveAlias(alias)).execute().actionGet());
}
public Map<String, String> getIndexFilters(String index) {
GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE);
return getFilters(getAliasesRequestBuilder.setIndices(index).execute().actionGet());
}
private Map<String, String> getFilters(GetAliasesResponse getAliasesResponse) {
Map<String, String> result = new HashMap<>();
for (ObjectObjectCursor<String, List<AliasMetaData>> object : getAliasesResponse.getAliases()) {
List<AliasMetaData> aliasMetaDataList = object.value;
for (AliasMetaData aliasMetaData : aliasMetaDataList) {
if (aliasMetaData.filteringRequired()) {
result.put(aliasMetaData.alias(), new String(aliasMetaData.getFilter().uncompressed()));
} else {
result.put(aliasMetaData.alias(), null);
}
}
}
return result;
}
public void switchAliases(String index, String concreteIndex, List<String> extraAliases) {
switchAliases(index, concreteIndex, extraAliases, null);
}
public void switchAliases(String index, String concreteIndex,
List<String> extraAliases, IndexAliasAdder adder) {
if (client() == null) {
return;
}
if (index.equals(concreteIndex)) {
return;
}
// two situations: 1. there is a new alias 2. there is already an old index with the alias
String oldIndex = resolveAlias(index);
final Map<String, String> oldFilterMap = oldIndex.equals(index) ? null : getIndexFilters(oldIndex);
final List<String> newAliases = new LinkedList<>();
final List<String> switchAliases = new LinkedList<>();
IndicesAliasesRequestBuilder requestBuilder = new IndicesAliasesRequestBuilder(client(), IndicesAliasesAction.INSTANCE);
if (oldFilterMap == null || !oldFilterMap.containsKey(index)) {
// never apply a filter for trunk index name
requestBuilder.addAlias(concreteIndex, index);
newAliases.add(index);
}
// switch existing aliases
if (oldFilterMap != null) {
for (Map.Entry<String, String> entry : oldFilterMap.entrySet()) {
String alias = entry.getKey();
String filter = entry.getValue();
requestBuilder.removeAlias(oldIndex, alias);
if (filter != null) {
requestBuilder.addAlias(concreteIndex, alias, filter);
} else {
requestBuilder.addAlias(concreteIndex, alias);
}
switchAliases.add(alias);
}
}
// a list of aliases that should be added, check if new or old
if (extraAliases != null) {
for (String extraAlias : extraAliases) {
if (oldFilterMap == null || !oldFilterMap.containsKey(extraAlias)) {
// index alias adder only active on extra aliases, and if alias is new
if (adder != null) {
adder.addIndexAlias(requestBuilder, concreteIndex, extraAlias);
} else {
requestBuilder.addAlias(concreteIndex, extraAlias);
}
newAliases.add(extraAlias);
} else {
String filter = oldFilterMap.get(extraAlias);
requestBuilder.removeAlias(oldIndex, extraAlias);
if (filter != null) {
requestBuilder.addAlias(concreteIndex, extraAlias, filter);
} else {
requestBuilder.addAlias(concreteIndex, extraAlias);
}
switchAliases.add(extraAlias);
}
}
}
if (!newAliases.isEmpty() || !switchAliases.isEmpty()) {
logger.info("new aliases = {}, switch aliases = {}", newAliases, switchAliases);
requestBuilder.execute().actionGet();
}
}
public void performRetentionPolicy(String index, String concreteIndex, int timestampdiff, int mintokeep) {
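        // deletes indices named <index><digits> whose numeric suffix lags behind the suffix of
        // concreteIndex by more than timestampdiff, while always keeping at least mintokeep indices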
if (client() == null) {
return;
}
if (index.equals(concreteIndex)) {
return;
}
GetIndexRequestBuilder getIndexRequestBuilder = new GetIndexRequestBuilder(client(), GetIndexAction.INSTANCE);
GetIndexResponse getIndexResponse = getIndexRequestBuilder.execute().actionGet();
Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
Set<String> indices = new TreeSet<>();
logger.info("{} indices", getIndexResponse.getIndices().length);
for (String s : getIndexResponse.getIndices()) {
Matcher m = pattern.matcher(s);
if (m.matches()) {
if (index.equals(m.group(1)) && !s.equals(concreteIndex)) {
indices.add(s);
}
}
}
if (indices.isEmpty()) {
logger.info("no indices found, retention policy skipped");
return;
}
if (mintokeep > 0 && indices.size() <= mintokeep) {
logger.info("{} indices found, not enough for retention policy ({}), skipped",
indices.size(), mintokeep);
return;
} else {
logger.info("candidates for deletion = {}", indices);
}
List<String> indicesToDelete = new ArrayList<>();
// our index
Matcher m1 = pattern.matcher(concreteIndex);
if (m1.matches()) {
Integer i1 = Integer.parseInt(m1.group(2));
for (String s : indices) {
Matcher m2 = pattern.matcher(s);
if (m2.matches()) {
Integer i2 = Integer.parseInt(m2.group(2));
int kept = indices.size() - indicesToDelete.size();
if ((timestampdiff == 0 || (timestampdiff > 0 && i1 - i2 > timestampdiff)) && mintokeep <= kept) {
indicesToDelete.add(s);
}
}
}
}
logger.info("indices to delete = {}", indicesToDelete);
if (indicesToDelete.isEmpty()) {
logger.info("not enough indices found to delete, retention policy complete");
return;
}
String[] s = indicesToDelete.toArray(new String[indicesToDelete.size()]);
DeleteIndexRequestBuilder requestBuilder = new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, s);
DeleteIndexResponse response = requestBuilder.execute().actionGet();
if (!response.isAcknowledged()) {
logger.warn("retention delete index operation was not acknowledged");
}
}
public Long mostRecentDocument(String index) {
if (client() == null) {
return null;
}
SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client(), SearchAction.INSTANCE);
SortBuilder sort = SortBuilders.fieldSort("_timestamp").order(SortOrder.DESC);
SearchResponse searchResponse = searchRequestBuilder.setIndices(index)
.addField("_timestamp")
.setSize(1)
.addSort(sort)
.execute().actionGet();
if (searchResponse.getHits().getHits().length == 1) {
SearchHit hit = searchResponse.getHits().getHits()[0];
if (hit.getFields().get("_timestamp") != null) {
return hit.getFields().get("_timestamp").getValue();
} else {
return 0L;
}
}
return null;
}
}

View file

@@ -0,0 +1,22 @@
package org.xbib.elasticsearch.extras.client;
import java.util.Map;
import java.util.Set;
/**
* Controls bulk mode of indices and remembers the refresh interval settings to apply at bulk start and stop.
*/
public interface BulkControl {
void startBulk(String indexName, long startRefreshInterval, long stopRefreshInterval);
boolean isBulk(String indexName);
void finishBulk(String indexName);
Set<String> indices();
Map<String, Long> getStartBulkRefreshIntervals();
Map<String, Long> getStopBulkRefreshIntervals();
}

View file

@@ -0,0 +1,31 @@
package org.xbib.elasticsearch.extras.client;
import org.xbib.metrics.Count;
import org.xbib.metrics.Metered;
/**
* Measures bulk ingest activity: ingest rate, byte volume, and counts of submitted, succeeded, and failed requests.
*/
public interface BulkMetric {
Metered getTotalIngest();
Count getTotalIngestSizeInBytes();
Count getCurrentIngest();
Count getCurrentIngestNumDocs();
Count getSubmitted();
Count getSucceeded();
Count getFailed();
void start();
void stop();
long elapsed();
}

View file

@@ -0,0 +1,473 @@
package org.xbib.elasticsearch.extras.client;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import java.io.Closeable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
* A bulk processor is a thread-safe bulk processing class that makes it easy to set when to "flush" a new bulk
* request (based on the number of actions, the size, or a time interval) and to control how many concurrent
* bulk requests may be executed in parallel.
* In order to create a new bulk processor, use the {@link Builder}.
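* <p>A minimal usage sketch (illustrative only; the {@code client} variable and the no-op listener are
* assumptions, not part of this class):
* <pre>{@code
* BulkProcessor processor = BulkProcessor.builder(client, new BulkProcessor.Listener() {
*     public void beforeBulk(long executionId, BulkRequest request) { }
*     public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { }
*     public void afterBulk(long executionId, BulkRequest request, Throwable failure) { }
* })
*     .setBulkActions(1000)
*     .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB))
*     .setFlushInterval(TimeValue.timeValueSeconds(5))
*     .build();
* processor.add(new IndexRequest("myindex").type("mytype").id("1").source("{\"key\":\"value\"}"));
* processor.flush();
* processor.close();
* }</pre>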
*/
public class BulkProcessor implements Closeable {
private final int bulkActions;
private final long bulkSize;
private final ScheduledThreadPoolExecutor scheduler;
private final ScheduledFuture<?> scheduledFuture;
private final AtomicLong executionIdGen = new AtomicLong();
private final BulkRequestHandler bulkRequestHandler;
private BulkRequest bulkRequest;
private volatile boolean closed = false;
private BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests,
int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
this.bulkActions = bulkActions;
this.bulkSize = bulkSize.bytes();
this.bulkRequest = new BulkRequest();
this.bulkRequestHandler = concurrentRequests == 0 ?
new SyncBulkRequestHandler(client, listener) :
new AsyncBulkRequestHandler(client, listener, concurrentRequests);
if (flushInterval != null) {
this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
EsExecutors.daemonThreadFactory(client.settings(),
(name != null ? "[" + name + "]" : "") + "bulk_processor"));
this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(),
flushInterval.millis(), TimeUnit.MILLISECONDS);
} else {
this.scheduler = null;
this.scheduledFuture = null;
}
}
public static Builder builder(Client client, Listener listener) {
if (client == null) {
throw new NullPointerException("The client you specified while building a BulkProcessor is null");
}
return new Builder(client, listener);
}
/**
* Closes the processor. If flushing by time is enabled, the flush scheduler is shut down. Any remaining bulk actions are flushed.
*/
@Override
public void close() {
try {
awaitClose(0, TimeUnit.NANOSECONDS);
} catch (InterruptedException exc) {
Thread.currentThread().interrupt();
}
}
/**
* Closes the processor. If flushing by time is enabled, the flush scheduler is shut down. Any remaining
* bulk actions are flushed.
*
* If concurrent requests are not enabled, returns {@code true} immediately.
* If concurrent requests are enabled, waits for up to the specified timeout for all bulk requests to
* complete, then returns {@code true}.
* If the specified waiting time elapses before all bulk requests complete, {@code false} is returned.
*
* @param timeout The maximum time to wait for the bulk requests to complete
* @param unit The time unit of the {@code timeout} argument
* @return {@code true} if all bulk requests completed and {@code false} if the waiting time elapsed before all the
* bulk requests completed
* @throws InterruptedException If the current thread is interrupted
*/
public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
if (closed) {
return true;
}
closed = true;
if (this.scheduledFuture != null) {
FutureUtils.cancel(this.scheduledFuture);
this.scheduler.shutdown();
}
if (bulkRequest.numberOfActions() > 0) {
execute();
}
return this.bulkRequestHandler.awaitClose(timeout, unit);
}
/**
* Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior as
* {@link IndexRequest} (for example, if no id is provided, one will be generated; the create flag is honored).
*
* @param request request
* @return this bulk processor
*/
public BulkProcessor add(IndexRequest request) {
return add((ActionRequest) request);
}
/**
* Adds an {@link DeleteRequest} to the list of actions to execute.
*
* @param request request
* @return this bulk processor
*/
public BulkProcessor add(DeleteRequest request) {
return add((ActionRequest) request);
}
/**
* Adds either a delete or an index request.
*
* @param request request
* @return this bulk processor
*/
public BulkProcessor add(ActionRequest<?> request) {
return add(request, null);
}
/**
* Adds either a delete or an index request with a payload.
*
* @param request request
* @param payload payload
* @return this bulk processor
*/
public BulkProcessor add(ActionRequest<?> request, @Nullable Object payload) {
internalAdd(request, payload);
return this;
}
protected void ensureOpen() {
if (closed) {
throw new IllegalStateException("bulk process already closed");
}
}
private synchronized void internalAdd(ActionRequest<?> request, @Nullable Object payload) {
ensureOpen();
bulkRequest.add(request, payload);
executeIfNeeded();
}
public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType)
throws Exception {
return add(data, defaultIndex, defaultType, null);
}
public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex,
@Nullable String defaultType, @Nullable Object payload) throws Exception {
bulkRequest.add(data, defaultIndex, defaultType, null, null, payload, true);
executeIfNeeded();
return this;
}
private void executeIfNeeded() {
ensureOpen();
if (!isOverTheLimit()) {
return;
}
execute();
}
private void execute() {
final BulkRequest bulkRequest = this.bulkRequest;
final long executionId = executionIdGen.incrementAndGet();
this.bulkRequest = new BulkRequest();
this.bulkRequestHandler.execute(bulkRequest, executionId);
}
private boolean isOverTheLimit() {
return (bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions) ||
(bulkSize != -1 && bulkRequest.estimatedSizeInBytes() >= bulkSize);
}
/**
* Flush pending delete or index requests.
*/
public synchronized void flush() {
ensureOpen();
if (bulkRequest.numberOfActions() > 0) {
execute();
}
}
/**
* A listener for the execution.
*/
public interface Listener {
/**
* Callback before the bulk is executed.
*
* @param executionId execution ID
* @param request request
*/
void beforeBulk(long executionId, BulkRequest request);
/**
* Callback after a successful execution of bulk request.
*
* @param executionId execution ID
* @param request request
* @param response response
*/
void afterBulk(long executionId, BulkRequest request, BulkResponse response);
/**
* Callback after a failed execution of bulk request.
*
* Note that if an instance of <code>InterruptedException</code> is passed, request processing has been
* cancelled externally, and the thread's interruption status has been restored prior to calling this
* method.
*
* @param executionId execution ID
* @param request request
* @param failure failure
*/
void afterBulk(long executionId, BulkRequest request, Throwable failure);
}
/**
* A builder used to create an instance of a bulk processor.
*/
public static class Builder {
private final Client client;
private final Listener listener;
private String name;
private int concurrentRequests = 1;
private int bulkActions = 1000;
private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
private TimeValue flushInterval = null;
/**
* Creates a builder of a bulk processor with the client to use and the listener that will be
* notified on completion of bulk requests.
*
* @param client the client
* @param listener the listener
*/
Builder(Client client, Listener listener) {
this.client = client;
this.listener = listener;
}
/**
* Sets an optional name to identify this bulk processor.
*
* @param name name
* @return this builder
*/
public Builder setName(String name) {
this.name = name;
return this;
}
/**
* Sets the number of concurrent requests allowed to be executed. A value of 0 means that only a single
* request is allowed to be executed, synchronously. A value of 1 means 1 concurrent request is allowed to be
* executed while accumulating new bulk requests. Defaults to <tt>1</tt>.
*
* @param concurrentRequests maximum number of concurrent requests
* @return this builder
*/
public Builder setConcurrentRequests(int concurrentRequests) {
this.concurrentRequests = concurrentRequests;
return this;
}
/**
* Sets when to flush a new bulk request based on the number of actions currently added. Defaults to
* <tt>1000</tt>. Can be set to <tt>-1</tt> to disable it.
*
* @param bulkActions bulk actions
* @return this builder
*/
public Builder setBulkActions(int bulkActions) {
this.bulkActions = bulkActions;
return this;
}
/**
* Sets when to flush a new bulk request based on the size of actions currently added. Defaults to
* <tt>5mb</tt>. Can be set to <tt>-1</tt> to disable it.
*
* @param bulkSize bulk size
* @return this builder
*/
public Builder setBulkSize(ByteSizeValue bulkSize) {
this.bulkSize = bulkSize;
return this;
}
/**
* Sets a flush interval; if the interval elapses, *any* pending bulk actions are flushed. Defaults to not set.
* Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(org.elasticsearch.common.unit.ByteSizeValue)}
* can be set to <tt>-1</tt> with the flush interval set allowing for complete async processing of bulk actions.
*
* @param flushInterval flush interval
* @return this builder
*/
public Builder setFlushInterval(TimeValue flushInterval) {
this.flushInterval = flushInterval;
return this;
}
/**
* Builds a new bulk processor.
*
* @return a bulk processor
*/
public BulkProcessor build() {
return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
}
}
private class Flush implements Runnable {
@Override
public void run() {
synchronized (BulkProcessor.this) {
if (closed) {
return;
}
if (bulkRequest.numberOfActions() == 0) {
return;
}
execute();
}
}
}
/**
* Abstracts the low-level details of bulk request handling.
*/
abstract class BulkRequestHandler {
public abstract void execute(BulkRequest bulkRequest, long executionId);
public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException;
}
private class SyncBulkRequestHandler extends BulkRequestHandler {
private final Client client;
private final BulkProcessor.Listener listener;
SyncBulkRequestHandler(Client client, BulkProcessor.Listener listener) {
this.client = client;
this.listener = listener;
}
public void execute(BulkRequest bulkRequest, long executionId) {
boolean afterCalled = false;
try {
listener.beforeBulk(executionId, bulkRequest);
BulkResponse bulkResponse = client.execute(BulkAction.INSTANCE, bulkRequest).actionGet();
afterCalled = true;
listener.afterBulk(executionId, bulkRequest, bulkResponse);
} catch (Throwable t) {
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, t);
}
}
}
public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
return true;
}
}
private class AsyncBulkRequestHandler extends BulkRequestHandler {
private final Client client;
private final BulkProcessor.Listener listener;
private final Semaphore semaphore;
private final int concurrentRequests;
private AsyncBulkRequestHandler(Client client, BulkProcessor.Listener listener, int concurrentRequests) {
this.client = client;
this.listener = listener;
this.concurrentRequests = concurrentRequests;
this.semaphore = new Semaphore(concurrentRequests);
}
@Override
public void execute(final BulkRequest bulkRequest, final long executionId) {
boolean bulkRequestSetupSuccessful = false;
boolean acquired = false;
try {
listener.beforeBulk(executionId, bulkRequest);
semaphore.acquire();
acquired = true;
client.execute(BulkAction.INSTANCE, bulkRequest, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse response) {
try {
listener.afterBulk(executionId, bulkRequest, response);
} finally {
semaphore.release();
}
}
@Override
public void onFailure(Throwable e) {
try {
listener.afterBulk(executionId, bulkRequest, e);
} finally {
semaphore.release();
}
}
});
bulkRequestSetupSuccessful = true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
listener.afterBulk(executionId, bulkRequest, e);
} catch (Throwable t) {
listener.afterBulk(executionId, bulkRequest, t);
} finally {
if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore
semaphore.release();
}
}
}
@Override
public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
semaphore.release(this.concurrentRequests);
return true;
}
return false;
}
}
}

View file

@@ -0,0 +1,105 @@
package org.xbib.elasticsearch.extras.client;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.xbib.elasticsearch.extras.client.node.BulkNodeClient;
import org.xbib.elasticsearch.extras.client.transport.BulkTransportClient;
import org.xbib.elasticsearch.extras.client.transport.MockTransportClient;
/**
* A settings-based builder for bulk clients (node, transport, and mock transport).
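* <p>A usage sketch (the parameter value shown is only an example):
* <pre>{@code
* BulkTransportClient client = ClientBuilder.builder()
*     .put(Parameters.MAX_ACTIONS_PER_REQUEST, 1000)
*     .setMetric(new SimpleBulkMetric())
*     .setControl(new SimpleBulkControl())
*     .toBulkTransportClient();
* }</pre>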
*/
public final class ClientBuilder implements Parameters {
private final Settings.Builder settingsBuilder;
private BulkMetric metric;
private BulkControl control;
public ClientBuilder() {
settingsBuilder = Settings.builder();
}
public static ClientBuilder builder() {
return new ClientBuilder();
}
public ClientBuilder put(String key, String value) {
settingsBuilder.put(key, value);
return this;
}
public ClientBuilder put(String key, Integer value) {
settingsBuilder.put(key, value);
return this;
}
public ClientBuilder put(String key, Long value) {
settingsBuilder.put(key, value);
return this;
}
public ClientBuilder put(String key, Double value) {
settingsBuilder.put(key, value);
return this;
}
public ClientBuilder put(String key, ByteSizeValue value) {
settingsBuilder.put(key, value);
return this;
}
public ClientBuilder put(String key, TimeValue value) {
settingsBuilder.put(key, value);
return this;
}
public ClientBuilder put(Settings settings) {
settingsBuilder.put(settings);
return this;
}
public ClientBuilder setMetric(BulkMetric metric) {
this.metric = metric;
return this;
}
public ClientBuilder setControl(BulkControl control) {
this.control = control;
return this;
}
public BulkNodeClient toBulkNodeClient(Client client) {
Settings settings = settingsBuilder.build();
return new BulkNodeClient()
.maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST))
.maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS))
.maxVolumePerRequest(settings.getAsBytesSize(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST))
.flushIngestInterval(settings.getAsTime(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL))
.init(client, metric, control);
}
public BulkTransportClient toBulkTransportClient() {
Settings settings = settingsBuilder.build();
return new BulkTransportClient()
.maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST))
.maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS))
.maxVolumePerRequest(settings.getAsBytesSize(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST))
.flushIngestInterval(settings.getAsTime(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL))
.init(settings, metric, control);
}
public MockTransportClient toMockTransportClient() {
Settings settings = settingsBuilder.build();
return new MockTransportClient()
.maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST))
.maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS))
.maxVolumePerRequest(settings.getAsBytesSize(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST))
.flushIngestInterval(settings.getAsTime(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL))
.init(settings, metric, control);
}
}

View file

@@ -0,0 +1,391 @@
package org.xbib.elasticsearch.extras.client;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
/**
* Interface for providing convenient administrative methods for ingesting data into Elasticsearch.
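* <p>A typical ingest cycle, as a sketch (index, type, and document are made up; a node or mock transport
* client would work the same way):
* <pre>{@code
* ClientMethods methods = ClientBuilder.builder()
*     .setMetric(new SimpleBulkMetric())
*     .setControl(new SimpleBulkControl())
*     .toBulkTransportClient();
* methods.newIndex("myindex");
* methods.index("myindex", "mytype", "1", "{\"key\":\"value\"}");
* methods.flushIngest();
* methods.waitForResponses(TimeValue.timeValueSeconds(30));
* methods.shutdown();
* }</pre>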
*/
public interface ClientMethods extends Parameters {
/**
* Initialize new ingest client, wrap an existing Elasticsearch client, and set up metrics.
*
* @param client the Elasticsearch client
* @param metric metric
* @param control control
* @return this ingest
* @throws IOException if client could not get created
*/
ClientMethods init(ElasticsearchClient client, BulkMetric metric, BulkControl control) throws IOException;
/**
* Initialize, create new ingest client, and set up metrics.
*
* @param settings settings
* @param metric metric
* @param control control
* @return this ingest
* @throws IOException if client could not get created
*/
ClientMethods init(Settings settings, BulkMetric metric, BulkControl control) throws IOException;
/**
* Return Elasticsearch client.
*
* @return Elasticsearch client
*/
ElasticsearchClient client();
/**
* Index document.
*
* @param index the index
* @param type the type
* @param id the id
* @param source the source
* @return this
*/
ClientMethods index(String index, String type, String id, String source);
/**
* Delete document.
*
* @param index the index
* @param type the type
* @param id the id
* @return this ingest
*/
ClientMethods delete(String index, String type, String id);
/**
* Update document. Use with precaution! Does not work in all cases.
*
* @param index the index
* @param type the type
* @param id the id
* @param source the source
* @return this
*/
ClientMethods update(String index, String type, String id, String source);
/**
* Set the maximum number of actions per request.
*
* @param maxActionsPerRequest maximum number of actions per request
* @return this ingest
*/
ClientMethods maxActionsPerRequest(int maxActionsPerRequest);
/**
* Set the maximum number of concurrent requests.
*
* @param maxConcurrentRequests maximum number of concurrent ingest requests
* @return this ingest
*/
ClientMethods maxConcurrentRequests(int maxConcurrentRequests);
/**
* Set the maximum volume for request before flush.
*
* @param maxVolume maximum volume
* @return this ingest
*/
ClientMethods maxVolumePerRequest(ByteSizeValue maxVolume);
/**
* Set the flush interval for automatically flushing outstanding ingest requests.
*
* @param flushInterval the flush interval, default is 30 seconds
* @return this ingest
*/
ClientMethods flushIngestInterval(TimeValue flushInterval);
/**
* Set mapping.
*
* @param type mapping type
* @param in mapping definition as input stream
* @throws IOException if mapping could not be added
*/
void mapping(String type, InputStream in) throws IOException;
/**
* Set mapping.
*
* @param type mapping type
* @param mapping mapping definition as input stream
* @throws IOException if mapping could not be added
*/
void mapping(String type, String mapping) throws IOException;
/**
* Put mapping.
*
* @param index index
*/
void putMapping(String index);
/**
* Create a new index.
*
* @param index index
* @return this ingest
*/
ClientMethods newIndex(String index);
/**
* Create a new index.
*
* @param index index
* @param type type
* @param settings settings
* @param mappings mappings
* @return this ingest
* @throws IOException if new index creation fails
*/
ClientMethods newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException;
/**
* Create a new index.
*
* @param index index
* @param settings settings
* @param mappings mappings
* @return this ingest
*/
ClientMethods newIndex(String index, Settings settings, Map<String, String> mappings);
/**
* Create new mapping.
*
* @param index index
* @param type index type
* @param mapping mapping
* @return this ingest
*/
ClientMethods newMapping(String index, String type, Map<String, Object> mapping);
/**
* Delete index.
*
* @param index index
* @return this ingest
*/
ClientMethods deleteIndex(String index);
/**
* Start bulk mode.
*
* @param index index
* @param startRefreshIntervalMillis refresh interval in milliseconds while bulk indexing
* @param stopRefreshIntervalMillis refresh interval in milliseconds after bulk indexing
* @return this ingest
* @throws IOException if bulk could not be started
*/
ClientMethods startBulk(String index, long startRefreshIntervalMillis, long stopRefreshIntervalMillis) throws IOException;
/**
* Stops bulk mode.
*
* @param index index
* @return this Ingest
* @throws IOException if bulk could not be stopped
*/
ClientMethods stopBulk(String index) throws IOException;
/**
* Bulked index request. Each request will be added to a queue for bulking requests.
* The queued requests are submitted once the bulk limits are exceeded.
*
* @param indexRequest the index request to add
* @return this ingest
*/
ClientMethods bulkIndex(IndexRequest indexRequest);
/**
* Bulked delete request. Each request will be added to a queue for bulking requests.
* The queued requests are submitted once the bulk limits are exceeded.
*
* @param deleteRequest the delete request to add
* @return this ingest
*/
ClientMethods bulkDelete(DeleteRequest deleteRequest);
/**
* Bulked update request. Each request will be added to a queue for bulking requests.
* The queued requests are submitted once the bulk limits are exceeded.
* Note that updates only work correctly when all operations between nodes are synchronized!
*
* @param updateRequest the update request to add
* @return this ingest
*/
ClientMethods bulkUpdate(UpdateRequest updateRequest);
/**
* Flush ingest, move all pending documents to the cluster.
*
* @return this
*/
ClientMethods flushIngest();
/**
* Wait for all outstanding responses.
*
* @param maxWait maximum wait time
* @return this ingest
* @throws InterruptedException if wait is interrupted
* @throws ExecutionException if execution failed
*/
ClientMethods waitForResponses(TimeValue maxWait) throws InterruptedException, ExecutionException;
/**
* Refresh the index.
*
* @param index index
*/
void refreshIndex(String index);
/**
* Flush the index.
*
* @param index index
*/
void flushIndex(String index);
/**
* Update replica level.
*
* @param index index
* @param level the replica level
* @return number of shards after updating replica level
* @throws IOException if replica could not be updated
*/
int updateReplicaLevel(String index, int level) throws IOException;
/**
* Wait for cluster being healthy.
*
* @param healthColor cluster health color to wait for
* @param timeValue time value
* @throws IOException if wait failed
*/
void waitForCluster(String healthColor, TimeValue timeValue) throws IOException;
/**
* Get current health color.
*
* @return the cluster health color
*/
String healthColor();
/**
* Wait for index recovery (after replica change).
*
* @param index index
* @return number of shards found
* @throws IOException if wait failed
*/
int waitForRecovery(String index) throws IOException;
/**
* Resolve alias.
*
* @param alias the alias
* @return one index name behind the alias, or the alias itself if there is no index
*/
String resolveAlias(String alias);
/**
* Resolve alias to all connected indices, sort index names with most recent timestamp on top, return this index
* name.
*
* @param alias the alias
* @return the most recent index name pointing to the alias
*/
String resolveMostRecentIndex(String alias);
/**
* Get all alias filters.
*
* @param index index
* @return map of alias filters
*/
Map<String, String> getAliasFilters(String index);
/**
* Switch aliases from one index to another.
*
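* <p>For example, when {@code "myindex"} currently resolves to the concrete index {@code "myindex20161031"},
* switching to {@code concreteIndex = "myindex20161101"} moves all existing aliases, including their filters,
* over to the new concrete index.
*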
* @param index the index name
* @param concreteIndex the index name with timestamp
* @param extraAliases a list of names that should be set as index aliases
*/
void switchAliases(String index, String concreteIndex, List<String> extraAliases);
/**
* Switch aliases from one index to another.
*
* @param index the index name
* @param concreteIndex the index name with timestamp
* @param extraAliases a list of names that should be set as index aliases
* @param adder an adder method to create alias term queries
*/
void switchAliases(String index, String concreteIndex, List<String> extraAliases, IndexAliasAdder adder);
/**
* Apply a retention policy to an index. All indices whose timestamp lies more than {@code timestampdiff}
* behind the concrete index are deleted, but at least {@code mintokeep} indices are kept.
*
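* <p>For example, with {@code concreteIndex = "myindex20161101"}, {@code timestampdiff = 30}, and
* {@code mintokeep = 2}, every index whose name is {@code myindex} followed by a numeric suffix more than
* 30 below 20161101 is deleted, as long as at least two indices remain.
*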
* @param index index name
* @param concreteIndex index name with timestamp
* @param timestampdiff timestamp delta (for index timestamps)
* @param mintokeep minimum number of indices to keep
*/
void performRetentionPolicy(String index, String concreteIndex, int timestampdiff, int mintokeep);
/**
* Get the timestamp of the most recently indexed document in the index.
*
* @param index the index name
* @return UTC millis of the most recent document, or {@code null} if none is found
* @throws IOException if the most recent document can not be found
*/
Long mostRecentDocument(String index) throws IOException;
/**
* Get metric.
*
* @return metric
*/
BulkMetric getMetric();
/**
* Returns true if a throwable exists.
*
* @return true if a Throwable exists
*/
boolean hasThrowable();
/**
* Return last throwable if exists.
*
* @return last throwable
*/
Throwable getThrowable();
/**
* Shutdown the ingesting.
*/
void shutdown();
}

View file

@@ -0,0 +1,11 @@
package org.xbib.elasticsearch.extras.client;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
/**
* Callback for adding an index alias, for example with a custom filter, to an {@link IndicesAliasesRequestBuilder}.
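* <p>A sketch of an adder that attaches a term filter (the field name is a made-up example, and
* {@code QueryBuilders} comes from the Elasticsearch query DSL):
* <pre>{@code
* IndexAliasAdder adder = new IndexAliasAdder() {
*     @Override
*     public void addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias) {
*         builder.addAlias(index, alias, QueryBuilders.termQuery("myfield", alias));
*     }
* };
* }</pre>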
*/
public interface IndexAliasAdder {
void addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias);
}

View file

@@ -0,0 +1,264 @@
package org.xbib.elasticsearch.extras.client;
import java.io.IOException;
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Enumeration;
import java.util.List;
import java.util.Locale;
/**
* Utility methods for resolving local network interfaces and addresses.
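* <p>Resolution examples, as a sketch (actual results depend on the host's interfaces):
* <pre>{@code
* InetAddress a = NetworkUtils.resolveInetAddress("_local_", null);        // the local host address
* InetAddress b = NetworkUtils.resolveInetAddress("_non_loopback_", null); // first non-loopback address
* InetAddress c = NetworkUtils.resolveInetAddress(null, "127.0.0.1");      // falls back to the default value
* }</pre>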
*/
public class NetworkUtils {
private static final String IPv4_SETTING = "java.net.preferIPv4Stack";
private static final String IPv6_SETTING = "java.net.preferIPv6Addresses";
private static final InetAddress localAddress;
static {
InetAddress address;
try {
address = InetAddress.getLocalHost();
} catch (Throwable e) {
address = InetAddress.getLoopbackAddress();
}
localAddress = address;
}
private NetworkUtils() {
}
public static InetAddress getLocalAddress() {
return localAddress;
}
public static InetAddress getFirstNonLoopbackAddress(ProtocolVersion ipversion) throws SocketException {
InetAddress address;
for (NetworkInterface networkInterface : getNetworkInterfaces()) {
try {
if (!networkInterface.isUp() || networkInterface.isLoopback()) {
continue;
}
} catch (Exception e) {
continue;
}
address = getFirstNonLoopbackAddress(networkInterface, ipversion);
if (address != null) {
return address;
}
}
return null;
}
public static InetAddress getFirstNonLoopbackAddress(NetworkInterface networkInterface, ProtocolVersion ipVersion)
throws SocketException {
if (networkInterface == null) {
throw new IllegalArgumentException("network interface is null");
}
for (Enumeration<InetAddress> addresses = networkInterface.getInetAddresses(); addresses.hasMoreElements(); ) {
InetAddress address = addresses.nextElement();
if (!address.isLoopbackAddress()) {
if ((address instanceof Inet4Address && ipVersion == ProtocolVersion.IPv4) ||
(address instanceof Inet6Address && ipVersion == ProtocolVersion.IPv6)) {
return address;
}
}
}
return null;
}
public static InetAddress getFirstAddress(NetworkInterface networkInterface, ProtocolVersion ipVersion)
throws SocketException {
if (networkInterface == null) {
throw new IllegalArgumentException("network interface is null");
}
for (Enumeration<InetAddress> addresses = networkInterface.getInetAddresses(); addresses.hasMoreElements(); ) {
InetAddress address = addresses.nextElement();
if ((address instanceof Inet4Address && ipVersion == ProtocolVersion.IPv4) ||
(address instanceof Inet6Address && ipVersion == ProtocolVersion.IPv6)) {
return address;
}
}
return null;
}
public static List<NetworkInterface> getAllAvailableInterfaces() throws SocketException {
List<NetworkInterface> allInterfaces = new ArrayList<>();
for (Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
interfaces.hasMoreElements(); ) {
NetworkInterface networkInterface = interfaces.nextElement();
allInterfaces.add(networkInterface);
Enumeration<NetworkInterface> subInterfaces = networkInterface.getSubInterfaces();
while (subInterfaces.hasMoreElements()) {
allInterfaces.add(subInterfaces.nextElement());
}
}
sortInterfaces(allInterfaces);
return allInterfaces;
}
public static List<InetAddress> getAllAvailableAddresses() throws SocketException {
List<InetAddress> allAddresses = new ArrayList<>();
for (NetworkInterface networkInterface : getNetworkInterfaces()) {
Enumeration<InetAddress> addrs = networkInterface.getInetAddresses();
while (addrs.hasMoreElements()) {
allAddresses.add(addrs.nextElement());
}
}
sortAddresses(allAddresses);
return allAddresses;
}
public static ProtocolVersion getProtocolVersion() throws SocketException {
switch (findAvailableProtocols()) {
case IPv4:
return ProtocolVersion.IPv4;
case IPv6:
return ProtocolVersion.IPv6;
case IPv46:
if (Boolean.getBoolean(IPv4_SETTING)) {
return ProtocolVersion.IPv4;
}
if (Boolean.getBoolean(IPv6_SETTING)) {
return ProtocolVersion.IPv6;
}
return ProtocolVersion.IPv6;
}
return ProtocolVersion.NONE;
}
public static ProtocolVersion findAvailableProtocols() throws SocketException {
boolean hasIPv4 = false;
boolean hasIPv6 = false;
for (InetAddress addr : getAllAvailableAddresses()) {
if (addr instanceof Inet4Address) {
hasIPv4 = true;
}
if (addr instanceof Inet6Address) {
hasIPv6 = true;
}
}
if (hasIPv4 && hasIPv6) {
return ProtocolVersion.IPv46;
}
if (hasIPv4) {
return ProtocolVersion.IPv4;
}
if (hasIPv6) {
return ProtocolVersion.IPv6;
}
return ProtocolVersion.NONE;
}
public static InetAddress resolveInetAddress(String host, String defaultValue) throws IOException {
if (host == null) {
host = defaultValue;
}
String origHost = host;
int pos = host.indexOf(':');
if (pos > 0) {
host = host.substring(0, pos); // strip everything from the first colon (e.g. a ":port" suffix)
}
if ((host.startsWith("#") && host.endsWith("#")) || (host.startsWith("_") && host.endsWith("_"))) {
host = host.substring(1, host.length() - 1);
if (host.equals("local")) {
return getLocalAddress();
} else if (host.startsWith("non_loopback")) {
if (host.toLowerCase(Locale.ROOT).endsWith(":ipv4")) {
return getFirstNonLoopbackAddress(ProtocolVersion.IPv4);
} else if (host.toLowerCase(Locale.ROOT).endsWith(":ipv6")) {
return getFirstNonLoopbackAddress(ProtocolVersion.IPv6);
} else {
return getFirstNonLoopbackAddress(getProtocolVersion());
}
} else {
ProtocolVersion protocolVersion = getProtocolVersion();
if (host.toLowerCase(Locale.ROOT).endsWith(":ipv4")) {
protocolVersion = ProtocolVersion.IPv4;
host = host.substring(0, host.length() - 5);
} else if (host.toLowerCase(Locale.ROOT).endsWith(":ipv6")) {
protocolVersion = ProtocolVersion.IPv6;
host = host.substring(0, host.length() - 5);
}
for (NetworkInterface ni : getAllAvailableInterfaces()) {
if (!ni.isUp()) {
continue;
}
if (host.equals(ni.getName()) || host.equals(ni.getDisplayName())) {
if (ni.isLoopback()) {
return getFirstAddress(ni, protocolVersion);
} else {
return getFirstNonLoopbackAddress(ni, protocolVersion);
}
}
}
}
throw new IOException("failed to find network interface for [" + origHost + "]");
}
return InetAddress.getByName(host);
}
private static List<NetworkInterface> getNetworkInterfaces() throws SocketException {
List<NetworkInterface> networkInterfaces = new ArrayList<>();
Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
while (interfaces.hasMoreElements()) {
NetworkInterface networkInterface = interfaces.nextElement();
networkInterfaces.add(networkInterface);
Enumeration<NetworkInterface> subInterfaces = networkInterface.getSubInterfaces();
while (subInterfaces.hasMoreElements()) {
networkInterfaces.add(subInterfaces.nextElement());
}
}
sortInterfaces(networkInterfaces);
return networkInterfaces;
}
private static void sortInterfaces(List<NetworkInterface> interfaces) {
Collections.sort(interfaces, new Comparator<NetworkInterface>() {
@Override
public int compare(NetworkInterface o1, NetworkInterface o2) {
return Integer.compare(o1.getIndex(), o2.getIndex());
}
});
}
private static void sortAddresses(List<InetAddress> addressList) {
Collections.sort(addressList, new Comparator<InetAddress>() {
@Override
public int compare(InetAddress o1, InetAddress o2) {
return compareBytes(o1.getAddress(), o2.getAddress());
}
});
}
private static int compareBytes(byte[] left, byte[] right) {
for (int i = 0, j = 0; i < left.length && j < right.length; i++, j++) {
int a = (left[i] & 0xff);
int b = (right[j] & 0xff);
if (a != b) {
return a - b;
}
}
return left.length - right.length;
}
/**
*
*/
public enum ProtocolVersion {
IPv4, IPv6, IPv46, NONE
}
}

View file

@@ -0,0 +1,28 @@
package org.xbib.elasticsearch.extras.client;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
/**
* Common parameter names and default values for the bulk clients.
*/
public interface Parameters {
int DEFAULT_MAX_ACTIONS_PER_REQUEST = 1000;
int DEFAULT_MAX_CONCURRENT_REQUESTS = Runtime.getRuntime().availableProcessors() * 4;
ByteSizeValue DEFAULT_MAX_VOLUME_PER_REQUEST = new ByteSizeValue(10, ByteSizeUnit.MB);
TimeValue DEFAULT_FLUSH_INTERVAL = TimeValue.timeValueSeconds(30);
String MAX_ACTIONS_PER_REQUEST = "max_actions_per_request";
String MAX_CONCURRENT_REQUESTS = "max_concurrent_requests";
String MAX_VOLUME_PER_REQUEST = "max_volume_per_request";
String FLUSH_INTERVAL = "flush_interval";
}

View file

@@ -0,0 +1,54 @@
package org.xbib.elasticsearch.extras.client;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
* A simple bulk control that keeps the bulk mode state of indices in memory.
*/
public class SimpleBulkControl implements BulkControl {
private final Set<String> indexNames = new HashSet<>();
private final Map<String, Long> startBulkRefreshIntervals = new HashMap<>();
private final Map<String, Long> stopBulkRefreshIntervals = new HashMap<>();
@Override
public void startBulk(String indexName, long startRefreshInterval, long stopRefreshInterval) {
synchronized (indexNames) {
indexNames.add(indexName);
startBulkRefreshIntervals.put(indexName, startRefreshInterval);
stopBulkRefreshIntervals.put(indexName, stopRefreshInterval);
}
}
@Override
public boolean isBulk(String indexName) {
return indexNames.contains(indexName);
}
@Override
public void finishBulk(String indexName) {
synchronized (indexNames) {
indexNames.remove(indexName);
}
}
@Override
public Set<String> indices() {
return indexNames;
}
@Override
public Map<String, Long> getStartBulkRefreshIntervals() {
return startBulkRefreshIntervals;
}
@Override
public Map<String, Long> getStopBulkRefreshIntervals() {
return stopBulkRefreshIntervals;
}
}

View file

@@ -0,0 +1,82 @@
package org.xbib.elasticsearch.extras.client;
import org.xbib.metrics.Count;
import org.xbib.metrics.CountMetric;
import org.xbib.metrics.Meter;
import org.xbib.metrics.Metered;
/**
* A simple bulk metric implementation based on counters and a meter.
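* <p>A standalone sketch (normally a bulk client drives these calls):
* <pre>{@code
* BulkMetric metric = new SimpleBulkMetric();
* metric.start();
* metric.getSubmitted().inc(100);
* long nanos = metric.elapsed(); // elapsed time so far, in nanoseconds
* metric.stop();
* }</pre>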
*/
public class SimpleBulkMetric implements BulkMetric {
private final Meter totalIngest = new Meter();
private final Count totalIngestSizeInBytes = new CountMetric();
private final Count currentIngest = new CountMetric();
private final Count currentIngestNumDocs = new CountMetric();
private final Count submitted = new CountMetric();
private final Count succeeded = new CountMetric();
private final Count failed = new CountMetric();
private Long started;
private Long stopped;
@Override
public Metered getTotalIngest() {
return totalIngest;
}
@Override
public Count getTotalIngestSizeInBytes() {
return totalIngestSizeInBytes;
}
@Override
public Count getCurrentIngest() {
return currentIngest;
}
@Override
public Count getCurrentIngestNumDocs() {
return currentIngestNumDocs;
}
@Override
public Count getSubmitted() {
return submitted;
}
@Override
public Count getSucceeded() {
return succeeded;
}
@Override
public Count getFailed() {
return failed;
}
@Override
public void start() {
this.started = System.nanoTime();
this.totalIngest.spawn(5L);
}
@Override
public void stop() {
this.stopped = System.nanoTime();
totalIngest.stop();
}
@Override
public long elapsed() {
return (stopped != null ? stopped : System.nanoTime()) - started;
}
}

View file

@@ -0,0 +1,502 @@
package org.xbib.elasticsearch.extras.client.node;
import com.google.common.collect.ImmutableSet;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import org.xbib.elasticsearch.extras.client.AbstractClient;
import org.xbib.elasticsearch.extras.client.BulkControl;
import org.xbib.elasticsearch.extras.client.BulkMetric;
import org.xbib.elasticsearch.extras.client.ClientMethods;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
/**
* A bulk client that wraps a node client and feeds a bulk processor.
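* <p>A sketch of wrapping an existing client (the {@code existingClient} variable is an assumption):
* <pre>{@code
* BulkNodeClient bulkClient = ClientBuilder.builder()
*     .setMetric(new SimpleBulkMetric())
*     .setControl(new SimpleBulkControl())
*     .toBulkNodeClient(existingClient);
* bulkClient.index("myindex", "mytype", "1", "{\"key\":\"value\"}");
* bulkClient.flushIngest();
* bulkClient.shutdown();
* }</pre>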
*/
public class BulkNodeClient extends AbstractClient implements ClientMethods {
private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeClient.class.getName());
private int maxActionsPerRequest = DEFAULT_MAX_ACTIONS_PER_REQUEST;
private int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS;
private ByteSizeValue maxVolume = DEFAULT_MAX_VOLUME_PER_REQUEST;
private TimeValue flushInterval = DEFAULT_FLUSH_INTERVAL;
private ElasticsearchClient client;
private BulkProcessor bulkProcessor;
private BulkMetric metric;
private BulkControl control;
private Throwable throwable;
private boolean closed;
public BulkNodeClient() {
}
@Override
public BulkNodeClient maxActionsPerRequest(int maxActionsPerRequest) {
this.maxActionsPerRequest = maxActionsPerRequest;
return this;
}
@Override
public BulkNodeClient maxConcurrentRequests(int maxConcurrentRequests) {
this.maxConcurrentRequests = maxConcurrentRequests;
return this;
}
@Override
public BulkNodeClient maxVolumePerRequest(ByteSizeValue maxVolume) {
this.maxVolume = maxVolume;
return this;
}
@Override
public BulkNodeClient flushIngestInterval(TimeValue flushInterval) {
this.flushInterval = flushInterval;
return this;
}
@Override
public BulkNodeClient init(ElasticsearchClient client,
final BulkMetric metric, final BulkControl control) {
this.client = client;
this.metric = metric;
this.control = control;
if (metric != null) {
metric.start();
}
BulkProcessor.Listener listener = new BulkProcessor.Listener() {
@Override
public void beforeBulk(long executionId, BulkRequest request) {
long l = -1;
if (metric != null) {
metric.getCurrentIngest().inc();
l = metric.getCurrentIngest().getCount();
int n = request.numberOfActions();
metric.getSubmitted().inc(n);
metric.getCurrentIngestNumDocs().inc(n);
metric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes());
}
logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]",
executionId,
request.numberOfActions(),
request.estimatedSizeInBytes(),
l);
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
long l = -1;
if (metric != null) {
metric.getCurrentIngest().dec();
l = metric.getCurrentIngest().getCount();
metric.getSucceeded().inc(response.getItems().length);
}
int n = 0;
for (BulkItemResponse itemResponse : response.getItems()) {
if (metric != null) {
metric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId());
}
if (itemResponse.isFailed()) {
n++;
if (metric != null) {
metric.getSucceeded().dec(1);
metric.getFailed().inc(1);
}
}
}
if (metric != null) {
logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests",
executionId,
metric.getSucceeded().getCount(),
metric.getFailed().getCount(),
response.getTook().millis(),
l);
}
if (n > 0) {
logger.error("bulk [{}] failed with {} failed items, failure message = {}",
executionId, n, response.buildFailureMessage());
} else {
if (metric != null) {
metric.getCurrentIngestNumDocs().dec(response.getItems().length);
}
}
}
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
if (metric != null) {
metric.getCurrentIngest().dec();
}
throwable = failure;
closed = true;
logger.error("after bulk [" + executionId + "] error", failure);
}
};
BulkProcessor.Builder builder = BulkProcessor.builder((Client) client, listener)
.setBulkActions(maxActionsPerRequest)
.setConcurrentRequests(maxConcurrentRequests)
.setFlushInterval(flushInterval);
if (maxVolume != null) {
builder.setBulkSize(maxVolume);
}
this.bulkProcessor = builder.build();
this.closed = false;
return this;
}
@Override
public BulkNodeClient init(Settings settings, BulkMetric metric, BulkControl control) throws IOException {
createClient(settings);
this.metric = metric;
this.control = control;
return this;
}
@Override
public ElasticsearchClient client() {
return client;
}
@Override
protected void createClient(Settings settings) throws IOException {
if (client != null) {
logger.warn("client is open, closing...");
client.threadPool().shutdown();
logger.warn("client is closed");
client = null;
}
if (settings != null) {
String version = System.getProperty("os.name")
+ " " + System.getProperty("java.vm.name")
+ " " + System.getProperty("java.vm.vendor")
+ " " + System.getProperty("java.runtime.version")
+ " " + System.getProperty("java.vm.version");
Settings effectiveSettings = Settings.builder().put(settings)
.put("node.client", true)
.put("node.master", false)
.put("node.data", false).build();
logger.info("creating node client on {} with effective settings {}",
version, effectiveSettings.getAsMap());
Collection<Class<? extends Plugin>> plugins = Collections.emptyList();
Node node = new BulkNode(new Environment(effectiveSettings), plugins);
node.start();
this.client = node.client();
}
}
@Override
public BulkMetric getMetric() {
return metric;
}
@Override
public BulkNodeClient index(String index, String type, String id, String source) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(index, type, id);
}
bulkProcessor.add(new IndexRequest(index).type(type).id(id).create(false).source(source));
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of index request failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkNodeClient bulkIndex(IndexRequest indexRequest) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id());
}
bulkProcessor.add(indexRequest);
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of index request failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkNodeClient delete(String index, String type, String id) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(index, type, id);
}
bulkProcessor.add(new DeleteRequest(index).type(type).id(id));
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of delete failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkNodeClient bulkDelete(DeleteRequest deleteRequest) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
}
bulkProcessor.add(deleteRequest);
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of delete failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkNodeClient update(String index, String type, String id, String source) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(index, type, id);
}
bulkProcessor.add(new UpdateRequest().index(index).type(type).id(id).upsert(source));
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of update request failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkNodeClient bulkUpdate(UpdateRequest updateRequest) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id());
}
bulkProcessor.add(updateRequest);
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of update request failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkNodeClient flushIngest() {
if (closed) {
throw new ElasticsearchException("client is closed");
}
logger.debug("flushing bulk processor");
bulkProcessor.flush();
return this;
}
@Override
public BulkNodeClient waitForResponses(TimeValue maxWaitTime) throws InterruptedException, ExecutionException {
if (closed) {
throw new ElasticsearchException("client is closed");
}
while (!bulkProcessor.awaitClose(maxWaitTime.getMillis(), TimeUnit.MILLISECONDS)) {
logger.warn("still waiting for responses");
}
return this;
}
@Override
public BulkNodeClient startBulk(String index, long startRefreshIntervalMillis, long stopRefreshIntervalMillis)
throws IOException {
if (control == null) {
return this;
}
if (!control.isBulk(index)) {
control.startBulk(index, startRefreshIntervalMillis, stopRefreshIntervalMillis);
updateIndexSetting(index, "refresh_interval", startRefreshIntervalMillis + "ms");
}
return this;
}
@Override
public BulkNodeClient stopBulk(String index) throws IOException {
if (control == null) {
return this;
}
if (control.isBulk(index)) {
updateIndexSetting(index, "refresh_interval", control.getStopBulkRefreshIntervals().get(index) + "ms");
control.finishBulk(index);
}
return this;
}
@Override
public synchronized void shutdown() {
try {
if (bulkProcessor != null) {
logger.debug("closing bulk processor...");
bulkProcessor.close();
}
if (control != null && control.indices() != null && !control.indices().isEmpty()) {
logger.debug("stopping bulk mode for indices {}...", control.indices());
for (String index : ImmutableSet.copyOf(control.indices())) {
stopBulk(index);
}
metric.stop();
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
@Override
public BulkNodeClient newIndex(String index) {
return newIndex(index, null, null);
}
@Override
public BulkNodeClient newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException {
resetSettings();
setting(settings);
mapping(type, mappings);
return newIndex(index, settings(), mappings());
}
@Override
public BulkNodeClient newIndex(String index, Settings settings, Map<String, String> mappings) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
if (client == null) {
logger.warn("no client for create index");
return this;
}
if (index == null) {
logger.warn("no index name given to create index");
return this;
}
CreateIndexRequestBuilder createIndexRequestBuilder =
new CreateIndexRequestBuilder(client(), CreateIndexAction.INSTANCE).setIndex(index);
if (settings != null) {
logger.info("settings = {}", settings.getAsStructuredMap());
createIndexRequestBuilder.setSettings(settings);
}
if (mappings != null) {
for (String type : mappings.keySet()) {
logger.info("found mapping for {}", type);
createIndexRequestBuilder.addMapping(type, mappings.get(type));
}
}
createIndexRequestBuilder.execute().actionGet();
logger.info("index {} created", index);
return this;
}
@Override
public BulkNodeClient newMapping(String index, String type, Map<String, Object> mapping) {
PutMappingRequestBuilder putMappingRequestBuilder =
new PutMappingRequestBuilder(client(), PutMappingAction.INSTANCE)
.setIndices(index)
.setType(type)
.setSource(mapping);
putMappingRequestBuilder.execute().actionGet();
logger.info("mapping created for index {} and type {}", index, type);
return this;
}
@Override
public BulkNodeClient deleteIndex(String index) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
if (client == null) {
logger.warn("no client");
return this;
}
if (index == null) {
logger.warn("no index name given to delete index");
return this;
}
DeleteIndexRequestBuilder deleteIndexRequestBuilder =
new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, index);
deleteIndexRequestBuilder.execute().actionGet();
return this;
}
@Override
public boolean hasThrowable() {
return throwable != null;
}
@Override
public Throwable getThrowable() {
return throwable;
}
public Settings getSettings() {
return settings();
}
public Settings.Builder getSettingsBuilder() {
return settingsBuilder();
}
private class BulkNode extends Node {
BulkNode(Environment env, Collection<Class<? extends Plugin>> classpathPlugins) {
super(env, Version.CURRENT, classpathPlugins);
}
}
}

View file

@@ -0,0 +1,4 @@
/**
* Classes for Elasticsearch node client extras.
*/
package org.xbib.elasticsearch.extras.client.node;

View file

@@ -0,0 +1,4 @@
/**
* Classes for Elasticsearch client extras.
*/
package org.xbib.elasticsearch.extras.client;

View file

@@ -0,0 +1,581 @@
package org.xbib.elasticsearch.extras.client.transport;
import com.google.common.collect.ImmutableSet;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.xbib.elasticsearch.extras.client.AbstractClient;
import org.xbib.elasticsearch.extras.client.BulkProcessor;
import org.xbib.elasticsearch.extras.client.BulkMetric;
import org.xbib.elasticsearch.extras.client.BulkControl;
import org.xbib.elasticsearch.extras.client.ClientMethods;
import org.xbib.elasticsearch.extras.client.NetworkUtils;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
/**
* Transport client with additional methods using the BulkProcessor.
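* <p>A connection sketch (the cluster name is a placeholder; address discovery depends on the settings
* passed to {@code init}):
* <pre>{@code
* BulkTransportClient client = ClientBuilder.builder()
*     .put("cluster.name", "mycluster")
*     .setMetric(new SimpleBulkMetric())
*     .setControl(new SimpleBulkControl())
*     .toBulkTransportClient();
* }</pre>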
*/
public class BulkTransportClient extends AbstractClient implements ClientMethods {
private static final ESLogger logger = ESLoggerFactory.getLogger(BulkTransportClient.class.getName());
private int maxActionsPerRequest = DEFAULT_MAX_ACTIONS_PER_REQUEST;
private int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS;
private ByteSizeValue maxVolumePerRequest = DEFAULT_MAX_VOLUME_PER_REQUEST;
private TimeValue flushInterval = DEFAULT_FLUSH_INTERVAL;
private BulkProcessor bulkProcessor;
private Throwable throwable;
private boolean closed;
private TransportClient client;
private BulkMetric metric;
private BulkControl control;
private boolean ignoreBulkErrors;
private boolean isShutdown;
public BulkTransportClient() {
}
@Override
public BulkTransportClient init(ElasticsearchClient client, BulkMetric metric, BulkControl control) throws IOException {
return init(findSettings(), metric, control);
}
@Override
public BulkTransportClient init(Settings settings, final BulkMetric metric, final BulkControl control) {
createClient(settings);
this.metric = metric;
this.control = control;
if (metric != null) {
metric.start();
}
resetSettings();
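// the listener feeds the bulk metrics and records the last failure;
// unless ignoreBulkErrors is set, a failed bulk request closes this client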
BulkProcessor.Listener listener = new BulkProcessor.Listener() {
@Override
public void beforeBulk(long executionId, BulkRequest request) {
long l = -1L;
if (metric != null) {
metric.getCurrentIngest().inc();
l = metric.getCurrentIngest().getCount();
int n = request.numberOfActions();
metric.getSubmitted().inc(n);
metric.getCurrentIngestNumDocs().inc(n);
metric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes());
}
logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]",
executionId,
request.numberOfActions(),
request.estimatedSizeInBytes(),
l);
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
long l = -1L;
if (metric != null) {
metric.getCurrentIngest().dec();
l = metric.getCurrentIngest().getCount();
metric.getSucceeded().inc(response.getItems().length);
}
int n = 0;
for (BulkItemResponse itemResponse : response.getItems()) {
if (metric != null) {
metric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId());
if (itemResponse.isFailed()) {
n++;
metric.getSucceeded().dec(1);
metric.getFailed().inc(1);
}
}
}
if (metric != null) {
logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] [concurrent requests={}]",
executionId,
metric.getSucceeded().getCount(),
metric.getFailed().getCount(),
response.getTook().millis(),
l);
}
if (n > 0) {
logger.error("bulk [{}] failed with {} failed items, failure message = {}",
executionId, n, response.buildFailureMessage());
} else {
if (metric != null) {
metric.getCurrentIngestNumDocs().dec(response.getItems().length);
}
}
}
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
if (metric != null) {
metric.getCurrentIngest().dec();
}
throwable = failure;
if (!ignoreBulkErrors) {
closed = true;
}
logger.error("bulk [" + executionId + "] error", failure);
}
};
BulkProcessor.Builder builder = BulkProcessor.builder(client, listener)
.setBulkActions(maxActionsPerRequest)
.setConcurrentRequests(maxConcurrentRequests)
.setFlushInterval(flushInterval);
if (maxVolumePerRequest != null) {
builder.setBulkSize(maxVolumePerRequest);
}
this.bulkProcessor = builder.build();
try {
Collection<InetSocketTransportAddress> addrs = findAddresses(settings);
if (!connect(addrs, settings.getAsBoolean("autodiscover", false))) {
throw new NoNodeAvailableException("no cluster nodes available, check settings "
+ settings.getAsMap());
}
} catch (IOException e) {
logger.error(e.getMessage(), e);
}
this.closed = false;
return this;
}
@Override
public ClientMethods newMapping(String index, String type, Map<String, Object> mapping) {
new PutMappingRequestBuilder(client(), PutMappingAction.INSTANCE)
.setIndices(index)
.setType(type)
.setSource(mapping)
.execute().actionGet();
logger.info("mapping created for index {} and type {}", index, type);
return this;
}
@Override
protected void createClient(Settings settings) {
if (client != null) {
logger.warn("client is open, closing...");
client.close();
client.threadPool().shutdown();
logger.warn("client is closed");
client = null;
}
if (settings != null) {
String systemInfo = System.getProperty("os.name")
+ " " + System.getProperty("java.vm.name")
+ " " + System.getProperty("java.vm.vendor")
+ " " + System.getProperty("java.runtime.version")
+ " " + System.getProperty("java.vm.version");
logger.info("creating transport client on {} with effective settings {}",
systemInfo, settings.getAsMap());
this.client = TransportClient.builder()
.settings(settings)
.build();
this.ignoreBulkErrors = settings.getAsBoolean("ignoreBulkErrors", true);
}
}
public boolean isShutdown() {
return isShutdown;
}
@Override
public BulkTransportClient maxActionsPerRequest(int maxActionsPerRequest) {
this.maxActionsPerRequest = maxActionsPerRequest;
return this;
}
@Override
public BulkTransportClient maxConcurrentRequests(int maxConcurrentRequests) {
this.maxConcurrentRequests = maxConcurrentRequests;
return this;
}
@Override
public BulkTransportClient maxVolumePerRequest(ByteSizeValue maxVolumePerRequest) {
this.maxVolumePerRequest = maxVolumePerRequest;
return this;
}
@Override
public BulkTransportClient flushIngestInterval(TimeValue flushInterval) {
this.flushInterval = flushInterval;
return this;
}
@Override
public ElasticsearchClient client() {
return client;
}
@Override
public BulkMetric getMetric() {
return metric;
}
@Override
public ClientMethods newIndex(String index) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
return newIndex(index, null, null);
}
@Override
public ClientMethods newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException {
resetSettings();
setting(settings);
mapping(type, mappings);
return newIndex(index, settings(), mappings());
}
@Override
public ClientMethods newIndex(String index, Settings settings, Map<String, String> mappings) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
if (client == null) {
logger.warn("no client for create index");
return this;
}
if (index == null) {
logger.warn("no index name given to create index");
return this;
}
CreateIndexRequestBuilder createIndexRequestBuilder =
new CreateIndexRequestBuilder(client(), CreateIndexAction.INSTANCE).setIndex(index);
if (settings != null) {
logger.info("settings = {}", settings.getAsStructuredMap());
createIndexRequestBuilder.setSettings(settings);
}
if (mappings != null) {
for (String type : mappings.keySet()) {
logger.info("found mapping for {}", type);
createIndexRequestBuilder.addMapping(type, mappings.get(type));
}
}
createIndexRequestBuilder.execute().actionGet();
logger.info("index {} created", index);
return this;
}
@Override
public ClientMethods deleteIndex(String index) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
if (client == null) {
logger.warn("no client for delete index");
return this;
}
if (index == null) {
logger.warn("no index name given to delete index");
return this;
}
new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, index).execute().actionGet();
return this;
}
@Override
public ClientMethods startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds)
throws IOException {
if (control == null) {
return this;
}
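// enter bulk mode: remember the index and relax its refresh interval; stopBulk(String) restores it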
if (!control.isBulk(index)) {
control.startBulk(index, startRefreshIntervalSeconds, stopRefreshIntervalSeconds);
updateIndexSetting(index, "refresh_interval", startRefreshIntervalSeconds + "s");
}
return this;
}
@Override
public ClientMethods stopBulk(String index) throws IOException {
if (control == null) {
return this;
}
if (control.isBulk(index)) {
updateIndexSetting(index, "refresh_interval", control.getStopBulkRefreshIntervals().get(index) + "s");
control.finishBulk(index);
}
return this;
}
@Override
public BulkTransportClient index(String index, String type, String id, String source) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(index, type, id);
}
bulkProcessor.add(new IndexRequest().index(index).type(type).id(id).create(false).source(source));
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of index request failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkTransportClient bulkIndex(IndexRequest indexRequest) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id());
}
bulkProcessor.add(indexRequest);
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of index request failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkTransportClient delete(String index, String type, String id) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(index, type, id);
}
bulkProcessor.add(new DeleteRequest().index(index).type(type).id(id));
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of delete request failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkTransportClient bulkDelete(DeleteRequest deleteRequest) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
}
bulkProcessor.add(deleteRequest);
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of delete request failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkTransportClient update(String index, String type, String id, String source) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(index, type, id);
}
bulkProcessor.add(new UpdateRequest().index(index).type(type).id(id).upsert(source));
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of update request failed: " + e.getMessage(), e);
}
return this;
}
@Override
public BulkTransportClient bulkUpdate(UpdateRequest updateRequest) {
if (closed) {
throw new ElasticsearchException("client is closed");
}
try {
if (metric != null) {
metric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id());
}
bulkProcessor.add(updateRequest);
} catch (Exception e) {
throwable = e;
closed = true;
logger.error("bulk add of update request failed: " + e.getMessage(), e);
}
return this;
}
@Override
public synchronized BulkTransportClient flushIngest() {
if (closed) {
throw new ElasticsearchException("client is closed");
}
if (client == null) {
logger.warn("no client");
return this;
}
logger.debug("flushing bulk processor");
bulkProcessor.flush();
return this;
}
@Override
public synchronized BulkTransportClient waitForResponses(TimeValue maxWaitTime)
throws InterruptedException, ExecutionException {
if (closed) {
throw new ElasticsearchException("client is closed");
}
if (client == null) {
logger.warn("no client");
return this;
}
bulkProcessor.awaitClose(maxWaitTime.getMillis(), TimeUnit.MILLISECONDS);
return this;
}
@Override
public synchronized void shutdown() {
if (closed) {
shutdownClient();
throw new ElasticsearchException("client is closed");
}
if (client == null) {
logger.warn("no client");
return;
}
try {
if (bulkProcessor != null) {
logger.debug("closing bulk processor...");
bulkProcessor.close();
}
if (control != null && control.indices() != null && !control.indices().isEmpty()) {
logger.debug("stopping bulk mode for indices {}...", control.indices());
for (String index : ImmutableSet.copyOf(control.indices())) {
stopBulk(index);
}
metric.stop();
}
logger.debug("shutting down...");
shutdownClient();
logger.debug("shutting down completed");
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
@Override
public boolean hasThrowable() {
return throwable != null;
}
@Override
public Throwable getThrowable() {
return throwable;
}
private Settings findSettings() {
Settings.Builder settingsBuilder = Settings.settingsBuilder();
settingsBuilder.put("host", "localhost");
try {
String hostname = NetworkUtils.getLocalAddress().getHostName();
logger.debug("the hostname is {}", hostname);
settingsBuilder.put("host", hostname)
.put("port", 9300);
} catch (Exception e) {
logger.warn(e.getMessage(), e);
}
return settingsBuilder.build();
}
private Collection<InetSocketTransportAddress> findAddresses(Settings settings) throws IOException {
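// each host may be given as "name" or "name:port"; an explicit per-host port overrides the global "port" setting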
String[] hostnames = settings.getAsArray("host", new String[]{"localhost"});
int port = settings.getAsInt("port", 9300);
Collection<InetSocketTransportAddress> addresses = new ArrayList<>();
for (String hostname : hostnames) {
String[] splitHost = hostname.split(":", 2);
if (splitHost.length == 2) {
String host = splitHost[0];
InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null);
try {
port = Integer.parseInt(splitHost[1]);
} catch (Exception e) {
// not a number; keep the configured default port
}
addresses.add(new InetSocketTransportAddress(inetAddress, port));
}
if (splitHost.length == 1) {
String host = splitHost[0];
InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null);
addresses.add(new InetSocketTransportAddress(inetAddress, port));
}
}
return addresses;
}
private void shutdownClient() {
if (client != null) {
logger.debug("shutdown started");
client.close();
client.threadPool().shutdown();
client = null;
logger.debug("shutdown complete");
}
isShutdown = true;
}
private boolean connect(Collection<InetSocketTransportAddress> addresses, boolean autodiscover) {
logger.info("trying to connect to {}", addresses);
client.addTransportAddresses(addresses);
if (client.connectedNodes() != null) {
List<DiscoveryNode> nodes = client.connectedNodes();
if (!nodes.isEmpty()) {
logger.info("connected to {}", nodes);
if (autodiscover) {
logger.info("trying to auto-discover all cluster nodes...");
ClusterStateRequestBuilder clusterStateRequestBuilder =
new ClusterStateRequestBuilder(client, ClusterStateAction.INSTANCE);
ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet();
DiscoveryNodes discoveryNodes = clusterStateResponse.getState().getNodes();
client.addDiscoveryNodes(discoveryNodes);
logger.info("after auto-discovery connected to {}", client.connectedNodes());
}
return true;
}
return false;
}
return false;
}
}


@@ -0,0 +1,156 @@
package org.xbib.elasticsearch.extras.client.transport;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.xbib.elasticsearch.extras.client.BulkControl;
import org.xbib.elasticsearch.extras.client.BulkMetric;
import java.io.IOException;
import java.util.Map;
/**
* Mock client; it does not perform any actions on a cluster.
* Useful for testing or dry runs.
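*
* <p>A minimal dry-run sketch; the document values are hypothetical and no
* cluster is contacted:</p>
* <pre>{@code
* MockTransportClient mock = new MockTransportClient().init(Settings.EMPTY, null, null);
* mock.index("myindex", "mytype", "1", "{\"field\":\"value\"}");
* mock.shutdown();
* }</pre>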
*/
public class MockTransportClient extends BulkTransportClient {
public MockTransportClient() {
}
@Override
public ElasticsearchClient client() {
return null;
}
@Override
public MockTransportClient init(ElasticsearchClient client, BulkMetric metric, BulkControl control) {
return this;
}
@Override
public MockTransportClient init(Settings settings, BulkMetric metric, BulkControl control) {
return this;
}
@Override
public MockTransportClient maxActionsPerRequest(int maxActions) {
return this;
}
@Override
public MockTransportClient maxConcurrentRequests(int maxConcurrentRequests) {
return this;
}
@Override
public MockTransportClient maxVolumePerRequest(ByteSizeValue maxVolumePerRequest) {
return this;
}
@Override
public MockTransportClient flushIngestInterval(TimeValue interval) {
return this;
}
@Override
public MockTransportClient index(String index, String type, String id, String source) {
return this;
}
@Override
public MockTransportClient delete(String index, String type, String id) {
return this;
}
@Override
public MockTransportClient update(String index, String type, String id, String source) {
return this;
}
@Override
public MockTransportClient bulkIndex(IndexRequest indexRequest) {
return this;
}
@Override
public MockTransportClient bulkDelete(DeleteRequest deleteRequest) {
return this;
}
@Override
public MockTransportClient bulkUpdate(UpdateRequest updateRequest) {
return this;
}
@Override
public MockTransportClient flushIngest() {
return this;
}
@Override
public MockTransportClient waitForResponses(TimeValue timeValue) throws InterruptedException {
return this;
}
@Override
public MockTransportClient startBulk(String index, long startRefreshInterval, long stopRefreshInterval) {
return this;
}
@Override
public MockTransportClient stopBulk(String index) {
return this;
}
@Override
public MockTransportClient deleteIndex(String index) {
return this;
}
@Override
public MockTransportClient newIndex(String index) {
return this;
}
@Override
public MockTransportClient newMapping(String index, String type, Map<String, Object> mapping) {
return this;
}
@Override
public void putMapping(String index) {
}
@Override
public void refreshIndex(String index) {
}
@Override
public void flushIndex(String index) {
}
@Override
public void waitForCluster(String healthColor, TimeValue timeValue) throws IOException {
}
@Override
public int waitForRecovery(String index) throws IOException {
return -1;
}
@Override
public int updateReplicaLevel(String index, int level) throws IOException {
return -1;
}
@Override
public void shutdown() {
// do nothing
}
}


@@ -0,0 +1,517 @@
package org.xbib.elasticsearch.extras.client.transport;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionModule;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.GenericAction;
import org.elasticsearch.action.TransportActionNodeProxy;
import org.elasticsearch.action.admin.cluster.node.liveness.LivenessRequest;
import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse;
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.client.support.AbstractClient;
import org.elasticsearch.client.support.Headers;
import org.elasticsearch.client.transport.ClientTransportModule;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterNameModule;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsModule;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolModule;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.FutureTransportResponseHandler;
import org.elasticsearch.transport.TransportModule;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Stripped-down transport client without node sampling.
* Merges the original TransportClient, TransportClientNodesService, and TransportProxyClient
* into a single class, and adds a configurable ping interval setting ("ping.interval").
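*
* <p>A minimal construction sketch (cluster name and address are hypothetical,
* exception handling omitted):</p>
* <pre>{@code
* TransportClient client = TransportClient.builder()
* .settings(Settings.settingsBuilder()
* .put("cluster.name", "elasticsearch")
* .put("ping.interval", "10s"))
* .build();
* client.addTransportAddresses(Collections.singletonList(
* new InetSocketTransportAddress(InetAddress.getByName("127.0.0.1"), 9300)));
* }</pre>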
*/
public class TransportClient extends AbstractClient {
private static final String CLIENT_TYPE = "transport";
private final Injector injector;
private final ProxyActionMap proxyActionMap;
private final long pingTimeout;
private final ClusterName clusterName;
private final TransportService transportService;
private final Version minCompatibilityVersion;
private final Headers headers;
private final AtomicInteger tempNodeId = new AtomicInteger();
private final AtomicInteger nodeCounter = new AtomicInteger();
private final Object mutex = new Object();
private volatile List<DiscoveryNode> listedNodes = Collections.emptyList();
private volatile List<DiscoveryNode> nodes = Collections.emptyList();
private volatile List<DiscoveryNode> filteredNodes = Collections.emptyList();
private volatile boolean closed;
private TransportClient(Injector injector) {
super(injector.getInstance(Settings.class), injector.getInstance(ThreadPool.class),
injector.getInstance(Headers.class));
this.injector = injector;
this.clusterName = injector.getInstance(ClusterName.class);
this.transportService = injector.getInstance(TransportService.class);
this.minCompatibilityVersion = injector.getInstance(Version.class).minimumCompatibilityVersion();
this.headers = injector.getInstance(Headers.class);
this.pingTimeout = this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis();
this.proxyActionMap = injector.getInstance(ProxyActionMap.class);
}
public static Builder builder() {
return new Builder();
}
/**
* Returns the currently registered transport addresses to use.
*
* @return list of transport addresses
*/
public List<TransportAddress> transportAddresses() {
List<TransportAddress> lstBuilder = new ArrayList<>();
for (DiscoveryNode listedNode : listedNodes) {
lstBuilder.add(listedNode.address());
}
return Collections.unmodifiableList(lstBuilder);
}
/**
* Returns the current connected transport nodes that this client will use.
* The nodes include all the nodes that are currently alive based on the transport
* addresses provided.
*
* @return list of nodes
*/
public List<DiscoveryNode> connectedNodes() {
return this.nodes;
}
/**
* The list of filtered nodes that were not connected to, for example, due to
* mismatch in cluster name.
*
* @return list of nodes
*/
public List<DiscoveryNode> filteredNodes() {
return this.filteredNodes;
}
/**
* Returns the listed nodes in the transport client (ones added to it).
*
* @return list of nodes
*/
public List<DiscoveryNode> listedNodes() {
return this.listedNodes;
}
/**
* Adds a list of transport addresses that will be used to connect to.
* The node a transport address represents will be used if it is possible to connect to it.
* If it is unavailable, it will be automatically connected to once it is up.
* In order to get the list of all the current connected nodes, please see {@link #connectedNodes()}.
*
* @param discoveryNodes nodes
* @return this transport client
*/
public TransportClient addDiscoveryNodes(DiscoveryNodes discoveryNodes) {
Collection<InetSocketTransportAddress> addresses = new ArrayList<>();
for (DiscoveryNode discoveryNode : discoveryNodes) {
addresses.add((InetSocketTransportAddress) discoveryNode.address());
}
addTransportAddresses(addresses);
return this;
}
public TransportClient addTransportAddresses(Collection<InetSocketTransportAddress> transportAddresses) {
synchronized (mutex) {
if (closed) {
throw new IllegalStateException("transport client is closed, can't add addresses");
}
List<TransportAddress> filtered = new ArrayList<>(transportAddresses.size());
for (TransportAddress transportAddress : transportAddresses) {
boolean found = false;
for (DiscoveryNode otherNode : listedNodes) {
if (otherNode.address().equals(transportAddress)) {
found = true;
logger.debug("address [{}] already exists with [{}], ignoring...", transportAddress, otherNode);
break;
}
}
if (!found) {
filtered.add(transportAddress);
}
}
if (filtered.isEmpty()) {
return this;
}
List<DiscoveryNode> discoveryNodeList = new ArrayList<>();
discoveryNodeList.addAll(listedNodes());
for (TransportAddress transportAddress : filtered) {
DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeId.incrementAndGet(), transportAddress,
minCompatibilityVersion);
logger.debug("adding address [{}]", node);
discoveryNodeList.add(node);
}
listedNodes = Collections.unmodifiableList(discoveryNodeList);
connect();
}
return this;
}
/**
* Removes a transport address from the list of transport addresses that are used to connect to.
*
* @param transportAddress transport address to remove
* @return this transport client
*/
public TransportClient removeTransportAddress(TransportAddress transportAddress) {
synchronized (mutex) {
if (closed) {
throw new IllegalStateException("transport client is closed, can't remove an address");
}
List<DiscoveryNode> builder = new ArrayList<>();
for (DiscoveryNode otherNode : listedNodes) {
if (!otherNode.address().equals(transportAddress)) {
builder.add(otherNode);
} else {
logger.debug("removing address [{}]", otherNode);
}
}
listedNodes = Collections.unmodifiableList(builder);
}
return this;
}
@Override
@SuppressWarnings("rawtypes")
public void close() {
synchronized (mutex) {
if (closed) {
return;
}
closed = true;
for (DiscoveryNode node : nodes) {
transportService.disconnectFromNode(node);
}
for (DiscoveryNode listedNode : listedNodes) {
transportService.disconnectFromNode(listedNode);
}
nodes = Collections.emptyList();
}
injector.getInstance(TransportService.class).close();
try {
injector.getInstance(MonitorService.class).close();
} catch (Exception e) {
// ignore, the monitor service might not be bound
}
for (Class<? extends LifecycleComponent> plugin : injector.getInstance(PluginsService.class).nodeServices()) {
injector.getInstance(plugin).close();
}
try {
ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS);
} catch (Exception e) {
// ignore
}
injector.getInstance(PageCacheRecycler.class).close();
}
private void connect() {
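// ping every listed node, filter out nodes that belong to a different cluster, then open full connections to the rest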
Set<DiscoveryNode> newNodes = new HashSet<>();
Set<DiscoveryNode> newFilteredNodes = new HashSet<>();
for (DiscoveryNode listedNode : listedNodes) {
if (!transportService.nodeConnected(listedNode)) {
try {
logger.trace("connecting to listed node (light) [{}]", listedNode);
transportService.connectToNodeLight(listedNode);
} catch (Throwable e) {
logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode);
continue;
}
}
try {
LivenessResponse livenessResponse = transportService.submitRequest(listedNode,
TransportLivenessAction.NAME, headers.applyTo(new LivenessRequest()),
TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE)
.withTimeout(pingTimeout).build(),
new FutureTransportResponseHandler<LivenessResponse>() {
@Override
public LivenessResponse newInstance() {
return new LivenessResponse();
}
}).txGet();
if (!clusterName.equals(livenessResponse.getClusterName())) {
logger.warn("node {} not part of the cluster {}, ignoring...", listedNode, clusterName);
newFilteredNodes.add(listedNode);
} else if (livenessResponse.getDiscoveryNode() != null) {
DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode();
newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.id(), nodeWithInfo.getHostName(),
nodeWithInfo.getHostAddress(), listedNode.address(), nodeWithInfo.attributes(),
nodeWithInfo.version()));
} else {
logger.debug("node {} didn't return any discovery info, temporarily using transport discovery node",
listedNode);
newNodes.add(listedNode);
}
} catch (Throwable e) {
logger.info("failed to get node info for {}, disconnecting...", e, listedNode);
transportService.disconnectFromNode(listedNode);
}
}
for (Iterator<DiscoveryNode> it = newNodes.iterator(); it.hasNext(); ) {
DiscoveryNode node = it.next();
if (!transportService.nodeConnected(node)) {
try {
logger.trace("connecting to node [{}]", node);
transportService.connectToNode(node);
} catch (Throwable e) {
it.remove();
logger.debug("failed to connect to discovered node [" + node + "]", e);
}
}
}
this.nodes = Collections.unmodifiableList(new ArrayList<>(newNodes));
this.filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes));
}
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
protected <Request extends ActionRequest, Response extends ActionResponse,
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
void doExecute(Action<Request, Response, RequestBuilder> action, final Request request,
ActionListener<Response> listener) {
final TransportActionNodeProxy<Request, Response> proxyAction = proxyActionMap.getProxies().get(action);
if (proxyAction == null) {
throw new IllegalStateException("undefined action " + action);
}
NodeListenerCallback<Response> callback = new NodeListenerCallback<Response>() {
@Override
public void doWithNode(DiscoveryNode node, ActionListener<Response> listener) {
proxyAction.execute(node, request, listener);
}
};
List<DiscoveryNode> nodes = this.nodes;
if (nodes.isEmpty()) {
throw new NoNodeAvailableException("none of the configured nodes are available: " + this.listedNodes);
}
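// pick nodes round-robin; on connect-level failures the RetryListener advances to the next node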
int index = nodeCounter.incrementAndGet();
if (index < 0) {
index = 0;
nodeCounter.set(0);
}
RetryListener<Response> retryListener = new RetryListener<>(callback, listener, nodes, index);
DiscoveryNode node = nodes.get(index % nodes.size());
try {
callback.doWithNode(node, retryListener);
} catch (Throwable t) {
listener.onFailure(t);
}
}
/**
* Callback for executing an action against a concrete node.
*
* @param <Response> the action response type
*/
interface NodeListenerCallback<Response> {
void doWithNode(DiscoveryNode node, ActionListener<Response> listener);
}
/**
* Builder for the stripped-down {@link TransportClient}.
*/
public static class Builder {
private Settings settings = Settings.EMPTY;
private List<Class<? extends Plugin>> pluginClasses = new ArrayList<>();
public Builder settings(Settings.Builder settings) {
return settings(settings.build());
}
public Builder settings(Settings settings) {
this.settings = settings;
return this;
}
public Builder addPlugin(Class<? extends Plugin> pluginClass) {
pluginClasses.add(pluginClass);
return this;
}
public TransportClient build() {
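// prepare the settings ("network.server" is false, "node.client" is true)
// and assemble the minimal module set for the client injector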
Settings settings = InternalSettingsPreparer.prepareSettings(this.settings);
settings = settingsBuilder()
.put("transport.ping.schedule", this.settings.get("ping.interval", "30s"))
.put(settings)
.put("network.server", false)
.put("node.client", true)
.put(CLIENT_TYPE_SETTING, CLIENT_TYPE)
.build();
PluginsService pluginsService = new PluginsService(settings, null, null, pluginClasses);
this.settings = pluginsService.updatedSettings();
Version version = Version.CURRENT;
final ThreadPool threadPool = new ThreadPool(settings);
boolean success = false;
try {
ModulesBuilder modules = new ModulesBuilder();
modules.add(new Version.Module(version));
// plugin modules must be added here, before others or we can get crazy injection errors...
for (Module pluginModule : pluginsService.nodeModules()) {
modules.add(pluginModule);
}
modules.add(new PluginsModule(pluginsService));
modules.add(new SettingsModule(this.settings));
modules.add(new NetworkModule());
modules.add(new ClusterNameModule(this.settings));
modules.add(new ThreadPoolModule(threadPool));
modules.add(new TransportModule(this.settings));
modules.add(new SearchModule() {
@Override
protected void configure() {
// noop
}
});
modules.add(new ActionModule(true));
modules.add(new ClientTransportModule());
modules.add(new CircuitBreakerModule(this.settings));
pluginsService.processModules(modules);
Injector injector = modules.createInjector();
injector.getInstance(TransportService.class).start();
TransportClient transportClient = new TransportClient(injector);
success = true;
return transportClient;
} finally {
if (!success) {
ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}
}
}
}
private static class RetryListener<Response> implements ActionListener<Response> {
private final ESLogger logger = ESLoggerFactory.getLogger(RetryListener.class.getName());
private final NodeListenerCallback<Response> callback;
private final ActionListener<Response> listener;
private final List<DiscoveryNode> nodes;
private final int index;
private volatile int n;
RetryListener(NodeListenerCallback<Response> callback, ActionListener<Response> listener,
List<DiscoveryNode> nodes, int index) {
this.callback = callback;
this.listener = listener;
this.nodes = nodes;
this.index = index;
}
@Override
public void onResponse(Response response) {
listener.onResponse(response);
}
@Override
public void onFailure(Throwable e) {
if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) {
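// connect-level failure: try the next node in the ring, give up after one full pass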
int n = ++this.n;
if (n >= nodes.size()) {
listener.onFailure(new NoNodeAvailableException("none of the configured nodes were available: "
+ nodes, e));
} else {
try {
logger.warn("retrying on another node (n={}, nodes={})", n, nodes.size());
callback.doWithNode(nodes.get((index + n) % nodes.size()), this);
} catch (final Throwable t) {
listener.onFailure(t);
}
}
} else {
listener.onFailure(e);
}
}
}
/**
* The {@link ProxyActionMap} must be declared public.
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public static class ProxyActionMap {
private final ImmutableMap<Action, TransportActionNodeProxy> proxies;
@Inject
public ProxyActionMap(Settings settings, TransportService transportService, Map<String, GenericAction> actions) {
MapBuilder<Action, TransportActionNodeProxy> actionsBuilder = new MapBuilder<>();
for (GenericAction action : actions.values()) {
if (action instanceof Action) {
actionsBuilder.put((Action) action, new TransportActionNodeProxy(settings, action, transportService));
}
}
this.proxies = actionsBuilder.immutableMap();
}
public ImmutableMap<Action, TransportActionNodeProxy> getProxies() {
return proxies;
}
}
}


@@ -0,0 +1,4 @@
/**
* Classes for Elasticsearch transport client extras.
*/
package org.xbib.elasticsearch.extras.client.transport;


@@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="OFF">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="[%d{ABSOLUTE}][%-5p][%-25c][%t] %m%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="info">
<AppenderRef ref="Console" />
</Root>
</Loggers>
</Configuration>