From 7069e31fe00ae669e348533b9c6fdc310f04f025 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=CC=88rg=20Prante?= Date: Tue, 1 Nov 2016 17:28:05 +0100 Subject: [PATCH] initial commit --- .gitignore | 13 + .travis.yml | 12 + LICENSE.txt | 202 ++++++ README.md | 82 +++ build.gradle | 117 ++++ config/checkstyle/checkstyle.xml | 323 ++++++++++ gradle/ext.gradle | 8 + gradle/publish.gradle | 63 ++ gradle/publish.gradle~ | 104 ++++ gradle/sonarqube.gradle | 41 ++ gradle/wrapper/gradle-wrapper.jar | Bin 0 -> 52928 bytes gradle/wrapper/gradle-wrapper.properties | 6 + gradlew | 169 +++++ gradlew.bat | 84 +++ settings.gradle | 1 + .../java/org/elasticsearch/node/MockNode.java | 38 ++ .../org/elasticsearch/node/package-info.java | 4 + .../org/xbib/elasticsearch/AliasTest.java | 92 +++ .../org/xbib/elasticsearch/NodeTestUtils.java | 204 ++++++ .../org/xbib/elasticsearch/SearchTest.java | 66 ++ .../org/xbib/elasticsearch/SimpleTest.java | 59 ++ .../org/xbib/elasticsearch/WildcardTest.java | 70 +++ .../extras/client/NetworkTest.java | 44 ++ .../client/node/BulkNodeClientTest.java | 208 +++++++ .../client/node/BulkNodeClusterBlockTest.java | 49 ++ .../client/node/BulkNodeDuplicateIDTest.java | 60 ++ .../client/node/BulkNodeIndexAliasTest.java | 77 +++ .../client/node/BulkNodeReplicaTest.java | 105 ++++ .../node/BulkNodeUpdateReplicaLevelTest.java | 67 ++ .../extras/client/node/package-info.java | 4 + .../extras/client/package-info.java | 4 + .../transport/BulkTransportClientTest.java | 201 ++++++ .../BulkTransportDuplicateIDTest.java | 61 ++ .../transport/BulkTransportReplicaTest.java | 108 ++++ .../BulkTransportUpdateReplicaLevelTest.java | 69 +++ .../org/xbib/elasticsearch/package-info.java | 4 + .../java/suites/BulkNodeTestSuite.java | 23 + .../java/suites/BulkTransportTestSuite.java | 22 + .../java/suites/ListenerSuite.java | 23 + .../java/suites/MiscTestSuite.java | 21 + .../java/suites/TestListener.java | 44 ++ src/integration-test/resources/log4j2.xml | 13 + .../elasticsearch/extras/client/settings.json | 3 + .../extras/client/AbstractClient.java | 495 +++++++++++++++ .../extras/client/BulkControl.java | 22 + .../extras/client/BulkMetric.java | 31 + .../extras/client/BulkProcessor.java | 473 ++++++++++++++ .../extras/client/ClientBuilder.java | 105 ++++ .../extras/client/ClientMethods.java | 391 ++++++++++++ .../extras/client/IndexAliasAdder.java | 11 + .../extras/client/NetworkUtils.java | 264 ++++++++ .../extras/client/Parameters.java | 28 + .../extras/client/SimpleBulkControl.java | 54 ++ .../extras/client/SimpleBulkMetric.java | 82 +++ .../extras/client/node/BulkNodeClient.java | 502 +++++++++++++++ .../extras/client/node/package-info.java | 4 + .../extras/client/package-info.java | 4 + .../client/transport/BulkTransportClient.java | 581 ++++++++++++++++++ .../client/transport/MockTransportClient.java | 156 +++++ .../client/transport/TransportClient.java | 517 ++++++++++++++++ .../extras/client/transport/package-info.java | 4 + src/test/resources/log4j2.xml | 13 + 62 files changed, 6705 insertions(+) create mode 100644 .gitignore create mode 100644 .travis.yml create mode 100644 LICENSE.txt create mode 100644 README.md create mode 100644 build.gradle create mode 100644 config/checkstyle/checkstyle.xml create mode 100644 gradle/ext.gradle create mode 100644 gradle/publish.gradle create mode 100644 gradle/publish.gradle~ create mode 100644 gradle/sonarqube.gradle create mode 100644 gradle/wrapper/gradle-wrapper.jar create mode 100644 gradle/wrapper/gradle-wrapper.properties create mode 100755 gradlew 
create mode 100644 gradlew.bat create mode 100644 settings.gradle create mode 100644 src/integration-test/java/org/elasticsearch/node/MockNode.java create mode 100644 src/integration-test/java/org/elasticsearch/node/package-info.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/AliasTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/NodeTestUtils.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/SearchTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/SimpleTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/WildcardTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/NetworkTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClientTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClusterBlockTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeDuplicateIDTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeIndexAliasTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeReplicaTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeUpdateReplicaLevelTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/node/package-info.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/package-info.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClientTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportDuplicateIDTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportReplicaTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportUpdateReplicaLevelTest.java create mode 100644 src/integration-test/java/org/xbib/elasticsearch/package-info.java create mode 100644 src/integration-test/java/suites/BulkNodeTestSuite.java create mode 100644 src/integration-test/java/suites/BulkTransportTestSuite.java create mode 100644 src/integration-test/java/suites/ListenerSuite.java create mode 100644 src/integration-test/java/suites/MiscTestSuite.java create mode 100644 src/integration-test/java/suites/TestListener.java create mode 100644 src/integration-test/resources/log4j2.xml create mode 100644 src/integration-test/resources/org/xbib/elasticsearch/extras/client/settings.json create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/AbstractClient.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/BulkControl.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/BulkMetric.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/BulkProcessor.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/ClientBuilder.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/ClientMethods.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/IndexAliasAdder.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/NetworkUtils.java create mode 100644 
src/main/java/org/xbib/elasticsearch/extras/client/Parameters.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkControl.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkMetric.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClient.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/node/package-info.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/package-info.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClient.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/transport/MockTransportClient.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/transport/TransportClient.java create mode 100644 src/main/java/org/xbib/elasticsearch/extras/client/transport/package-info.java create mode 100644 src/test/resources/log4j2.xml diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..bf3e9b4 --- /dev/null +++ b/.gitignore @@ -0,0 +1,13 @@ +/data +/work +/logs +/.idea +/target +.DS_Store +*.iml +/.settings +/.classpath +/.project +/.gradle +/build +/plugins \ No newline at end of file diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..a830350 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,12 @@ +language: java +sudo: required +jdk: + - oraclejdk8 +cache: + directories: + - $HOME/.m2 +after_success: + - ./gradlew sonarqube -Dsonar.host.url=https://sonarqube.com -Dsonar.login=$SONAR_TOKEN +env: + global: + secure: n1Ai4q/yMLn/Pg5pA4lTavoJoe7mQYB1PSKnZAqwbgyla94ySzK6iyBCBiNs/foMPisB/x+DHvmUXTsjvquw9Ay48ZITCV3xhcWzD0eZM2TMoG19CpRAEe8L8LNuYiti9k89ijDdUGZ5ifsvQNTGNHksouayAuApC3PrTUejJfR6SYrp1ZsQTbsMlr+4XU3p7QknK5rGgOwATIMP28F+bVnB05WJtlJA3b0SeucCurn3wJ4FGBQXRYmdlT7bQhNE4QgZM1VzcUFD/K0TBxzzq/otb/lNRSifyoekktDmJwQnaT9uQ4R8R6KdQ2Kb38Rvgjur+TKm5i1G8qS2+6LnIxQJG1aw3JvKK6W0wWCgnAVVRrXaCLday9NuY59tuh1mfjQ10UcsMNKcTdcKEMrLow506wSETcXc7L/LEnneWQyJJeV4vhPqR7KJfsBbeqgz3yIfsCn1GZVWFlfegzYCN52YTl0Y0uRD2Z+TnzQu+Bf4DzaWXLge1rz31xkhyeNNspub4h024+XqBjcMm6M9mlMzmmK8t2DIwPy/BlQbFBUyhrxziuR/5/2NEDPyHltvWkRb4AUIa25WJqkV0gTBegbMadZ9DyOo6Ea7aoVFBae2WGR08F1kzABsWrd1S7UJmWxW35iyMEtoAIayXphIK98qO5aCutwZ+3iOQazxbAs= diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..4989a18 --- /dev/null +++ b/README.md @@ -0,0 +1,82 @@ +![Helper](https://github.com/jprante/elasticsearch-helper/raw/master/src/site/resources/helper.jpg) + +# Elasticsearch helper plugin + +This plugin offers some Java helper classes for easier use of Elasticsearch API. 
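The class names in this commit (`ClientBuilder`, `BulkNodeClient`, `BulkTransportClient`, `SimpleBulkMetric`, `SimpleBulkControl` under `org.xbib.elasticsearch.extras.client`) point to a bulk-oriented wrapper around the stock node and transport clients. A minimal usage sketch might look like the following; every builder and client method name in it is an assumption inferred from those class names, not a confirmed API of this code.

```java
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import org.xbib.elasticsearch.extras.client.node.BulkNodeClient;

public class BulkNodeClientSketch {

    /**
     * Bulk-indexes a single document through the helper classes.
     * All builder and client method names below are assumptions,
     * not the confirmed API of this commit.
     */
    public static void bulkIndex(Client client) throws Exception {
        BulkNodeClient bulkClient = ClientBuilder.builder()    // assumed entry point
                .setMetric(new SimpleBulkMetric())             // assumed: attach bulk metrics
                .setControl(new SimpleBulkControl())           // assumed: attach bulk control
                .toBulkNodeClient(client);                     // assumed: wrap an existing node client

        bulkClient.newIndex("test");                           // assumed: create the index if missing
        bulkClient.index("test", "doc", "1",                   // assumed: queue a document for bulk indexing
                "{\"name\":\"Jörg Prante\"}");
        bulkClient.flushIngest();                              // assumed: flush pending bulk requests
        bulkClient.waitForResponses(TimeValue.timeValueSeconds(30)); // assumed: wait for outstanding responses
        bulkClient.shutdown();                                 // assumed: release resources
    }
}
```

The transport variant would presumably be obtained the same way, yielding a `BulkTransportClient` instead of a `BulkNodeClient`.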
+ +## Compatibility matrix + +| Elasticsearch | Plugin | Release date | +| ----------------- | -------------- | -------------| +| 2.4.1 | 2.4.1.0 | Oct 4, 2016 | +| 2.4.0 | 2.4.0.0 | Oct 4, 2016 | +| 2.3.5 | 2.3.5.0 | Aug 4, 2016 | +| 2.3.3 | 2.3.3.1 | Jul 6, 2016 | +| 2.3.3 | 2.3.3.0 | May 23, 2016 | +| 2.3.2 | 2.3.2.0 | May 9, 2016 | +| 2.3.1 | 2.3.1.1 | May 9, 2016 | +| 2.3.1 | 2.3.1.0 | Apr 9, 2016 | +| 2.3.0 | 2.3.0.0 | Apr 9, 2016 | +| 2.2.1 | 2.2.1.1 | Mar 30, 2016 | +| 2.2.0 | 2.2.0.5 | Mar 15, 2016 | +| 2.2.0 | 2.2.0.4 | Mar 10, 2016 | +| 2.2.0 | 2.2.0.3 | Feb 16, 2016 | +| 2.2.0 | 2.2.0.2 | Feb 12, 2016 | +| 2.2.0 | 2.2.0.0 | Feb 3, 2016 | +| 2.1.1 | 2.1.1.0 | Dec 21, 2015 | +| 2.1.0 | 2.1.0.0 | Nov 29, 2015 | +| 2.0.0 | 2.0.0.2 | Nov 3, 2015 | +| 2.0.0 | 2.0.0.1 | Oct 29, 2015 | +| 2.0.0 | 2.0.0.0 | Oct 28, 2015 | +| 1.6.0 | 1.6.0.0 | Jul 1, 2015 | +| 1.5.2 | 1.5.2.2 | May 11, 2015 | +| 1.5.2 | 1.5.2.1 | May 3, 2015 | +| 1.5.1 | 1.5.1.0 | Apr 23, 2015 | +| 1.3.1 | 1.3.0.3 | Aug 8, 2014 | +| 1.3.1 | 1.3.0.1 | Aug 4, 2014 | +| 1.3.0 | 1.3.0.0 | Jul 23, 2014 | +| 1.2.2 | 1.2.2.0 | Jul 19, 2014 | +| 1.2.1 | 1.2.1.0 | Jun 4, 2014 | +| 1.2.0 | 1.2.0.1 | May 28, 2014 | +| 1.2.0 | 1.2.0.0 | May 22, 2014 | +| 1.1.0 | 1.1.0.7 | May 11, 2014 | +| 1.0.0.RC2 | 1.0.0.RC2.1 | Feb 3, 2014 | +| 0.90.7 | 0.90.7.1 | Dec 3, 2013 | +| 0.20.6 | 0.20.6.1 | Feb 4, 2014 | +| 0.19.11.2 | 0.19.11.2 | Feb 1, 2013 | + +## Installation 2.x + + ./bin/plugin install http://xbib.org/repository/org/xbib/elasticsearch/plugin/elasticsearch-helper/2.3.3.1/elasticsearch-helper-2.3.3.1-plugin.zip + +## Installation 1.x + + ./bin/plugin -install helper -url http://xbib.org/repository/org/xbib/elasticsearch/plugin/elasticsearch-helper/1.6.0.0/elasticsearch-helper-1.6.0.0.zip + +Do not forget to restart the node after installing. + +## Project docs + +The Maven project site is available at `Github `_ + +## Issues + +All feedback is welcome! If you find issues, please post them at `Github `_ + +# License + +Elasticsearch Helper Plugin (formerly Elasticsearch Support Plugin) + +Copyright (C) 2013 Jörg Prante + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
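Besides the plugin zip installation shown above, the build files added in this commit (`build.gradle`, `gradle/ext.gradle`, `gradle/publish.gradle`) declare the coordinates group `org.xbib`, name `elasticsearch-extras-client`, version `2.2.1.0`, and upload the jar to Maven repositories. Assuming a release with exactly these coordinates is reachable from your repository configuration, the classes can also be consumed as a plain library:

```gradle
// Sketch only: coordinates taken from build.gradle and gradle/ext.gradle in this
// commit; pick the release that matches your Elasticsearch version (see the matrix above).
dependencies {
    compile 'org.xbib:elasticsearch-extras-client:2.2.1.0'
    compile 'org.elasticsearch:elasticsearch:2.2.1' // the version this build compiles against
}
```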
\ No newline at end of file diff --git a/build.gradle b/build.gradle new file mode 100644 index 0000000..fd72ee1 --- /dev/null +++ b/build.gradle @@ -0,0 +1,117 @@ + +plugins { + id "org.sonarqube" version "2.2" + id "org.ajoberstar.github-pages" version "1.6.0-rc.1" + id "org.xbib.gradle.plugin.jbake" version "1.1.0" +} + +group = 'org.xbib' +version = '2.2.1.0' + +printf "Host: %s\nOS: %s %s %s\nJVM: %s %s %s %s\nGroovy: %s\nGradle: %s\n" + + "Build: group: ${project.group} name: ${project.name} version: ${project.version}\n", + InetAddress.getLocalHost(), + System.getProperty("os.name"), + System.getProperty("os.arch"), + System.getProperty("os.version"), + System.getProperty("java.version"), + System.getProperty("java.vm.version"), + System.getProperty("java.vm.vendor"), + System.getProperty("java.vm.name"), + GroovySystem.getVersion(), + gradle.gradleVersion + +apply plugin: 'java' +apply plugin: 'maven' +apply plugin: 'signing' +apply plugin: 'findbugs' +apply plugin: 'pmd' +apply plugin: 'checkstyle' +apply plugin: "jacoco" +apply plugin: 'org.ajoberstar.github-pages' + +apply from: 'gradle/ext.gradle' + +sourceSets { + integrationTest { + java { + srcDir file('src/integration-test/java') + compileClasspath += main.output + compileClasspath += test.output + } + resources { + srcDir file('src/integration-test/resources') + } + } +} + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 + +configurations { + wagon + integrationTestCompile.extendsFrom testCompile + integrationTestRuntime.extendsFrom testRuntime +} + +dependencies { + compile "org.xbib:metrics:1.0.0" + compile "org.elasticsearch:elasticsearch:2.2.1" + testCompile "net.java.dev.jna:jna:4.1.0" + testCompile "junit:junit:4.12" + testCompile "org.apache.logging.log4j:log4j-core:2.7" + testCompile "org.apache.logging.log4j:log4j-slf4j-impl:2.7" + wagon 'org.apache.maven.wagon:wagon-ssh-external:2.10' +} + +tasks.withType(JavaCompile) { + options.compilerArgs << "-Xlint:all" << "-profile" << "compact3" +} + +task integrationTest(type: Test) { + include '**/MiscTestSuite.class' + include '**/BulkNodeTestSuite.class' + include '**/BulkTransportTestSuite.class' + testClassesDir = sourceSets.integrationTest.output.classesDir + classpath = configurations.integrationTestCompile + classpath += configurations.integrationTestRuntime + classpath += sourceSets.main.output + classpath += sourceSets.test.output + classpath += sourceSets.integrationTest.output + outputs.upToDateWhen { false } + systemProperty 'path.home', projectDir.absolutePath + testLogging.showStandardStreams = true +} + +integrationTest.mustRunAfter test +check.dependsOn integrationTest + +clean { + delete "plugins" + delete "logs" +} + +task javadocJar(type: Jar, dependsOn: classes) { + from javadoc + into "build/tmp" + classifier 'javadoc' +} + +task sourcesJar(type: Jar, dependsOn: classes) { + from sourceSets.main.allSource + into "build/tmp" + classifier 'sources' +} + +artifacts { + archives javadocJar, sourcesJar +} + +if (project.hasProperty('signing.keyId')) { + signing { + sign configurations.archives + } +} + +apply from: 'gradle/publish.gradle' +apply from: 'gradle/sonarqube.gradle' diff --git a/config/checkstyle/checkstyle.xml b/config/checkstyle/checkstyle.xml new file mode 100644 index 0000000..52fe33c --- /dev/null +++ b/config/checkstyle/checkstyle.xml @@ -0,0 +1,323 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/gradle/ext.gradle b/gradle/ext.gradle new file mode 100644 index 0000000..1a3a910 --- /dev/null +++ b/gradle/ext.gradle @@ -0,0 +1,8 @@ +ext { + user = 'jprante' + name = 'elasticsearch-extras-client' + description = 'Some extras implemented for using Elasticsearch clients (node and transport)' + scmUrl = 'https://github.com/' + user + '/' + name + scmConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git' + scmDeveloperConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git' +} diff --git a/gradle/publish.gradle b/gradle/publish.gradle new file mode 100644 index 0000000..6882a43 --- /dev/null +++ b/gradle/publish.gradle @@ -0,0 +1,63 @@ + +task xbibUpload(type: Upload) { + configuration = configurations.archives + uploadDescriptor = true + repositories { + if (project.hasProperty("xbibUsername")) { + mavenDeployer { + configuration = configurations.wagon + repository(url: 'scpexe://xbib.org/repository') { + authentication(userName: xbibUsername, privateKey: xbibPrivateKey) + } + } + } + } +} + +task sonaTypeUpload(type: Upload) { + configuration = configurations.archives + uploadDescriptor = true + repositories { + if (project.hasProperty('ossrhUsername')) { + mavenDeployer { + beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) } + repository(url: 'https://oss.sonatype.org/service/local/staging/deploy/maven2') { + authentication(userName: ossrhUsername, password: ossrhPassword) + } + snapshotRepository(url: 'https://oss.sonatype.org/content/repositories/snapshots') { + authentication(userName: ossrhUsername, password: ossrhPassword) + } + pom.project { + name name + description description + packaging 'jar' + inceptionYear '2012' + url scmUrl + organization { + name 'xbib' + url 'http://xbib.org' + } + developers { + developer { + id user + name 'Jörg Prante' + email 'joergprante@gmail.com' + url 'https://github.com/jprante' + } + } + scm { + url scmUrl + connection scmConnection + developerConnection scmDeveloperConnection + } + licenses { + license { + name 'The Apache License, Version 2.0' + url 'http://www.apache.org/licenses/LICENSE-2.0.txt' + } + } + } + } + } + } +} diff --git a/gradle/publish.gradle~ b/gradle/publish.gradle~ new file mode 100644 index 0000000..e04b20b --- /dev/null +++ b/gradle/publish.gradle~ @@ -0,0 +1,104 @@ + +task xbibUpload(type: Upload) { + configuration = configurations.archives + uploadDescriptor = true + repositories { + if (project.hasProperty("xbibUsername")) { + mavenDeployer { + configuration = configurations.wagon + repository(url: 'scpexe://xbib.org/repository') { + authentication(userName: xbibUsername, privateKey: xbibPrivateKey) + } + } + } + } +} + +task sonaTypeUpload(type: Upload) { + configuration = configurations.archives + uploadDescriptor = true + repositories { + if (project.hasProperty('ossrhUsername')) { + mavenDeployer { + beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) } + repository(url: 'https://oss.sonatype.org/service/local/staging/deploy/maven2') { + authentication(userName: ossrhUsername, password: ossrhPassword) + } + snapshotRepository(url: 'https://oss.sonatype.org/content/repositories/snapshots') { + authentication(userName: ossrhUsername, password: 
ossrhPassword) + } + pom.project { + name name + description description + packaging 'jar' + inceptionYear '2012' + url scmUrl + organization { + name 'xbib' + url 'http://xbib.org' + } + developers { + developer { + id user + name 'Jörg Prante' + email 'joergprante@gmail.com' + url 'https://github.com/jprante' + } + } + scm { + url scmUrl + connection scmConnection + developerConnection scmDeveloperConnection + } + licenses { + license { + name 'The Apache License, Version 2.0' + url 'http://www.apache.org/licenses/LICENSE-2.0.txt' + } + } + } + } + } + } +} + + +task hbzUpload(type: Upload) { + configuration = configurations.archives + uploadDescriptor = true + repositories { + if (project.hasProperty('hbzUserName')) { + mavenDeployer { + configuration = configurations.wagon + beforeDeployment { MavenDeployment deployment -> + signing.signPom(deployment) + } + repository(url: uri(hbzUrl)) { + authentication(userName: hbzUserName, privateKey: hbzPrivateKey) + } + pom.project { + developers { + developer { + id 'jprante' + name 'Jörg Prante' + email 'joergprante@gmail.com' + url 'https://github.com/jprante' + } + } + scm { + url 'https://github.com/xbib/elasticsearch-webapp-libraryservice' + connection 'scm:git:git://github.com/xbib/elasticsaerch-webapp-libraryservice.git' + developerConnection 'scm:git:git://github.com/xbib/elasticsaerch-webapp-libraryservice.git' + } + inceptionYear '2016' + licenses { + license { + name 'The Apache License, Version 2.0' + url 'http://www.apache.org/licenses/LICENSE-2.0.txt' + } + } + } + } + } + } +} diff --git a/gradle/sonarqube.gradle b/gradle/sonarqube.gradle new file mode 100644 index 0000000..5de408d --- /dev/null +++ b/gradle/sonarqube.gradle @@ -0,0 +1,41 @@ +tasks.withType(FindBugs) { + ignoreFailures = true + reports { + xml.enabled = true + html.enabled = false + } +} +tasks.withType(Pmd) { + ignoreFailures = true + reports { + xml.enabled = true + html.enabled = true + } +} +tasks.withType(Checkstyle) { + ignoreFailures = true + reports { + xml.enabled = true + html.enabled = true + } +} + +jacocoTestReport { + reports { + xml.enabled true + csv.enabled false + xml.destination "${buildDir}/reports/jacoco-xml" + html.destination "${buildDir}/reports/jacoco-html" + } +} + +sonarqube { + properties { + property "sonar.projectName", "${project.group} ${project.name}" + property "sonar.sourceEncoding", "UTF-8" + property "sonar.tests", "src/integration-test/java" + property "sonar.scm.provider", "git" + property "sonar.java.coveragePlugin", "jacoco" + property "sonar.junit.reportsPath", "build/test-results/test/" + } +} diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..6ffa237849ef3607e39c3b334a92a65367962071 GIT binary patch literal 52928 zcmagGb95)swk{ew9kXNGwr$(C^NZ85ZQHifv2EM7)3?vv`<-+5e*3;xW6Y}hW3I6< zCcd@iSEV2g3I+oN1O)|jZN@AK^!Eb!uiM`X`me}}stD3b%8Ai~0xA59VoR-HEdO5x zmA``ee=5of%1MfeDyz`Riap3qPRK~p(#^q3(^5@O&NM19EHdvN-A~evN>0g6QA^SQ z!<>hhq#PD$QMO@_mK+utjrKQVUtrxk-8ljOA06)g+sMHFc4+Tp{x5_2cOBS&>X1WSG!pV|VNad#DJM1TXs(`9L%opN1UGD9Zg1&fIj6|1q{So)3U-a4CWoQ*xDe5m z9`W<5i~8J4RP+_u%df<<9!9wKqF2xU;C4v(Y!-T*OjUIq^ zrN#C6w^bh64qit6YXA;^mssTgXO7Aq&Mv053QqQa7t6)c)cNllz(dg0#lqCi#nRZ& z#op;3i%_g=YmY35=!;GfIx@FkZcv@PzU--T6k$JSfDIiT4$UZAAuGdgYY1vy<8ERf ze_#6;Y0Gj4`C1s&D3DA5jB+zDeaZ7M$-~|Ga&WS812hh>B8m=xh6M+;rrcz!kBLTQ zQ`T6%#zoO?vnKj6^1J1i7u*WNSiW`iNs=miGfCi*Dt^VFDLpvE&ns6(aHeC z3qt$jqc5sVSqlbZ75*bJsob;aDw2{15z$SP{#8W_RMN^WRTA9t1p#8i@dE|&pob=c 
z@QGumEOIoH{KAi8TY)dXHo$(J#gvZk71xrHNuGT-7%`rTlV_Zo!(OU-j%icv9_-KI z1nHQSr3ammbpfco?=Kl?ei@>EJ_17g>0S9U|fXm_LPN?1oEDA0!ut15`lj4Mm4xZU(wTeUNQEd zCE^6qv>v?G60wFbS<({Xk%G*#32rE?5^J1;{xx@AA#Akp0+j7jzL5B60@lV<+q#T9pQhKYEb9ArmLmLyiBO zplj;BiVS^~VaT8{hG|Ht#rRfNH3DN$?d98gb9u`7)O9^k;D`opoxd=)>$VRnWF{4= zgu@S?hwVK;ion71Q?M{fCP<6h8+9Z|>pD2wXq+ugvG1~%OlW9->&kCIjCuxVz*hX$ zzQ?iZehcS(p@nC2%$hQcBk?A>+~7f?i(MX9?(xp1mu9DyGM!72HSSfl z$Oz4QCzgy(V=oL3pQYF{Rx(Tx*~PbKFY*nBwbi=yuucV$ViM)}D3bSqy8 z88cp;2??o+fHgr1ilD6x(tjxLpv7V}g^jlHKGmPL=}$ey8ny4Ce(vkS=~F{6KE|Hw41~6<~g%~DzYSw1k;&|?j%Dl@2+$P=Xn^9hsFVRyG+Sh2H>*}g0F5Z3- z_(&dHcGwwx)K_QrW~r;^fw{0^ym(VVev=6Cqyz|SWK-F)LA{ecs}>LxqO}YTZ-!+( z;q?IN!|V*e1+fcZPiH1w{91m#?I0%~W9F{+g3vxq^=;nN2!9lnbn26nbT!P!@-_7L@R`X(Gl~k!p8%w(Grbc zXv1DpfF-8i*dE{5URa%j1YP2)HcOmon(g{XqKn(6_X3Msu0IcIPtB$L{5A_|SNkoE zZR(^u^A++I9BAg{utl6|;jyYf)sAW>t-#8D-_+*|1|FNcY2IH4uDJ9Tv=+eJObD2p zX?`ELesjwF`L;!by!!u3i`3QA9uwzjpz>2-WYZ3YVzj&}24x^J>3#?9XC=s`8u)Ee zrbY6C9wTeJ$qJE(7V-cb>5m<~$U_x(Pfq8!Pd{~E<6vrkbboxNjYLhU|LT>T6p^n0 z!X15P&Q4%xsyNNNXzz?(S{gDNbcpAN!OvZj6==wUn7d>c|OG>mhmic;2Yk$hZ=? zfd@%h+n~m5UDF91u)?AhThlL%(Ri569S6Spll1;NU?2si%D6$tk^|UxF*DY9)zD-z z#bv2vY)Pt@Nquvxvkhay!QRZu^^K|3=5+O4SI&{D_Z*LmH2nhdRQRUOps_EpJH=hf zCK$OVm|eW>q0~C{nqIERu*9DUb;(m2Up@A^1RChMCC=1#49=cpFG9 zTDi(`NBGuW)I?j;5BhPSt+-RujJefAYBF_T+%Lyx;Kihqw+s?2v5cE3M`KKS)!abNcTs%<>ORqL=?|OIG)!`0lzHMo!%wrs*NtjvZY09C-U@O)g!5^ zIv|SW4SR#gC4hkCK@ccUzOjciiS)(ys!-D=iP_h*P$4(-rms){9|dnFadD=P6926D z&_4EYGxjo`W&@&tD|SoAc03xlhl~MD%~=G=L^+P+Ty!sD-NA|aV>B7Z^R`z6IG+br zm<`>r?2QC_UsF)yT&&0Xg1oOp(Ep;B*n0~fu~7GcgW&Dslj*yzqrxr;@D&sRZnOR? zy^Fs-1AZqRTZ6wGV1ClOC{+ZkRpk)5>j7@pj+JQHp6d|0NKL5FEd2%0$P6TKDbvg! zyy}nDFzTB>p3FvxoH44xfj?~8yO@K-42c# zyTODmB@6do_d2|~LZIAqDG|?lSQQ;;& z2qsyVG8fmH)=I+;9-JeM`5YFwOY6gW1f6rh674zNkivf6fm}iIx!TA@%TXt;=5h#% zhH&FMx`&~+-1`gW*YDFa%wFOagiqkSE^sme?XvChu~&P+Eu8bpI4aP%YPX*niCBWw zLh95H>xi0I#FJn`mY?>0!e<(Uh!MgbLk59L*q!%*N5WZONmz)xU7&KGokQOuqcP>T z^WBbbgz=XtR)&$!ZiH+wmc7c@py~zA;D)V{6W(IV5n|y>@DM!0V}y9+lt<1q?ec~D zHH6p^c-d8DZlO4MT`;@)!6dDteVC6W=-d*^Rs-KG;SPz%n^9^c&LP|~%Fonsf*FWV zRbZQ~!sz=qR=*DX8Efw_NE;_yp)}UYRQ!jIM^nrF zPrP6{M;tf(&~L-3s}~ef`HZz4QfLKGXj6|DN(|0eadB8G-Y6`mNc1VeTdQ=NZA13; zxP6BBwSEOB&_5mkJR;ozJA!{DV<%u>Xk}|4Vq$A&FYloHz5f5&{q`tX$fGC&Zpl|! z+f*Bi!M&}U7xUxOg5)peLxe$!Mh>xvXPvK?R+*a7pIb05^2+ATe(W5r@k=PYQa8HG zd(7p{nX+@zgCwi?V3YsmWT+8vX})nR!|m$f^3;kB=$*}*Ue)3B978P;OgmgGEM#;8 znsqfG$%fIhkR%$r63BTtEJh`cS@4Qc8~I*vp0^Ca(vfN5h;{6raqF?}j!!RL?E`ga zjMUyL^t^T|Rx8q&kyou&l8;zPvox+(EG|>U5}*TIBA{a^k#+5QBg+19g;m4QG?jla zr5U>mOK0A6S|Y_klvZ&7c`(_awy9LDDTys2Hfsn`YvLp|p)OzDp?REw#eU>W2R zf<;j=LZV1py-9AsUM;_+(CLN@-gBl1a-Z|dL_HJU%aCpOWw4Zw2-5Pc;FcWtrg2nj z;WiqqjY+VgKF?qg)+DH0s%5zLKe{nKPpL6B#L8(s(u;;M?4puS3C6`e>5zhHL`&-m z_SI<_vI!z`A;#+Y*bH5F2G*Ad9XhWQ>@5C%9luRC={nqg&e=FAD&oa}^T~TsFbsM! 
z6^|iAUe1mxMU!dfE-jDOvniPm0#gR>lFf+ip%8Ffx3XVW*>w;PCiSgUc7r3mRv09vXIPkGMf9XhH(Tv7$mBMwFN>+AzLt{ z=42KviP zaUOT+uWYej2!0(y;()l4zj;=F0gTZczmL)KE_U_?7XQYkkRCfE(Zz=vI5uBwQCl}_ zIwQQyB7h>)9e@#yj`5oA)xKv6{!|PJ3fYvvs)VvH3_(W_GPwz2A%C}O8q@jM<49U# zi&snAvyNxxiG&Cd#OA2ks{SLu=4e^MgkFO$$;P1c7w@9^>W2EnvKEc-SEW{vLftrR z<6ocJg>ec}sW8iQm!wsAcgSVJEY*PFaT%+@GePJOPPC#dixOdhYo0Vs6BfU zF+g_4XgWE^5|RB&A4}nqX4VT4AJpmCcDo7)j>~nqA>feFoXsliWGa-mcK39AfD;EO z3965JmGA~=)OLP@)C_IVGoU9V*7=f z&|jOB7R}ff0??ez0Ams3?@jvCwENw$`nTofTlqDs$ycZkGL)z=b{)$K%qVQQ^sEQ& zOqkLnDM`^5P=*L@&3s)=@#=On_*-;HxM^~9hb#|z8|Q`WCp=?+Nt|)+R~br z=L4`cP@f3)5-a2UXpZR?mLttHEi+`Ya>hl@oWDY=jQ~B@hjG#pNA7)mt?>J@JBm9V zH;EN(!Y?S7F=@xZ`-x^ zJyYq=E{on)ESOyK?^JQ$Z#FO7+mqi0i>=T)%?Oj+xDkj<(|Y%k!<0=1N|mk!S{#aH zlE!EglG-Otpt?emg6s}%wZJZPn6kMc9n1V4X50#HxbE636>NHFsie4W7E=Re>*~0Vc z`P?TbYu=G}yC;OiEh?U${Y!gBRwxq8{oF9ajIHzOtPPNSRX5tHLB6WcUqCuTInCRu zd=`mb{rYSRhZ@}c!LTN7u)i`x7FeYWn2d>^0i3E*if=K^z|Z) zY)AJZ_K6RsH6DWQdJu)+n@G{Qqm;^#Qwu{b`C$ql8sq*E@lDBu-gl+R+n-T0FIX~} zGqR8ixFfdetrHEfD0vEXdTKaw1Mg`Fs)dN|F@Eh*mrm$fB(QL#lA5RDAK8LPdDuO?Edn1ymgP8JauL8ia(5RBBo%CCo)oza0?nX}4%^d9y?)UJ7fx<{5!OWA$XcNA{VpaTf zf!2~872Zo4OjH6LN#0z*y~55mvi`yM!1$12^wX(U(*lHp6D7|_tF5$t!E?C?RG z7lsJ5QR31rB(Ty=2@D2ZQdTKNWl&;_HH$4CXq3{Ot_!JZachGyEhRg|SW~ZRHCi+# zcaL1KSZ~)8B5-9ju<$)NR;tJgBm^We3})I|El@B%ML&Z*H2S=e-W8_Wv*C)dBNabJwTB#iE3yBOH{a zs%+ed!dTr;+a3ZtCmOBG zDaiz7_oKziODI+C9?c8II)EdJo@cl6TDGL|b0v%>tED>C<;y8kFP_T7W*netjkK^| zJ}4h_lJg5=eBDvs+0FX898KKl%86ZR)7xV=|Y1IaN2kfbHi9PvG~YY z7tDDDs+GVGF~BJ%dc__U=G`-)PmI-t)-)pW)f?P7n9MhG{SH|6hWbF`#^>(#A@kEI zzY2UzK2g8pL_;-%P4d$5hh|*pII)8@W!7WdhRt2?BM!9%n3`$>(0S^i^em4FA1){j zQMSepjX2$n6nhlF4N&z_xfke~Qu4-M-f8y?!|Rpp1}z9cHCO|^$ckD|>H#Z#&w(K@ zKWN$>z~BudL{0%Ra3e` zJ>Xma{vZM!^MCtOT3CUfR$N8|aP9urWOe2IQQt&}6ac?(Ir{r~=ksmv_n&0a{4(Mq z!tx3)q(y#4Sbstcn7h6|h(KTYQviQ|6F2&&6o5_8Pbu2Ir}!?1^iK%^0;E4B-2XM< z_ZSAhF&q2p8|DK3>kQ!c{Co0muNog*z-sCzA*R0p{tm44w{H5=9QCXQ%%TEjZEHZu zg?<72))@Z;_;ig1Sgkp`@H^NW1C(q{^nS=RKV{rKQn4!nFf9Ro#J?~)0`l+&M)AMP z@Yj~{DIRE{>xu>-uX2FYw7=kK11!IPz!SCA(Kk0x1c=FtSy>oJSy`DmSpONi99HIv z9$+}D1vK)vKKwg4C|3{8w20AknxVfa7iUEg(hEf!EE+<X!PXU!V1E|bj+erh^FaCfgU}a_hEr^D-w2p}-z=HZCbjH&zK;BOm)C=fMQGm>T z3xM$VQ{$5Wh?@5Y=&2NeSKJ@=-@%_Y<`V{_Yn2#Uuu{yuO{{;Je ztH|(sZ~RtoPXlWFK=su97pVUbUgK&0p9U`Y!Q^f5FPMII`vH{B`sXkPPpO{j#{Zx) zH~uH8|Dq!Q6#l6I`VV+u(|>~h-V*;LhyIl6sgmjsDiYg&LG@Rq{jchJDn|K(LxzEkDJ4svPwLQ_b_AFuym*U(kR3so%zrPI@RaB2mHi()AfdnU{NBX>>H_~$&Zifz ze{fcY|8LGe4_8k;WPf04Wd3i=KlsZ&&Gb`u#2*B&a{dLu&!g0zOYqbg><0l;-fskd z&l3N}jQg}CPn|b@5FHi%M)X^E{io0E>4EqU \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn ( ) { + echo "$*" +} + +die ( ) { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. 
+if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules +function splitJvmOpts() { + JVM_OPTS=("$@") +} +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [[ "$(uname)" == "Darwin" ]] && [[ "$HOME" == "$PWD" ]]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" 
org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 0000000..e95643d --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,84 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/settings.gradle b/settings.gradle new file mode 100644 index 0000000..ef50653 --- /dev/null +++ b/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'elasticsearch-extras-client' diff --git a/src/integration-test/java/org/elasticsearch/node/MockNode.java b/src/integration-test/java/org/elasticsearch/node/MockNode.java new file mode 100644 index 0000000..b0c02eb --- /dev/null +++ b/src/integration-test/java/org/elasticsearch/node/MockNode.java @@ -0,0 +1,38 @@ +package org.elasticsearch.node; + +import org.elasticsearch.Version; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.internal.InternalSettingsPreparer; +import org.elasticsearch.plugins.Plugin; + +import java.util.ArrayList; +import java.util.Collection; + +/** + * + */ +public class MockNode extends Node { + + public MockNode() { + super(Settings.EMPTY); + } + + public MockNode(Settings settings) { + super(settings); + } + + public MockNode(Settings settings, Collection> classpathPlugins) { + super(InternalSettingsPreparer.prepareEnvironment(settings, null), Version.CURRENT, classpathPlugins); + } + + public MockNode(Settings settings, Class classpathPlugin) { + this(settings, list(classpathPlugin)); + } + + private static Collection> list(Class classpathPlugin) { + Collection> list = new ArrayList<>(); + list.add(classpathPlugin); + return list; + } + +} diff --git a/src/integration-test/java/org/elasticsearch/node/package-info.java b/src/integration-test/java/org/elasticsearch/node/package-info.java new file mode 100644 index 0000000..f299cbc --- /dev/null +++ b/src/integration-test/java/org/elasticsearch/node/package-info.java @@ -0,0 +1,4 @@ +/** + * Classes to support Elasticsearch node creation. 
+ */ +package org.elasticsearch.node; diff --git a/src/integration-test/java/org/xbib/elasticsearch/AliasTest.java b/src/integration-test/java/org/xbib/elasticsearch/AliasTest.java new file mode 100644 index 0000000..545e9e8 --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/AliasTest.java @@ -0,0 +1,92 @@ +package org.xbib.elasticsearch; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.cluster.metadata.AliasAction; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.junit.Test; + +import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; +import java.util.Set; +import java.util.TreeSet; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * + */ +public class AliasTest extends NodeTestUtils { + + private static final ESLogger logger = ESLoggerFactory.getLogger(AliasTest.class.getName()); + + @Test + public void testAlias() throws IOException { + CreateIndexRequest indexRequest = new CreateIndexRequest("test"); + client("1").admin().indices().create(indexRequest).actionGet(); + // put alias + IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); + String[] indices = new String[]{"test"}; + String[] aliases = new String[]{"test_alias"}; + IndicesAliasesRequest.AliasActions aliasAction = + new IndicesAliasesRequest.AliasActions(AliasAction.Type.ADD, indices, aliases); + indicesAliasesRequest.addAliasAction(aliasAction); + client("1").admin().indices().aliases(indicesAliasesRequest).actionGet(); + // get alias + GetAliasesRequest getAliasesRequest = new GetAliasesRequest(Strings.EMPTY_ARRAY); + long t0 = System.nanoTime(); + GetAliasesResponse getAliasesResponse = client("1").admin().indices().getAliases(getAliasesRequest).actionGet(); + long t1 = (System.nanoTime() - t0) / 1000000; + logger.info("{} time(ms) = {}", getAliasesResponse.getAliases(), t1); + assertTrue(t1 >= 0); + } + + @Test + public void testMostRecentIndex() throws IOException { + String alias = "test"; + CreateIndexRequest indexRequest = new CreateIndexRequest("test20160101"); + client("1").admin().indices().create(indexRequest).actionGet(); + indexRequest = new CreateIndexRequest("test20160102"); + client("1").admin().indices().create(indexRequest).actionGet(); + indexRequest = new CreateIndexRequest("test20160103"); + client("1").admin().indices().create(indexRequest).actionGet(); + IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); + String[] indices = new String[]{"test20160101", "test20160102", "test20160103"}; + String[] aliases = new String[]{alias}; + IndicesAliasesRequest.AliasActions aliasAction = + new IndicesAliasesRequest.AliasActions(AliasAction.Type.ADD, indices, aliases); + indicesAliasesRequest.addAliasAction(aliasAction); + client("1").admin().indices().aliases(indicesAliasesRequest).actionGet(); + + 
GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client("1"),
+                GetAliasesAction.INSTANCE);
+        GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet();
+        Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
+        Set<String> result = new TreeSet<>(Collections.reverseOrder());
+        for (ObjectCursor<String> indexName : getAliasesResponse.getAliases().keys()) {
+            Matcher m = pattern.matcher(indexName.value);
+            if (m.matches()) {
+                if (alias.equals(m.group(1))) {
+                    result.add(indexName.value);
+                }
+            }
+        }
+        Iterator<String> it = result.iterator();
+        assertEquals("test20160103", it.next());
+        assertEquals("test20160102", it.next());
+        assertEquals("test20160101", it.next());
+        logger.info("result={}", result);
+    }
+
+}
diff --git a/src/integration-test/java/org/xbib/elasticsearch/NodeTestUtils.java b/src/integration-test/java/org/xbib/elasticsearch/NodeTestUtils.java
new file mode 100644
index 0000000..1815326
--- /dev/null
+++ b/src/integration-test/java/org/xbib/elasticsearch/NodeTestUtils.java
@@ -0,0 +1,204 @@
+package org.xbib.elasticsearch;
+
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.client.support.AbstractClient;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.MockNode;
+import org.elasticsearch.node.Node;
+import org.junit.After;
+import org.junit.Before;
+import org.xbib.elasticsearch.extras.client.NetworkUtils;
+
+import java.io.IOException;
+import java.nio.file.*;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+/**
+ * Helper base class for integration tests which starts and stops local Elasticsearch nodes.
+ */
+public class NodeTestUtils {
+
+    protected static final ESLogger logger = ESLoggerFactory.getLogger("test");
+    private static Random random = new Random();
+    private static char[] numbersAndLetters = ("0123456789abcdefghijklmnopqrstuvwxyz").toCharArray();
+    private Map<String, Node> nodes = new HashMap<>();
+    private Map<String, AbstractClient> clients = new HashMap<>();
+    private AtomicInteger counter = new AtomicInteger();
+    private String cluster;
+    private String host;
+    private int port;
+
+    private static void deleteFiles() throws IOException {
+        Path directory = Paths.get(System.getProperty("path.home") + "/data");
+        Files.walkFileTree(directory, new SimpleFileVisitor<Path>() {
+            @Override
+            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+                Files.delete(file);
+                return FileVisitResult.CONTINUE;
+            }
+
+            @Override
+            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
+                Files.delete(dir);
+                return FileVisitResult.CONTINUE;
+            }
+
+        });
+
+    }
+
+    @Before
+    public void startNodes() {
+        try {
+            logger.info("starting");
+
setClusterName(); + startNode("1"); + findNodeAddress(); + try { + ClusterHealthResponse healthResponse = client("1").execute(ClusterHealthAction.INSTANCE, + new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN).timeout(TimeValue.timeValueSeconds(30))).actionGet(); + if (healthResponse != null && healthResponse.isTimedOut()) { + throw new IOException("cluster state is " + healthResponse.getStatus().name() + + ", from here on, everything will fail!"); + } + } catch (ElasticsearchTimeoutException e) { + throw new IOException("timeout, cluster does not respond to health request, cowardly refusing to continue with operations"); + } + } catch (Throwable t) { + logger.error("startNodes failed", t); + } + } + + @After + public void stopNodes() { + try { + closeNodes(); + } catch (Exception e) { + logger.error("can not close nodes", e); + } finally { + try { + deleteFiles(); + logger.info("data files wiped"); + Thread.sleep(2000L); + } catch (IOException e) { + logger.error(e.getMessage(), e); + } catch (InterruptedException e) { + // ignore + } + } + } + + protected void setClusterName() { + this.cluster = "test-helper-cluster-" + + NetworkUtils.getLocalAddress().getHostName() + + "-" + System.getProperty("user.name") + + "-" + counter.incrementAndGet(); + } + + protected String getClusterName() { + return cluster; + } + + protected Settings getSettings() { + return settingsBuilder() + .put("host", host) + .put("port", port) + .put("cluster.name", cluster) + .put("path.home", getHome()) + .build(); + } + + protected Settings getNodeSettings() { + return settingsBuilder() + .put("cluster.name", cluster) + .put("cluster.routing.schedule", "50ms") + .put("cluster.routing.allocation.disk.threshold_enabled", false) + .put("discovery.zen.multicast.enabled", true) + .put("discovery.zen.multicast.ping_timeout", "5s") + .put("http.enabled", true) + .put("threadpool.bulk.size", Runtime.getRuntime().availableProcessors()) + .put("threadpool.bulk.queue_size", 16 * Runtime.getRuntime().availableProcessors()) // default is 50, too low + .put("index.number_of_replicas", 0) + .put("path.home", getHome()) + .build(); + } + + protected String getHome() { + return System.getProperty("path.home"); + } + + public void startNode(String id) throws IOException { + buildNode(id).start(); + } + + public AbstractClient client(String id) { + return clients.get(id); + } + + private void closeNodes() throws IOException { + logger.info("closing all clients"); + for (AbstractClient client : clients.values()) { + client.close(); + } + clients.clear(); + logger.info("closing all nodes"); + for (Node node : nodes.values()) { + if (node != null) { + node.close(); + } + } + nodes.clear(); + logger.info("all nodes closed"); + } + + protected void findNodeAddress() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().transport(true); + NodesInfoResponse response = client("1").admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + Object obj = response.iterator().next().getTransport().getAddress() + .publishAddress(); + if (obj instanceof InetSocketTransportAddress) { + InetSocketTransportAddress address = (InetSocketTransportAddress) obj; + host = address.address().getHostName(); + port = address.address().getPort(); + } + } + + private Node buildNode(String id) throws IOException { + Settings nodeSettings = settingsBuilder() + .put(getNodeSettings()) + .put("name", id) + .build(); + logger.info("settings={}", nodeSettings.getAsMap()); + Node node = new MockNode(nodeSettings); + AbstractClient client = 
(AbstractClient) node.client(); + nodes.put(id, node); + clients.put(id, client); + logger.info("clients={}", clients); + return node; + } + + protected String randomString(int len) { + final char[] buf = new char[len]; + final int n = numbersAndLetters.length - 1; + for (int i = 0; i < buf.length; i++) { + buf[i] = numbersAndLetters[random.nextInt(n)]; + } + return new String(buf); + } +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/SearchTest.java b/src/integration-test/java/org/xbib/elasticsearch/SearchTest.java new file mode 100644 index 0000000..8146b19 --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/SearchTest.java @@ -0,0 +1,66 @@ +package org.xbib.elasticsearch; + +import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.junit.Test; + +import static org.elasticsearch.client.Requests.indexRequest; +import static org.elasticsearch.client.Requests.refreshRequest; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * + */ +public class SearchTest extends NodeTestUtils { + + @Test + public void testSearch() throws Exception { + Client client = client("1"); + long t0 = System.currentTimeMillis(); + BulkRequestBuilder builder = new BulkRequestBuilder(client, BulkAction.INSTANCE); + for (int i = 0; i < 1000; i++) { + builder.add(indexRequest() + .index("pages").type("row") + .source(jsonBuilder() + .startObject() + .field("user1", "kimchy") + .field("user2", "kimchy") + .field("user3", "kimchy") + .field("user4", "kimchy") + .field("user5", "kimchy") + .field("user6", "kimchy") + .field("user7", "kimchy") + .field("user8", "kimchy") + .field("user9", "kimchy") + .field("rowcount", i) + .field("rs", 1234))); + } + client.bulk(builder.request()).actionGet(); + + client.admin().indices().refresh(refreshRequest()).actionGet(); + + long t1 = System.currentTimeMillis(); + logger.info("t1-t0 = {}", t1 - t0); + + for (int i = 0; i < 100; i++) { + t1 = System.currentTimeMillis(); + QueryBuilder queryStringBuilder = + QueryBuilders.queryStringQuery("rs:" + 1234); + SearchRequestBuilder requestBuilder = client.prepareSearch() + .setIndices("pages") + .setTypes("row") + .setQuery(queryStringBuilder) + .addSort("rowcount", SortOrder.DESC) + .setFrom(i * 10).setSize(10); + SearchResponse response = requestBuilder.execute().actionGet(); + long t2 = System.currentTimeMillis(); + logger.info("t2-t1 = {}", t2 - t1); + } + } +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/SimpleTest.java b/src/integration-test/java/org/xbib/elasticsearch/SimpleTest.java new file mode 100644 index 0000000..7a25dce --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/SimpleTest.java @@ -0,0 +1,59 @@ +package org.xbib.elasticsearch; + +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.common.settings.Settings; +import org.junit.Test; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import 
static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.junit.Assert.assertEquals; + +/** + * + */ +public class SimpleTest extends NodeTestUtils { + + protected Settings getNodeSettings() { + return settingsBuilder() + .put("path.home", System.getProperty("path.home")) + .put("index.analysis.analyzer.default.filter.0", "lowercase") + .put("index.analysis.analyzer.default.filter.1", "trim") + .put("index.analysis.analyzer.default.tokenizer", "keyword") + .build(); + } + + @Test + public void test() throws Exception { + try { + DeleteIndexRequestBuilder deleteIndexRequestBuilder = + new DeleteIndexRequestBuilder(client("1"), DeleteIndexAction.INSTANCE, "test"); + deleteIndexRequestBuilder.execute().actionGet(); + } catch (Exception e) { + // ignore + } + IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(client("1"), IndexAction.INSTANCE); + indexRequestBuilder + .setIndex("test") + .setType("test") + .setId("1") + .setSource(jsonBuilder().startObject().field("field", + "1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8").endObject()) + .setRefresh(true) + .execute() + .actionGet(); + String doc = client("1").prepareSearch("test") + .setTypes("test") + .setQuery(matchQuery("field", + "1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8")) + .execute() + .actionGet() + .getHits().getAt(0).getSourceAsString(); + + assertEquals(doc, + "{\"field\":\"1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8\"}"); + } +} \ No newline at end of file diff --git a/src/integration-test/java/org/xbib/elasticsearch/WildcardTest.java b/src/integration-test/java/org/xbib/elasticsearch/WildcardTest.java new file mode 100644 index 0000000..fd9ce16 --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/WildcardTest.java @@ -0,0 +1,70 @@ +package org.xbib.elasticsearch; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilder; +import org.junit.Test; + +import java.io.IOException; + +import static org.elasticsearch.client.Requests.indexRequest; +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; + +/** + * + */ +public class WildcardTest extends NodeTestUtils { + + protected Settings getNodeSettings() { + return settingsBuilder() + .put("cluster.name", getClusterName()) + .put("cluster.routing.allocation.disk.threshold_enabled", false) + .put("discovery.zen.multicast.enabled", false) + .put("http.enabled", false) + .put("path.home", System.getProperty("path.home")) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + } + + @Test + public void testWildcard() throws Exception { + index(client("1"), "1", "010"); + index(client("1"), "2", "0*0"); + // exact + validateCount(client("1"), queryStringQuery("010").defaultField("field"), 1); + validateCount(client("1"), queryStringQuery("0\\*0").defaultField("field"), 1); + // pattern + validateCount(client("1"), queryStringQuery("0*0").defaultField("field"), 1); // 2? + validateCount(client("1"), queryStringQuery("0?0").defaultField("field"), 1); // 2? + validateCount(client("1"), queryStringQuery("0**0").defaultField("field"), 1); // 2? 
+        validateCount(client("1"), queryStringQuery("0??0").defaultField("field"), 0);
+        validateCount(client("1"), queryStringQuery("*10").defaultField("field"), 1);
+        validateCount(client("1"), queryStringQuery("*1*").defaultField("field"), 1);
+        validateCount(client("1"), queryStringQuery("*\\*0").defaultField("field"), 0); // 1?
+        validateCount(client("1"), queryStringQuery("*\\**").defaultField("field"), 0); // 1?
+    }
+
+    private void index(Client client, String id, String fieldValue) throws IOException {
+        client.index(indexRequest()
+                .index("index").type("type").id(id)
+                .source(jsonBuilder().startObject().field("field", fieldValue).endObject())
+                .refresh(true)).actionGet();
+    }
+
+    private long count(Client client, QueryBuilder queryBuilder) {
+        return client.prepareSearch("index").setTypes("type")
+                .setQuery(queryBuilder)
+                .execute().actionGet().getHits().getTotalHits();
+    }
+
+    private void validateCount(Client client, QueryBuilder queryBuilder, long expectedHits) {
+        final long actualHits = count(client, queryBuilder);
+        if (actualHits != expectedHits) {
+            throw new RuntimeException("actualHits=" + actualHits + ", expectedHits=" + expectedHits);
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/NetworkTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/NetworkTest.java
new file mode 100644
index 0000000..b9e7a87
--- /dev/null
+++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/NetworkTest.java
@@ -0,0 +1,44 @@
+package org.xbib.elasticsearch.extras.client;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.junit.Test;
+
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.util.Collections;
+import java.util.Enumeration;
+
+/**
+ * Lists the available network interfaces and the properties of their addresses.
+ */
+public class NetworkTest {
+
+    private static final Logger logger = LogManager.getLogger(NetworkTest.class);
+
+    @Test
+    public void testNetwork() throws Exception {
+        Enumeration<NetworkInterface> nets = NetworkInterface.getNetworkInterfaces();
+        for (NetworkInterface netint : Collections.list(nets)) {
+            System.out.println("checking network interface = " + netint.getName());
+            Enumeration<InetAddress> inetAddresses = netint.getInetAddresses();
+            for (InetAddress addr : Collections.list(inetAddresses)) {
+                logger.info("found address = " + addr.getHostAddress()
+                        + " name = " + addr.getHostName()
+                        + " canonicalhostname = " + addr.getCanonicalHostName()
+                        + " loopback = " + addr.isLoopbackAddress()
+                        + " sitelocal = " + addr.isSiteLocalAddress()
+                        + " linklocal = " + addr.isLinkLocalAddress()
+                        + " anylocal = " + addr.isAnyLocalAddress()
+                        + " multicast = " + addr.isMulticastAddress()
+                        + " mcglobal = " + addr.isMCGlobal()
+                        + " mclinklocal = " + addr.isMCLinkLocal()
+                        + " mcnodelocal = " + addr.isMCNodeLocal()
+                        + " mcorglocal = " + addr.isMCOrgLocal()
+                        + " mcsitelocal = " + addr.isMCSiteLocal()
+                        + " reachable = " + addr.isReachable(1000));
+            }
+        }
+
+    }
+}
diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClientTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClientTest.java
new file mode 100644
index 0000000..c7f7421
--- /dev/null
+++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClientTest.java
@@ -0,0 +1,208 @@
+package org.xbib.elasticsearch.extras.client.node;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
+import 
org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.junit.Before; +import org.junit.Test; +import org.xbib.elasticsearch.NodeTestUtils; +import org.xbib.elasticsearch.extras.client.ClientBuilder; +import org.xbib.elasticsearch.extras.client.SimpleBulkControl; +import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +/** + * + */ +public class BulkNodeClientTest extends NodeTestUtils { + + private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeClientTest.class.getSimpleName()); + + private static final Long MAX_ACTIONS = 1000L; + + private static final Long NUM_ACTIONS = 1234L; + + @Before + public void startNodes() { + try { + super.startNodes(); + startNode("2"); + } catch (Throwable t) { + logger.error("startNodes failed", t); + } + } + + @Test + public void testNewIndexNodeClient() throws Exception { + final BulkNodeClient client = ClientBuilder.builder() + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkNodeClient(client("1")); + client.newIndex("test"); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + + @Test + public void testMappingNodeClient() throws Exception { + final BulkNodeClient client = ClientBuilder.builder() + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkNodeClient(client("1")); + XContentBuilder builder = jsonBuilder() + .startObject() + .startObject("test") + .startObject("properties") + .startObject("location") + .field("type", "geo_point") + .endObject() + .endObject() + .endObject() + .endObject(); + client.mapping("test", builder.string()); + client.newIndex("test"); + GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test"); + GetMappingsResponse getMappingsResponse = + client.client().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet(); + logger.info("mappings={}", getMappingsResponse.getMappings()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + + @Test + public void testSingleDocNodeClient() { + final BulkNodeClient client = ClientBuilder.builder() + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(30)) + .setMetric(new SimpleBulkMetric()) + .setControl(new 
SimpleBulkControl()) + .toBulkNodeClient(client("1")); + try { + client.newIndex("test"); + client.index("test", "test", "1", "{ \"name\" : \"Hello World\"}"); // single doc ingest + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + } catch (InterruptedException e) { + // ignore + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } catch (ExecutionException e) { + logger.error(e.getMessage(), e); + } finally { + assertEquals(1, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + } + + @Test + public void testRandomDocsNodeClient() throws Exception { + long numactions = NUM_ACTIONS; + final BulkNodeClient client = ClientBuilder.builder() + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkNodeClient(client("1")); + try { + client.newIndex("test"); + for (int i = 0; i < NUM_ACTIONS; i++) { + client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + assertEquals(numactions, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + } + + @Test + public void testThreadedRandomDocsNodeClient() throws Exception { + int maxthreads = Runtime.getRuntime().availableProcessors(); + Long maxactions = MAX_ACTIONS; + final Long maxloop = NUM_ACTIONS; + logger.info("NodeClient max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop); + final BulkNodeClient client = ClientBuilder.builder() + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))// disable auto flush for this test + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkNodeClient(client("1")); + try { + client.newIndex("test") + .startBulk("test", -1, 1000); + ThreadPoolExecutor pool = EsExecutors.newFixed("bulk-nodeclient-test", maxthreads, 30, + EsExecutors.daemonThreadFactory("bulk-nodeclient-test")); + final CountDownLatch latch = new CountDownLatch(maxthreads); + for (int i = 0; i < maxthreads; i++) { + pool.execute(new Runnable() { + public void run() { + for (int i = 0; i < maxloop; i++) { + client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + latch.countDown(); + } + }); + } + logger.info("waiting for max 30 seconds..."); + latch.await(30, TimeUnit.SECONDS); + logger.info("flush..."); + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + logger.info("got all responses, thread pool shutdown..."); + pool.shutdown(); + logger.info("pool is shut down"); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.stopBulk("test"); + assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder 
= new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setQuery(QueryBuilders.matchAllQuery()).setSize(0); + assertEquals(maxthreads * maxloop, + searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); + client.shutdown(); + } + } + +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClusterBlockTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClusterBlockTest.java new file mode 100644 index 0000000..58e2b8e --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClusterBlockTest.java @@ -0,0 +1,49 @@ +package org.xbib.elasticsearch.extras.client.node; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.junit.Before; +import org.junit.Test; +import org.xbib.elasticsearch.NodeTestUtils; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * + */ +public class BulkNodeClusterBlockTest extends NodeTestUtils { + + @Before + public void startNodes() { + try { + setClusterName(); + startNode("1"); + findNodeAddress(); + // do not wait for green health state + logger.info("ready"); + } catch (Throwable t) { + logger.error("startNodes failed", t); + } + } + + protected Settings getNodeSettings() { + return Settings.settingsBuilder() + .put(super.getNodeSettings()) + .put("discovery.zen.minimum_master_nodes", 2) // block until we have two nodes + .build(); + } + + @Test(expected = ClusterBlockException.class) + public void testClusterBlock() throws Exception { + BulkRequestBuilder brb = client("1").prepareBulk(); + XContentBuilder builder = jsonBuilder().startObject().field("field1", "value1").endObject(); + String jsonString = builder.string(); + IndexRequestBuilder irb = client("1").prepareIndex("test", "test", "1").setSource(jsonString); + brb.add(irb); + brb.execute().actionGet(); + } + +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeDuplicateIDTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeDuplicateIDTest.java new file mode 100644 index 0000000..98c6a70 --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeDuplicateIDTest.java @@ -0,0 +1,60 @@ +package org.xbib.elasticsearch.extras.client.node; + +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.unit.TimeValue; +import org.junit.Test; +import org.xbib.elasticsearch.NodeTestUtils; +import org.xbib.elasticsearch.extras.client.ClientBuilder; +import org.xbib.elasticsearch.extras.client.SimpleBulkControl; +import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.junit.Assert.*; + +public class BulkNodeDuplicateIDTest extends NodeTestUtils { + + private final static ESLogger logger = ESLoggerFactory.getLogger(BulkNodeDuplicateIDTest.class.getSimpleName()); + + private final static Long MAX_ACTIONS = 1000L; + + private final 
static Long NUM_ACTIONS = 12345L; + + @Test + public void testDuplicateDocIDs() throws Exception { + long numactions = NUM_ACTIONS; + final BulkNodeClient client = ClientBuilder.builder() + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkNodeClient(client("1")); + try { + client.newIndex("test"); + for (int i = 0; i < NUM_ACTIONS; i++) { + client.index("test", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setIndices("test") + .setTypes("test") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("hits = {}", hits); + assertTrue(hits < NUM_ACTIONS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.shutdown(); + assertEquals(numactions, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeIndexAliasTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeIndexAliasTest.java new file mode 100644 index 0000000..d4b19b0 --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeIndexAliasTest.java @@ -0,0 +1,77 @@ +package org.xbib.elasticsearch.extras.client.node; + +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.junit.Test; +import org.xbib.elasticsearch.NodeTestUtils; +import org.xbib.elasticsearch.extras.client.ClientBuilder; +import org.xbib.elasticsearch.extras.client.IndexAliasAdder; +import org.xbib.elasticsearch.extras.client.SimpleBulkControl; +import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertFalse; + +/** + * + */ +public class BulkNodeIndexAliasTest extends NodeTestUtils { + + private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeIndexAliasTest.class.getSimpleName()); + + @Test + public void testIndexAlias() throws Exception { + final BulkNodeClient client = ClientBuilder.builder() + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkNodeClient(client("1")); + try { + client.newIndex("test1234"); + for (int i = 0; i < 1; i++) { + client.index("test1234", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flushIngest(); + client.refreshIndex("test1234"); + + List simpleAliases = Arrays.asList("a", "b", "c"); + client.switchAliases("test", "test1234", simpleAliases); + + client.newIndex("test5678"); + for (int i = 0; i < 1; i++) { + client.index("test5678", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flushIngest(); + client.refreshIndex("test5678"); + + 
simpleAliases = Arrays.asList("d", "e", "f"); + client.switchAliases("test", "test5678", simpleAliases, new IndexAliasAdder() { + @Override + public void addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias) { + builder.addAlias(index, alias, QueryBuilders.termQuery("my_key", alias)); + } + }); + Map aliases = client.getIndexFilters("test5678"); + logger.info("aliases of index test5678 = {}", aliases); + + aliases = client.getAliasFilters("test"); + logger.info("aliases of alias test = {}", aliases); + + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.waitForResponses(TimeValue.timeValueSeconds(30)); + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeReplicaTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeReplicaTest.java new file mode 100644 index 0000000..93141e1 --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeReplicaTest.java @@ -0,0 +1,105 @@ +package org.xbib.elasticsearch.extras.client.node; + +import org.elasticsearch.action.admin.indices.stats.*; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.indexing.IndexingStats; +import org.junit.Test; +import org.xbib.elasticsearch.NodeTestUtils; +import org.xbib.elasticsearch.extras.client.ClientBuilder; +import org.xbib.elasticsearch.extras.client.SimpleBulkControl; +import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; + +import java.util.Map; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +public class BulkNodeReplicaTest extends NodeTestUtils { + + private final static ESLogger logger = ESLoggerFactory.getLogger(BulkNodeReplicaTest.class.getSimpleName()); + + @Test + public void testReplicaLevel() throws Exception { + + // we need nodes for replica levels + startNode("2"); + startNode("3"); + startNode("4"); + + Settings settingsTest1 = Settings.settingsBuilder() + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 3) + .build(); + + Settings settingsTest2 = Settings.settingsBuilder() + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 1) + .build(); + + final BulkNodeClient client = ClientBuilder.builder() + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkNodeClient(client("1")); + + try { + client.newIndex("test1", settingsTest1, null) + .newIndex("test2", settingsTest2, null); + client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30)); + for (int i = 0; i < 1234; i++) { + client.index("test1", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + for (int i = 0; i < 1234; i++) { + client.index("test2", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(60)); + } catch (NoNodeAvailableException e) { + 
logger.warn("skipping, no node available"); + } finally { + logger.info("refreshing"); + client.refreshIndex("test1"); + client.refreshIndex("test2"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setIndices("test1", "test2") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("query total hits={}", hits); + assertEquals(2468, hits); + IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(), IndicesStatsAction.INSTANCE) + .all(); + IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet(); + for (Map.Entry m : response.getIndices().entrySet()) { + IndexStats indexStats = m.getValue(); + CommonStats commonStats = indexStats.getTotal(); + IndexingStats indexingStats = commonStats.getIndexing(); + IndexingStats.Stats stats = indexingStats.getTotal(); + logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount()); + for (Map.Entry me : indexStats.getIndexShards().entrySet()) { + IndexShardStats indexShardStats = me.getValue(); + CommonStats commonShardStats = indexShardStats.getTotal(); + logger.info("shard {} count = {}", me.getKey(), + commonShardStats.getIndexing().getTotal().getIndexCount()); + } + } + try { + client.deleteIndex("test1") + .deleteIndex("test2"); + } catch (Exception e) { + logger.error("delete index failed, ignored. Reason:", e); + } + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } + +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeUpdateReplicaLevelTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeUpdateReplicaLevelTest.java new file mode 100644 index 0000000..b1c88fe --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeUpdateReplicaLevelTest.java @@ -0,0 +1,67 @@ +package org.xbib.elasticsearch.extras.client.node; + +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.junit.Test; +import org.xbib.elasticsearch.NodeTestUtils; +import org.xbib.elasticsearch.extras.client.ClientBuilder; +import org.xbib.elasticsearch.extras.client.SimpleBulkControl; +import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +/** + * + */ +public class BulkNodeUpdateReplicaLevelTest extends NodeTestUtils { + + private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeUpdateReplicaLevelTest.class.getSimpleName()); + + @Test + public void testUpdateReplicaLevel() throws Exception { + + int numberOfShards = 2; + int replicaLevel = 3; + + // we need 3 nodes for replica level 3 + startNode("2"); + startNode("3"); + + int shardsAfterReplica; + + Settings settings = Settings.settingsBuilder() + .put("index.number_of_shards", numberOfShards) + .put("index.number_of_replicas", 0) + .build(); + + final BulkNodeClient client = ClientBuilder.builder() + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkNodeClient(client("1")); + + try { + client.newIndex("replicatest", settings, null); 
+ client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30)); + for (int i = 0; i < 12345; i++) { + client.index("replicatest", "replicatest", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + shardsAfterReplica = client.updateReplicaLevel("replicatest", replicaLevel); + assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1)); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } + +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/package-info.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/package-info.java new file mode 100644 index 0000000..873ebae --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/package-info.java @@ -0,0 +1,4 @@ +/** + * Classes for testing Elasticsearch node client extras. + */ +package org.xbib.elasticsearch.extras.client.node; diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/package-info.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/package-info.java new file mode 100644 index 0000000..2bfc45c --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/package-info.java @@ -0,0 +1,4 @@ +/** + * Classes to test Elasticsearch clients. + */ +package org.xbib.elasticsearch.extras.client; diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClientTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClientTest.java new file mode 100644 index 0000000..0a35742 --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClientTest.java @@ -0,0 +1,201 @@ +package org.xbib.elasticsearch.extras.client.transport; + +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.index.query.QueryBuilders; +import org.junit.Before; +import org.junit.Test; +import org.xbib.elasticsearch.NodeTestUtils; +import org.xbib.elasticsearch.extras.client.ClientBuilder; +import org.xbib.elasticsearch.extras.client.SimpleBulkControl; +import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; + +import java.io.IOException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +/** + * + */ +public class BulkTransportClientTest extends NodeTestUtils { + + private static final ESLogger logger = ESLoggerFactory.getLogger(BulkTransportClientTest.class.getSimpleName()); + + private static final Long MAX_ACTIONS = 1000L; + + private static final Long NUM_ACTIONS = 1234L; + + @Before + public void startNodes() { + try { + super.startNodes(); + startNode("2"); + 
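// a second node lets the replicated index created in testThreadedRandomDocsBulkClient allocate its replica shards +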
} catch (Throwable t) { + logger.error("startNodes failed", t); + } + } + + @Test + public void testBulkClient() throws IOException { + final BulkTransportClient client = ClientBuilder.builder() + .put(getSettings()) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkTransportClient(); + client.newIndex("test"); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + try { + client.deleteIndex("test") + .newIndex("test") + .deleteIndex("test"); + } catch (NoNodeAvailableException e) { + logger.error("no node available"); + } finally { + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + } + + @Test + public void testSingleDocBulkClient() throws IOException { + final BulkTransportClient client = ClientBuilder.builder() + .put(getSettings()) + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkTransportClient(); + try { + client.newIndex("test"); + client.index("test", "test", "1", "{ \"name\" : \"Hello World\"}"); // single doc ingest + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + } catch (InterruptedException e) { + // ignore + } catch (ExecutionException e) { + logger.error(e.getMessage(), e); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + assertEquals(1, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + } + + @Test + public void testRandomDocsBulkClient() throws IOException { + long numactions = NUM_ACTIONS; + final BulkTransportClient client = ClientBuilder.builder() + .put(getSettings()) + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkTransportClient(); + try { + client.newIndex("test"); + for (int i = 0; i < NUM_ACTIONS; i++) { + client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + } catch (InterruptedException e) { + // ignore + } catch (ExecutionException e) { + logger.error(e.getMessage(), e); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + assertEquals(numactions, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + } + + @Test + public void testThreadedRandomDocsBulkClient() throws Exception { + int maxthreads = Runtime.getRuntime().availableProcessors(); + long maxactions = MAX_ACTIONS; + final long maxloop = NUM_ACTIONS; + + Settings settingsForIndex = Settings.settingsBuilder() + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 1) + .build(); + + final BulkTransportClient client = ClientBuilder.builder() + .put(getSettings()) + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions) + .put(ClientBuilder.FLUSH_INTERVAL, 
TimeValue.timeValueSeconds(60)) // = disable autoflush for this test + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkTransportClient(); + try { + client.newIndex("test", settingsForIndex, null) + .startBulk("test", -1, 1000); + ThreadPoolExecutor pool = + EsExecutors.newFixed("bulkclient-test", maxthreads, 30, EsExecutors.daemonThreadFactory("bulkclient-test")); + final CountDownLatch latch = new CountDownLatch(maxthreads); + for (int i = 0; i < maxthreads; i++) { + pool.execute(() -> { + for (int i1 = 0; i1 < maxloop; i1++) { + client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + latch.countDown(); + }); + } + logger.info("waiting for max 30 seconds..."); + latch.await(30, TimeUnit.SECONDS); + logger.info("client flush ..."); + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + logger.info("thread pool to be shut down ..."); + pool.shutdown(); + logger.info("poot shut down"); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.stopBulk("test"); + assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + // to avoid NPE at org.elasticsearch.action.search.SearchRequest.writeTo(SearchRequest.java:580) + .setIndices("_all") + .setQuery(QueryBuilders.matchAllQuery()) + .setSize(0); + assertEquals(maxthreads * maxloop, + searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); + client.shutdown(); + } + } + +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportDuplicateIDTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportDuplicateIDTest.java new file mode 100644 index 0000000..00a4066 --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportDuplicateIDTest.java @@ -0,0 +1,61 @@ +package org.xbib.elasticsearch.extras.client.transport; + +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.unit.TimeValue; +import org.junit.Test; +import org.xbib.elasticsearch.NodeTestUtils; +import org.xbib.elasticsearch.extras.client.ClientBuilder; +import org.xbib.elasticsearch.extras.client.SimpleBulkControl; +import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.junit.Assert.*; + +public class BulkTransportDuplicateIDTest extends NodeTestUtils { + + private final static ESLogger logger = ESLoggerFactory.getLogger(BulkTransportDuplicateIDTest.class.getSimpleName()); + + private final static Long MAX_ACTIONS = 1000L; + + private final static Long NUM_ACTIONS = 12345L; + + @Test + public void testDuplicateDocIDs() throws Exception { + long numactions = NUM_ACTIONS; + final BulkTransportClient client = ClientBuilder.builder() + .put(getSettings()) + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .setMetric(new SimpleBulkMetric()) + 
.setControl(new SimpleBulkControl()) + .toBulkTransportClient(); + try { + client.newIndex("test"); + for (int i = 0; i < NUM_ACTIONS; i++) { + client.index("test", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setIndices("test") + .setTypes("test") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("hits = {}", hits); + assertTrue(hits < NUM_ACTIONS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.shutdown(); + assertEquals(numactions, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportReplicaTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportReplicaTest.java new file mode 100644 index 0000000..119688e --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportReplicaTest.java @@ -0,0 +1,108 @@ +package org.xbib.elasticsearch.extras.client.transport; + +import org.elasticsearch.action.admin.indices.stats.*; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.indexing.IndexingStats; +import org.junit.Test; +import org.xbib.elasticsearch.NodeTestUtils; +import org.xbib.elasticsearch.extras.client.ClientBuilder; +import org.xbib.elasticsearch.extras.client.SimpleBulkControl; +import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; + +import java.util.Map; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +/** + * + */ +public class BulkTransportReplicaTest extends NodeTestUtils { + + private static final ESLogger logger = ESLoggerFactory.getLogger(BulkTransportReplicaTest.class.getSimpleName()); + + @Test + public void testReplicaLevel() throws Exception { + + // we need nodes for replica levels + startNode("2"); + startNode("3"); + startNode("4"); + + Settings settingsTest1 = Settings.settingsBuilder() + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 3) + .build(); + + Settings settingsTest2 = Settings.settingsBuilder() + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 1) + .build(); + + final BulkTransportClient client = ClientBuilder.builder() + .put(getSettings()) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkTransportClient(); + try { + client.newIndex("test1", settingsTest1, null) + .newIndex("test2", settingsTest2, null); + client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30)); + for (int i = 0; i < 1234; i++) { + client.index("test1", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + 
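// 1234 documents go into each index, so the match-all query over test1 and test2 below expects 2468 hits +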
for (int i = 0; i < 1234; i++) { + client.index("test2", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(60)); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + logger.info("refreshing"); + client.refreshIndex("test1"); + client.refreshIndex("test2"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setIndices("test1", "test2") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("query total hits={}", hits); + assertEquals(2468, hits); + IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(), + IndicesStatsAction.INSTANCE).all(); + IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet(); + for (Map.Entry m : response.getIndices().entrySet()) { + IndexStats indexStats = m.getValue(); + CommonStats commonStats = indexStats.getTotal(); + IndexingStats indexingStats = commonStats.getIndexing(); + IndexingStats.Stats stats = indexingStats.getTotal(); + logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount()); + for (Map.Entry me : indexStats.getIndexShards().entrySet()) { + IndexShardStats indexShardStats = me.getValue(); + CommonStats commonShardStats = indexShardStats.getTotal(); + logger.info("shard {} count = {}", me.getKey(), + commonShardStats.getIndexing().getTotal().getIndexCount()); + } + } + try { + client.deleteIndex("test1") + .deleteIndex("test2"); + } catch (Exception e) { + logger.error("delete index failed, ignored. Reason:", e); + } + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } + +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportUpdateReplicaLevelTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportUpdateReplicaLevelTest.java new file mode 100644 index 0000000..8ed2c4a --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportUpdateReplicaLevelTest.java @@ -0,0 +1,69 @@ +package org.xbib.elasticsearch.extras.client.transport; + +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.junit.Test; +import org.xbib.elasticsearch.NodeTestUtils; +import org.xbib.elasticsearch.extras.client.ClientBuilder; +import org.xbib.elasticsearch.extras.client.SimpleBulkControl; +import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +/** + * + */ +public class BulkTransportUpdateReplicaLevelTest extends NodeTestUtils { + + private static final ESLogger logger = + ESLoggerFactory.getLogger(BulkTransportUpdateReplicaLevelTest.class.getSimpleName()); + + @Test + public void testUpdateReplicaLevel() throws Exception { + + int numberOfShards = 2; + int replicaLevel = 3; + + // we need 3 nodes for replica level 3 + startNode("2"); + startNode("3"); + + int shardsAfterReplica; + + Settings settings = Settings.settingsBuilder() + .put("index.number_of_shards", 
numberOfShards) + .put("index.number_of_replicas", 0) + .build(); + + final BulkTransportClient client = ClientBuilder.builder() + .put(getSettings()) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .toBulkTransportClient(); + + try { + client.newIndex("replicatest", settings, null); + client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30)); + for (int i = 0; i < 12345; i++) { + client.index("replicatest", "replicatest", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + shardsAfterReplica = client.updateReplicaLevel("replicatest", replicaLevel); + assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1)); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } + +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/package-info.java b/src/integration-test/java/org/xbib/elasticsearch/package-info.java new file mode 100644 index 0000000..2958ce1 --- /dev/null +++ b/src/integration-test/java/org/xbib/elasticsearch/package-info.java @@ -0,0 +1,4 @@ +/** + * Test classes for testing Elasticsearch. + */ +package org.xbib.elasticsearch; \ No newline at end of file diff --git a/src/integration-test/java/suites/BulkNodeTestSuite.java b/src/integration-test/java/suites/BulkNodeTestSuite.java new file mode 100644 index 0000000..caac820 --- /dev/null +++ b/src/integration-test/java/suites/BulkNodeTestSuite.java @@ -0,0 +1,23 @@ +package suites; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; +import org.xbib.elasticsearch.extras.client.node.BulkNodeClientTest; +import org.xbib.elasticsearch.extras.client.node.BulkNodeDuplicateIDTest; +import org.xbib.elasticsearch.extras.client.node.BulkNodeIndexAliasTest; +import org.xbib.elasticsearch.extras.client.node.BulkNodeReplicaTest; +import org.xbib.elasticsearch.extras.client.node.BulkNodeUpdateReplicaLevelTest; + +/** + * + */ +@RunWith(ListenerSuite.class) +@Suite.SuiteClasses({ + BulkNodeClientTest.class, + BulkNodeDuplicateIDTest.class, + BulkNodeReplicaTest.class, + BulkNodeUpdateReplicaLevelTest.class, + BulkNodeIndexAliasTest.class +}) +public class BulkNodeTestSuite { +} diff --git a/src/integration-test/java/suites/BulkTransportTestSuite.java b/src/integration-test/java/suites/BulkTransportTestSuite.java new file mode 100644 index 0000000..f429dfc --- /dev/null +++ b/src/integration-test/java/suites/BulkTransportTestSuite.java @@ -0,0 +1,22 @@ +package suites; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; +import org.xbib.elasticsearch.extras.client.transport.BulkTransportClientTest; +import org.xbib.elasticsearch.extras.client.transport.BulkTransportDuplicateIDTest; +import org.xbib.elasticsearch.extras.client.transport.BulkTransportReplicaTest; +import org.xbib.elasticsearch.extras.client.transport.BulkTransportUpdateReplicaLevelTest; + +/** + * + */ +@RunWith(ListenerSuite.class) +@Suite.SuiteClasses({ + BulkTransportClientTest.class, + BulkTransportDuplicateIDTest.class, + BulkTransportReplicaTest.class, + BulkTransportUpdateReplicaLevelTest.class +}) +public class BulkTransportTestSuite { + +} diff --git a/src/integration-test/java/suites/ListenerSuite.java b/src/integration-test/java/suites/ListenerSuite.java new file mode 100644 index 0000000..c02d371 --- 
/dev/null +++ b/src/integration-test/java/suites/ListenerSuite.java @@ -0,0 +1,23 @@ +package suites; + +import org.junit.runner.Runner; +import org.junit.runner.notification.RunNotifier; +import org.junit.runners.Suite; +import org.junit.runners.model.InitializationError; +import org.junit.runners.model.RunnerBuilder; + +public class ListenerSuite extends Suite { + + private final TestListener listener = new TestListener(); + + public ListenerSuite(Class<?> klass, RunnerBuilder builder) throws InitializationError { + super(klass, builder); + } + + @Override + protected void runChild(Runner runner, RunNotifier notifier) { + notifier.addListener(listener); + runner.run(notifier); + notifier.removeListener(listener); + } +} diff --git a/src/integration-test/java/suites/MiscTestSuite.java b/src/integration-test/java/suites/MiscTestSuite.java new file mode 100644 index 0000000..ea23630 --- /dev/null +++ b/src/integration-test/java/suites/MiscTestSuite.java @@ -0,0 +1,21 @@ +package suites; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; +import org.xbib.elasticsearch.AliasTest; +import org.xbib.elasticsearch.SearchTest; +import org.xbib.elasticsearch.SimpleTest; +import org.xbib.elasticsearch.WildcardTest; + +/** + * + */ +@RunWith(ListenerSuite.class) +@Suite.SuiteClasses({ + SimpleTest.class, + AliasTest.class, + SearchTest.class, + WildcardTest.class +}) +public class MiscTestSuite { +} diff --git a/src/integration-test/java/suites/TestListener.java b/src/integration-test/java/suites/TestListener.java new file mode 100644 index 0000000..7e24527 --- /dev/null +++ b/src/integration-test/java/suites/TestListener.java @@ -0,0 +1,44 @@ +package suites; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.junit.runner.Description; +import org.junit.runner.Result; +import org.junit.runner.notification.Failure; +import org.junit.runner.notification.RunListener; + +/** + * + */ +public class TestListener extends RunListener { + + private static final Logger logger = LogManager.getLogger("test.listener"); + + public void testRunStarted(Description description) throws java.lang.Exception { + logger.info("number of tests to execute: {}", description.testCount()); + } + + public void testRunFinished(Result result) throws java.lang.Exception { + logger.info("number of tests executed: {}", result.getRunCount()); + } + + public void testStarted(Description description) throws java.lang.Exception { + logger.info("starting execution of {} {}", + description.getClassName(), description.getMethodName()); + } + + public void testFinished(Description description) throws java.lang.Exception { + logger.info("finished execution of {} {}", + description.getClassName(), description.getMethodName()); + } + + public void testFailure(Failure failure) throws java.lang.Exception { + logger.info("failed execution of test: {}", + failure.getMessage()); + } + + public void testIgnored(Description description) throws java.lang.Exception { + logger.info("execution of test ignored: {} {}", + description.getClassName(), description.getMethodName()); + } +} diff --git a/src/integration-test/resources/log4j2.xml b/src/integration-test/resources/log4j2.xml new file mode 100644 index 0000000..f71aced --- /dev/null +++ b/src/integration-test/resources/log4j2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/integration-test/resources/org/xbib/elasticsearch/extras/client/settings.json
b/src/integration-test/resources/org/xbib/elasticsearch/extras/client/settings.json new file mode 100644 index 0000000..86f5118 --- /dev/null +++ b/src/integration-test/resources/org/xbib/elasticsearch/extras/client/settings.json @@ -0,0 +1,3 @@ +{ + "index.analysis.analyzer.default.type" : "keyword" +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/AbstractClient.java b/src/main/java/org/xbib/elasticsearch/extras/client/AbstractClient.java new file mode 100644 index 0000000..bac6522 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/AbstractClient.java @@ -0,0 +1,495 @@ +package org.xbib.elasticsearch.extras.client; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.flush.FlushAction; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; +import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; +import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; 
+import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortOrder; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * + */ +public abstract class AbstractClient { + + private static final ESLogger logger = ESLoggerFactory.getLogger(AbstractClient.class.getName()); + + private Settings.Builder settingsBuilder; + + private Settings settings; + + private Map mappings = new HashMap<>(); + + public abstract ElasticsearchClient client(); + + protected abstract void createClient(Settings settings) throws IOException; + + public abstract void shutdown(); + + public Settings.Builder getSettingsBuilder() { + return settingsBuilder(); + } + + public void resetSettings() { + settingsBuilder = Settings.settingsBuilder(); + settings = null; + mappings = new HashMap<>(); + } + + public void setSettings(Settings settings) { + this.settings = settings; + } + + public void setting(String key, String value) { + if (settingsBuilder == null) { + settingsBuilder = Settings.settingsBuilder(); + } + settingsBuilder.put(key, value); + } + + public void setting(String key, Boolean value) { + if (settingsBuilder == null) { + settingsBuilder = Settings.settingsBuilder(); + } + settingsBuilder.put(key, value); + } + + public void setting(String key, Integer value) { + if (settingsBuilder == null) { + settingsBuilder = Settings.settingsBuilder(); + } + settingsBuilder.put(key, value); + } + + public void setting(InputStream in) throws IOException { + settingsBuilder = Settings.settingsBuilder().loadFromStream(".json", in); + } + + public Settings.Builder settingsBuilder() { + return settingsBuilder != null ? settingsBuilder : Settings.settingsBuilder(); + } + + public Settings settings() { + if (settings != null) { + return settings; + } + if (settingsBuilder == null) { + settingsBuilder = Settings.settingsBuilder(); + } + return settingsBuilder.build(); + } + + public void mapping(String type, String mapping) throws IOException { + mappings.put(type, mapping); + } + + public void mapping(String type, InputStream in) throws IOException { + if (type == null) { + return; + } + StringWriter sw = new StringWriter(); + Streams.copy(new InputStreamReader(in), sw); + mappings.put(type, sw.toString()); + } + + public Map mappings() { + return mappings.isEmpty() ? 
null : mappings; + } + + + public void updateIndexSetting(String index, String key, Object value) throws IOException { + if (client() == null) { + return; + } + if (index == null) { + throw new IOException("no index name given"); + } + if (key == null) { + throw new IOException("no key given"); + } + if (value == null) { + throw new IOException("no value given"); + } + Settings.Builder settingsBuilder = Settings.settingsBuilder(); + settingsBuilder.put(key, value.toString()); + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(index) + .settings(settingsBuilder); + client().execute(UpdateSettingsAction.INSTANCE, updateSettingsRequest).actionGet(); + } + + public void waitForRecovery() throws IOException { + if (client() == null) { + return; + } + client().execute(RecoveryAction.INSTANCE, new RecoveryRequest()).actionGet(); + } + + public int waitForRecovery(String index) throws IOException { + if (client() == null) { + return -1; + } + if (index == null) { + throw new IOException("unable to waitfor recovery, index not set"); + } + RecoveryResponse response = client().execute(RecoveryAction.INSTANCE, new RecoveryRequest(index)).actionGet(); + int shards = response.getTotalShards(); + client().execute(ClusterHealthAction.INSTANCE, new ClusterHealthRequest(index) + .waitForActiveShards(shards)).actionGet(); + return shards; + } + + public void waitForCluster(String statusString, TimeValue timeout) + throws IOException, ElasticsearchTimeoutException { + if (client() == null) { + return; + } + ClusterHealthStatus status = ClusterHealthStatus.fromString(statusString); + ClusterHealthResponse healthResponse = + client().execute(ClusterHealthAction.INSTANCE, new ClusterHealthRequest() + .waitForStatus(status).timeout(timeout)).actionGet(); + if (healthResponse != null && healthResponse.isTimedOut()) { + throw new IOException("cluster state is " + healthResponse.getStatus().name() + + " and not " + status.name() + + ", from here on, everything will fail!"); + } + } + + public String fetchClusterName() { + if (client() == null) { + return null; + } + try { + ClusterStateRequestBuilder clusterStateRequestBuilder = + new ClusterStateRequestBuilder(client(), ClusterStateAction.INSTANCE).all(); + ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet(); + String name = clusterStateResponse.getClusterName().value(); + int nodeCount = clusterStateResponse.getState().getNodes().size(); + return name + " (" + nodeCount + " nodes connected)"; + } catch (ElasticsearchTimeoutException e) { + return "TIMEOUT"; + } catch (NoNodeAvailableException e) { + return "DISCONNECTED"; + } catch (Throwable t) { + return "[" + t.getMessage() + "]"; + } + } + + public String healthColor() { + if (client() == null) { + return null; + } + try { + ClusterHealthResponse healthResponse = + client().execute(ClusterHealthAction.INSTANCE, + new ClusterHealthRequest().timeout(TimeValue.timeValueSeconds(30))).actionGet(); + ClusterHealthStatus status = healthResponse.getStatus(); + return status.name(); + } catch (ElasticsearchTimeoutException e) { + return "TIMEOUT"; + } catch (NoNodeAvailableException e) { + return "DISCONNECTED"; + } catch (Throwable t) { + return "[" + t.getMessage() + "]"; + } + } + + public int updateReplicaLevel(String index, int level) throws IOException { + waitForCluster("YELLOW", TimeValue.timeValueSeconds(30)); + updateIndexSetting(index, "number_of_replicas", level); + return waitForRecovery(index); + } + + public void flushIndex(String index) { + 
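// like the other admin helpers in this class, this is a defensive no-op until a client has been created +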
if (client() == null) { + return; + } + if (index != null) { + client().execute(FlushAction.INSTANCE, new FlushRequest(index)).actionGet(); + } + } + + public void refreshIndex(String index) { + if (client() == null) { + return; + } + if (index != null) { + client().execute(RefreshAction.INSTANCE, new RefreshRequest(index)).actionGet(); + } + } + + public void putMapping(String index) { + if (client() == null) { + return; + } + if (!mappings().isEmpty()) { + for (Map.Entry me : mappings().entrySet()) { + client().execute(PutMappingAction.INSTANCE, + new PutMappingRequest(index).type(me.getKey()).source(me.getValue())).actionGet(); + } + } + } + + public String resolveAlias(String alias) { + if (client() == null) { + return alias; + } + GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE); + GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet(); + if (!getAliasesResponse.getAliases().isEmpty()) { + return getAliasesResponse.getAliases().keys().iterator().next().value; + } + return alias; + } + + public String resolveMostRecentIndex(String alias) { + if (client() == null) { + return alias; + } + if (alias == null) { + return null; + } + GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE); + GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet(); + Pattern pattern = Pattern.compile("^(.*?)(\\d+)$"); + Set indices = new TreeSet<>(Collections.reverseOrder()); + for (ObjectCursor indexName : getAliasesResponse.getAliases().keys()) { + Matcher m = pattern.matcher(indexName.value); + if (m.matches()) { + if (alias.equals(m.group(1))) { + indices.add(indexName.value); + } + } + } + return indices.isEmpty() ? alias : indices.iterator().next(); + } + + public Map getAliasFilters(String alias) { + GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE); + return getFilters(getAliasesRequestBuilder.setIndices(resolveAlias(alias)).execute().actionGet()); + } + + public Map getIndexFilters(String index) { + GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE); + return getFilters(getAliasesRequestBuilder.setIndices(index).execute().actionGet()); + } + + private Map getFilters(GetAliasesResponse getAliasesResponse) { + Map result = new HashMap<>(); + for (ObjectObjectCursor> object : getAliasesResponse.getAliases()) { + List aliasMetaDataList = object.value; + for (AliasMetaData aliasMetaData : aliasMetaDataList) { + if (aliasMetaData.filteringRequired()) { + result.put(aliasMetaData.alias(), new String(aliasMetaData.getFilter().uncompressed())); + } else { + result.put(aliasMetaData.alias(), null); + } + } + } + return result; + } + + public void switchAliases(String index, String concreteIndex, List extraAliases) { + switchAliases(index, concreteIndex, extraAliases, null); + } + + public void switchAliases(String index, String concreteIndex, + List extraAliases, IndexAliasAdder adder) { + if (client() == null) { + return; + } + if (index.equals(concreteIndex)) { + return; + } + // two situations: 1. there is a new alias 2. there is already an old index with the alias + String oldIndex = resolveAlias(index); + final Map oldFilterMap = oldIndex.equals(index) ? 
null : getIndexFilters(oldIndex); + final List newAliases = new LinkedList<>(); + final List switchAliases = new LinkedList<>(); + IndicesAliasesRequestBuilder requestBuilder = new IndicesAliasesRequestBuilder(client(), IndicesAliasesAction.INSTANCE); + if (oldFilterMap == null || !oldFilterMap.containsKey(index)) { + // never apply a filter for trunk index name + requestBuilder.addAlias(concreteIndex, index); + newAliases.add(index); + } + // switch existing aliases + if (oldFilterMap != null) { + for (Map.Entry entry : oldFilterMap.entrySet()) { + String alias = entry.getKey(); + String filter = entry.getValue(); + requestBuilder.removeAlias(oldIndex, alias); + if (filter != null) { + requestBuilder.addAlias(concreteIndex, alias, filter); + } else { + requestBuilder.addAlias(concreteIndex, alias); + } + switchAliases.add(alias); + } + } + // a list of aliases that should be added, check if new or old + if (extraAliases != null) { + for (String extraAlias : extraAliases) { + if (oldFilterMap == null || !oldFilterMap.containsKey(extraAlias)) { + // index alias adder only active on extra aliases, and if alias is new + if (adder != null) { + adder.addIndexAlias(requestBuilder, concreteIndex, extraAlias); + } else { + requestBuilder.addAlias(concreteIndex, extraAlias); + } + newAliases.add(extraAlias); + } else { + String filter = oldFilterMap.get(extraAlias); + requestBuilder.removeAlias(oldIndex, extraAlias); + if (filter != null) { + requestBuilder.addAlias(concreteIndex, extraAlias, filter); + } else { + requestBuilder.addAlias(concreteIndex, extraAlias); + } + switchAliases.add(extraAlias); + } + } + } + if (!newAliases.isEmpty() || !switchAliases.isEmpty()) { + logger.info("new aliases = {}, switch aliases = {}", newAliases, switchAliases); + requestBuilder.execute().actionGet(); + } + } + + public void performRetentionPolicy(String index, String concreteIndex, int timestampdiff, int mintokeep) { + if (client() == null) { + return; + } + if (index.equals(concreteIndex)) { + return; + } + GetIndexRequestBuilder getIndexRequestBuilder = new GetIndexRequestBuilder(client(), GetIndexAction.INSTANCE); + GetIndexResponse getIndexResponse = getIndexRequestBuilder.execute().actionGet(); + Pattern pattern = Pattern.compile("^(.*?)(\\d+)$"); + Set indices = new TreeSet<>(); + logger.info("{} indices", getIndexResponse.getIndices().length); + for (String s : getIndexResponse.getIndices()) { + Matcher m = pattern.matcher(s); + if (m.matches()) { + if (index.equals(m.group(1)) && !s.equals(concreteIndex)) { + indices.add(s); + } + } + } + if (indices.isEmpty()) { + logger.info("no indices found, retention policy skipped"); + return; + } + if (mintokeep > 0 && indices.size() <= mintokeep) { + logger.info("{} indices found, not enough for retention policy ({}), skipped", + indices.size(), mintokeep); + return; + } else { + logger.info("candidates for deletion = {}", indices); + } + List indicesToDelete = new ArrayList<>(); + // our index + Matcher m1 = pattern.matcher(concreteIndex); + if (m1.matches()) { + Integer i1 = Integer.parseInt(m1.group(2)); + for (String s : indices) { + Matcher m2 = pattern.matcher(s); + if (m2.matches()) { + Integer i2 = Integer.parseInt(m2.group(2)); + int kept = indices.size() - indicesToDelete.size(); + if ((timestampdiff == 0 || (timestampdiff > 0 && i1 - i2 > timestampdiff)) && mintokeep <= kept) { + indicesToDelete.add(s); + } + } + } + } + logger.info("indices to delete = {}", indicesToDelete); + if (indicesToDelete.isEmpty()) { + logger.info("not enough indices 
found to delete, retention policy complete"); + return; + } + String[] s = indicesToDelete.toArray(new String[indicesToDelete.size()]); + DeleteIndexRequestBuilder requestBuilder = new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, s); + DeleteIndexResponse response = requestBuilder.execute().actionGet(); + if (!response.isAcknowledged()) { + logger.warn("retention delete index operation was not acknowledged"); + } + } + + public Long mostRecentDocument(String index) { + if (client() == null) { + return null; + } + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client(), SearchAction.INSTANCE); + SortBuilder sort = SortBuilders.fieldSort("_timestamp").order(SortOrder.DESC); + SearchResponse searchResponse = searchRequestBuilder.setIndices(index) + .addField("_timestamp") + .setSize(1) + .addSort(sort) + .execute().actionGet(); + if (searchResponse.getHits().getHits().length == 1) { + SearchHit hit = searchResponse.getHits().getHits()[0]; + if (hit.getFields().get("_timestamp") != null) { + return hit.getFields().get("_timestamp").getValue(); + } else { + return 0L; + } + } + return null; + } + +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/BulkControl.java b/src/main/java/org/xbib/elasticsearch/extras/client/BulkControl.java new file mode 100644 index 0000000..910f2f2 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/BulkControl.java @@ -0,0 +1,22 @@ +package org.xbib.elasticsearch.extras.client; + +import java.util.Map; +import java.util.Set; + +/** + */ +public interface BulkControl { + + void startBulk(String indexName, long startRefreshInterval, long stopRefreshInterval); + + boolean isBulk(String indexName); + + void finishBulk(String indexName); + + Set indices(); + + Map getStartBulkRefreshIntervals(); + + Map getStopBulkRefreshIntervals(); + +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/BulkMetric.java b/src/main/java/org/xbib/elasticsearch/extras/client/BulkMetric.java new file mode 100644 index 0000000..a45e9c2 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/BulkMetric.java @@ -0,0 +1,31 @@ +package org.xbib.elasticsearch.extras.client; + +import org.xbib.metrics.Count; +import org.xbib.metrics.Metered; + +/** + * + */ +public interface BulkMetric { + + Metered getTotalIngest(); + + Count getTotalIngestSizeInBytes(); + + Count getCurrentIngest(); + + Count getCurrentIngestNumDocs(); + + Count getSubmitted(); + + Count getSucceeded(); + + Count getFailed(); + + void start(); + + void stop(); + + long elapsed(); + +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/BulkProcessor.java b/src/main/java/org/xbib/elasticsearch/extras/client/BulkProcessor.java new file mode 100644 index 0000000..223c27e --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/BulkProcessor.java @@ -0,0 +1,473 @@ +package org.xbib.elasticsearch.extras.client; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; 
+import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.FutureUtils; + +import java.io.Closeable; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +/** + * A bulk processor is a thread safe bulk processing class, allowing to easily set when to "flush" a new bulk request + * (either based on number of actions, based on the size, or time), and to easily control the number of concurrent bulk + * requests allowed to be executed in parallel. + * In order to create a new bulk processor, use the {@link Builder}. + */ +public class BulkProcessor implements Closeable { + + private final int bulkActions; + + private final long bulkSize; + + private final ScheduledThreadPoolExecutor scheduler; + + private final ScheduledFuture scheduledFuture; + + private final AtomicLong executionIdGen = new AtomicLong(); + + private final BulkRequestHandler bulkRequestHandler; + + private BulkRequest bulkRequest; + + private volatile boolean closed = false; + + private BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, + int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) { + this.bulkActions = bulkActions; + this.bulkSize = bulkSize.bytes(); + + this.bulkRequest = new BulkRequest(); + this.bulkRequestHandler = concurrentRequests == 0 ? + new SyncBulkRequestHandler(client, listener) : + new AsyncBulkRequestHandler(client, listener, concurrentRequests); + + if (flushInterval != null) { + this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, + EsExecutors.daemonThreadFactory(client.settings(), + name != null ? "[" + name + "]" : "" + "bulk_processor")); + this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); + this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false); + this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(), + flushInterval.millis(), TimeUnit.MILLISECONDS); + } else { + this.scheduler = null; + this.scheduledFuture = null; + } + } + + public static Builder builder(Client client, Listener listener) { + if (client == null) { + throw new NullPointerException("The client you specified while building a BulkProcessor is null"); + } + return new Builder(client, listener); + } + + /** + * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are flushed. + */ + @Override + public void close() { + try { + awaitClose(0, TimeUnit.NANOSECONDS); + } catch (InterruptedException exc) { + Thread.currentThread().interrupt(); + } + } + + /** + * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are + * flushed. + * + * If concurrent requests are not enabled, returns {@code true} immediately. + * If concurrent requests are enabled, waits for up to the specified timeout for all bulk requests to complete then + * returns {@code true}, + * If the specified waiting time elapses before all bulk requests complete, {@code false} is returned. 
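+ * A shutdown sketch ({@code processor} is an already-built instance and the timeout value is illustrative):
+ * <pre>{@code
+ * if (!processor.awaitClose(30, TimeUnit.SECONDS)) {
+ *     // concurrent bulk requests were still pending when the timeout elapsed
+ * }
+ * }</pre>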
+ * + * @param timeout The maximum time to wait for the bulk requests to complete + * @param unit The time unit of the {@code timeout} argument + * @return {@code true} if all bulk requests completed and {@code false} if the waiting time elapsed before all the + * bulk requests completed + * @throws InterruptedException If the current thread is interrupted + */ + public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + if (closed) { + return true; + } + closed = true; + if (this.scheduledFuture != null) { + FutureUtils.cancel(this.scheduledFuture); + this.scheduler.shutdown(); + } + if (bulkRequest.numberOfActions() > 0) { + execute(); + } + return this.bulkRequestHandler.awaitClose(timeout, unit); + } + + /** + * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest} + * (for example, if no id is provided, one will be generated, or usage of the create flag). + * + * @param request request + * @return his bulk processor + */ + public BulkProcessor add(IndexRequest request) { + return add((ActionRequest) request); + } + + /** + * Adds an {@link DeleteRequest} to the list of actions to execute. + * + * @param request request + * @return his bulk processor + */ + public BulkProcessor add(DeleteRequest request) { + return add((ActionRequest) request); + } + + /** + * Adds either a delete or an index request. + * + * @param request request + * @return his bulk processor + */ + public BulkProcessor add(ActionRequest request) { + return add(request, null); + } + + /** + * Adds either a delete or an index request with a payload. + * + * @param request request + * @param payload payload + * @return his bulk processor + */ + public BulkProcessor add(ActionRequest request, @Nullable Object payload) { + internalAdd(request, payload); + return this; + } + + protected void ensureOpen() { + if (closed) { + throw new IllegalStateException("bulk process already closed"); + } + } + + private synchronized void internalAdd(ActionRequest request, @Nullable Object payload) { + ensureOpen(); + bulkRequest.add(request, payload); + executeIfNeeded(); + } + + public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) + throws Exception { + return add(data, defaultIndex, defaultType, null); + } + + public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, + @Nullable String defaultType, @Nullable Object payload) throws Exception { + bulkRequest.add(data, defaultIndex, defaultType, null, null, payload, true); + executeIfNeeded(); + return this; + } + + private void executeIfNeeded() { + ensureOpen(); + if (!isOverTheLimit()) { + return; + } + execute(); + } + + private void execute() { + final BulkRequest bulkRequest = this.bulkRequest; + final long executionId = executionIdGen.incrementAndGet(); + + this.bulkRequest = new BulkRequest(); + this.bulkRequestHandler.execute(bulkRequest, executionId); + } + + private boolean isOverTheLimit() { + return bulkActions != -1 && + bulkRequest.numberOfActions() >= bulkActions || + bulkSize != -1 && + bulkRequest.estimatedSizeInBytes() >= bulkSize; + } + + /** + * Flush pending delete or index requests. + */ + public synchronized void flush() { + ensureOpen(); + if (bulkRequest.numberOfActions() > 0) { + execute(); + } + } + + /** + * A listener for the execution. + */ + public interface Listener { + + /** + * Callback before the bulk is executed. 
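+ * A minimal implementation sketch (logging only; the {@code logger} reference is assumed to exist):
+ * <pre>{@code
+ * public void beforeBulk(long executionId, BulkRequest request) {
+ *     logger.info("bulk [{}] about to be executed with {} actions", executionId, request.numberOfActions());
+ * }
+ * }</pre>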
+ * + * @param executionId execution ID + * @param request request + */ + void beforeBulk(long executionId, BulkRequest request); + + /** + * Callback after a successful execution of bulk request. + * + * @param executionId execution ID + * @param request request + * @param response response + */ + void afterBulk(long executionId, BulkRequest request, BulkResponse response); + + /** + * Callback after a failed execution of bulk request. + * + * Note that in case an instance of InterruptedException is passed, which means that request + * processing has been + * cancelled externally, the thread's interruption status has been restored prior to calling this method. + * + * @param executionId execution ID + * @param request request + * @param failure failure + */ + void afterBulk(long executionId, BulkRequest request, Throwable failure); + } + + /** + * A builder used to create a build an instance of a bulk processor. + */ + public static class Builder { + + private final Client client; + private final Listener listener; + private String name; + private int concurrentRequests = 1; + private int bulkActions = 1000; + private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); + private TimeValue flushInterval = null; + + /** + * Creates a builder of bulk processor with the client to use and the listener that will be used + * to be notified on the completion of bulk requests. + * + * @param client the client + * @param listener the listener + */ + Builder(Client client, Listener listener) { + this.client = client; + this.listener = listener; + } + + /** + * Sets an optional name to identify this bulk processor. + * + * @param name name + * @return this builder + */ + public Builder setName(String name) { + this.name = name; + return this; + } + + /** + * Sets the number of concurrent requests allowed to be executed. A value of 0 means that only a single + * request will be allowed to be executed. A value of 1 means 1 concurrent request is allowed to be executed + * while accumulating new bulk requests. Defaults to 1. + * + * @param concurrentRequests maximum number of concurrent requests + * @return this builder + */ + public Builder setConcurrentRequests(int concurrentRequests) { + this.concurrentRequests = concurrentRequests; + return this; + } + + /** + * Sets when to flush a new bulk request based on the number of actions currently added. Defaults to + * 1000. Can be set to -1 to disable it. + * + * @param bulkActions mbulk actions + * @return this builder + */ + public Builder setBulkActions(int bulkActions) { + this.bulkActions = bulkActions; + return this; + } + + /** + * Sets when to flush a new bulk request based on the size of actions currently added. Defaults to + * 5mb. Can be set to -1 to disable it. + * + * @param bulkSize bulk size + * @return this builder + */ + public Builder setBulkSize(ByteSizeValue bulkSize) { + this.bulkSize = bulkSize; + return this; + } + + /** + * Sets a flush interval flushing *any* bulk actions pending if the interval passes. Defaults to not set. + * Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(org.elasticsearch.common.unit.ByteSizeValue)} + * can be set to -1 with the flush interval set allowing for complete async processing of bulk actions. + * + * @param flushInterval flush interval + * @return this builder + */ + public Builder setFlushInterval(TimeValue flushInterval) { + this.flushInterval = flushInterval; + return this; + } + + /** + * Builds a new bulk processor. 
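+ * A configuration sketch that mirrors the documented defaults ({@code client} and {@code listener} are assumed to
+ * exist; the flush interval is an illustrative choice):
+ * <pre>{@code
+ * BulkProcessor processor = BulkProcessor.builder(client, listener)
+ *     .setName("my-bulk-processor")
+ *     .setBulkActions(1000)
+ *     .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB))
+ *     .setFlushInterval(TimeValue.timeValueSeconds(5))
+ *     .setConcurrentRequests(1)
+ *     .build();
+ * }</pre>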
+ * + * @return a bulk processor + */ + public BulkProcessor build() { + return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval); + } + } + + private class Flush implements Runnable { + + @Override + public void run() { + synchronized (BulkProcessor.this) { + if (closed) { + return; + } + if (bulkRequest.numberOfActions() == 0) { + return; + } + execute(); + } + } + } + + /** + * Abstracts the low-level details of bulk request handling. + */ + abstract class BulkRequestHandler { + + public abstract void execute(BulkRequest bulkRequest, long executionId); + + public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException; + + } + + private class SyncBulkRequestHandler extends BulkRequestHandler { + private final Client client; + private final BulkProcessor.Listener listener; + + SyncBulkRequestHandler(Client client, BulkProcessor.Listener listener) { + this.client = client; + this.listener = listener; + } + + public void execute(BulkRequest bulkRequest, long executionId) { + boolean afterCalled = false; + try { + listener.beforeBulk(executionId, bulkRequest); + BulkResponse bulkResponse = client.execute(BulkAction.INSTANCE, bulkRequest).actionGet(); + afterCalled = true; + listener.afterBulk(executionId, bulkRequest, bulkResponse); + } catch (Throwable t) { + if (!afterCalled) { + listener.afterBulk(executionId, bulkRequest, t); + } + } + } + + public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + return true; + } + } + + private class AsyncBulkRequestHandler extends BulkRequestHandler { + private final Client client; + private final BulkProcessor.Listener listener; + private final Semaphore semaphore; + private final int concurrentRequests; + + private AsyncBulkRequestHandler(Client client, BulkProcessor.Listener listener, int concurrentRequests) { + this.client = client; + this.listener = listener; + this.concurrentRequests = concurrentRequests; + this.semaphore = new Semaphore(concurrentRequests); + } + + @Override + public void execute(final BulkRequest bulkRequest, final long executionId) { + boolean bulkRequestSetupSuccessful = false; + boolean acquired = false; + try { + listener.beforeBulk(executionId, bulkRequest); + semaphore.acquire(); + acquired = true; + client.execute(BulkAction.INSTANCE, bulkRequest, new ActionListener() { + @Override + public void onResponse(BulkResponse response) { + try { + listener.afterBulk(executionId, bulkRequest, response); + } finally { + semaphore.release(); + } + } + + @Override + public void onFailure(Throwable e) { + try { + listener.afterBulk(executionId, bulkRequest, e); + } finally { + semaphore.release(); + } + } + }); + bulkRequestSetupSuccessful = true; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + listener.afterBulk(executionId, bulkRequest, e); + } catch (Throwable t) { + listener.afterBulk(executionId, bulkRequest, t); + } finally { + if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore + semaphore.release(); + } + } + } + + @Override + public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) { + semaphore.release(this.concurrentRequests); + return true; + } + return false; + } + } +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/ClientBuilder.java b/src/main/java/org/xbib/elasticsearch/extras/client/ClientBuilder.java new file mode 
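// Editor's sketch (not part of the patch): minimal use of the BulkProcessor defined above.
// Assumes the static builder(Client, Listener) factory that other classes in this patch call, and an
// already constructed org.elasticsearch.client.Client named "client"; index/type/id/source are placeholders.
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.xbib.elasticsearch.extras.client.BulkProcessor;

import java.util.concurrent.TimeUnit;

public class BulkProcessorUsageSketch {

    public static void ingest(Client client) throws InterruptedException {
        BulkProcessor.Listener listener = new BulkProcessor.Listener() {
            @Override
            public void beforeBulk(long executionId, BulkRequest request) {
                // invoked just before a bulk request is handed to the request handler
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                // invoked after a successful bulk response
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                // invoked when the whole bulk request failed
            }
        };
        BulkProcessor processor = BulkProcessor.builder(client, listener)
                .setBulkActions(1000)                               // flush after 1000 actions ...
                .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB)) // ... or after 5 MB ...
                .setFlushInterval(TimeValue.timeValueSeconds(30))   // ... or every 30 seconds
                .setConcurrentRequests(1)                           // one bulk in flight while accumulating
                .build();
        processor.add(new IndexRequest("myindex").type("mytype").id("1").source("{\"field\":\"value\"}"));
        // drains outstanding requests and closes; returns false if the timeout elapsed first
        processor.awaitClose(30, TimeUnit.SECONDS);
    }
}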
100644 index 0000000..4089249 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/ClientBuilder.java @@ -0,0 +1,105 @@ +package org.xbib.elasticsearch.extras.client; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.xbib.elasticsearch.extras.client.node.BulkNodeClient; +import org.xbib.elasticsearch.extras.client.transport.BulkTransportClient; +import org.xbib.elasticsearch.extras.client.transport.MockTransportClient; + +/** + * + */ +public final class ClientBuilder implements Parameters { + + private final Settings.Builder settingsBuilder; + + private BulkMetric metric; + + private BulkControl control; + + public ClientBuilder() { + settingsBuilder = Settings.builder(); + } + + public static ClientBuilder builder() { + return new ClientBuilder(); + } + + public ClientBuilder put(String key, String value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, Integer value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, Long value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, Double value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, ByteSizeValue value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, TimeValue value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(Settings settings) { + settingsBuilder.put(settings); + return this; + } + + public ClientBuilder setMetric(BulkMetric metric) { + this.metric = metric; + return this; + } + + public ClientBuilder setControl(BulkControl control) { + this.control = control; + return this; + } + + public BulkNodeClient toBulkNodeClient(Client client) { + Settings settings = settingsBuilder.build(); + return new BulkNodeClient() + .maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST)) + .maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS)) + .maxVolumePerRequest(settings.getAsBytesSize(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST)) + .flushIngestInterval(settings.getAsTime(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL)) + .init(client, metric, control); + } + + public BulkTransportClient toBulkTransportClient() { + Settings settings = settingsBuilder.build(); + return new BulkTransportClient() + .maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST)) + .maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS)) + .maxVolumePerRequest(settings.getAsBytesSize(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST)) + .flushIngestInterval(settings.getAsTime(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL)) + .init(settings, metric, control); + } + + public MockTransportClient toMockTransportClient() { + Settings settings = settingsBuilder.build(); + return new MockTransportClient() + .maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST)) + .maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS)) + .maxVolumePerRequest(settings.getAsBytesSize(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST)) + 
.flushIngestInterval(settings.getAsTime(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL)) + .init(settings, metric, control); + } + +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/ClientMethods.java b/src/main/java/org/xbib/elasticsearch/extras/client/ClientMethods.java new file mode 100644 index 0000000..c643924 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/ClientMethods.java @@ -0,0 +1,391 @@ +package org.xbib.elasticsearch.extras.client; + +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +/** + * Interface for providing convenient administrative methods for ingesting data into Elasticsearch. + */ +public interface ClientMethods extends Parameters { + + /** + * Initialize new ingest client, wrap an existing Elasticsearch client, and set up metrics. + * + * @param client the Elasticsearch client + * @param metric metric + * @param control control + * @return this ingest + * @throws IOException if client could not get created + */ + ClientMethods init(ElasticsearchClient client, BulkMetric metric, BulkControl control) throws IOException; + + /** + * Initialize, create new ingest client, and set up metrics. + * + * @param settings settings + * @param metric metric + * @param control control + * @return this ingest + * @throws IOException if client could not get created + */ + ClientMethods init(Settings settings, BulkMetric metric, BulkControl control) throws IOException; + + /** + * Return Elasticsearch client. + * + * @return Elasticsearch client + */ + ElasticsearchClient client(); + + /** + * Index document. + * + * @param index the index + * @param type the type + * @param id the id + * @param source the source + * @return this + */ + ClientMethods index(String index, String type, String id, String source); + + /** + * Delete document. + * + * @param index the index + * @param type the type + * @param id the id + * @return this ingest + */ + ClientMethods delete(String index, String type, String id); + + /** + * Update document. Use with precaution! Does not work in all cases. + * + * @param index the index + * @param type the type + * @param id the id + * @param source the source + * @return this + */ + ClientMethods update(String index, String type, String id, String source); + + /** + * Set the maximum number of actions per request. + * + * @param maxActionsPerRequest maximum number of actions per request + * @return this ingest + */ + ClientMethods maxActionsPerRequest(int maxActionsPerRequest); + + /** + * Set the maximum concurent requests. + * + * @param maxConcurentRequests maximum number of concurrent ingest requests + * @return this Ingest + */ + ClientMethods maxConcurrentRequests(int maxConcurentRequests); + + /** + * Set the maximum volume for request before flush. + * + * @param maxVolume maximum volume + * @return this ingest + */ + ClientMethods maxVolumePerRequest(ByteSizeValue maxVolume); + + /** + * Set the flush interval for automatic flushing outstanding ingest requests. 
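// Editor's sketch (not part of the patch): wiring a BulkNodeClient through the ClientBuilder above.
// Assumes an existing org.elasticsearch.client.Client (for example node.client()); the setting keys
// are the constants of the Parameters interface, and the chosen values are placeholders.
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.xbib.elasticsearch.extras.client.ClientBuilder;
import org.xbib.elasticsearch.extras.client.Parameters;
import org.xbib.elasticsearch.extras.client.SimpleBulkControl;
import org.xbib.elasticsearch.extras.client.SimpleBulkMetric;
import org.xbib.elasticsearch.extras.client.node.BulkNodeClient;

public class ClientBuilderUsageSketch {

    public static BulkNodeClient build(Client client) {
        return ClientBuilder.builder()
                .put(Parameters.MAX_ACTIONS_PER_REQUEST, 1000)              // actions per bulk request
                .put(Parameters.MAX_CONCURRENT_REQUESTS, 4)                 // bulk requests in flight
                .put(Parameters.FLUSH_INTERVAL, TimeValue.timeValueSeconds(30))
                .setMetric(new SimpleBulkMetric())
                .setControl(new SimpleBulkControl())
                .toBulkNodeClient(client);
    }
}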
+ * + * @param flushInterval the flush interval, default is 30 seconds + * @return this ingest + */ + ClientMethods flushIngestInterval(TimeValue flushInterval); + + /** + * Set mapping. + * + * @param type mapping type + * @param in mapping definition as input stream + * @throws IOException if mapping could not be added + */ + void mapping(String type, InputStream in) throws IOException; + + /** + * Set mapping. + * + * @param type mapping type + * @param mapping mapping definition as input stream + * @throws IOException if mapping could not be added + */ + void mapping(String type, String mapping) throws IOException; + + /** + * Put mapping. + * + * @param index index + */ + void putMapping(String index); + + /** + * Create a new index. + * + * @param index index + * @return this ingest + */ + ClientMethods newIndex(String index); + + /** + * Create a new index. + * + * @param index index + * @param type type + * @param settings settings + * @param mappings mappings + * @return this ingest + * @throws IOException if new index creation fails + */ + ClientMethods newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException; + + /** + * Create a new index. + * + * @param index index + * @param settings settings + * @param mappings mappings + * @return this ingest + */ + ClientMethods newIndex(String index, Settings settings, Map mappings); + + /** + * Create new mapping. + * + * @param index index + * @param type index type + * @param mapping mapping + * @return this ingest + */ + ClientMethods newMapping(String index, String type, Map mapping); + + /** + * Delete index. + * + * @param index index + * @return this ingest + */ + ClientMethods deleteIndex(String index); + + /** + * Start bulk mode. + * + * @param index index + * @param startRefreshIntervalSeconds refresh interval before bulk + * @param stopRefreshIntervalSeconds refresh interval after bulk + * @return this ingest + * @throws IOException if bulk could not be started + */ + ClientMethods startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds) throws IOException; + + /** + * Stops bulk mode. + * + * @param index index + * @return this Ingest + * @throws IOException if bulk could not be stopped + */ + ClientMethods stopBulk(String index) throws IOException; + + /** + * Bulked index request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * + * @param indexRequest the index request to add + * @return this ingest + */ + ClientMethods bulkIndex(IndexRequest indexRequest); + + /** + * Bulked delete request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * + * @param deleteRequest the delete request to add + * @return this ingest + */ + ClientMethods bulkDelete(DeleteRequest deleteRequest); + + /** + * Bulked update request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * Note that updates only work correctly when all operations between nodes are synchronized! + * + * @param updateRequest the update request to add + * @return this ingest + */ + ClientMethods bulkUpdate(UpdateRequest updateRequest); + + /** + * Flush ingest, move all pending documents to the cluster. + * + * @return this + */ + ClientMethods flushIngest(); + + /** + * Wait for all outstanding responses. 
+ * + * @param maxWait maximum wait time + * @return this ingest + * @throws InterruptedException if wait is interrupted + * @throws ExecutionException if execution failed + */ + ClientMethods waitForResponses(TimeValue maxWait) throws InterruptedException, ExecutionException; + + /** + * Refresh the index. + * + * @param index index + */ + void refreshIndex(String index); + + /** + * Flush the index. + * + * @param index index + */ + void flushIndex(String index); + + /** + * Update replica level. + * + * @param index index + * @param level the replica level + * @return number of shards after updating replica level + * @throws IOException if replica could not be updated + */ + int updateReplicaLevel(String index, int level) throws IOException; + + /** + * Wait for cluster being healthy. + * + * @param healthColor cluster health color to wait for + * @param timeValue time value + * @throws IOException if wait failed + */ + void waitForCluster(String healthColor, TimeValue timeValue) throws IOException; + + /** + * Get current health color. + * + * @return the cluster health color + */ + String healthColor(); + + /** + * Wait for index recovery (after replica change). + * + * @param index index + * @return number of shards found + * @throws IOException if wait failed + */ + int waitForRecovery(String index) throws IOException; + + /** + * Resolve alias. + * + * @param alias the alias + * @return one index name behind the alias or the alias if there is no index + */ + String resolveAlias(String alias); + + /** + * Resolve alias to all connected indices, sort index names with most recent timestamp on top, return this index + * name. + * + * @param alias the alias + * @return the most recent index name pointing to the alias + */ + String resolveMostRecentIndex(String alias); + + /** + * Get all alias filters. + * + * @param index index + * @return map of alias filters + */ + Map getAliasFilters(String index); + + /** + * Switch aliases from one index to another. + * + * @param index the index name + * @param concreteIndex the index name with timestamp + * @param extraAliases a list of names that should be set as index aliases + */ + void switchAliases(String index, String concreteIndex, List extraAliases); + + /** + * Switch aliases from one index to another. + * + * @param index the index name + * @param concreteIndex the index name with timestamp + * @param extraAliases a list of names that should be set as index aliases + * @param adder an adder method to create alias term queries + */ + void switchAliases(String index, String concreteIndex, List extraAliases, IndexAliasAdder adder); + + /** + * Retention policy for an index. All indices before timestampdiff should be deleted, + * but mintokeep indices must be kept. + * + * @param index index name + * @param concreteIndex index name with timestamp + * @param timestampdiff timestamp delta (for index timestamps) + * @param mintokeep minimum number of indices to keep + */ + void performRetentionPolicy(String index, String concreteIndex, int timestampdiff, int mintokeep); + + /** + * Log the timestamp of the most recently indexed document in the index. + * + * @param index the index name + * @return millis UTC millis of the most recent document + * @throws IOException if most rcent document can not be found + */ + Long mostRecentDocument(String index) throws IOException; + + /** + * Get metric. + * + * @return metric + */ + BulkMetric getMetric(); + + /** + * Returns true is a throwable exists. 
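// Editor's sketch (not part of the patch): a typical ingest session against the ClientMethods
// interface documented above. "methods" is any implementation (BulkNodeClient or BulkTransportClient);
// the index name, type, document bodies and refresh values are placeholders.
import org.elasticsearch.common.unit.TimeValue;
import org.xbib.elasticsearch.extras.client.ClientMethods;

public class IngestWorkflowSketch {

    public static void run(ClientMethods methods) throws Exception {
        methods.newIndex("myindex");                    // create the index
        methods.startBulk("myindex", -1, 1000);         // relax refresh_interval while bulk indexing
        for (int i = 0; i < 10000; i++) {
            methods.index("myindex", "mytype", Integer.toString(i), "{\"n\":" + i + "}");
        }
        methods.flushIngest();                          // push any partially filled bulk request
        methods.waitForResponses(TimeValue.timeValueSeconds(60));
        methods.stopBulk("myindex");                    // restore refresh_interval to the second value
        methods.refreshIndex("myindex");
        methods.shutdown();
    }
}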
+ * + * @return true if a Throwable exists + */ + boolean hasThrowable(); + + /** + * Return last throwable if exists. + * + * @return last throwable + */ + Throwable getThrowable(); + + /** + * Shutdown the ingesting. + */ + void shutdown(); +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/IndexAliasAdder.java b/src/main/java/org/xbib/elasticsearch/extras/client/IndexAliasAdder.java new file mode 100644 index 0000000..8ce2df5 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/IndexAliasAdder.java @@ -0,0 +1,11 @@ +package org.xbib.elasticsearch.extras.client; + +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; + +/** + * + */ +public interface IndexAliasAdder { + + void addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias); +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/NetworkUtils.java b/src/main/java/org/xbib/elasticsearch/extras/client/NetworkUtils.java new file mode 100644 index 0000000..4dd69da --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/NetworkUtils.java @@ -0,0 +1,264 @@ +package org.xbib.elasticsearch.extras.client; + +import java.io.IOException; +import java.net.Inet4Address; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.NetworkInterface; +import java.net.SocketException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Enumeration; +import java.util.List; +import java.util.Locale; + +/** + * + */ +public class NetworkUtils { + + private static final String IPv4_SETTING = "java.net.preferIPv4Stack"; + + private static final String IPv6_SETTING = "java.net.preferIPv6Addresses"; + + private static final InetAddress localAddress; + + static { + InetAddress address; + try { + address = InetAddress.getLocalHost(); + } catch (Throwable e) { + address = InetAddress.getLoopbackAddress(); + } + localAddress = address; + } + + private NetworkUtils() { + } + + public static InetAddress getLocalAddress() { + return localAddress; + } + + public static InetAddress getFirstNonLoopbackAddress(ProtocolVersion ipversion) throws SocketException { + InetAddress address; + for (NetworkInterface networkInterface : getNetworkInterfaces()) { + try { + if (!networkInterface.isUp() || networkInterface.isLoopback()) { + continue; + } + } catch (Exception e) { + continue; + } + address = getFirstNonLoopbackAddress(networkInterface, ipversion); + if (address != null) { + return address; + } + } + return null; + } + + public static InetAddress getFirstNonLoopbackAddress(NetworkInterface networkInterface, ProtocolVersion ipVersion) + throws SocketException { + if (networkInterface == null) { + throw new IllegalArgumentException("network interface is null"); + } + for (Enumeration addresses = networkInterface.getInetAddresses(); addresses.hasMoreElements(); ) { + InetAddress address = addresses.nextElement(); + if (!address.isLoopbackAddress()) { + if ((address instanceof Inet4Address && ipVersion == ProtocolVersion.IPv4) || + (address instanceof Inet6Address && ipVersion == ProtocolVersion.IPv6)) { + return address; + } + } + } + return null; + } + + public static InetAddress getFirstAddress(NetworkInterface networkInterface, ProtocolVersion ipVersion) + throws SocketException { + if (networkInterface == null) { + throw new IllegalArgumentException("network interface is null"); + } + for (Enumeration addresses = networkInterface.getInetAddresses(); addresses.hasMoreElements(); ) 
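// Editor's sketch (not part of the patch): an IndexAliasAdder for use with switchAliases() from the
// ClientMethods interface above. The "catalog" field and the filtered-alias scheme are illustrative
// assumptions; builder, index and alias are supplied by the switchAliases() implementation per alias.
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.xbib.elasticsearch.extras.client.IndexAliasAdder;

public class AliasAdderSketch {

    public static IndexAliasAdder catalogAliasAdder() {
        return new IndexAliasAdder() {
            @Override
            public void addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias) {
                // add the alias restricted to documents whose "catalog" field equals the alias name
                builder.addAlias(index, alias, QueryBuilders.termQuery("catalog", alias));
            }
        };
    }
}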
{ + InetAddress address = addresses.nextElement(); + if ((address instanceof Inet4Address && ipVersion == ProtocolVersion.IPv4) || + (address instanceof Inet6Address && ipVersion == ProtocolVersion.IPv6)) { + return address; + } + } + return null; + } + + public static List getAllAvailableInterfaces() throws SocketException { + List allInterfaces = new ArrayList<>(); + for (Enumeration interfaces = NetworkInterface.getNetworkInterfaces(); + interfaces.hasMoreElements(); ) { + NetworkInterface networkInterface = interfaces.nextElement(); + allInterfaces.add(networkInterface); + Enumeration subInterfaces = networkInterface.getSubInterfaces(); + if (subInterfaces.hasMoreElements()) { + while (subInterfaces.hasMoreElements()) { + allInterfaces.add(subInterfaces.nextElement()); + } + } + } + sortInterfaces(allInterfaces); + return allInterfaces; + } + + public static List getAllAvailableAddresses() throws SocketException { + List allAddresses = new ArrayList<>(); + for (NetworkInterface networkInterface : getNetworkInterfaces()) { + Enumeration addrs = networkInterface.getInetAddresses(); + while (addrs.hasMoreElements()) { + allAddresses.add(addrs.nextElement()); + } + } + sortAddresses(allAddresses); + return allAddresses; + } + + public static ProtocolVersion getProtocolVersion() throws SocketException { + switch (findAvailableProtocols()) { + case IPv4: + return ProtocolVersion.IPv4; + case IPv6: + return ProtocolVersion.IPv6; + case IPv46: + if (Boolean.getBoolean(System.getProperty(IPv4_SETTING))) { + return ProtocolVersion.IPv4; + } + if (Boolean.getBoolean(System.getProperty(IPv6_SETTING))) { + return ProtocolVersion.IPv6; + } + return ProtocolVersion.IPv6; + } + return ProtocolVersion.NONE; + } + + public static ProtocolVersion findAvailableProtocols() throws SocketException { + boolean hasIPv4 = false; + boolean hasIPv6 = false; + for (InetAddress addr : getAllAvailableAddresses()) { + if (addr instanceof Inet4Address) { + hasIPv4 = true; + } + if (addr instanceof Inet6Address) { + hasIPv6 = true; + } + } + if (hasIPv4 && hasIPv6) { + return ProtocolVersion.IPv46; + } + if (hasIPv4) { + return ProtocolVersion.IPv4; + } + if (hasIPv6) { + return ProtocolVersion.IPv6; + } + return ProtocolVersion.NONE; + } + + public static InetAddress resolveInetAddress(String host, String defaultValue) throws IOException { + if (host == null) { + host = defaultValue; + } + String origHost = host; + int pos = host.indexOf(':'); + if (pos > 0) { + host = host.substring(0, pos - 1); + } + if ((host.startsWith("#") && host.endsWith("#")) || (host.startsWith("_") && host.endsWith("_"))) { + host = host.substring(1, host.length() - 1); + if (host.equals("local")) { + return getLocalAddress(); + } else if (host.startsWith("non_loopback")) { + if (host.toLowerCase(Locale.ROOT).endsWith(":ipv4")) { + return getFirstNonLoopbackAddress(ProtocolVersion.IPv4); + } else if (host.toLowerCase(Locale.ROOT).endsWith(":ipv6")) { + return getFirstNonLoopbackAddress(ProtocolVersion.IPv6); + } else { + return getFirstNonLoopbackAddress(getProtocolVersion()); + } + } else { + ProtocolVersion protocolVersion = getProtocolVersion(); + if (host.toLowerCase(Locale.ROOT).endsWith(":ipv4")) { + protocolVersion = ProtocolVersion.IPv4; + host = host.substring(0, host.length() - 5); + } else if (host.toLowerCase(Locale.ROOT).endsWith(":ipv6")) { + protocolVersion = ProtocolVersion.IPv6; + host = host.substring(0, host.length() - 5); + } + for (NetworkInterface ni : getAllAvailableInterfaces()) { + if (!ni.isUp()) { + continue; + } + 
if (host.equals(ni.getName()) || host.equals(ni.getDisplayName())) { + if (ni.isLoopback()) { + return getFirstAddress(ni, protocolVersion); + } else { + return getFirstNonLoopbackAddress(ni, protocolVersion); + } + } + } + } + throw new IOException("failed to find network interface for [" + origHost + "]"); + } + return InetAddress.getByName(host); + } + + private static List getNetworkInterfaces() throws SocketException { + List networkInterfaces = new ArrayList<>(); + Enumeration interfaces = NetworkInterface.getNetworkInterfaces(); + while (interfaces.hasMoreElements()) { + NetworkInterface networkInterface = interfaces.nextElement(); + networkInterfaces.add(networkInterface); + Enumeration subInterfaces = networkInterface.getSubInterfaces(); + if (subInterfaces.hasMoreElements()) { + while (subInterfaces.hasMoreElements()) { + networkInterfaces.add(subInterfaces.nextElement()); + } + } + } + sortInterfaces(networkInterfaces); + return networkInterfaces; + } + + private static void sortInterfaces(List interfaces) { + Collections.sort(interfaces, new Comparator() { + @Override + public int compare(NetworkInterface o1, NetworkInterface o2) { + return Integer.compare(o1.getIndex(), o2.getIndex()); + } + }); + } + + private static void sortAddresses(List addressList) { + Collections.sort(addressList, new Comparator() { + @Override + public int compare(InetAddress o1, InetAddress o2) { + return compareBytes(o1.getAddress(), o2.getAddress()); + } + }); + } + + private static int compareBytes(byte[] left, byte[] right) { + for (int i = 0, j = 0; i < left.length && j < right.length; i++, j++) { + int a = (left[i] & 0xff); + int b = (right[j] & 0xff); + if (a != b) { + return a - b; + } + } + return left.length - right.length; + } + + /** + * + */ + public enum ProtocolVersion { + IPv4, IPv6, IPv46, NONE + } +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/Parameters.java b/src/main/java/org/xbib/elasticsearch/extras/client/Parameters.java new file mode 100644 index 0000000..41cc6d2 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/Parameters.java @@ -0,0 +1,28 @@ +package org.xbib.elasticsearch.extras.client; + +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; + +/** + * + */ +public interface Parameters { + + int DEFAULT_MAX_ACTIONS_PER_REQUEST = 1000; + + int DEFAULT_MAX_CONCURRENT_REQUESTS = Runtime.getRuntime().availableProcessors() * 4; + + ByteSizeValue DEFAULT_MAX_VOLUME_PER_REQUEST = new ByteSizeValue(10, ByteSizeUnit.MB); + + TimeValue DEFAULT_FLUSH_INTERVAL = TimeValue.timeValueSeconds(30); + + String MAX_ACTIONS_PER_REQUEST = "max_actions_per_request"; + + String MAX_CONCURRENT_REQUESTS = "max_concurrent_requests"; + + String MAX_VOLUME_PER_REQUEST = "max_volume_per_request"; + + String FLUSH_INTERVAL = "flush_interval"; + +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkControl.java b/src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkControl.java new file mode 100644 index 0000000..b9a92d6 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkControl.java @@ -0,0 +1,54 @@ +package org.xbib.elasticsearch.extras.client; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + */ +public class SimpleBulkControl implements BulkControl { + + private final Set indexNames = new HashSet<>(); + + private final Map 
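// Editor's sketch (not part of the patch): how the special host notation handled by
// resolveInetAddress() above can be used. Markers #...# or _..._ select an interface instead of a
// DNS name; "eth0" is a hypothetical interface name, and resolution throws IOException if it is absent.
import java.io.IOException;
import java.net.InetAddress;
import org.xbib.elasticsearch.extras.client.NetworkUtils;

public class NetworkUtilsSketch {

    public static void resolve() throws IOException {
        InetAddress local = NetworkUtils.resolveInetAddress("_local_", null);              // this host
        InetAddress nonLoopback = NetworkUtils.resolveInetAddress("_non_loopback_", null); // first non-loopback address
        InetAddress byInterface = NetworkUtils.resolveInetAddress("_eth0_", null);         // by interface name (hypothetical)
        InetAddress byName = NetworkUtils.resolveInetAddress("localhost", null);           // plain DNS lookup
    }
}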
startBulkRefreshIntervals = new HashMap<>(); + + private final Map stopBulkRefreshIntervals = new HashMap<>(); + + @Override + public void startBulk(String indexName, long startRefreshInterval, long stopRefreshInterval) { + synchronized (indexNames) { + indexNames.add(indexName); + startBulkRefreshIntervals.put(indexName, startRefreshInterval); + stopBulkRefreshIntervals.put(indexName, stopRefreshInterval); + } + } + + @Override + public boolean isBulk(String indexName) { + return indexNames.contains(indexName); + } + + @Override + public void finishBulk(String indexName) { + synchronized (indexNames) { + indexNames.remove(indexName); + } + } + + @Override + public Set indices() { + return indexNames; + } + + @Override + public Map getStartBulkRefreshIntervals() { + return startBulkRefreshIntervals; + } + + @Override + public Map getStopBulkRefreshIntervals() { + return stopBulkRefreshIntervals; + } + +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkMetric.java b/src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkMetric.java new file mode 100644 index 0000000..bfbde5a --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkMetric.java @@ -0,0 +1,82 @@ +package org.xbib.elasticsearch.extras.client; + +import org.xbib.metrics.Count; +import org.xbib.metrics.CountMetric; +import org.xbib.metrics.Meter; +import org.xbib.metrics.Metered; +/** + * + */ +public class SimpleBulkMetric implements BulkMetric { + + private final Meter totalIngest = new Meter(); + + private final Count totalIngestSizeInBytes = new CountMetric(); + + private final Count currentIngest = new CountMetric(); + + private final Count currentIngestNumDocs = new CountMetric(); + + private final Count submitted = new CountMetric(); + + private final Count succeeded = new CountMetric(); + + private final Count failed = new CountMetric(); + + private Long started; + + private Long stopped; + + @Override + public Metered getTotalIngest() { + return totalIngest; + } + + @Override + public Count getTotalIngestSizeInBytes() { + return totalIngestSizeInBytes; + } + + @Override + public Count getCurrentIngest() { + return currentIngest; + } + + @Override + public Count getCurrentIngestNumDocs() { + return currentIngestNumDocs; + } + + @Override + public Count getSubmitted() { + return submitted; + } + + @Override + public Count getSucceeded() { + return succeeded; + } + + @Override + public Count getFailed() { + return failed; + } + + @Override + public void start() { + this.started = System.nanoTime(); + this.totalIngest.spawn(5L); + } + + @Override + public void stop() { + this.stopped = System.nanoTime(); + totalIngest.stop(); + } + + @Override + public long elapsed() { + return (stopped != null ? 
stopped : System.nanoTime()) - started; + } + +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClient.java b/src/main/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClient.java new file mode 100644 index 0000000..5a0df14 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClient.java @@ -0,0 +1,502 @@ +package org.xbib.elasticsearch.extras.client.node; + +import com.google.common.collect.ImmutableSet; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.env.Environment; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; +import org.xbib.elasticsearch.extras.client.AbstractClient; +import org.xbib.elasticsearch.extras.client.BulkControl; +import org.xbib.elasticsearch.extras.client.BulkMetric; +import org.xbib.elasticsearch.extras.client.ClientMethods; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +/** + * + */ +public class BulkNodeClient extends AbstractClient implements ClientMethods { + + private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeClient.class.getName()); + + private int maxActionsPerRequest = DEFAULT_MAX_ACTIONS_PER_REQUEST; + + private int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS; + + private ByteSizeValue maxVolume = DEFAULT_MAX_VOLUME_PER_REQUEST; + + private TimeValue flushInterval = DEFAULT_FLUSH_INTERVAL; + + private ElasticsearchClient client; + + private BulkProcessor bulkProcessor; + + private BulkMetric metric; + + private BulkControl control; + + private Throwable throwable; + + private boolean closed; + + public BulkNodeClient() { + } + + @Override + public BulkNodeClient maxActionsPerRequest(int maxActionsPerRequest) { + this.maxActionsPerRequest = maxActionsPerRequest; + return this; + } + + @Override + public BulkNodeClient maxConcurrentRequests(int maxConcurrentRequests) { + this.maxConcurrentRequests = maxConcurrentRequests; + return this; + } + + @Override + public BulkNodeClient maxVolumePerRequest(ByteSizeValue maxVolume) { + this.maxVolume = maxVolume; + return this; + } + + @Override + public 
BulkNodeClient flushIngestInterval(TimeValue flushInterval) { + this.flushInterval = flushInterval; + return this; + } + + @Override + public BulkNodeClient init(ElasticsearchClient client, + final BulkMetric metric, final BulkControl control) { + this.client = client; + this.metric = metric; + this.control = control; + if (metric != null) { + metric.start(); + } + BulkProcessor.Listener listener = new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + long l = -1; + if (metric != null) { + metric.getCurrentIngest().inc(); + l = metric.getCurrentIngest().getCount(); + int n = request.numberOfActions(); + metric.getSubmitted().inc(n); + metric.getCurrentIngestNumDocs().inc(n); + metric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes()); + } + logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]", + executionId, + request.numberOfActions(), + request.estimatedSizeInBytes(), + l); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + long l = -1; + if (metric != null) { + metric.getCurrentIngest().dec(); + l = metric.getCurrentIngest().getCount(); + metric.getSucceeded().inc(response.getItems().length); + } + int n = 0; + for (BulkItemResponse itemResponse : response.getItems()) { + if (metric != null) { + metric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId()); + } + if (itemResponse.isFailed()) { + n++; + if (metric != null) { + metric.getSucceeded().dec(1); + metric.getFailed().inc(1); + } + } + } + if (metric != null) { + logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests", + executionId, + metric.getSucceeded().getCount(), + metric.getFailed().getCount(), + response.getTook().millis(), + l); + } + if (n > 0) { + logger.error("bulk [{}] failed with {} failed items, failure message = {}", + executionId, n, response.buildFailureMessage()); + } else { + if (metric != null) { + metric.getCurrentIngestNumDocs().dec(response.getItems().length); + } + } + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + if (metric != null) { + metric.getCurrentIngest().dec(); + } + throwable = failure; + closed = true; + logger.error("after bulk [" + executionId + "] error", failure); + } + }; + BulkProcessor.Builder builder = BulkProcessor.builder((Client) client, listener) + .setBulkActions(maxActionsPerRequest) + .setConcurrentRequests(maxConcurrentRequests) + .setFlushInterval(flushInterval); + if (maxVolume != null) { + builder.setBulkSize(maxVolume); + } + this.bulkProcessor = builder.build(); + this.closed = false; + return this; + } + + @Override + public BulkNodeClient init(Settings settings, BulkMetric metric, BulkControl control) throws IOException { + createClient(settings); + this.metric = metric; + this.control = control; + return this; + } + + @Override + public ElasticsearchClient client() { + return client; + } + + @Override + protected void createClient(Settings settings) throws IOException { + if (client != null) { + logger.warn("client is open, closing..."); + client.threadPool().shutdown(); + logger.warn("client is closed"); + client = null; + } + if (settings != null) { + String version = System.getProperty("os.name") + + " " + System.getProperty("java.vm.name") + + " " + System.getProperty("java.vm.vendor") + + " " + System.getProperty("java.runtime.version") + + " " + System.getProperty("java.vm.version"); + 
Settings effectiveSettings = Settings.builder().put(settings) + .put("node.client", true) + .put("node.master", false) + .put("node.data", false).build(); + logger.info("creating node client on {} with effective settings {}", + version, effectiveSettings.getAsMap()); + Collection> plugins = Collections.emptyList(); + Node node = new BulkNode(new Environment(effectiveSettings), plugins); + node.start(); + this.client = node.client(); + } + } + + @Override + public BulkMetric getMetric() { + return metric; + } + + @Override + public BulkNodeClient index(String index, String type, String id, String source) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + if (metric != null) { + metric.getCurrentIngest().inc(index, type, id); + } + bulkProcessor.add(new IndexRequest(index).type(type).id(id).create(false).source(source)); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of index request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkNodeClient bulkIndex(IndexRequest indexRequest) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + if (metric != null) { + metric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id()); + } + bulkProcessor.add(indexRequest); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of index request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkNodeClient delete(String index, String type, String id) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + if (metric != null) { + metric.getCurrentIngest().inc(index, type, id); + } + bulkProcessor.add(new DeleteRequest(index).type(type).id(id)); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of delete failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkNodeClient bulkDelete(DeleteRequest deleteRequest) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + if (metric != null) { + metric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); + } + bulkProcessor.add(deleteRequest); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of delete failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkNodeClient update(String index, String type, String id, String source) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + if (metric != null) { + metric.getCurrentIngest().inc(index, type, id); + } + bulkProcessor.add(new UpdateRequest().index(index).type(type).id(id).upsert(source)); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of update request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkNodeClient bulkUpdate(UpdateRequest updateRequest) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + if (metric != null) { + metric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id()); + } + bulkProcessor.add(updateRequest); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of update request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkNodeClient flushIngest() { + if (closed) { + throw new ElasticsearchException("client is 
closed"); + } + logger.debug("flushing bulk processor"); + bulkProcessor.flush(); + return this; + } + + @Override + public BulkNodeClient waitForResponses(TimeValue maxWaitTime) throws InterruptedException, ExecutionException { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + while (!bulkProcessor.awaitClose(maxWaitTime.getMillis(), TimeUnit.MILLISECONDS)) { + logger.warn("still waiting for responses"); + } + return this; + } + + @Override + public BulkNodeClient startBulk(String index, long startRefreshIntervalMillis, long stopRefreshItervalMillis) + throws IOException { + if (control == null) { + return this; + } + if (!control.isBulk(index)) { + control.startBulk(index, startRefreshIntervalMillis, stopRefreshItervalMillis); + updateIndexSetting(index, "refresh_interval", startRefreshIntervalMillis + "ms"); + } + return this; + } + + @Override + public BulkNodeClient stopBulk(String index) throws IOException { + if (control == null) { + return this; + } + if (control.isBulk(index)) { + updateIndexSetting(index, "refresh_interval", control.getStopBulkRefreshIntervals().get(index) + "ms"); + control.finishBulk(index); + } + return this; + } + + @Override + public synchronized void shutdown() { + try { + if (bulkProcessor != null) { + logger.debug("closing bulk processor..."); + bulkProcessor.close(); + } + if (control != null && control.indices() != null && !control.indices().isEmpty()) { + logger.debug("stopping bulk mode for indices {}...", control.indices()); + for (String index : ImmutableSet.copyOf(control.indices())) { + stopBulk(index); + } + metric.stop(); + } + } catch (Exception e) { + logger.error(e.getMessage(), e); + } + } + + @Override + public BulkNodeClient newIndex(String index) { + return newIndex(index, null, null); + } + + @Override + public BulkNodeClient newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException { + resetSettings(); + setting(settings); + mapping(type, mappings); + return newIndex(index, settings(), mappings()); + } + + @Override + public BulkNodeClient newIndex(String index, Settings settings, Map mappings) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + if (client == null) { + logger.warn("no client for create index"); + return this; + } + if (index == null) { + logger.warn("no index name given to create index"); + return this; + } + CreateIndexRequestBuilder createIndexRequestBuilder = + new CreateIndexRequestBuilder(client(), CreateIndexAction.INSTANCE).setIndex(index); + if (settings != null) { + logger.info("settings = {}", settings.getAsStructuredMap()); + createIndexRequestBuilder.setSettings(settings); + } + if (mappings != null) { + for (String type : mappings.keySet()) { + logger.info("found mapping for {}", type); + createIndexRequestBuilder.addMapping(type, mappings.get(type)); + } + } + createIndexRequestBuilder.execute().actionGet(); + logger.info("index {} created", index); + return this; + } + + @Override + public BulkNodeClient newMapping(String index, String type, Map mapping) { + PutMappingRequestBuilder putMappingRequestBuilder = + new PutMappingRequestBuilder(client(), PutMappingAction.INSTANCE) + .setIndices(index) + .setType(type) + .setSource(mapping); + putMappingRequestBuilder.execute().actionGet(); + logger.info("mapping created for index {} and type {}", index, type); + return this; + } + + @Override + public BulkNodeClient deleteIndex(String index) { + if (closed) { + throw new ElasticsearchException("client is closed"); 
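// Editor's sketch (not part of the patch): creating an index with explicit settings and mappings on a
// BulkNodeClient that was initialized with init(client, metric, control). The mappings parameter is
// assumed to be a Map<String, String> of type name to mapping JSON (type parameters appear stripped
// in this diff); index/type names and the JSON bodies are placeholders.
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.settings.Settings;
import org.xbib.elasticsearch.extras.client.node.BulkNodeClient;

import java.util.Collections;
import java.util.Map;

public class BulkNodeClientSketch {

    public static void createAndFill(BulkNodeClient client) {
        Settings indexSettings = Settings.settingsBuilder()
                .put("index.number_of_shards", 1)
                .put("index.number_of_replicas", 0)
                .build();
        Map<String, String> mappings = Collections.singletonMap("mytype",
                "{\"mytype\":{\"properties\":{\"name\":{\"type\":\"string\"}}}}");
        client.newIndex("myindex", indexSettings, mappings);
        client.bulkIndex(new IndexRequest("myindex").type("mytype").id("1")
                .source("{\"name\":\"Hello World\"}"));
        client.flushIngest();
    }
}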
+ } + if (client == null) { + logger.warn("no client"); + return this; + } + if (index == null) { + logger.warn("no index name given to delete index"); + return this; + } + DeleteIndexRequestBuilder deleteIndexRequestBuilder = + new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, index); + deleteIndexRequestBuilder.execute().actionGet(); + return this; + } + + @Override + public boolean hasThrowable() { + return throwable != null; + } + + @Override + public Throwable getThrowable() { + return throwable; + } + + public Settings getSettings() { + return settings(); + } + + public Settings.Builder getSettingsBuilder() { + return settingsBuilder(); + } + + private class BulkNode extends Node { + + BulkNode(Environment env, Collection> classpathPlugins) { + super(env, Version.CURRENT, classpathPlugins); + } + } + +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/node/package-info.java b/src/main/java/org/xbib/elasticsearch/extras/client/node/package-info.java new file mode 100644 index 0000000..c5c0895 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/node/package-info.java @@ -0,0 +1,4 @@ +/** + * Classes for Elasticsearch node client extras. + */ +package org.xbib.elasticsearch.extras.client.node; diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/package-info.java b/src/main/java/org/xbib/elasticsearch/extras/client/package-info.java new file mode 100644 index 0000000..c231c60 --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/package-info.java @@ -0,0 +1,4 @@ +/** + * Classes for Elasticsearch client extras. + */ +package org.xbib.elasticsearch.extras.client; diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClient.java b/src/main/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClient.java new file mode 100644 index 0000000..ac2a00e --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClient.java @@ -0,0 +1,581 @@ +package org.xbib.elasticsearch.extras.client.transport; + +import com.google.common.collect.ImmutableSet; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import 
org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.xbib.elasticsearch.extras.client.AbstractClient; +import org.xbib.elasticsearch.extras.client.BulkProcessor; +import org.xbib.elasticsearch.extras.client.BulkMetric; +import org.xbib.elasticsearch.extras.client.BulkControl; +import org.xbib.elasticsearch.extras.client.ClientMethods; +import org.xbib.elasticsearch.extras.client.NetworkUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +/** + * Transport client with addtitional methods using the BulkProcessor. + */ +public class BulkTransportClient extends AbstractClient implements ClientMethods { + + private static final ESLogger logger = ESLoggerFactory.getLogger(BulkTransportClient.class.getName()); + + private int maxActionsPerRequest = DEFAULT_MAX_ACTIONS_PER_REQUEST; + + private int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS; + + private ByteSizeValue maxVolumePerRequest = DEFAULT_MAX_VOLUME_PER_REQUEST; + + private TimeValue flushInterval = DEFAULT_FLUSH_INTERVAL; + + private BulkProcessor bulkProcessor; + + private Throwable throwable; + + private boolean closed; + + private TransportClient client; + + private BulkMetric metric; + + private BulkControl control; + + private boolean ignoreBulkErrors; + + private boolean isShutdown; + + public BulkTransportClient() { + } + + @Override + public BulkTransportClient init(ElasticsearchClient client, BulkMetric metric, BulkControl control) throws IOException { + return init(findSettings(), metric, control); + } + + @Override + public BulkTransportClient init(Settings settings, final BulkMetric metric, final BulkControl control) { + createClient(settings); + this.metric = metric; + this.control = control; + if (metric != null) { + metric.start(); + } + resetSettings(); + BulkProcessor.Listener listener = new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + long l = -1L; + if (metric != null) { + metric.getCurrentIngest().inc(); + l = metric.getCurrentIngest().getCount(); + int n = request.numberOfActions(); + metric.getSubmitted().inc(n); + metric.getCurrentIngestNumDocs().inc(n); + metric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes()); + } + logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]", + executionId, + request.numberOfActions(), + request.estimatedSizeInBytes(), + l); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + long l = -1L; + if (metric != null) { + metric.getCurrentIngest().dec(); + l = metric.getCurrentIngest().getCount(); + metric.getSucceeded().inc(response.getItems().length); + } + int n = 0; + for (BulkItemResponse itemResponse : response.getItems()) { + if (metric != null) { + metric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId()); + if (itemResponse.isFailed()) { + n++; + metric.getSucceeded().dec(1); + metric.getFailed().inc(1); + } + } + } + if (metric != null) { + logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] [concurrent requests={}]", + executionId, + 
metric.getSucceeded().getCount(), + metric.getFailed().getCount(), + response.getTook().millis(), + l); + } + if (n > 0) { + logger.error("bulk [{}] failed with {} failed items, failure message = {}", + executionId, n, response.buildFailureMessage()); + } else { + if (metric != null) { + metric.getCurrentIngestNumDocs().dec(response.getItems().length); + } + } + } + + @Override + public void afterBulk(long executionId, BulkRequest requst, Throwable failure) { + if (metric != null) { + metric.getCurrentIngest().dec(); + } + throwable = failure; + if (!ignoreBulkErrors) { + closed = true; + } + logger.error("bulk [" + executionId + "] error", failure); + } + }; + BulkProcessor.Builder builder = BulkProcessor.builder(client, listener) + .setBulkActions(maxActionsPerRequest) + .setConcurrentRequests(maxConcurrentRequests) + .setFlushInterval(flushInterval); + if (maxVolumePerRequest != null) { + builder.setBulkSize(maxVolumePerRequest); + } + this.bulkProcessor = builder.build(); + try { + Collection addrs = findAddresses(settings); + if (!connect(addrs, settings.getAsBoolean("autodiscover", false))) { + throw new NoNodeAvailableException("no cluster nodes available, check settings " + + settings.getAsMap()); + } + } catch (IOException e) { + logger.error(e.getMessage(), e); + } + this.closed = false; + return this; + } + + @Override + public ClientMethods newMapping(String index, String type, Map mapping) { + new PutMappingRequestBuilder(client(), PutMappingAction.INSTANCE) + .setIndices(index) + .setType(type) + .setSource(mapping) + .execute().actionGet(); + logger.info("mapping created for index {} and type {}", index, type); + return this; + } + + @Override + protected void createClient(Settings settings) { + if (client != null) { + logger.warn("client is open, closing..."); + client.close(); + client.threadPool().shutdown(); + logger.warn("client is closed"); + client = null; + } + if (settings != null) { + String version = System.getProperty("os.name") + + " " + System.getProperty("java.vm.name") + + " " + System.getProperty("java.vm.vendor") + + " " + System.getProperty("java.runtime.version") + + " " + System.getProperty("java.vm.version"); + logger.info("creating transport client on {} with effective settings {}", + version, settings.getAsMap()); + this.client = TransportClient.builder() + .settings(settings) + .build(); + this.ignoreBulkErrors = settings.getAsBoolean("ignoreBulkErrors", true); + } + } + + public boolean isShutdown() { + return isShutdown; + } + + @Override + public BulkTransportClient maxActionsPerRequest(int maxActionsPerRequest) { + this.maxActionsPerRequest = maxActionsPerRequest; + return this; + } + + @Override + public BulkTransportClient maxConcurrentRequests(int maxConcurrentRequests) { + this.maxConcurrentRequests = maxConcurrentRequests; + return this; + } + + @Override + public BulkTransportClient maxVolumePerRequest(ByteSizeValue maxVolumePerRequest) { + this.maxVolumePerRequest = maxVolumePerRequest; + return this; + } + + @Override + public BulkTransportClient flushIngestInterval(TimeValue flushInterval) { + this.flushInterval = flushInterval; + return this; + } + + @Override + public ElasticsearchClient client() { + return client; + } + + @Override + public BulkMetric getMetric() { + return metric; + } + + @Override + public ClientMethods newIndex(String index) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + return newIndex(index, null, null); + } + + @Override + public ClientMethods newIndex(String index, String type, 
InputStream settings, InputStream mappings) throws IOException { + resetSettings(); + setting(settings); + mapping(type, mappings); + return newIndex(index, settings(), mappings()); + } + + @Override + public ClientMethods newIndex(String index, Settings settings, Map mappings) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + if (client == null) { + logger.warn("no client for create index"); + return this; + } + if (index == null) { + logger.warn("no index name given to create index"); + return this; + } + CreateIndexRequestBuilder createIndexRequestBuilder = + new CreateIndexRequestBuilder(client(), CreateIndexAction.INSTANCE).setIndex(index); + if (settings != null) { + logger.info("settings = {}", settings.getAsStructuredMap()); + createIndexRequestBuilder.setSettings(settings); + } + if (mappings != null) { + for (String type : mappings.keySet()) { + logger.info("found mapping for {}", type); + createIndexRequestBuilder.addMapping(type, mappings.get(type)); + } + } + createIndexRequestBuilder.execute().actionGet(); + logger.info("index {} created", index); + return this; + } + + @Override + public ClientMethods deleteIndex(String index) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + if (client == null) { + logger.warn("no client for delete index"); + return this; + } + if (index == null) { + logger.warn("no index name given to delete index"); + return this; + } + new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, index).execute().actionGet(); + return this; + } + + @Override + public ClientMethods startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds) + throws IOException { + if (control == null) { + return this; + } + if (!control.isBulk(index)) { + control.startBulk(index, startRefreshIntervalSeconds, stopRefreshIntervalSeconds); + updateIndexSetting(index, "refresh_interval", startRefreshIntervalSeconds + "s"); + } + return this; + } + + @Override + public ClientMethods stopBulk(String index) throws IOException { + if (control == null) { + return this; + } + if (control.isBulk(index)) { + updateIndexSetting(index, "refresh_interval", control.getStopBulkRefreshIntervals().get(index) + "s"); + control.finishBulk(index); + } + return this; + } + + @Override + public BulkTransportClient index(String index, String type, String id, String source) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + metric.getCurrentIngest().inc(index, type, id); + bulkProcessor.add(new IndexRequest().index(index).type(type).id(id).create(false).source(source)); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of index request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkTransportClient bulkIndex(IndexRequest indexRequest) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + metric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id()); + bulkProcessor.add(indexRequest); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of index request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkTransportClient delete(String index, String type, String id) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + metric.getCurrentIngest().inc(index, type, id); + bulkProcessor.add(new DeleteRequest().index(index).type(type).id(id)); + } 
catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of delete request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkTransportClient bulkDelete(DeleteRequest deleteRequest) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + metric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); + bulkProcessor.add(deleteRequest); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of delete request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkTransportClient update(String index, String type, String id, String source) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + metric.getCurrentIngest().inc(index, type, id); + bulkProcessor.add(new UpdateRequest().index(index).type(type).id(id).upsert(source)); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of update request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public BulkTransportClient bulkUpdate(UpdateRequest updateRequest) { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + try { + metric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id()); + bulkProcessor.add(updateRequest); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of update request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public synchronized BulkTransportClient flushIngest() { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + if (client == null) { + logger.warn("no client"); + return this; + } + logger.debug("flushing bulk processor"); + bulkProcessor.flush(); + return this; + } + + @Override + public synchronized BulkTransportClient waitForResponses(TimeValue maxWaitTime) + throws InterruptedException, ExecutionException { + if (closed) { + throw new ElasticsearchException("client is closed"); + } + if (client == null) { + logger.warn("no client"); + return this; + } + bulkProcessor.awaitClose(maxWaitTime.getMillis(), TimeUnit.MILLISECONDS); + return this; + } + + @Override + public synchronized void shutdown() { + if (closed) { + shutdownClient(); + throw new ElasticsearchException("client is closed"); + } + if (client == null) { + logger.warn("no client"); + return; + } + try { + if (bulkProcessor != null) { + logger.debug("closing bulk processor..."); + bulkProcessor.close(); + } + if (control != null && control.indices() != null && !control.indices().isEmpty()) { + logger.debug("stopping bulk mode for indices {}...", control.indices()); + for (String index : ImmutableSet.copyOf(control.indices())) { + stopBulk(index); + } + metric.stop(); + } + logger.debug("shutting down..."); + shutdownClient(); + logger.debug("shutting down completed"); + } catch (Exception e) { + logger.error(e.getMessage(), e); + } + } + + @Override + public boolean hasThrowable() { + return throwable != null; + } + + @Override + public Throwable getThrowable() { + return throwable; + } + + private Settings findSettings() { + Settings.Builder settingsBuilder = Settings.settingsBuilder(); + settingsBuilder.put("host", "localhost"); + try { + String hostname = NetworkUtils.getLocalAddress().getHostName(); + logger.debug("the hostname is {}", hostname); + settingsBuilder.put("host", hostname) + .put("port", 9300); + } catch (Exception e) { + 
logger.warn(e.getMessage(), e); + } + return settingsBuilder.build(); + } + + private Collection findAddresses(Settings settings) throws IOException { + String[] hostnames = settings.getAsArray("host", new String[]{"localhost"}); + int port = settings.getAsInt("port", 9300); + Collection addresses = new ArrayList<>(); + for (String hostname : hostnames) { + String[] splitHost = hostname.split(":", 2); + if (splitHost.length == 2) { + String host = splitHost[0]; + InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null); + try { + port = Integer.parseInt(splitHost[1]); + } catch (Exception e) { + // ignore + } + addresses.add(new InetSocketTransportAddress(inetAddress, port)); + } + if (splitHost.length == 1) { + String host = splitHost[0]; + InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null); + addresses.add(new InetSocketTransportAddress(inetAddress, port)); + } + } + return addresses; + } + + private void shutdownClient() { + if (client != null) { + logger.debug("shutdown started"); + client.close(); + client.threadPool().shutdown(); + client = null; + logger.debug("shutdown complete"); + } + isShutdown = true; + } + + private boolean connect(Collection addresses, boolean autodiscover) { + logger.info("trying to connect to {}", addresses); + client.addTransportAddresses(addresses); + if (client.connectedNodes() != null) { + List nodes = client.connectedNodes(); + if (!nodes.isEmpty()) { + logger.info("connected to {}", nodes); + if (autodiscover) { + logger.info("trying to auto-discover all cluster nodes..."); + ClusterStateRequestBuilder clusterStateRequestBuilder = + new ClusterStateRequestBuilder(client, ClusterStateAction.INSTANCE); + ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet(); + DiscoveryNodes discoveryNodes = clusterStateResponse.getState().getNodes(); + client.addDiscoveryNodes(discoveryNodes); + logger.info("after auto-discovery connected to {}", client.connectedNodes()); + } + return true; + } + return false; + } + return false; + } +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/transport/MockTransportClient.java b/src/main/java/org/xbib/elasticsearch/extras/client/transport/MockTransportClient.java new file mode 100644 index 0000000..76bf69e --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/transport/MockTransportClient.java @@ -0,0 +1,156 @@ +package org.xbib.elasticsearch.extras.client.transport; + +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.xbib.elasticsearch.extras.client.BulkControl; +import org.xbib.elasticsearch.extras.client.BulkMetric; + +import java.io.IOException; +import java.util.Map; + +/** + * Mock client, it does not perform actions on a cluster. + * Useful for testing or dry runs. 
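+ * <p>
+ * A minimal usage sketch (all methods shown are the no-op overrides defined in this class,
+ * inherited from {@code BulkTransportClient}; index/type/id values are illustrative only):
+ * <pre>{@code
+ * BulkTransportClient client = new MockTransportClient();
+ * client.index("myindex", "mytype", "1", "{\"key\":\"value\"}"); // accepted but never sent to a cluster
+ * client.flushIngest();                                          // no-op
+ * client.shutdown();                                             // no-op
+ * }</pre>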
+ */ +public class MockTransportClient extends BulkTransportClient { + + public MockTransportClient() { + } + + @Override + public ElasticsearchClient client() { + return null; + } + + @Override + public MockTransportClient init(ElasticsearchClient client, BulkMetric metric, BulkControl control) { + return this; + } + + @Override + public MockTransportClient init(Settings settings, BulkMetric metric, BulkControl control) { + return this; + } + + @Override + public MockTransportClient maxActionsPerRequest(int maxActions) { + return this; + } + + @Override + public MockTransportClient maxConcurrentRequests(int maxConcurrentRequests) { + return this; + } + + @Override + public MockTransportClient maxVolumePerRequest(ByteSizeValue maxVolumePerRequest) { + return this; + } + + @Override + public MockTransportClient flushIngestInterval(TimeValue interval) { + return this; + } + + @Override + public MockTransportClient index(String index, String type, String id, String source) { + return this; + } + + @Override + public MockTransportClient delete(String index, String type, String id) { + return this; + } + + @Override + public MockTransportClient update(String index, String type, String id, String source) { + return this; + } + + @Override + public MockTransportClient bulkIndex(IndexRequest indexRequest) { + return this; + } + + @Override + public MockTransportClient bulkDelete(DeleteRequest deleteRequest) { + return this; + } + + @Override + public MockTransportClient bulkUpdate(UpdateRequest updateRequest) { + return this; + } + + @Override + public MockTransportClient flushIngest() { + return this; + } + + @Override + public MockTransportClient waitForResponses(TimeValue timeValue) throws InterruptedException { + return this; + } + + @Override + public MockTransportClient startBulk(String index, long startRefreshInterval, long stopRefreshIterval) { + return this; + } + + @Override + public MockTransportClient stopBulk(String index) { + return this; + } + + @Override + public MockTransportClient deleteIndex(String index) { + return this; + } + + @Override + public MockTransportClient newIndex(String index) { + return this; + } + + @Override + public MockTransportClient newMapping(String index, String type, Map mapping) { + return this; + } + + @Override + public void putMapping(String index) { + } + + @Override + public void refreshIndex(String index) { + } + + @Override + public void flushIndex(String index) { + } + + @Override + public void waitForCluster(String healthColor, TimeValue timeValue) throws IOException { + } + + @Override + public int waitForRecovery(String index) throws IOException { + return -1; + } + + @Override + public int updateReplicaLevel(String index, int level) throws IOException { + return -1; + } + + @Override + public void shutdown() { + // do nothing + } + +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/transport/TransportClient.java b/src/main/java/org/xbib/elasticsearch/extras/client/transport/TransportClient.java new file mode 100644 index 0000000..423503e --- /dev/null +++ b/src/main/java/org/xbib/elasticsearch/extras/client/transport/TransportClient.java @@ -0,0 +1,517 @@ +package org.xbib.elasticsearch.extras.client.transport; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; + +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; 
+import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionModule; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.GenericAction; +import org.elasticsearch.action.TransportActionNodeProxy; +import org.elasticsearch.action.admin.cluster.node.liveness.LivenessRequest; +import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; +import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; +import org.elasticsearch.cache.recycler.PageCacheRecycler; +import org.elasticsearch.client.support.AbstractClient; +import org.elasticsearch.client.support.Headers; +import org.elasticsearch.client.transport.ClientTransportModule; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterNameModule; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.inject.Injector; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.inject.ModulesBuilder; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.indices.breaker.CircuitBreakerModule; +import org.elasticsearch.monitor.MonitorService; +import org.elasticsearch.node.internal.InternalSettingsPreparer; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsModule; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.ThreadPoolModule; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.FutureTransportResponseHandler; +import org.elasticsearch.transport.TransportModule; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Stripped-down transport client without node sampling. 
+ * A merge of the original TransportClient, TransportClientNodesService, and TransportClientProxy,
+ * with a configurable ping interval setting added.
+ */
+public class TransportClient extends AbstractClient {
+
+    private static final String CLIENT_TYPE = "transport";
+
+    private final Injector injector;
+
+    private final ProxyActionMap proxyActionMap;
+
+    private final long pingTimeout;
+
+    private final ClusterName clusterName;
+
+    private final TransportService transportService;
+
+    private final Version minCompatibilityVersion;
+
+    private final Headers headers;
+
+    private final AtomicInteger tempNodeId = new AtomicInteger();
+
+    private final AtomicInteger nodeCounter = new AtomicInteger();
+
+    private final Object mutex = new Object();
+
+    private volatile List<DiscoveryNode> listedNodes = Collections.emptyList();
+
+    private volatile List<DiscoveryNode> nodes = Collections.emptyList();
+
+    private volatile List<DiscoveryNode> filteredNodes = Collections.emptyList();
+
+    private volatile boolean closed;
+
+    private TransportClient(Injector injector) {
+        super(injector.getInstance(Settings.class), injector.getInstance(ThreadPool.class),
+                injector.getInstance(Headers.class));
+        this.injector = injector;
+        this.clusterName = injector.getInstance(ClusterName.class);
+        this.transportService = injector.getInstance(TransportService.class);
+        this.minCompatibilityVersion = injector.getInstance(Version.class).minimumCompatibilityVersion();
+        this.headers = injector.getInstance(Headers.class);
+        this.pingTimeout = this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis();
+        this.proxyActionMap = injector.getInstance(ProxyActionMap.class);
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    /**
+     * Returns the currently registered transport addresses to use.
+     *
+     * @return list of transport addresses
+     */
+    public List<TransportAddress> transportAddresses() {
+        List<TransportAddress> lstBuilder = new ArrayList<>();
+        for (DiscoveryNode listedNode : listedNodes) {
+            lstBuilder.add(listedNode.address());
+        }
+        return Collections.unmodifiableList(lstBuilder);
+    }
+
+    /**
+     * Returns the currently connected transport nodes that this client will use.
+     * The nodes include all the nodes that are currently alive based on the transport
+     * addresses provided.
+     *
+     * @return list of nodes
+     */
+    public List<DiscoveryNode> connectedNodes() {
+        return this.nodes;
+    }
+
+    /**
+     * Returns the filtered nodes that this client did not connect to, for example
+     * because of a mismatch in cluster name.
+     *
+     * @return list of nodes
+     */
+    public List<DiscoveryNode> filteredNodes() {
+        return this.filteredNodes;
+    }
+
+    /**
+     * Returns the listed nodes in the transport client (ones added to it).
+     *
+     * @return list of nodes
+     */
+    public List<DiscoveryNode> listedNodes() {
+        return this.listedNodes;
+    }
+
+    /**
+     * Adds the transport addresses of the given discovery nodes to this client.
+     * A node will be used if it is possible to connect to it; if it is unavailable,
+     * it will be connected to automatically once it is up.
+     * To get the list of all currently connected nodes, see {@link #connectedNodes()}.
+ * + * @param discoveryNodes nodes + * @return this transport client + */ + public TransportClient addDiscoveryNodes(DiscoveryNodes discoveryNodes) { + Collection addresses = new ArrayList<>(); + for (DiscoveryNode discoveryNode : discoveryNodes) { + addresses.add((InetSocketTransportAddress) discoveryNode.address()); + } + addTransportAddresses(addresses); + return this; + } + + public TransportClient addTransportAddresses(Collection transportAddresses) { + synchronized (mutex) { + if (closed) { + throw new IllegalStateException("transport client is closed, can't add addresses"); + } + List filtered = new ArrayList<>(transportAddresses.size()); + for (TransportAddress transportAddress : transportAddresses) { + boolean found = false; + for (DiscoveryNode otherNode : listedNodes) { + if (otherNode.address().equals(transportAddress)) { + found = true; + logger.debug("address [{}] already exists with [{}], ignoring...", transportAddress, otherNode); + break; + } + } + if (!found) { + filtered.add(transportAddress); + } + } + if (filtered.isEmpty()) { + return this; + } + List discoveryNodeList = new ArrayList<>(); + discoveryNodeList.addAll(listedNodes()); + for (TransportAddress transportAddress : filtered) { + DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeId.incrementAndGet(), transportAddress, + minCompatibilityVersion); + logger.debug("adding address [{}]", node); + discoveryNodeList.add(node); + } + listedNodes = Collections.unmodifiableList(discoveryNodeList); + connect(); + } + return this; + } + + /** + * Removes a transport address from the list of transport addresses that are used to connect to. + * + * @param transportAddress transport address to remove + * @return this transport client + */ + public TransportClient removeTransportAddress(TransportAddress transportAddress) { + synchronized (mutex) { + if (closed) { + throw new IllegalStateException("transport client is closed, can't remove an address"); + } + List builder = new ArrayList<>(); + for (DiscoveryNode otherNode : listedNodes) { + if (!otherNode.address().equals(transportAddress)) { + builder.add(otherNode); + } else { + logger.debug("removing address [{}]", otherNode); + } + } + listedNodes = Collections.unmodifiableList(builder); + } + return this; + } + + @Override + @SuppressWarnings("rawtypes") + public void close() { + synchronized (mutex) { + if (closed) { + return; + } + closed = true; + for (DiscoveryNode node : nodes) { + transportService.disconnectFromNode(node); + } + for (DiscoveryNode listedNode : listedNodes) { + transportService.disconnectFromNode(listedNode); + } + nodes = Collections.emptyList(); + } + injector.getInstance(TransportService.class).close(); + try { + injector.getInstance(MonitorService.class).close(); + } catch (Exception e) { + // ignore, might not be bounded + } + for (Class plugin : injector.getInstance(PluginsService.class).nodeServices()) { + injector.getInstance(plugin).close(); + } + try { + ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS); + } catch (Exception e) { + // ignore + } + injector.getInstance(PageCacheRecycler.class).close(); + } + + private void connect() { + Set newNodes = new HashSet<>(); + Set newFilteredNodes = new HashSet<>(); + for (DiscoveryNode listedNode : listedNodes) { + if (!transportService.nodeConnected(listedNode)) { + try { + logger.trace("connecting to listed node (light) [{}]", listedNode); + transportService.connectToNodeLight(listedNode); + } catch (Throwable e) { + logger.debug("failed to connect 
to node [{}], removed from nodes list", e, listedNode);
+                    continue;
+                }
+            }
+            try {
+                LivenessResponse livenessResponse = transportService.submitRequest(listedNode,
+                        TransportLivenessAction.NAME, headers.applyTo(new LivenessRequest()),
+                        TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE)
+                                .withTimeout(pingTimeout).build(),
+                        new FutureTransportResponseHandler<LivenessResponse>() {
+                            @Override
+                            public LivenessResponse newInstance() {
+                                return new LivenessResponse();
+                            }
+                        }).txGet();
+                if (!clusterName.equals(livenessResponse.getClusterName())) {
+                    logger.warn("node {} not part of the cluster {}, ignoring...", listedNode, clusterName);
+                    newFilteredNodes.add(listedNode);
+                } else if (livenessResponse.getDiscoveryNode() != null) {
+                    DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode();
+                    newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.id(), nodeWithInfo.getHostName(),
+                            nodeWithInfo.getHostAddress(), listedNode.address(), nodeWithInfo.attributes(),
+                            nodeWithInfo.version()));
+                } else {
+                    logger.debug("node {} didn't return any discovery info, temporarily using transport discovery node",
+                            listedNode);
+                    newNodes.add(listedNode);
+                }
+            } catch (Throwable e) {
+                logger.info("failed to get node info for {}, disconnecting...", e, listedNode);
+                transportService.disconnectFromNode(listedNode);
+            }
+        }
+        for (Iterator<DiscoveryNode> it = newNodes.iterator(); it.hasNext(); ) {
+            DiscoveryNode node = it.next();
+            if (!transportService.nodeConnected(node)) {
+                try {
+                    logger.trace("connecting to node [{}]", node);
+                    transportService.connectToNode(node);
+                } catch (Throwable e) {
+                    it.remove();
+                    logger.debug("failed to connect to discovered node [" + node + "]", e);
+                }
+            }
+        }
+        this.nodes = Collections.unmodifiableList(new ArrayList<>(newNodes));
+        this.filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes));
+    }
+
+    @Override
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    protected <Request extends ActionRequest, Response extends ActionResponse,
+            RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
+    void doExecute(Action<Request, Response, RequestBuilder> action, final Request request,
+                   ActionListener<Response> listener) {
+        final TransportActionNodeProxy proxyAction = proxyActionMap.getProxies().get(action);
+        if (proxyAction == null) {
+            throw new IllegalStateException("undefined action " + action);
+        }
+        NodeListenerCallback<Response> callback = new NodeListenerCallback<Response>() {
+            @Override
+            public void doWithNode(DiscoveryNode node, ActionListener<Response> listener) {
+                proxyAction.execute(node, request, listener);
+            }
+        };
+        List<DiscoveryNode> nodes = this.nodes;
+        if (nodes.isEmpty()) {
+            throw new NoNodeAvailableException("none of the configured nodes are available: " + this.listedNodes);
+        }
+        int index = nodeCounter.incrementAndGet();
+        if (index < 0) {
+            index = 0;
+            nodeCounter.set(0);
+        }
+        RetryListener<Response> retryListener = new RetryListener<>(callback, listener, nodes, index);
+        DiscoveryNode node = nodes.get((index) % nodes.size());
+        try {
+            callback.doWithNode(node, retryListener);
+        } catch (Throwable t) {
+            listener.onFailure(t);
+        }
+    }
+
+    /**
+     * A callback that executes an action on a given node.
+     *
+     * @param <Response> the response type
+     */
+    interface NodeListenerCallback<Response> {
+
+        void doWithNode(DiscoveryNode node, ActionListener<Response> listener);
+    }
+
+    /**
+     * A builder for {@link TransportClient} instances.
+     */
+    public static class Builder {
+
+        private Settings settings = Settings.EMPTY;
+        private List<Class<? extends Plugin>> pluginClasses = new ArrayList<>();
+
+        public Builder settings(Settings.Builder settings) {
+            return settings(settings.build());
+        }
+
+        public Builder settings(Settings settings) {
+            this.settings = settings;
+            return this;
+        }
+
+        public Builder addPlugin(Class<? extends Plugin> pluginClass) {
+            pluginClasses.add(pluginClass);
+            return this;
+        }
+
+        public
TransportClient build() { + Settings settings = InternalSettingsPreparer.prepareSettings(this.settings); + settings = settingsBuilder() + .put("transport.ping.schedule", this.settings.get("ping.interval", "30s")) + .put(settings) + .put("network.server", false) + .put("node.client", true) + .put(CLIENT_TYPE_SETTING, CLIENT_TYPE) + .build(); + PluginsService pluginsService = new PluginsService(settings, null, null, pluginClasses); + this.settings = pluginsService.updatedSettings(); + Version version = Version.CURRENT; + final ThreadPool threadPool = new ThreadPool(settings); + + boolean success = false; + try { + ModulesBuilder modules = new ModulesBuilder(); + modules.add(new Version.Module(version)); + // plugin modules must be added here, before others or we can get crazy injection errors... + for (Module pluginModule : pluginsService.nodeModules()) { + modules.add(pluginModule); + } + modules.add(new PluginsModule(pluginsService)); + modules.add(new SettingsModule(this.settings)); + modules.add(new NetworkModule()); + modules.add(new ClusterNameModule(this.settings)); + modules.add(new ThreadPoolModule(threadPool)); + modules.add(new TransportModule(this.settings)); + modules.add(new SearchModule() { + @Override + protected void configure() { + // noop + } + }); + modules.add(new ActionModule(true)); + modules.add(new ClientTransportModule()); + modules.add(new CircuitBreakerModule(this.settings)); + pluginsService.processModules(modules); + Injector injector = modules.createInjector(); + injector.getInstance(TransportService.class).start(); + TransportClient transportClient = new TransportClient(injector); + success = true; + return transportClient; + } finally { + if (!success) { + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + } + } + } + + private static class RetryListener implements ActionListener { + private final ESLogger logger = ESLoggerFactory.getLogger(RetryListener.class.getName()); + private final NodeListenerCallback callback; + private final ActionListener listener; + private final List nodes; + private final int index; + + private volatile int n; + + RetryListener(NodeListenerCallback callback, ActionListener listener, + List nodes, int index) { + this.callback = callback; + this.listener = listener; + this.nodes = nodes; + this.index = index; + } + + @Override + public void onResponse(Response response) { + listener.onResponse(response); + } + + @Override + public void onFailure(Throwable e) { + if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) { + int n = ++this.n; + if (n >= nodes.size()) { + listener.onFailure(new NoNodeAvailableException("none of the configured nodes were available: " + + nodes, e)); + } else { + try { + logger.warn("retrying on another node (n={}, nodes={})", n, nodes.size()); + callback.doWithNode(nodes.get((index + n) % nodes.size()), this); + } catch (final Throwable t) { + listener.onFailure(t); + } + } + } else { + listener.onFailure(e); + } + } + } + + /** + * The {@link ProxyActionMap} must be declared public. 
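+ * Presumably this is because it is instantiated by the Guice injector
+ * (see {@code injector.getInstance(ProxyActionMap.class)} in the constructor), which needs access to it.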
+ */
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public static class ProxyActionMap {
+
+        private final ImmutableMap<Action, TransportActionNodeProxy> proxies;
+
+        @Inject
+        public ProxyActionMap(Settings settings, TransportService transportService, Map<String, GenericAction> actions) {
+            MapBuilder<Action, TransportActionNodeProxy> actionsBuilder = new MapBuilder<>();
+            for (GenericAction action : actions.values()) {
+                if (action instanceof Action) {
+                    actionsBuilder.put((Action) action, new TransportActionNodeProxy(settings, action, transportService));
+                }
+            }
+            this.proxies = actionsBuilder.immutableMap();
+        }
+
+        public ImmutableMap<Action, TransportActionNodeProxy> getProxies() {
+            return proxies;
+        }
+    }
+
+}
diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/transport/package-info.java b/src/main/java/org/xbib/elasticsearch/extras/client/transport/package-info.java
new file mode 100644
index 0000000..ac6a50d
--- /dev/null
+++ b/src/main/java/org/xbib/elasticsearch/extras/client/transport/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Classes for Elasticsearch transport client extras.
+ */
+package org.xbib.elasticsearch.extras.client.transport;
diff --git a/src/test/resources/log4j2.xml b/src/test/resources/log4j2.xml
new file mode 100644
index 0000000..f71aced
--- /dev/null
+++ b/src/test/resources/log4j2.xml
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file