diff --git a/.travis.yml b/.travis.yml
index ee1dfd1..dd9c325 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,7 @@
language: java
sudo: required
jdk:
- - oraclejdk8
+ - openjdk11
cache:
directories:
- $HOME/.m2
diff --git a/api/build.gradle b/api/build.gradle
deleted file mode 100644
index 6344343..0000000
--- a/api/build.gradle
+++ /dev/null
@@ -1,11 +0,0 @@
-
-dependencies {
- compile "io.netty:netty-buffer:${rootProject.property('netty.version')}"
- compile "io.netty:netty-codec-http:${rootProject.property('netty.version')}"
- compile "io.netty:netty-handler:${rootProject.property('netty.version')}"
- compile "org.xbib.elasticsearch:elasticsearch:${rootProject.property('elasticsearch-server.version')}"
-}
-
-jar {
- baseName "${rootProject.name}-api"
-}
diff --git a/api/config/checkstyle/checkstyle.xml b/api/config/checkstyle/checkstyle.xml
deleted file mode 100644
index 8cb4438..0000000
--- a/api/config/checkstyle/checkstyle.xml
+++ /dev/null
@@ -1,321 +0,0 @@
diff --git a/api/src/docs/asciidoc/css/foundation.css b/api/src/docs/asciidoc/css/foundation.css
deleted file mode 100644
index 27be611..0000000
--- a/api/src/docs/asciidoc/css/foundation.css
+++ /dev/null
@@ -1,684 +0,0 @@
-/*! normalize.css v2.1.2 | MIT License | git.io/normalize */
-/* ========================================================================== HTML5 display definitions ========================================================================== */
-/** Correct `block` display not defined in IE 8/9. */
-article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; }
-
-/** Correct `inline-block` display not defined in IE 8/9. */
-audio, canvas, video { display: inline-block; }
-
-/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */
-audio:not([controls]) { display: none; height: 0; }
-
-/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */
-[hidden], template { display: none; }
-
-script { display: none !important; }
-
-/* ========================================================================== Base ========================================================================== */
-/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */
-html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ }
-
-/** Remove default margin. */
-body { margin: 0; }
-
-/* ========================================================================== Links ========================================================================== */
-/** Remove the gray background color from active links in IE 10. */
-a { background: transparent; }
-
-/** Address `outline` inconsistency between Chrome and other browsers. */
-a:focus { outline: thin dotted; }
-
-/** Improve readability when focused and also mouse hovered in all browsers. */
-a:active, a:hover { outline: 0; }
-
-/* ========================================================================== Typography ========================================================================== */
-/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */
-h1 { font-size: 2em; margin: 0.67em 0; }
-
-/** Address styling not present in IE 8/9, Safari 5, and Chrome. */
-abbr[title] { border-bottom: 1px dotted; }
-
-/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */
-b, strong { font-weight: bold; }
-
-/** Address styling not present in Safari 5 and Chrome. */
-dfn { font-style: italic; }
-
-/** Address differences between Firefox and other browsers. */
-hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; }
-
-/** Address styling not present in IE 8/9. */
-mark { background: #ff0; color: #000; }
-
-/** Correct font family set oddly in Safari 5 and Chrome. */
-code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; }
-
-/** Improve readability of pre-formatted text in all browsers. */
-pre { white-space: pre-wrap; }
-
-/** Set consistent quote types. */
-q { quotes: "\201C" "\201D" "\2018" "\2019"; }
-
-/** Address inconsistent and variable font size in all browsers. */
-small { font-size: 80%; }
-
-/** Prevent `sub` and `sup` affecting `line-height` in all browsers. */
-sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; }
-
-sup { top: -0.5em; }
-
-sub { bottom: -0.25em; }
-
-/* ========================================================================== Embedded content ========================================================================== */
-/** Remove border when inside `a` element in IE 8/9. */
-img { border: 0; }
-
-/** Correct overflow displayed oddly in IE 9. */
-svg:not(:root) { overflow: hidden; }
-
-/* ========================================================================== Figures ========================================================================== */
-/** Address margin not present in IE 8/9 and Safari 5. */
-figure { margin: 0; }
-
-/* ========================================================================== Forms ========================================================================== */
-/** Define consistent border, margin, and padding. */
-fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; }
-
-/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */
-legend { border: 0; /* 1 */ padding: 0; /* 2 */ }
-
-/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */
-button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ }
-
-/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */
-button, input { line-height: normal; }
-
-/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */
-button, select { text-transform: none; }
-
-/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */
-button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ }
-
-/** Re-set default cursor for disabled elements. */
-button[disabled], html input[disabled] { cursor: default; }
-
-/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */
-input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ }
-
-/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */
-input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; }
-
-/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */
-input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; }
-
-/** Remove inner padding and border in Firefox 4+. */
-button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; }
-
-/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */
-textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ }
-
-/* ========================================================================== Tables ========================================================================== */
-/** Remove most spacing between table cells. */
-table { border-collapse: collapse; border-spacing: 0; }
-
-meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; }
-
-meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; }
-
-meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; }
-
-*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; }
-
-html, body { font-size: 100%; }
-
-body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; }
-
-a:hover { cursor: pointer; }
-
-img, object, embed { max-width: 100%; height: auto; }
-
-object, embed { height: 100%; }
-
-img { -ms-interpolation-mode: bicubic; }
-
-#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; }
-
-.left { float: left !important; }
-
-.right { float: right !important; }
-
-.text-left { text-align: left !important; }
-
-.text-right { text-align: right !important; }
-
-.text-center { text-align: center !important; }
-
-.text-justify { text-align: justify !important; }
-
-.hide { display: none; }
-
-.antialiased { -webkit-font-smoothing: antialiased; }
-
-img { display: inline-block; vertical-align: middle; }
-
-textarea { height: auto; min-height: 50px; }
-
-select { width: 100%; }
-
-object, svg { display: inline-block; vertical-align: middle; }
-
-.center { margin-left: auto; margin-right: auto; }
-
-.spread { width: 100%; }
-
-p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; }
-
-.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; }
-
-/* Typography resets */
-div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; }
-
-/* Default Link Styles */
-a { color: #2ba6cb; text-decoration: none; line-height: inherit; }
-a:hover, a:focus { color: #2795b6; }
-a img { border: none; }
-
-/* Default paragraph styles */
-p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; }
-p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; }
-
-/* Default header styles */
-h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; }
-h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; }
-
-h1 { font-size: 2.125em; }
-
-h2 { font-size: 1.6875em; }
-
-h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.375em; }
-
-h4 { font-size: 1.125em; }
-
-h5 { font-size: 1.125em; }
-
-h6 { font-size: 1em; }
-
-hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; }
-
-/* Helpful Typography Defaults */
-em, i { font-style: italic; line-height: inherit; }
-
-strong, b { font-weight: bold; line-height: inherit; }
-
-small { font-size: 60%; line-height: inherit; }
-
-code { font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; }
-
-/* Lists */
-ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; }
-
-ul, ol { margin-left: 1.5em; }
-ul.no-bullet, ol.no-bullet { margin-left: 1.5em; }
-
-/* Unordered Lists */
-ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ }
-ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; }
-ul.square { list-style-type: square; }
-ul.circle { list-style-type: circle; }
-ul.disc { list-style-type: disc; }
-ul.no-bullet { list-style: none; }
-
-/* Ordered Lists */
-ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; }
-
-/* Definition Lists */
-dl dt { margin-bottom: 0.3125em; font-weight: bold; }
-dl dd { margin-bottom: 1.25em; }
-
-/* Abbreviations */
-abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; }
-
-abbr { text-transform: none; }
-
-/* Blockquotes */
-blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; }
-blockquote cite { display: block; font-size: 0.8125em; color: #555555; }
-blockquote cite:before { content: "\2014 \0020"; }
-blockquote cite a, blockquote cite a:visited { color: #555555; }
-
-blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; }
-
-/* Microformats */
-.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; }
-.vcard li { margin: 0; display: block; }
-.vcard .fn { font-weight: bold; font-size: 0.9375em; }
-
-.vevent .summary { font-weight: bold; }
-.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; }
-
-@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; }
- h1 { font-size: 2.75em; }
- h2 { font-size: 2.3125em; }
- h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; }
- h4 { font-size: 1.4375em; } }
-/* Tables */
-table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; }
-table thead, table tfoot { background: whitesmoke; font-weight: bold; }
-table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; }
-table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; }
-table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; }
-table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; }
-
-body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; }
-
-h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; }
-
-.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; }
-.clearfix:after, .float-group:after { clear: both; }
-
-*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; }
-*:not(pre) > code.nobreak { word-wrap: normal; }
-*:not(pre) > code.nowrap { white-space: nowrap; }
-
-pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; }
-
-em em { font-style: normal; }
-
-strong strong { font-weight: normal; }
-
-.keyseq { color: #555555; }
-
-kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; }
-
-.keyseq kbd:first-child { margin-left: 0; }
-
-.keyseq kbd:last-child { margin-right: 0; }
-
-.menuseq, .menu { color: #090909; }
-
-b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; }
-
-b.button:before { content: "["; padding: 0 3px 0 2px; }
-
-b.button:after { content: "]"; padding: 0 2px 0 3px; }
-
-#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; }
-#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; }
-#header:after, #content:after, #footnotes:after, #footer:after { clear: both; }
-
-#content { margin-top: 1.25em; }
-
-#content:before { content: none; }
-
-#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; }
-#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; }
-#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; }
-#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; }
-#header .details span:first-child { margin-left: -0.125em; }
-#header .details span.email a { color: #6f6f6f; }
-#header .details br { display: none; }
-#header .details br + span:before { content: "\00a0\2013\00a0"; }
-#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; }
-#header .details br + span#revremark:before { content: "\00a0|\00a0"; }
-#header #revnumber { text-transform: capitalize; }
-#header #revnumber:after { content: "\00a0"; }
-
-#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; }
-
-#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; }
-#toc > ul { margin-left: 0.125em; }
-#toc ul.sectlevel0 > li > a { font-style: italic; }
-#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; }
-#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; }
-#toc li { line-height: 1.3334; margin-top: 0.3334em; }
-#toc a { text-decoration: none; }
-#toc a:active { text-decoration: underline; }
-
-#toctitle { color: #6f6f6f; font-size: 1.2em; }
-
-@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; }
- body.toc2 { padding-left: 15em; padding-right: 0; }
- #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; }
- #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; }
- #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; }
- #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; }
- #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; }
- body.toc2.toc-right { padding-left: 0; padding-right: 15em; }
- body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } }
-@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; }
- #toc.toc2 { width: 20em; }
- #toc.toc2 #toctitle { font-size: 1.375em; }
- #toc.toc2 > ul { font-size: 0.95em; }
- #toc.toc2 ul ul { padding-left: 1.25em; }
- body.toc2.toc-right { padding-left: 0; padding-right: 20em; } }
-#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; }
-#content #toc > :first-child { margin-top: 0; }
-#content #toc > :last-child { margin-bottom: 0; }
-
-#footer { max-width: 100%; background-color: #222222; padding: 1.25em; }
-
-#footer-text { color: #dddddd; line-height: 1.44; }
-
-.sect1 { padding-bottom: 0.625em; }
-
-@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } }
-.sect1 + .sect1 { border-top: 1px solid #dddddd; }
-
-#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; }
-#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; }
-#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; }
-#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; }
-#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; }
-
-.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; }
-
-.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; }
-
-table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; }
-
-.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; }
-
-table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; }
-
-.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; }
-.admonitionblock > table td.icon { text-align: center; width: 80px; }
-.admonitionblock > table td.icon img { max-width: initial; }
-.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; }
-.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; }
-.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; }
-
-.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; }
-.exampleblock > .content > :first-child { margin-top: 0; }
-.exampleblock > .content > :last-child { margin-bottom: 0; }
-
-.sidebarblock { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; }
-.sidebarblock > :first-child { margin-top: 0; }
-.sidebarblock > :last-child { margin-bottom: 0; }
-.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; }
-
-.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; }
-
-.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; }
-.sidebarblock .literalblock pre, .sidebarblock .listingblock pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; }
-
-.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; }
-.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; }
-@media only screen and (min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } }
-@media only screen and (min-width: 1280px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } }
-
-.literalblock.output pre { color: #eeeeee; background-color: black; }
-
-.listingblock pre.highlightjs { padding: 0; }
-.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; }
-
-.listingblock > .content { position: relative; }
-
-.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; }
-
-.listingblock:hover code[data-lang]:before { display: block; }
-
-.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; }
-
-.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; }
-
-table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; }
-
-table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; }
-
-table.pyhltable td.code { padding-left: .75em; padding-right: 0; }
-
-pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; }
-
-pre.pygments .lineno { display: inline-block; margin-right: .25em; }
-
-table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; }
-
-.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; }
-.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; }
-.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; }
-.quoteblock blockquote { margin: 0; padding: 0; border: 0; }
-.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); }
-.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; }
-.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; }
-.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; }
-.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; }
-.quoteblock .quoteblock blockquote:before { display: none; }
-
-.verseblock { margin: 0 1em 1.25em 1em; }
-.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; }
-.verseblock pre strong { font-weight: 400; }
-.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; }
-
-.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; }
-.quoteblock .attribution br, .verseblock .attribution br { display: none; }
-.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; }
-
-.quoteblock.abstract { margin: 0 0 1.25em 0; display: block; }
-.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; }
-.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; }
-
-table.tableblock { max-width: 100%; border-collapse: separate; }
-table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; }
-
-table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; }
-
-table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; }
-
-table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; }
-
-table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; }
-
-table.grid-all * > tr > .tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; }
-
-table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; }
-
-table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; }
-
-table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; }
-
-table.frame-all { border-width: 1px; }
-
-table.frame-sides { border-width: 0 1px; }
-
-table.frame-topbot { border-width: 1px 0; }
-
-th.halign-left, td.halign-left { text-align: left; }
-
-th.halign-right, td.halign-right { text-align: right; }
-
-th.halign-center, td.halign-center { text-align: center; }
-
-th.valign-top, td.valign-top { vertical-align: top; }
-
-th.valign-bottom, td.valign-bottom { vertical-align: bottom; }
-
-th.valign-middle, td.valign-middle { vertical-align: middle; }
-
-table thead th, table tfoot th { font-weight: bold; }
-
-tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; }
-
-tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; }
-
-p.tableblock > code:only-child { background: none; padding: 0; }
-
-p.tableblock { font-size: 1em; }
-
-td > div.verse { white-space: pre; }
-
-ol { margin-left: 1.75em; }
-
-ul li ol { margin-left: 1.5em; }
-
-dl dd { margin-left: 1.125em; }
-
-dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; }
-
-ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; }
-
-ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; }
-
-ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; }
-
-ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; }
-
-ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; }
-
-ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; }
-ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; }
-ul.inline > li > * { display: block; }
-
-.unstyled dl dt { font-weight: normal; font-style: normal; }
-
-ol.arabic { list-style-type: decimal; }
-
-ol.decimal { list-style-type: decimal-leading-zero; }
-
-ol.loweralpha { list-style-type: lower-alpha; }
-
-ol.upperalpha { list-style-type: upper-alpha; }
-
-ol.lowerroman { list-style-type: lower-roman; }
-
-ol.upperroman { list-style-type: upper-roman; }
-
-ol.lowergreek { list-style-type: lower-greek; }
-
-.hdlist > table, .colist > table { border: 0; background: none; }
-.hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; }
-
-td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; }
-
-td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; }
-
-.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; }
-
-.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; }
-.colist > table tr > td:first-of-type img { max-width: initial; }
-.colist > table tr > td:last-of-type { padding: 0.25em 0; }
-
-.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; }
-
-.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; }
-.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; }
-.imageblock > .title { margin-bottom: 0; }
-.imageblock.thumb, .imageblock.th { border-width: 6px; }
-.imageblock.thumb > .title, .imageblock.th > .title { padding: 0 0.125em; }
-
-.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; }
-.image.left { margin-right: 0.625em; }
-.image.right { margin-left: 0.625em; }
-
-a.image { text-decoration: none; display: inline-block; }
-a.image object { pointer-events: none; }
-
-sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; }
-sup.footnote a, sup.footnoteref a { text-decoration: none; }
-sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; }
-
-#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; }
-#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; }
-#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; }
-#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; }
-#footnotes .footnote:last-of-type { margin-bottom: 0; }
-#content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; }
-
-.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; }
-.gist .file-data > table td.line-data { width: 99%; }
-
-div.unbreakable { page-break-inside: avoid; }
-
-.big { font-size: larger; }
-
-.small { font-size: smaller; }
-
-.underline { text-decoration: underline; }
-
-.overline { text-decoration: overline; }
-
-.line-through { text-decoration: line-through; }
-
-.aqua { color: #00bfbf; }
-
-.aqua-background { background-color: #00fafa; }
-
-.black { color: black; }
-
-.black-background { background-color: black; }
-
-.blue { color: #0000bf; }
-
-.blue-background { background-color: #0000fa; }
-
-.fuchsia { color: #bf00bf; }
-
-.fuchsia-background { background-color: #fa00fa; }
-
-.gray { color: #606060; }
-
-.gray-background { background-color: #7d7d7d; }
-
-.green { color: #006000; }
-
-.green-background { background-color: #007d00; }
-
-.lime { color: #00bf00; }
-
-.lime-background { background-color: #00fa00; }
-
-.maroon { color: #600000; }
-
-.maroon-background { background-color: #7d0000; }
-
-.navy { color: #000060; }
-
-.navy-background { background-color: #00007d; }
-
-.olive { color: #606000; }
-
-.olive-background { background-color: #7d7d00; }
-
-.purple { color: #600060; }
-
-.purple-background { background-color: #7d007d; }
-
-.red { color: #bf0000; }
-
-.red-background { background-color: #fa0000; }
-
-.silver { color: #909090; }
-
-.silver-background { background-color: #bcbcbc; }
-
-.teal { color: #006060; }
-
-.teal-background { background-color: #007d7d; }
-
-.white { color: #bfbfbf; }
-
-.white-background { background-color: #fafafa; }
-
-.yellow { color: #bfbf00; }
-
-.yellow-background { background-color: #fafa00; }
-
-span.icon > .fa { cursor: default; }
-
-.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; }
-.admonitionblock td.icon .icon-note:before { content: "\f05a"; color: #207c98; }
-.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; }
-.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; }
-.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; }
-.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; }
-
-.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; }
-.conum[data-value] * { color: #fff !important; }
-.conum[data-value] + b { display: none; }
-.conum[data-value]:after { content: attr(data-value); }
-pre .conum[data-value] { position: relative; top: -0.125em; }
-
-b.conum * { color: inherit !important; }
-
-.conum:not([data-value]):empty { display: none; }
-
-.literalblock pre, .listingblock pre { background: #eeeeee; }
diff --git a/api/src/docs/asciidoclet/overview.adoc b/api/src/docs/asciidoclet/overview.adoc
deleted file mode 100644
index 7947331..0000000
--- a/api/src/docs/asciidoclet/overview.adoc
+++ /dev/null
@@ -1,4 +0,0 @@
-= Elasticsearch Java client
-Jörg Prante
-Version 5.4.0.0
-
diff --git a/backup/XbibTransportService.java b/backup/XbibTransportService.java
deleted file mode 100644
index c2dc502..0000000
--- a/backup/XbibTransportService.java
+++ /dev/null
@@ -1,1047 +0,0 @@
-package org.xbib.elasticsearch.client.transport;
-
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.elasticsearch.Version;
-import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.collect.MapBuilder;
-import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.metrics.MeanMetric;
-import org.elasticsearch.common.regex.Regex;
-import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Setting.Property;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.BoundTransportAddress;
-import org.elasticsearch.common.util.concurrent.AbstractRunnable;
-import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
-import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
-import org.elasticsearch.common.util.concurrent.FutureUtils;
-import org.elasticsearch.common.util.concurrent.ThreadContext;
-import org.elasticsearch.tasks.TaskManager;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.ActionNotFoundTransportException;
-import org.elasticsearch.transport.ConnectTransportException;
-import org.elasticsearch.transport.FutureTransportResponseHandler;
-import org.elasticsearch.transport.NodeDisconnectedException;
-import org.elasticsearch.transport.NodeNotConnectedException;
-import org.elasticsearch.transport.PlainTransportFuture;
-import org.elasticsearch.transport.ReceiveTimeoutTransportException;
-import org.elasticsearch.transport.RemoteTransportException;
-import org.elasticsearch.transport.RequestHandlerRegistry;
-import org.elasticsearch.transport.ResponseHandlerFailureTransportException;
-import org.elasticsearch.transport.SendRequestTransportException;
-import org.elasticsearch.transport.Transport;
-import org.elasticsearch.transport.TransportChannel;
-import org.elasticsearch.transport.TransportException;
-import org.elasticsearch.transport.TransportFuture;
-import org.elasticsearch.transport.TransportInterceptor;
-import org.elasticsearch.transport.TransportRequest;
-import org.elasticsearch.transport.TransportRequestHandler;
-import org.elasticsearch.transport.TransportRequestOptions;
-import org.elasticsearch.transport.TransportResponse;
-import org.elasticsearch.transport.TransportResponseHandler;
-import org.elasticsearch.transport.TransportResponseOptions;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ScheduledFuture;
-import java.util.function.Function;
-import java.util.function.Predicate;
-import java.util.function.Supplier;
-
-/**
- *
- */
-public class XbibTransportService extends AbstractLifecycleComponent {
-
- private static final String HANDSHAKE_ACTION_NAME = "internal:transport/handshake";
-
- private static final Setting<List<String>> TRACE_LOG_INCLUDE_SETTING =
- Setting.listSetting("transport.tracer.include", Collections.emptyList(), Function.identity(),
- Property.Dynamic, Property.NodeScope);
-
- private static final Setting<List<String>> TRACE_LOG_EXCLUDE_SETTING =
- Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*",
- TransportLivenessAction.NAME), Function.identity(), Property.Dynamic, Property.NodeScope);
-
- private final CountDownLatch blockIncomingRequestsLatch = new CountDownLatch(1);
-
- private final Transport transport;
-
- private final ThreadPool threadPool;
-
- private final ClusterName clusterName;
-
- private final TaskManager taskManager;
-
- private final TransportInterceptor.AsyncSender asyncSender;
-
- private final Function<BoundTransportAddress, DiscoveryNode> localNodeFactory;
-
- private volatile Map<String, RequestHandlerRegistry<?>> requestHandlers = Collections.emptyMap();
-
- private final Object requestHandlerMutex = new Object();
-
- private final ConcurrentMapLong<RequestHolder<? extends TransportResponse>> clientHandlers =
- ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
-
- private final TransportInterceptor interceptor;
-
- // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they
- // do show up, we can print more descriptive information about them
- private final Map<Long, TimeoutInfoHolder> timeoutInfoHandlers =
- Collections.synchronizedMap(new LinkedHashMap<Long, TimeoutInfoHolder>(100, .75F, true) {
- private static final long serialVersionUID = 9174428975922394994L;
-
- @Override
- protected boolean removeEldestEntry(Map.Entry<Long, TimeoutInfoHolder> eldest) {
- return size() > 100;
- }
- });
-
- private final Logger tracerLog;
-
- private volatile String[] tracerLogInclude;
-
- private volatile String[] tracerLogExclude;
-
- private volatile DiscoveryNode localNode = null;
-
- private final Transport.Connection localNodeConnection = new Transport.Connection() {
- @Override
- public DiscoveryNode getNode() {
- return localNode;
- }
-
- @Override
- public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options)
- throws IOException, TransportException {
- sendLocalRequest(requestId, action, request, options);
- }
-
- @Override
- public void close() throws IOException {
- }
- };
-
- /**
- * Build the service.
- *
- * @param clusterSettings if non null, the {@linkplain XbibTransportService} will register
- * with the {@link ClusterSettings} for settings updates for
- * {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}.
- */
- XbibTransportService(Settings settings, Transport transport, ThreadPool threadPool,
- TransportInterceptor transportInterceptor,
- Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
- @Nullable ClusterSettings clusterSettings) {
- super(settings);
- this.transport = transport;
- this.threadPool = threadPool;
- this.localNodeFactory = localNodeFactory;
- this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
- setTracerLogInclude(TRACE_LOG_INCLUDE_SETTING.get(settings));
- setTracerLogExclude(TRACE_LOG_EXCLUDE_SETTING.get(settings));
- tracerLog = Loggers.getLogger(logger, ".tracer");
- taskManager = createTaskManager();
- this.interceptor = transportInterceptor;
- this.asyncSender = interceptor.interceptSender(this::sendRequestInternal);
- if (clusterSettings != null) {
- clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude);
- clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude);
- }
- }
-
- private TaskManager createTaskManager() {
- return new TaskManager(settings);
- }
-
- private void setTracerLogInclude(List<String> tracerLogInclude) {
- this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY);
- }
-
- private void setTracerLogExclude(List<String> tracerLogExclude) {
- this.tracerLogExclude = tracerLogExclude.toArray(Strings.EMPTY_ARRAY);
- }
-
- @Override
- protected void doStart() {
- rxMetric.clear();
- txMetric.clear();
- transport.setTransportService(this);
- transport.start();
- if (transport.boundAddress() != null && logger.isInfoEnabled()) {
- logger.info("{}", transport.boundAddress());
- for (Map.Entry<String, BoundTransportAddress> entry : transport.profileBoundAddresses().entrySet()) {
- logger.info("profile [{}]: {}", entry.getKey(), entry.getValue());
- }
- }
- localNode = localNodeFactory.apply(transport.boundAddress());
- registerRequestHandler(HANDSHAKE_ACTION_NAME,
- () -> HandshakeRequest.INSTANCE,
- ThreadPool.Names.SAME,
- (request, channel) -> channel.sendResponse(new HandshakeResponse(localNode, clusterName,
- localNode.getVersion())));
- }
-
- @Override
- protected void doStop() {
- try {
- transport.stop();
- } finally {
- // in case the transport is not connected to our local node (thus cleaned on node disconnect)
- // make sure to clean any leftover on going handles
- for (Map.Entry<Long, RequestHolder<? extends TransportResponse>> entry : clientHandlers.entrySet()) {
- final RequestHolder<? extends TransportResponse> holderToNotify = clientHandlers.remove(entry.getKey());
- if (holderToNotify != null) {
- // callback that an exception happened, but on a different thread since we don't
- // want handlers to worry about stack overflows
- threadPool.generic().execute(new AbstractRunnable() {
- @Override
- public void onRejection(Exception e) {
- // if we get rejected during node shutdown we don't wanna bubble it up
- logger.debug((Supplier<?>) () -> new ParameterizedMessage(
- "failed to notify response handler on rejection, action: {}",
- holderToNotify.action()),
- e);
- }
- @Override
- public void onFailure(Exception e) {
- logger.warn((Supplier<?>) () -> new ParameterizedMessage(
- "failed to notify response handler on exception, action: {}",
- holderToNotify.action()),
- e);
- }
- @Override
- public void doRun() {
- TransportException ex = new TransportException("transport stopped, action: " +
- holderToNotify.action());
- holderToNotify.handler().handleException(ex);
- }
- });
- }
- }
- }
- }
-
- @Override
- protected void doClose() {
- transport.close();
- }
-
- /**
- * Start accepting incoming requests.
- * When the transport layer starts up, it will block any incoming requests until
- * this method is called.
- */
- final void acceptIncomingRequests() {
- blockIncomingRequestsLatch.countDown();
- }
-
- /**
- * Returns true iff the given node is already connected.
- */
- boolean nodeConnected(DiscoveryNode node) {
- return isLocalNode(node) || transport.nodeConnected(node);
- }
-
- /**
- * Connect to the specified node.
- *
- * @param node the node to connect to
- */
- void connectToNode(final DiscoveryNode node) {
- if (isLocalNode(node)) {
- return;
- }
- transport.connectToNode(node, null, (newConnection, actualProfile) ->
- handshake(newConnection, actualProfile.getHandshakeTimeout().millis()));
- }
-
- /**
- * Executes a high-level handshake using the given connection
- * and returns the discovery node of the node the connection
- * was established with. The handshake will fail if the cluster
- * name on the target node mismatches the local cluster name.
- *
- * @param connection the connection to a specific node
- * @param handshakeTimeout handshake timeout
- * @return the connected node
- * @throws ConnectTransportException if the connection failed
- * @throws IllegalStateException if the handshake failed
- */
- private DiscoveryNode handshake(final Transport.Connection connection,
- final long handshakeTimeout) throws ConnectTransportException {
- return handshake(connection, handshakeTimeout, clusterName::equals);
- }
-
- /**
- * Executes a high-level handshake using the given connection
- * and returns the discovery node of the node the connection
- * was established with. The handshake will fail if the cluster
- * name on the target node doesn't match the local cluster name.
- *
- * @param connection the connection to a specific node
- * @param handshakeTimeout handshake timeout
- * @param clusterNamePredicate cluster name validation predicate
- * @return the connected node
- * @throws ConnectTransportException if the connection failed
- * @throws IllegalStateException if the handshake failed
- */
- private DiscoveryNode handshake(final Transport.Connection connection,
- final long handshakeTimeout, Predicate<ClusterName> clusterNamePredicate)
- throws ConnectTransportException {
- final HandshakeResponse response;
- final DiscoveryNode node = connection.getNode();
- try {
- PlainTransportFuture<HandshakeResponse> futureHandler = new PlainTransportFuture<>(
- new FutureTransportResponseHandler<HandshakeResponse>() {
- @Override
- public HandshakeResponse newInstance() {
- return new HandshakeResponse();
- }
- });
- sendRequest(connection, HANDSHAKE_ACTION_NAME, HandshakeRequest.INSTANCE,
- TransportRequestOptions.builder().withTimeout(handshakeTimeout).build(), futureHandler);
- response = futureHandler.txGet();
- } catch (Exception e) {
- throw new IllegalStateException("handshake failed with " + node, e);
- }
- if (!clusterNamePredicate.test(response.clusterName)) {
- throw new IllegalStateException("handshake failed, mismatched cluster name [" +
- response.clusterName + "] - " + node);
- } else if (!response.version.isCompatible(localNode.getVersion())) {
- throw new IllegalStateException("handshake failed, incompatible version [" +
- response.version + "] - " + node);
- }
- return response.discoveryNode;
- }
-
- void disconnectFromNode(DiscoveryNode node) {
- if (isLocalNode(node)) {
- return;
- }
- transport.disconnectFromNode(node);
- }
-
- <T extends TransportResponse> TransportFuture<T> submitRequest(DiscoveryNode node, String action,
- TransportRequest request,
- TransportRequestOptions options,
- TransportResponseHandler<T> handler)
- throws TransportException {
- PlainTransportFuture<T> futureHandler = new PlainTransportFuture<>(handler);
- try {
- Transport.Connection connection = getConnection(node);
- sendRequest(connection, action, request, options, futureHandler);
- } catch (NodeNotConnectedException ex) {
- futureHandler.handleException(ex);
- }
- return futureHandler;
- }
-
- final <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action,
- final TransportRequest request,
- final TransportRequestOptions options,
- TransportResponseHandler<T> handler) {
- try {
- Transport.Connection connection = getConnection(node);
- sendRequest(connection, action, request, options, handler);
- } catch (NodeNotConnectedException ex) {
- handler.handleException(ex);
- }
- }
-
- private <T extends TransportResponse> void sendRequest(final Transport.Connection connection, final String action,
- final TransportRequest request,
- final TransportRequestOptions options,
- TransportResponseHandler<T> handler) {
-
- asyncSender.sendRequest(connection, action, request, options, handler);
- }
-
- /**
- * Returns either a real transport connection or a local node connection
- * if we are using the local node optimization.
- * @throws NodeNotConnectedException if the given node is not connected
- */
- private Transport.Connection getConnection(DiscoveryNode node) {
- if (isLocalNode(node)) {
- return localNodeConnection;
- } else {
- return transport.getConnection(node);
- }
- }
-
- @SuppressWarnings({"unchecked", "rawtypes"})
- private <T extends TransportResponse> void sendRequestInternal(final Transport.Connection connection,
- final String action,
- final TransportRequest request,
- final TransportRequestOptions options,
- TransportResponseHandler<T> handler) {
- if (connection == null) {
- throw new IllegalStateException("can't send request to a null connection");
- }
- DiscoveryNode node = connection.getNode();
- final long requestId = transport.newRequestId();
- final TimeoutHandler timeoutHandler;
- try {
- if (options.timeout() == null) {
- timeoutHandler = null;
- } else {
- timeoutHandler = new TimeoutHandler(requestId);
- }
- Supplier<ThreadContext.StoredContext> storedContextSupplier =
- threadPool.getThreadContext().newRestorableContext(true);
- TransportResponseHandler<T> responseHandler =
- new ContextRestoreResponseHandler<>(storedContextSupplier, handler);
- clientHandlers.put(requestId,
- new RequestHolder<>(responseHandler, connection.getNode(), action, timeoutHandler));
- if (lifecycle.stoppedOrClosed()) {
- // if we are not started the exception handling will remove the RequestHolder again
- // and calls the handler to notify the caller. It will only notify if the toStop code
- // hasn't done the work yet.
- throw new TransportException("TransportService is closed stopped can't send request");
- }
- if (timeoutHandler != null) {
- assert options.timeout() != null;
- timeoutHandler.future = threadPool.schedule(options.timeout(), ThreadPool.Names.GENERIC, timeoutHandler);
- }
- connection.sendRequest(requestId, action, request, options);
- } catch (final Exception e) {
- // usually happen either because we failed to connect to the node
- // or because we failed serializing the message
- final RequestHolder<? extends TransportResponse> holderToNotify = clientHandlers.remove(requestId);
- // If holderToNotify == null then handler has already been taken care of.
- if (holderToNotify != null) {
- holderToNotify.cancelTimeout();
- // callback that an exception happened, but on a different thread since we don't
- // want handlers to worry about stack overflows
- final SendRequestTransportException sendRequestException =
- new SendRequestTransportException(node, action, e);
- threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() {
- @Override
- public void onRejection(Exception e) {
- // if we get rejected during node shutdown we don't wanna bubble it up
- logger.debug((Supplier<?>) () -> new ParameterizedMessage(
- "failed to notify response handler on rejection, action: {}",
- holderToNotify.action()), e);
- }
- @Override
- public void onFailure(Exception e) {
- logger.warn((Supplier<?>) () -> new ParameterizedMessage(
- "failed to notify response handler on exception, action: {}",
- holderToNotify.action()), e);
- }
- @Override
- protected void doRun() throws Exception {
- holderToNotify.handler().handleException(sendRequestException);
- }
- });
- } else {
- logger.debug("Exception while sending request, handler likely already notified due to timeout", e);
- }
- }
- }
-
- private void sendLocalRequest(long requestId, final String action, final TransportRequest request,
- TransportRequestOptions options) {
- final DirectResponseChannel channel = new DirectResponseChannel(logger, localNode, action, requestId, adapter,
- threadPool);
- try {
- adapter.onRequestSent(localNode, requestId, action, request, options);
- adapter.onRequestReceived(requestId, action);
- final RequestHandlerRegistry reg = adapter.getRequestHandler(action);
- if (reg == null) {
- throw new ActionNotFoundTransportException("Action [" + action + "] not found");
- }
- final String executor = reg.getExecutor();
- if (ThreadPool.Names.SAME.equals(executor)) {
- reg.processMessageReceived(request, channel);
- } else {
- threadPool.executor(executor).execute(new AbstractRunnable() {
- @Override
- protected void doRun() throws Exception {
- reg.processMessageReceived(request, channel);
- }
-
- @Override
- public boolean isForceExecution() {
- return reg.isForceExecution();
- }
-
- @Override
- public void onFailure(Exception e) {
- try {
- channel.sendResponse(e);
- } catch (Exception inner) {
- inner.addSuppressed(e);
- logger.warn((Supplier<?>) () ->
- new ParameterizedMessage("failed to notify channel of error message for action [{}]",
- action), inner);
- }
- }
- });
- }
-
- } catch (Exception e) {
- try {
- channel.sendResponse(e);
- } catch (Exception inner) {
- inner.addSuppressed(e);
- logger.warn(
- (Supplier<?>) () -> new ParameterizedMessage(
- "failed to notify channel of error message for action [{}]", action), inner);
- }
- }
- }
-
- private boolean shouldTraceAction(String action) {
- if (tracerLogInclude.length > 0) {
- if (!Regex.simpleMatch(tracerLogInclude, action)) {
- return false;
- }
- }
- return tracerLogExclude.length <= 0 || !Regex.simpleMatch(tracerLogExclude, action);
- }
-
- /**
- * Registers a new request handler.
- *
- * @param action the action the request handler is associated with
- * @param request the request class that will be used to construct new instances for streaming
- * @param executor the executor the request handling will be executed on
- * @param handler the handler itself that implements the request handling
- */
- private void registerRequestHandler(String action, Supplier request,
- String executor,
- TransportRequestHandler handler) {
- handler = interceptor.interceptHandler(action, executor, false, handler);
- RequestHandlerRegistry reg = new RequestHandlerRegistry<>(
- action, request, taskManager, handler, executor, false, false);
- registerRequestHandler(reg);
- }
-
- @SuppressWarnings("unchecked")
- private void registerRequestHandler(RequestHandlerRegistry reg) {
- synchronized (requestHandlerMutex) {
- if (requestHandlers.containsKey(reg.getAction())) {
- throw new IllegalArgumentException("transport handlers for action " +
- reg.getAction() + " is already registered");
- }
- requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(),
- (RequestHandlerRegistry) reg).immutableMap();
- }
- }
-
- private boolean isLocalNode(DiscoveryNode discoveryNode) {
- return Objects.requireNonNull(discoveryNode, "discovery node must not be null").equals(localNode);
- }
-
- static class HandshakeRequest extends TransportRequest {
-
- static final HandshakeRequest INSTANCE = new HandshakeRequest();
-
- private HandshakeRequest() {
- }
-
- }
-
- /**
- * Response to the handshake request, carrying the remote node, its cluster name, and its version.
- */
- public static class HandshakeResponse extends TransportResponse {
-
- private DiscoveryNode discoveryNode;
-
- private ClusterName clusterName;
-
- private Version version;
-
- /**
- * For external construction (stream deserialization).
- */
- public HandshakeResponse() {
- }
-
- HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, Version version) {
- this.discoveryNode = discoveryNode;
- this.version = version;
- this.clusterName = clusterName;
- }
-
- @Override
- public void readFrom(StreamInput in) throws IOException {
- super.readFrom(in);
- discoveryNode = in.readOptionalWriteable(DiscoveryNode::new);
- clusterName = new ClusterName(in);
- version = Version.readVersion(in);
- }
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- super.writeTo(out);
- out.writeOptionalWriteable(discoveryNode);
- clusterName.writeTo(out);
- Version.writeVersion(version, out);
- }
- }
-
- private final class Adapter implements TransportServiceAdapter {
-
- final MeanMetric rxMetric = new MeanMetric();
-
- final MeanMetric txMetric = new MeanMetric();
-
- @Override
- public void addBytesReceived(long size) {
- rxMetric.inc(size);
- }
-
- @Override
- public void addBytesSent(long size) {
- txMetric.inc(size);
- }
-
- @Override
- public void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request,
- TransportRequestOptions options) {
- if (traceEnabled() && shouldTraceAction(action)) {
- traceRequestSent(node, requestId, action, options);
- }
- }
-
- boolean traceEnabled() {
- return tracerLog.isTraceEnabled();
- }
-
- @Override
- public void onResponseSent(long requestId, String action, TransportResponse response,
- TransportResponseOptions options) {
- if (traceEnabled() && shouldTraceAction(action)) {
- traceResponseSent(requestId, action);
- }
- }
-
- @Override
- public void onResponseSent(long requestId, String action, Exception e) {
- if (traceEnabled() && shouldTraceAction(action)) {
- traceResponseSent(requestId, action, e);
- }
- }
-
- void traceResponseSent(long requestId, String action, Exception e) {
- tracerLog.trace(
- (org.apache.logging.log4j.util.Supplier<?>)
- () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e);
- }
-
- @Override
- public void onRequestReceived(long requestId, String action) {
- try {
- blockIncomingRequestsLatch.await();
- } catch (InterruptedException e) {
- logger.trace("interrupted while waiting for incoming requests block to be removed");
- }
- if (traceEnabled() && shouldTraceAction(action)) {
- traceReceivedRequest(requestId, action);
- }
- }
-
- @Override
- public RequestHandlerRegistry getRequestHandler(String action) {
- return requestHandlers.get(action);
- }
-
- @Override
- public TransportResponseHandler onResponseReceived(final long requestId) {
- RequestHolder holder = clientHandlers.remove(requestId);
- if (holder == null) {
- checkForTimeout(requestId);
- return null;
- }
- holder.cancelTimeout();
- if (traceEnabled() && shouldTraceAction(holder.action())) {
- traceReceivedResponse(requestId, holder.node(), holder.action());
- }
- return holder.handler();
- }
-
- void checkForTimeout(long requestId) {
- // let's see if it's in the timeout holder, but sync on the mutex to make sure any ongoing timeout
- // handling has finished
- final DiscoveryNode sourceNode;
- final String action;
- if (clientHandlers.get(requestId) != null) {
- throw new IllegalStateException();
- }
- TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId);
- if (timeoutInfoHolder != null) {
- long time = System.currentTimeMillis();
- logger.warn("Received response for a request that has timed out, sent [{}ms] ago, timed out [{}ms] ago, " +
- "action [{}], node [{}], id [{}]", time - timeoutInfoHolder.sentTime(),
- time - timeoutInfoHolder.timeoutTime(),
- timeoutInfoHolder.action(), timeoutInfoHolder.node(), requestId);
- action = timeoutInfoHolder.action();
- sourceNode = timeoutInfoHolder.node();
- } else {
- logger.warn("Transport response handler not found of id [{}]", requestId);
- action = null;
- sourceNode = null;
- }
- // call tracer out of lock
- if (!traceEnabled()) {
- return;
- }
- if (action == null) {
- assert sourceNode == null;
- traceUnresolvedResponse(requestId);
- } else if (shouldTraceAction(action)) {
- traceReceivedResponse(requestId, sourceNode, action);
- }
- }
-
- @Override
- public void onNodeConnected(final DiscoveryNode node) {
- }
-
- @Override
- public void onConnectionOpened(DiscoveryNode node) {
- }
-
- @Override
- public void onNodeDisconnected(final DiscoveryNode node) {
- try {
- for (Map.Entry<Long, RequestHolder<? extends TransportResponse>> entry : clientHandlers.entrySet()) {
- RequestHolder<? extends TransportResponse> holder = entry.getValue();
- if (holder.node().equals(node)) {
- final RequestHolder<? extends TransportResponse> holderToNotify = clientHandlers.remove(entry.getKey());
- if (holderToNotify != null) {
- // callback that an exception happened, but on a different thread since we don't
- // want handlers to worry about stack overflows
- threadPool.generic().execute(() -> holderToNotify.handler()
- .handleException(new NodeDisconnectedException(node,
- holderToNotify.action())));
- }
- }
- }
- } catch (EsRejectedExecutionException ex) {
- logger.debug("Rejected execution on NodeDisconnected", ex);
- }
- }
-
- void traceReceivedRequest(long requestId, String action) {
- tracerLog.trace("[{}][{}] received request", requestId, action);
- }
-
- void traceResponseSent(long requestId, String action) {
- tracerLog.trace("[{}][{}] sent response", requestId, action);
- }
-
- void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String action) {
- tracerLog.trace("[{}][{}] received response from [{}]", requestId, action, sourceNode);
- }
-
- void traceUnresolvedResponse(long requestId) {
- tracerLog.trace("[{}] received response but can't resolve it to a request", requestId);
- }
-
- void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {
- tracerLog.trace("[{}][{}] sent to [{}] (timeout: [{}])", requestId, action, node, options.timeout());
- }
- }
-
- private final class TimeoutHandler implements Runnable {
-
- private final long requestId;
-
- private final long sentTime = System.currentTimeMillis();
-
- volatile ScheduledFuture<?> future;
-
- TimeoutHandler(long requestId) {
- this.requestId = requestId;
- }
-
- @Override
- public void run() {
- // we do a get first to make sure we only add the TimeoutInfoHolder if needed.
- final RequestHolder<? extends TransportResponse> holder = clientHandlers.get(requestId);
- if (holder != null) {
- // add it to the timeout information holder, in case we are going to get a response later
- long timeoutTime = System.currentTimeMillis();
- timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(holder.node(), holder.action(), sentTime,
- timeoutTime));
- // now that we have the information visible via timeoutInfoHandlers, we try to remove the request id
- final RequestHolder<? extends TransportResponse> removedHolder = clientHandlers.remove(requestId);
- if (removedHolder != null) {
- assert removedHolder == holder : "two different holder instances for request [" + requestId + "]";
- removedHolder.handler().handleException(
- new ReceiveTimeoutTransportException(holder.node(), holder.action(),
- "request_id [" + requestId + "] timed out after [" + (timeoutTime - sentTime) + "ms]"));
- } else {
- // response was processed, remove timeout info.
- timeoutInfoHandlers.remove(requestId);
- }
- }
- }
-
- /**
- * Cancels timeout handling. This is a best effort only to avoid running it.
- * Remove the requestId from {@link #clientHandlers} to make sure this doesn't run.
- */
- void cancel() {
- if (clientHandlers.get(requestId) != null) {
- throw new IllegalStateException("cancel must be called after the requestId [" +
- requestId + "] has been removed from clientHandlers");
- }
- FutureUtils.cancel(future);
- }
- }
-
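The TimeoutHandler/RequestHolder pair above implements a common race-resolution pattern: both the timeout task and the response path try to remove the pending request from the same concurrent map, and only the side that succeeds in removing it notifies the caller. A minimal, self-contained sketch of that pattern using plain JDK classes (RequestTracker and PendingRequest are illustrative names, not types from this project):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Sketch only: whichever side removes the map entry first "wins" and notifies the caller.
    final class RequestTracker {

        static final class PendingRequest {
            final long requestId;
            final Runnable onTimeout;
            PendingRequest(long requestId, Runnable onTimeout) {
                this.requestId = requestId;
                this.onTimeout = onTimeout;
            }
        }

        private final Map<Long, PendingRequest> pending = new ConcurrentHashMap<>();
        private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        void track(PendingRequest request, long timeoutMillis) {
            pending.put(request.requestId, request);
            scheduler.schedule(() -> {
                // The timeout path only notifies if the response path has not removed the entry yet.
                PendingRequest removed = pending.remove(request.requestId);
                if (removed != null) {
                    removed.onTimeout.run();
                }
            }, timeoutMillis, TimeUnit.MILLISECONDS);
        }

        // The response path calls this; a null return means the request already timed out.
        PendingRequest complete(long requestId) {
            return pending.remove(requestId);
        }
    }
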
- private static class TimeoutInfoHolder {
-
- private final DiscoveryNode node;
- private final String action;
- private final long sentTime;
- private final long timeoutTime;
-
- TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) {
- this.node = node;
- this.action = action;
- this.sentTime = sentTime;
- this.timeoutTime = timeoutTime;
- }
-
- public DiscoveryNode node() {
- return node;
- }
-
- String action() {
- return action;
- }
-
- long sentTime() {
- return sentTime;
- }
-
- long timeoutTime() {
- return timeoutTime;
- }
- }
-
- private static class RequestHolder<T extends TransportResponse> {
-
- private final TransportResponseHandler<T> handler;
-
- private final DiscoveryNode node;
-
- private final String action;
-
- private final TimeoutHandler timeoutHandler;
-
- RequestHolder(TransportResponseHandler<T> handler, DiscoveryNode node, String action,
- TimeoutHandler timeoutHandler) {
- this.handler = handler;
- this.node = node;
- this.action = action;
- this.timeoutHandler = timeoutHandler;
- }
-
- TransportResponseHandler<T> handler() {
- return handler;
- }
-
- public DiscoveryNode node() {
- return this.node;
- }
-
- String action() {
- return this.action;
- }
-
- void cancelTimeout() {
- if (timeoutHandler != null) {
- timeoutHandler.cancel();
- }
- }
- }
-
- /**
- * This handler wrapper ensures that the response thread executes with the correct thread context.
- * Before any of the handle methods are invoked we restore the context.
- * @param <T> the transport response type
- */
- public static final class ContextRestoreResponseHandler<T extends TransportResponse>
- implements TransportResponseHandler<T> {
-
- private final TransportResponseHandler<T> delegate;
-
- private final Supplier<ThreadContext.StoredContext> contextSupplier;
-
- ContextRestoreResponseHandler(Supplier<ThreadContext.StoredContext> contextSupplier,
- TransportResponseHandler<T> delegate) {
- this.delegate = delegate;
- this.contextSupplier = contextSupplier;
- }
-
- @Override
- public T newInstance() {
- return delegate.newInstance();
- }
-
- @SuppressWarnings("try")
- @Override
- public void handleResponse(T response) {
- try (ThreadContext.StoredContext ignore = contextSupplier.get()) {
- delegate.handleResponse(response);
- }
- }
-
- @SuppressWarnings("try")
- @Override
- public void handleException(TransportException exp) {
- try (ThreadContext.StoredContext ignore = contextSupplier.get()) {
- delegate.handleException(exp);
- }
- }
-
- @Override
- public String executor() {
- return delegate.executor();
- }
-
- @Override
- public String toString() {
- return getClass().getName() + "/" + delegate.toString();
- }
-
- }
-
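ContextRestoreResponseHandler above is a plain decorator: it captures a restorable snapshot of the calling thread's context when the request goes out and reinstates it around every callback, so response handling observes the same context as the original caller. A rough sketch of the same idea with a simplified ThreadLocal-backed context (ContextRestoringCallback and the static CONTEXT field are stand-ins, not this project's ThreadContext API):

    import java.util.function.Consumer;
    import java.util.function.Supplier;

    // Sketch only: restore a captured ThreadLocal value around a delegate callback.
    final class ContextRestoringCallback<T> implements Consumer<T> {

        static final ThreadLocal<String> CONTEXT = ThreadLocal.withInitial(() -> "");

        private final Consumer<T> delegate;
        private final Supplier<AutoCloseable> restoreContext;

        ContextRestoringCallback(Consumer<T> delegate) {
            this.delegate = delegate;
            final String captured = CONTEXT.get();   // snapshot taken on the calling thread
            this.restoreContext = () -> {
                final String previous = CONTEXT.get();
                CONTEXT.set(captured);
                return () -> CONTEXT.set(previous);  // closing the handle puts the old value back
            };
        }

        @Override
        public void accept(T response) {
            try (AutoCloseable restored = restoreContext.get()) {
                delegate.accept(response);           // runs with the captured context visible
            } catch (Exception e) {
                throw new IllegalStateException(e);
            }
        }
    }
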
- static class DirectResponseChannel implements TransportChannel {
-
- private static final String DIRECT_RESPONSE_PROFILE = ".direct";
-
- private final Logger logger;
-
- private final DiscoveryNode localNode;
-
- private final String action;
-
- private final long requestId;
-
- private final TransportServiceAdapter adapter;
-
- private final ThreadPool threadPool;
-
- DirectResponseChannel(Logger logger, DiscoveryNode localNode, String action, long requestId,
- TransportServiceAdapter adapter, ThreadPool threadPool) {
- this.logger = logger;
- this.localNode = localNode;
- this.action = action;
- this.requestId = requestId;
- this.adapter = adapter;
- this.threadPool = threadPool;
- }
-
- @Override
- public String action() {
- return action;
- }
-
- @Override
- public String getProfileName() {
- return DIRECT_RESPONSE_PROFILE;
- }
-
- @Override
- public void sendResponse(TransportResponse response) throws IOException {
- sendResponse(response, TransportResponseOptions.EMPTY);
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public void sendResponse(final TransportResponse response, TransportResponseOptions options)
- throws IOException {
- adapter.onResponseSent(requestId, action, response, options);
- final TransportResponseHandler handler = adapter.onResponseReceived(requestId);
- if (handler != null) {
- final String executor = handler.executor();
- if (ThreadPool.Names.SAME.equals(executor)) {
- processResponse(handler, response);
- } else {
- threadPool.executor(executor).execute(() -> processResponse(handler, response));
- }
- }
- }
-
- void processResponse(TransportResponseHandler handler, TransportResponse response) {
- try {
- handler.handleResponse(response);
- } catch (Exception e) {
- processException(handler, wrapInRemote(new ResponseHandlerFailureTransportException(e)));
- }
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public void sendResponse(Exception exception) throws IOException {
- adapter.onResponseSent(requestId, action, exception);
- final TransportResponseHandler handler = adapter.onResponseReceived(requestId);
- if (handler != null) {
- final RemoteTransportException rtx = wrapInRemote(exception);
- final String executor = handler.executor();
- if (ThreadPool.Names.SAME.equals(executor)) {
- processException(handler, rtx);
- } else {
- threadPool.executor(handler.executor()).execute(() -> processException(handler, rtx));
- }
- }
- }
-
- RemoteTransportException wrapInRemote(Exception e) {
- if (e instanceof RemoteTransportException) {
- return (RemoteTransportException) e;
- }
- return new RemoteTransportException(localNode.getName(), localNode.getAddress(), action, e);
- }
-
- void processException(final TransportResponseHandler handler, final RemoteTransportException rtx) {
- try {
- handler.handleException(rtx);
- } catch (Exception e) {
- logger.error((Supplier<?>) () -> new ParameterizedMessage(
- "failed to handle exception for action [{}], handler [{}]", action, handler), e);
- }
- }
-
- @Override
- public long getRequestId() {
- return requestId;
- }
-
- @Override
- public String getChannelType() {
- return "direct";
- }
-
- @Override
- public Version getVersion() {
- return localNode.getVersion();
- }
- }
-}
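DirectResponseChannel, together with sendLocalRequest above, is the local-node fast path: when sender and receiver are the same node, the request bypasses serialization and the wire, and the response is handed straight back to the waiting handler, either inline or on a named executor. A compact sketch of that dispatch shape (LocalDispatcher and Handler are invented names; the real code additionally drives the tracing and metrics adapter):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Executor;
    import java.util.function.Consumer;

    // Sketch only: in-JVM dispatch with no serialization, mirroring the SAME-vs-named-executor split.
    final class LocalDispatcher {

        interface Handler {
            void handle(Object request, Consumer<Object> responseListener) throws Exception;
        }

        private final Map<String, Handler> handlers = new ConcurrentHashMap<>();
        private final Executor executor;

        LocalDispatcher(Executor executor) {
            this.executor = executor;
        }

        void register(String action, Handler handler) {
            handlers.put(action, handler);
        }

        void send(String action, Object request, Consumer<Object> responseListener, boolean inline) {
            final Handler handler = handlers.get(action);
            if (handler == null) {
                throw new IllegalArgumentException("action [" + action + "] not found");
            }
            final Runnable task = () -> {
                try {
                    handler.handle(request, responseListener);
                } catch (Exception e) {
                    responseListener.accept(e);      // errors flow back through the same channel
                }
            };
            if (inline) {
                task.run();                          // the equivalent of ThreadPool.Names.SAME
            } else {
                executor.execute(task);
            }
        }
    }
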
diff --git a/build.gradle b/build.gradle
index 712808f..121df15 100644
--- a/build.gradle
+++ b/build.gradle
@@ -1,27 +1,13 @@
-import java.time.ZonedDateTime
-import java.time.format.DateTimeFormatter
-
-buildscript {
- repositories {
- jcenter()
- maven {
- url 'http://xbib.org/repository'
- }
- }
- dependencies {
- classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4"
- }
-}
plugins {
id "org.sonarqube" version "2.6.1"
id "io.codearte.nexus-staging" version "0.11.0"
+ id "com.github.spotbugs" version "1.6.9"
id "org.xbib.gradle.plugin.asciidoctor" version "1.6.0.1"
}
-printf "Date: %s\nHost: %s\nOS: %s %s %s\nJava: %s %s %s %s\nGradle: %s Groovy: %s Java: %s\n" +
+printf "Host: %s\nOS: %s %s %s\nJava: %s %s %s %s\nGradle: %s Groovy: %s Java: %s\n" +
"Build: group: ${project.group} name: ${project.name} version: ${project.version}\n",
- ZonedDateTime.now().format(DateTimeFormatter.ISO_DATE_TIME),
InetAddress.getLocalHost(),
System.getProperty("os.name"),
System.getProperty("os.arch"),
@@ -33,31 +19,28 @@ printf "Date: %s\nHost: %s\nOS: %s %s %s\nJava: %s %s %s %s\nGradle: %s Groovy:
gradle.gradleVersion, GroovySystem.getVersion(), JavaVersion.current()
-apply plugin: "io.codearte.nexus-staging"
-apply plugin: 'org.xbib.gradle.plugin.asciidoctor'
-
-ext {
- user = 'jprante'
- name = 'elx'
- description = 'Elasticsearch extensions'
- scmUrl = 'https://github.com/' + user + '/' + name
- scmConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git'
- scmDeveloperConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git'
+if (JavaVersion.current() < JavaVersion.VERSION_11) {
+ throw new GradleException("This build must be run with java 11 or higher")
}
subprojects {
apply plugin: 'java'
apply plugin: 'maven'
apply plugin: 'signing'
+ apply plugin: 'com.github.spotbugs'
+ apply plugin: 'pmd'
+ apply plugin: 'checkstyle'
+ apply plugin: 'org.xbib.gradle.plugin.asciidoctor'
configurations {
- wagon
- alpnagent
asciidoclet
+ wagon
}
dependencies {
- alpnagent "org.mortbay.jetty.alpn:jetty-alpn-agent:${project.property('alpnagent.version')}"
+ testCompile "junit:junit:${project.property('junit.version')}"
+ testCompile "org.apache.logging.log4j:log4j-core:${project.property('log4j.version')}"
+ testCompile "org.apache.logging.log4j:log4j-slf4j-impl:${project.property('log4j.version')}"
asciidoclet "org.xbib:asciidoclet:${project.property('asciidoclet.version')}"
wagon "org.apache.maven.wagon:wagon-ssh:${project.property('wagon.version')}"
}
@@ -71,10 +54,32 @@ subprojects {
targetCompatibility = JavaVersion.VERSION_11
}
- jar {
- baseName "${rootProject.name}-${project.name}"
+ tasks.withType(JavaCompile) {
+ options.compilerArgs << "-Xlint:all"
+ if (!options.compilerArgs.contains("-processor")) {
+ options.compilerArgs << '-proc:none'
+ }
}
+ test {
+ jvmArgs = [
+ '--add-exports=java.base/jdk.internal.ref=ALL-UNNAMED',
+ '--add-exports=java.base/jdk.internal.misc=ALL-UNNAMED',
+ '--add-opens=java.base/java.nio=ALL-UNNAMED'
+ ]
+ systemProperty 'jna.debug_load', 'true'
+ testLogging {
+ showStandardStreams = true
+ exceptionFormat = 'full'
+ }
+ }
+
+ clean {
+ delete "data"
+ delete "logs"
+ delete "out"
+ }
+
/*javadoc {
options.docletpath = configurations.asciidoclet.files.asType(List)
options.doclet = 'org.xbib.asciidoclet.Asciidoclet'
@@ -105,16 +110,76 @@ subprojects {
archives javadocJar, sourcesJar
}*/
+ task javadocJar(type: Jar, dependsOn: javadoc) {
+ classifier 'javadoc'
+ }
+
+ task sourcesJar(type: Jar, dependsOn: classes) {
+ from sourceSets.main.allSource
+ classifier 'sources'
+ }
+
+ artifacts {
+ archives javadocJar, sourcesJar
+ }
+
if (project.hasProperty('signing.keyId')) {
signing {
sign configurations.archives
}
}
- apply from: "${rootProject.projectDir}/gradle/ext.gradle"
apply from: "${rootProject.projectDir}/gradle/publish.gradle"
- //apply from: "${rootProject.projectDir}/gradle/sonarqube.gradle"
+ spotbugs {
+ effort = "max"
+ reportLevel = "low"
+ //includeFilter = file("findbugs-exclude.xml")
+ }
+
+ tasks.withType(com.github.spotbugs.SpotBugsTask) {
+ ignoreFailures = true
+ reports {
+ xml.enabled = false
+ html.enabled = true
+ }
+ }
+
+ tasks.withType(Pmd) {
+ ignoreFailures = true
+ reports {
+ xml.enabled = true
+ html.enabled = true
+ }
+ }
+ tasks.withType(Checkstyle) {
+ ignoreFailures = true
+ reports {
+ xml.enabled = true
+ html.enabled = true
+ }
+ }
+
+ pmd {
+ toolVersion = '6.11.0'
+ ruleSets = ['category/java/bestpractices.xml']
+ }
+
+ checkstyle {
+ configFile = rootProject.file('config/checkstyle/checkstyle.xml')
+ ignoreFailures = true
+ showViolations = true
+ }
+
+ sonarqube {
+ properties {
+ property "sonar.projectName", "${project.group} ${project.name}"
+ property "sonar.sourceEncoding", "UTF-8"
+ property "sonar.tests", "src/test/java"
+ property "sonar.scm.provider", "git"
+ property "sonar.junit.reportsPath", "build/test-results/test/"
+ }
+ }
}
/*asciidoctor {
diff --git a/common/build.gradle b/common/build.gradle
deleted file mode 100644
index 5e961d5..0000000
--- a/common/build.gradle
+++ /dev/null
@@ -1,70 +0,0 @@
-buildscript {
- repositories {
- jcenter()
- maven {
- url 'http://xbib.org/repository'
- }
- }
- dependencies {
- classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4"
- }
-}
-
-apply plugin: 'org.xbib.gradle.plugin.elasticsearch.build'
-
-configurations {
- main
- tests
-}
-
-dependencies {
- compile project(':api')
- compile "org.xbib:metrics:${project.property('xbib-metrics.version')}"
- compileOnly "org.apache.logging.log4j:log4j-api:${project.property('log4j.version')}"
- testCompile "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}"
- testRuntime "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}"
-}
-
-jar {
- baseName "${rootProject.name}-common"
-}
-
-/*
-task testJar(type: Jar, dependsOn: testClasses) {
- baseName = "${project.archivesBaseName}-tests"
- from sourceSets.test.output
-}
-*/
-
-artifacts {
- main jar
- tests testJar
- archives sourcesJar, javadocJar
-}
-
-test {
- enabled = false
- //jvmArgs "-javaagent:" + configurations.alpnagent.asPath
- systemProperty 'path.home', project.buildDir.absolutePath
- testLogging {
- showStandardStreams = true
- exceptionFormat = 'full'
- }
-}
-
-randomizedTest {
- enabled = false
-}
-
-esTest {
- // test with the jars, not the classes, for security manager
- // classpath = files(configurations.testRuntime) + configurations.main.artifacts.files + configurations.tests.artifacts.files
- systemProperty 'tests.security.manager', 'true'
-}
-esTest.dependsOn jar, testJar
-
-dependencyLicenses.enabled = false
-
-// we do not like to examine Netty
-thirdPartyAudit.enabled = false
-
diff --git a/common/config/checkstyle/checkstyle.xml b/common/config/checkstyle/checkstyle.xml
deleted file mode 100644
index 8cb4438..0000000
--- a/common/config/checkstyle/checkstyle.xml
+++ /dev/null
@@ -1,321 +0,0 @@
diff --git a/common/licenses/classloader-6.3.2.1.jar.sha1 b/common/licenses/classloader-6.3.2.1.jar.sha1
deleted file mode 100644
index c959ad5..0000000
--- a/common/licenses/classloader-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f14124d1557cd7c21742f09cd18913a861125e56
\ No newline at end of file
diff --git a/common/licenses/elasticsearch-6.3.2.1.jar.sha1 b/common/licenses/elasticsearch-6.3.2.1.jar.sha1
deleted file mode 100644
index 7f6a7c3..0000000
--- a/common/licenses/elasticsearch-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2bc144784abc748426b125a948b0bdd4fc4dd7d6
\ No newline at end of file
diff --git a/common/licenses/elx-api-6.3.2.0.jar.sha1 b/common/licenses/elx-api-6.3.2.0.jar.sha1
deleted file mode 100644
index 06a07c6..0000000
--- a/common/licenses/elx-api-6.3.2.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-af8cf6c3e7de988bbb7e6e441a2235ba1df8eaf8
\ No newline at end of file
diff --git a/common/licenses/elx-api-LICENSE.txt b/common/licenses/elx-api-LICENSE.txt
deleted file mode 100644
index d645695..0000000
--- a/common/licenses/elx-api-LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/common/licenses/hdrhistogram-6.3.2.1.jar.sha1 b/common/licenses/hdrhistogram-6.3.2.1.jar.sha1
deleted file mode 100644
index 72d7e23..0000000
--- a/common/licenses/hdrhistogram-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-436454f1e6e821f6f18def7a2e4b467eeb341430
\ No newline at end of file
diff --git a/common/licenses/hppc-6.3.2.1.jar.sha1 b/common/licenses/hppc-6.3.2.1.jar.sha1
deleted file mode 100644
index 55b3ead..0000000
--- a/common/licenses/hppc-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-80ef947c9edfaacb261ee27e2c7fa5968b3eeaa6
\ No newline at end of file
diff --git a/common/licenses/jackson-6.3.2.1.jar.sha1 b/common/licenses/jackson-6.3.2.1.jar.sha1
deleted file mode 100644
index d2c2967..0000000
--- a/common/licenses/jackson-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-abf31b393745f2a6e133819ee7485420d6bc5160
\ No newline at end of file
diff --git a/common/licenses/jna-6.3.2.1.jar.sha1 b/common/licenses/jna-6.3.2.1.jar.sha1
deleted file mode 100644
index 5142c47..0000000
--- a/common/licenses/jna-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-68463acec824eb54989fcecbe44074a41ee639e3
\ No newline at end of file
diff --git a/common/licenses/joda-6.3.2.1.jar.sha1 b/common/licenses/joda-6.3.2.1.jar.sha1
deleted file mode 100644
index aade01f..0000000
--- a/common/licenses/joda-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4c232fdaf23b8c7b1ff1ca1ba9b91fcc0fa01938
\ No newline at end of file
diff --git a/common/licenses/joptsimple-6.3.2.1.jar.sha1 b/common/licenses/joptsimple-6.3.2.1.jar.sha1
deleted file mode 100644
index 6059409..0000000
--- a/common/licenses/joptsimple-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7834ee69f91a3360f17a31cf6a27b245a3a2f668
\ No newline at end of file
diff --git a/common/licenses/jts-6.3.2.1.jar.sha1 b/common/licenses/jts-6.3.2.1.jar.sha1
deleted file mode 100644
index 8d0ab58..0000000
--- a/common/licenses/jts-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b10c7f51ab98e6f6f252c931534edbb632cb108e
\ No newline at end of file
diff --git a/common/licenses/log4j-6.3.2.1.jar.sha1 b/common/licenses/log4j-6.3.2.1.jar.sha1
deleted file mode 100644
index c6f346a..0000000
--- a/common/licenses/log4j-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-824c180dc70fda00b70a146d2f2be9a8f36cfdbb
\ No newline at end of file
diff --git a/common/licenses/lucene-6.3.2.1.jar.sha1 b/common/licenses/lucene-6.3.2.1.jar.sha1
deleted file mode 100644
index bee6197..0000000
--- a/common/licenses/lucene-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-68fba4b570c4717cda49a3f187e2bfb909697fc8
\ No newline at end of file
diff --git a/common/licenses/metrics-1.1.0.jar.sha1 b/common/licenses/metrics-1.1.0.jar.sha1
deleted file mode 100644
index 959a34a..0000000
--- a/common/licenses/metrics-1.1.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e8949a50a223ab837edc312e34ee597febe86464
\ No newline at end of file
diff --git a/common/licenses/netty-buffer-4.1.33.Final.jar.sha1 b/common/licenses/netty-buffer-4.1.33.Final.jar.sha1
deleted file mode 100644
index 6bab3bb..0000000
--- a/common/licenses/netty-buffer-4.1.33.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0d4fdb13d5832a0f348e4d855c71201a2b15d560
\ No newline at end of file
diff --git a/common/licenses/netty-codec-4.1.33.Final.jar.sha1 b/common/licenses/netty-codec-4.1.33.Final.jar.sha1
deleted file mode 100644
index e103a84..0000000
--- a/common/licenses/netty-codec-4.1.33.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-600762bf6861fa62b061782debb6fcdeff1f1984
\ No newline at end of file
diff --git a/common/licenses/netty-codec-http-4.1.33.Final.jar.sha1 b/common/licenses/netty-codec-http-4.1.33.Final.jar.sha1
deleted file mode 100644
index b5a8826..0000000
--- a/common/licenses/netty-codec-http-4.1.33.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ad557dffc0777b1b24558d6c57b77b0198dbb58d
\ No newline at end of file
diff --git a/common/licenses/netty-common-4.1.33.Final.jar.sha1 b/common/licenses/netty-common-4.1.33.Final.jar.sha1
deleted file mode 100644
index 22d10fa..0000000
--- a/common/licenses/netty-common-4.1.33.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-398b725cbaca8c691b74759ae6c3d69b8eeb0574
\ No newline at end of file
diff --git a/common/licenses/netty-handler-4.1.33.Final.jar.sha1 b/common/licenses/netty-handler-4.1.33.Final.jar.sha1
deleted file mode 100644
index 8d86585..0000000
--- a/common/licenses/netty-handler-4.1.33.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3bcc2db64f7b0ebacba552aff319b41962c2df96
\ No newline at end of file
diff --git a/common/licenses/netty-resolver-4.1.33.Final.jar.sha1 b/common/licenses/netty-resolver-4.1.33.Final.jar.sha1
deleted file mode 100644
index 3b12aa7..0000000
--- a/common/licenses/netty-resolver-4.1.33.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f3873f5ed509b5c169fb7cbaf34b694d8c748926
\ No newline at end of file
diff --git a/common/licenses/netty-transport-4.1.33.Final.jar.sha1 b/common/licenses/netty-transport-4.1.33.Final.jar.sha1
deleted file mode 100644
index fdad609..0000000
--- a/common/licenses/netty-transport-4.1.33.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b9f9af72dfcd8464c16169670d52c6dc5fe65897
\ No newline at end of file
diff --git a/common/licenses/noggit-6.3.2.1.jar.sha1 b/common/licenses/noggit-6.3.2.1.jar.sha1
deleted file mode 100644
index ac01817..0000000
--- a/common/licenses/noggit-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fe9c516ca4ead60f713eceb398e6f636b83d0a5b
\ No newline at end of file
diff --git a/common/licenses/s2geo-6.3.2.1.jar.sha1 b/common/licenses/s2geo-6.3.2.1.jar.sha1
deleted file mode 100644
index 2ec741a..0000000
--- a/common/licenses/s2geo-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b1bd19c1f50b6764f104cdcbfa3f01b1b3bb2045
\ No newline at end of file
diff --git a/common/licenses/securesm-6.3.2.1.jar.sha1 b/common/licenses/securesm-6.3.2.1.jar.sha1
deleted file mode 100644
index 9632107..0000000
--- a/common/licenses/securesm-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ce2c501f3c72eb1099467d708b9c134ed0b7bb2a
\ No newline at end of file
diff --git a/common/licenses/snakeyaml-6.3.2.1.jar.sha1 b/common/licenses/snakeyaml-6.3.2.1.jar.sha1
deleted file mode 100644
index 2f3cc62..0000000
--- a/common/licenses/snakeyaml-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-62a48b60b17e6d2a823439a5e68f31ef196f11e7
\ No newline at end of file
diff --git a/common/licenses/spatial4j-6.3.2.1.jar.sha1 b/common/licenses/spatial4j-6.3.2.1.jar.sha1
deleted file mode 100644
index 5c26ca0..0000000
--- a/common/licenses/spatial4j-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-02855ff60b4cecf9dd15e6e91e3cc0902d2e7eac
\ No newline at end of file
diff --git a/common/licenses/tdigest-6.3.2.1.jar.sha1 b/common/licenses/tdigest-6.3.2.1.jar.sha1
deleted file mode 100644
index 9ae2373..0000000
--- a/common/licenses/tdigest-6.3.2.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d28517afc71abe5b7f224944280d5f03ed2f2cc
\ No newline at end of file
diff --git a/common/src/docs/asciidoc/css/foundation.css b/common/src/docs/asciidoc/css/foundation.css
deleted file mode 100644
index 27be611..0000000
--- a/common/src/docs/asciidoc/css/foundation.css
+++ /dev/null
@@ -1,684 +0,0 @@
-/*! normalize.css v2.1.2 | MIT License | git.io/normalize */
-/* ========================================================================== HTML5 display definitions ========================================================================== */
-/** Correct `block` display not defined in IE 8/9. */
-article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; }
-
-/** Correct `inline-block` display not defined in IE 8/9. */
-audio, canvas, video { display: inline-block; }
-
-/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */
-audio:not([controls]) { display: none; height: 0; }
-
-/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */
-[hidden], template { display: none; }
-
-script { display: none !important; }
-
-/* ========================================================================== Base ========================================================================== */
-/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */
-html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ }
-
-/** Remove default margin. */
-body { margin: 0; }
-
-/* ========================================================================== Links ========================================================================== */
-/** Remove the gray background color from active links in IE 10. */
-a { background: transparent; }
-
-/** Address `outline` inconsistency between Chrome and other browsers. */
-a:focus { outline: thin dotted; }
-
-/** Improve readability when focused and also mouse hovered in all browsers. */
-a:active, a:hover { outline: 0; }
-
-/* ========================================================================== Typography ========================================================================== */
-/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */
-h1 { font-size: 2em; margin: 0.67em 0; }
-
-/** Address styling not present in IE 8/9, Safari 5, and Chrome. */
-abbr[title] { border-bottom: 1px dotted; }
-
-/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */
-b, strong { font-weight: bold; }
-
-/** Address styling not present in Safari 5 and Chrome. */
-dfn { font-style: italic; }
-
-/** Address differences between Firefox and other browsers. */
-hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; }
-
-/** Address styling not present in IE 8/9. */
-mark { background: #ff0; color: #000; }
-
-/** Correct font family set oddly in Safari 5 and Chrome. */
-code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; }
-
-/** Improve readability of pre-formatted text in all browsers. */
-pre { white-space: pre-wrap; }
-
-/** Set consistent quote types. */
-q { quotes: "\201C" "\201D" "\2018" "\2019"; }
-
-/** Address inconsistent and variable font size in all browsers. */
-small { font-size: 80%; }
-
-/** Prevent `sub` and `sup` affecting `line-height` in all browsers. */
-sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; }
-
-sup { top: -0.5em; }
-
-sub { bottom: -0.25em; }
-
-/* ========================================================================== Embedded content ========================================================================== */
-/** Remove border when inside `a` element in IE 8/9. */
-img { border: 0; }
-
-/** Correct overflow displayed oddly in IE 9. */
-svg:not(:root) { overflow: hidden; }
-
-/* ========================================================================== Figures ========================================================================== */
-/** Address margin not present in IE 8/9 and Safari 5. */
-figure { margin: 0; }
-
-/* ========================================================================== Forms ========================================================================== */
-/** Define consistent border, margin, and padding. */
-fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; }
-
-/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */
-legend { border: 0; /* 1 */ padding: 0; /* 2 */ }
-
-/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */
-button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ }
-
-/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */
-button, input { line-height: normal; }
-
-/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */
-button, select { text-transform: none; }
-
-/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */
-button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ }
-
-/** Re-set default cursor for disabled elements. */
-button[disabled], html input[disabled] { cursor: default; }
-
-/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */
-input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ }
-
-/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */
-input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; }
-
-/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */
-input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; }
-
-/** Remove inner padding and border in Firefox 4+. */
-button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; }
-
-/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */
-textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ }
-
-/* ========================================================================== Tables ========================================================================== */
-/** Remove most spacing between table cells. */
-table { border-collapse: collapse; border-spacing: 0; }
-
-meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; }
-
-meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; }
-
-meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; }
-
-*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; }
-
-html, body { font-size: 100%; }
-
-body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; }
-
-a:hover { cursor: pointer; }
-
-img, object, embed { max-width: 100%; height: auto; }
-
-object, embed { height: 100%; }
-
-img { -ms-interpolation-mode: bicubic; }
-
-#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; }
-
-.left { float: left !important; }
-
-.right { float: right !important; }
-
-.text-left { text-align: left !important; }
-
-.text-right { text-align: right !important; }
-
-.text-center { text-align: center !important; }
-
-.text-justify { text-align: justify !important; }
-
-.hide { display: none; }
-
-.antialiased { -webkit-font-smoothing: antialiased; }
-
-img { display: inline-block; vertical-align: middle; }
-
-textarea { height: auto; min-height: 50px; }
-
-select { width: 100%; }
-
-object, svg { display: inline-block; vertical-align: middle; }
-
-.center { margin-left: auto; margin-right: auto; }
-
-.spread { width: 100%; }
-
-p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; }
-
-.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; }
-
-/* Typography resets */
-div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; }
-
-/* Default Link Styles */
-a { color: #2ba6cb; text-decoration: none; line-height: inherit; }
-a:hover, a:focus { color: #2795b6; }
-a img { border: none; }
-
-/* Default paragraph styles */
-p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; }
-p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; }
-
-/* Default header styles */
-h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; }
-h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; }
-
-h1 { font-size: 2.125em; }
-
-h2 { font-size: 1.6875em; }
-
-h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.375em; }
-
-h4 { font-size: 1.125em; }
-
-h5 { font-size: 1.125em; }
-
-h6 { font-size: 1em; }
-
-hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; }
-
-/* Helpful Typography Defaults */
-em, i { font-style: italic; line-height: inherit; }
-
-strong, b { font-weight: bold; line-height: inherit; }
-
-small { font-size: 60%; line-height: inherit; }
-
-code { font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; }
-
-/* Lists */
-ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; }
-
-ul, ol { margin-left: 1.5em; }
-ul.no-bullet, ol.no-bullet { margin-left: 1.5em; }
-
-/* Unordered Lists */
-ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ }
-ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; }
-ul.square { list-style-type: square; }
-ul.circle { list-style-type: circle; }
-ul.disc { list-style-type: disc; }
-ul.no-bullet { list-style: none; }
-
-/* Ordered Lists */
-ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; }
-
-/* Definition Lists */
-dl dt { margin-bottom: 0.3125em; font-weight: bold; }
-dl dd { margin-bottom: 1.25em; }
-
-/* Abbreviations */
-abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; }
-
-abbr { text-transform: none; }
-
-/* Blockquotes */
-blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; }
-blockquote cite { display: block; font-size: 0.8125em; color: #555555; }
-blockquote cite:before { content: "\2014 \0020"; }
-blockquote cite a, blockquote cite a:visited { color: #555555; }
-
-blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; }
-
-/* Microformats */
-.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; }
-.vcard li { margin: 0; display: block; }
-.vcard .fn { font-weight: bold; font-size: 0.9375em; }
-
-.vevent .summary { font-weight: bold; }
-.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; }
-
-@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; }
- h1 { font-size: 2.75em; }
- h2 { font-size: 2.3125em; }
- h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; }
- h4 { font-size: 1.4375em; } }
-/* Tables */
-table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; }
-table thead, table tfoot { background: whitesmoke; font-weight: bold; }
-table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; }
-table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; }
-table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; }
-table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; }
-
-body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; }
-
-h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; }
-
-.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; }
-.clearfix:after, .float-group:after { clear: both; }
-
-*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; }
-*:not(pre) > code.nobreak { word-wrap: normal; }
-*:not(pre) > code.nowrap { white-space: nowrap; }
-
-pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; }
-
-em em { font-style: normal; }
-
-strong strong { font-weight: normal; }
-
-.keyseq { color: #555555; }
-
-kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; }
-
-.keyseq kbd:first-child { margin-left: 0; }
-
-.keyseq kbd:last-child { margin-right: 0; }
-
-.menuseq, .menu { color: #090909; }
-
-b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; }
-
-b.button:before { content: "["; padding: 0 3px 0 2px; }
-
-b.button:after { content: "]"; padding: 0 2px 0 3px; }
-
-#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; }
-#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; }
-#header:after, #content:after, #footnotes:after, #footer:after { clear: both; }
-
-#content { margin-top: 1.25em; }
-
-#content:before { content: none; }
-
-#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; }
-#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; }
-#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; }
-#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; }
-#header .details span:first-child { margin-left: -0.125em; }
-#header .details span.email a { color: #6f6f6f; }
-#header .details br { display: none; }
-#header .details br + span:before { content: "\00a0\2013\00a0"; }
-#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; }
-#header .details br + span#revremark:before { content: "\00a0|\00a0"; }
-#header #revnumber { text-transform: capitalize; }
-#header #revnumber:after { content: "\00a0"; }
-
-#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; }
-
-#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; }
-#toc > ul { margin-left: 0.125em; }
-#toc ul.sectlevel0 > li > a { font-style: italic; }
-#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; }
-#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; }
-#toc li { line-height: 1.3334; margin-top: 0.3334em; }
-#toc a { text-decoration: none; }
-#toc a:active { text-decoration: underline; }
-
-#toctitle { color: #6f6f6f; font-size: 1.2em; }
-
-@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; }
- body.toc2 { padding-left: 15em; padding-right: 0; }
- #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; }
- #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; }
- #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; }
- #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; }
- #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; }
- body.toc2.toc-right { padding-left: 0; padding-right: 15em; }
- body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } }
-@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; }
- #toc.toc2 { width: 20em; }
- #toc.toc2 #toctitle { font-size: 1.375em; }
- #toc.toc2 > ul { font-size: 0.95em; }
- #toc.toc2 ul ul { padding-left: 1.25em; }
- body.toc2.toc-right { padding-left: 0; padding-right: 20em; } }
-#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; }
-#content #toc > :first-child { margin-top: 0; }
-#content #toc > :last-child { margin-bottom: 0; }
-
-#footer { max-width: 100%; background-color: #222222; padding: 1.25em; }
-
-#footer-text { color: #dddddd; line-height: 1.44; }
-
-.sect1 { padding-bottom: 0.625em; }
-
-@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } }
-.sect1 + .sect1 { border-top: 1px solid #dddddd; }
-
-#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; }
-#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; }
-#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; }
-#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; }
-#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; }
-
-.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; }
-
-.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; }
-
-table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; }
-
-.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; }
-
-table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; }
-
-.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; }
-.admonitionblock > table td.icon { text-align: center; width: 80px; }
-.admonitionblock > table td.icon img { max-width: initial; }
-.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; }
-.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; }
-.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; }
-
-.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; }
-.exampleblock > .content > :first-child { margin-top: 0; }
-.exampleblock > .content > :last-child { margin-bottom: 0; }
-
-.sidebarblock { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; }
-.sidebarblock > :first-child { margin-top: 0; }
-.sidebarblock > :last-child { margin-bottom: 0; }
-.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; }
-
-.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; }
-
-.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; }
-.sidebarblock .literalblock pre, .sidebarblock .listingblock pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; }
-
-.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; }
-.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; }
-@media only screen and (min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } }
-@media only screen and (min-width: 1280px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } }
-
-.literalblock.output pre { color: #eeeeee; background-color: black; }
-
-.listingblock pre.highlightjs { padding: 0; }
-.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; }
-
-.listingblock > .content { position: relative; }
-
-.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; }
-
-.listingblock:hover code[data-lang]:before { display: block; }
-
-.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; }
-
-.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; }
-
-table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; }
-
-table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; }
-
-table.pyhltable td.code { padding-left: .75em; padding-right: 0; }
-
-pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; }
-
-pre.pygments .lineno { display: inline-block; margin-right: .25em; }
-
-table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; }
-
-.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; }
-.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; }
-.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; }
-.quoteblock blockquote { margin: 0; padding: 0; border: 0; }
-.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); }
-.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; }
-.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; }
-.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; }
-.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; }
-.quoteblock .quoteblock blockquote:before { display: none; }
-
-.verseblock { margin: 0 1em 1.25em 1em; }
-.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; }
-.verseblock pre strong { font-weight: 400; }
-.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; }
-
-.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; }
-.quoteblock .attribution br, .verseblock .attribution br { display: none; }
-.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; }
-
-.quoteblock.abstract { margin: 0 0 1.25em 0; display: block; }
-.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; }
-.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; }
-
-table.tableblock { max-width: 100%; border-collapse: separate; }
-table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; }
-
-table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; }
-
-table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; }
-
-table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; }
-
-table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; }
-
-table.grid-all * > tr > .tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; }
-
-table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; }
-
-table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; }
-
-table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; }
-
-table.frame-all { border-width: 1px; }
-
-table.frame-sides { border-width: 0 1px; }
-
-table.frame-topbot { border-width: 1px 0; }
-
-th.halign-left, td.halign-left { text-align: left; }
-
-th.halign-right, td.halign-right { text-align: right; }
-
-th.halign-center, td.halign-center { text-align: center; }
-
-th.valign-top, td.valign-top { vertical-align: top; }
-
-th.valign-bottom, td.valign-bottom { vertical-align: bottom; }
-
-th.valign-middle, td.valign-middle { vertical-align: middle; }
-
-table thead th, table tfoot th { font-weight: bold; }
-
-tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; }
-
-tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; }
-
-p.tableblock > code:only-child { background: none; padding: 0; }
-
-p.tableblock { font-size: 1em; }
-
-td > div.verse { white-space: pre; }
-
-ol { margin-left: 1.75em; }
-
-ul li ol { margin-left: 1.5em; }
-
-dl dd { margin-left: 1.125em; }
-
-dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; }
-
-ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; }
-
-ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; }
-
-ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; }
-
-ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; }
-
-ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; }
-
-ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; }
-ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; }
-ul.inline > li > * { display: block; }
-
-.unstyled dl dt { font-weight: normal; font-style: normal; }
-
-ol.arabic { list-style-type: decimal; }
-
-ol.decimal { list-style-type: decimal-leading-zero; }
-
-ol.loweralpha { list-style-type: lower-alpha; }
-
-ol.upperalpha { list-style-type: upper-alpha; }
-
-ol.lowerroman { list-style-type: lower-roman; }
-
-ol.upperroman { list-style-type: upper-roman; }
-
-ol.lowergreek { list-style-type: lower-greek; }
-
-.hdlist > table, .colist > table { border: 0; background: none; }
-.hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; }
-
-td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; }
-
-td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; }
-
-.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; }
-
-.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; }
-.colist > table tr > td:first-of-type img { max-width: initial; }
-.colist > table tr > td:last-of-type { padding: 0.25em 0; }
-
-.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; }
-
-.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; }
-.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; }
-.imageblock > .title { margin-bottom: 0; }
-.imageblock.thumb, .imageblock.th { border-width: 6px; }
-.imageblock.thumb > .title, .imageblock.th > .title { padding: 0 0.125em; }
-
-.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; }
-.image.left { margin-right: 0.625em; }
-.image.right { margin-left: 0.625em; }
-
-a.image { text-decoration: none; display: inline-block; }
-a.image object { pointer-events: none; }
-
-sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; }
-sup.footnote a, sup.footnoteref a { text-decoration: none; }
-sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; }
-
-#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; }
-#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; }
-#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; }
-#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; }
-#footnotes .footnote:last-of-type { margin-bottom: 0; }
-#content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; }
-
-.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; }
-.gist .file-data > table td.line-data { width: 99%; }
-
-div.unbreakable { page-break-inside: avoid; }
-
-.big { font-size: larger; }
-
-.small { font-size: smaller; }
-
-.underline { text-decoration: underline; }
-
-.overline { text-decoration: overline; }
-
-.line-through { text-decoration: line-through; }
-
-.aqua { color: #00bfbf; }
-
-.aqua-background { background-color: #00fafa; }
-
-.black { color: black; }
-
-.black-background { background-color: black; }
-
-.blue { color: #0000bf; }
-
-.blue-background { background-color: #0000fa; }
-
-.fuchsia { color: #bf00bf; }
-
-.fuchsia-background { background-color: #fa00fa; }
-
-.gray { color: #606060; }
-
-.gray-background { background-color: #7d7d7d; }
-
-.green { color: #006000; }
-
-.green-background { background-color: #007d00; }
-
-.lime { color: #00bf00; }
-
-.lime-background { background-color: #00fa00; }
-
-.maroon { color: #600000; }
-
-.maroon-background { background-color: #7d0000; }
-
-.navy { color: #000060; }
-
-.navy-background { background-color: #00007d; }
-
-.olive { color: #606000; }
-
-.olive-background { background-color: #7d7d00; }
-
-.purple { color: #600060; }
-
-.purple-background { background-color: #7d007d; }
-
-.red { color: #bf0000; }
-
-.red-background { background-color: #fa0000; }
-
-.silver { color: #909090; }
-
-.silver-background { background-color: #bcbcbc; }
-
-.teal { color: #006060; }
-
-.teal-background { background-color: #007d7d; }
-
-.white { color: #bfbfbf; }
-
-.white-background { background-color: #fafafa; }
-
-.yellow { color: #bfbf00; }
-
-.yellow-background { background-color: #fafa00; }
-
-span.icon > .fa { cursor: default; }
-
-.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; }
-.admonitionblock td.icon .icon-note:before { content: "\f05a"; color: #207c98; }
-.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; }
-.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; }
-.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; }
-.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; }
-
-.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; }
-.conum[data-value] * { color: #fff !important; }
-.conum[data-value] + b { display: none; }
-.conum[data-value]:after { content: attr(data-value); }
-pre .conum[data-value] { position: relative; top: -0.125em; }
-
-b.conum * { color: inherit !important; }
-
-.conum:not([data-value]):empty { display: none; }
-
-.literalblock pre, .listingblock pre { background: #eeeeee; }
diff --git a/common/src/docs/asciidoclet/overview.adoc b/common/src/docs/asciidoclet/overview.adoc
deleted file mode 100644
index 7947331..0000000
--- a/common/src/docs/asciidoclet/overview.adoc
+++ /dev/null
@@ -1,4 +0,0 @@
-= Elasticsearch Java client
-Jörg Prante
-Version 5.4.0.0
-
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/AbstractClient.java b/common/src/main/java/org/xbib/elasticsearch/client/AbstractClient.java
deleted file mode 100644
index 6db0452..0000000
--- a/common/src/main/java/org/xbib/elasticsearch/client/AbstractClient.java
+++ /dev/null
@@ -1,925 +0,0 @@
-package org.xbib.elasticsearch.client;
-
-import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchTimeoutException;
-import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
-import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
-import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
-import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
-import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
-import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
-import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
-import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
-import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction;
-import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder;
-import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
-import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
-import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
-import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
-import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
-import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
-import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
-import org.elasticsearch.action.admin.indices.flush.FlushAction;
-import org.elasticsearch.action.admin.indices.flush.FlushRequest;
-import org.elasticsearch.action.admin.indices.get.GetIndexAction;
-import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
-import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
-import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
-import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
-import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
-import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
-import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
-import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
-import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
-import org.elasticsearch.action.bulk.BulkItemResponse;
-import org.elasticsearch.action.bulk.BulkRequest;
-import org.elasticsearch.action.bulk.BulkResponse;
-import org.elasticsearch.action.delete.DeleteRequest;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.search.SearchAction;
-import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.update.UpdateRequest;
-import org.elasticsearch.client.ElasticsearchClient;
-import org.elasticsearch.client.transport.NoNodeAvailableException;
-import org.elasticsearch.cluster.health.ClusterHealthStatus;
-import org.elasticsearch.cluster.metadata.AliasMetaData;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.io.Streams;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.search.SearchHit;
-import org.elasticsearch.search.sort.SortBuilder;
-import org.elasticsearch.search.sort.SortBuilders;
-import org.elasticsearch.search.sort.SortOrder;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.StringWriter;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-public abstract class AbstractClient implements ClientMethods {
-
- private static final Logger logger = LogManager.getLogger(AbstractClient.class.getName());
-
- private Settings.Builder settingsBuilder;
-
- private Settings settings;
-
-    private Map<String, String> mappings;
-
- private ElasticsearchClient client;
-
- protected BulkProcessor bulkProcessor;
-
- protected BulkMetric metric;
-
- protected BulkControl control;
-
- protected Throwable throwable;
-
- protected boolean closed;
-
- protected int maxActionsPerRequest = DEFAULT_MAX_ACTIONS_PER_REQUEST;
-
- protected int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS;
-
- protected String maxVolumePerRequest = DEFAULT_MAX_VOLUME_PER_REQUEST;
-
- protected String flushIngestInterval = DEFAULT_FLUSH_INTERVAL;
-
- @Override
- public AbstractClient init(ElasticsearchClient client, Settings settings,
- final BulkMetric metric, final BulkControl control) {
- this.client = client;
- this.mappings = new HashMap<>();
- if (settings == null) {
- settings = findSettings();
- }
- if (client == null && settings != null) {
- try {
- this.client = createClient(settings);
- } catch (IOException e) {
- logger.error(e.getMessage(), e);
- }
- }
- this.metric = metric;
- this.control = control;
- if (metric != null) {
- metric.start();
- }
- resetSettings();
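-        // the listener below updates the optional BulkMetric counters (submitted, succeeded, failed,
-        // in-flight) around each bulk execution and logs bulk progress; a bulk-level failure is
-        // stored in 'throwable' and marks the client as closed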
- BulkProcessor.Listener listener = new BulkProcessor.Listener() {
-
- private final Logger logger = LogManager.getLogger(getClass().getName() + ".Listener");
-
- @Override
- public void beforeBulk(long executionId, BulkRequest request) {
- long l = -1;
- if (metric != null) {
- metric.getCurrentIngest().inc();
- l = metric.getCurrentIngest().getCount();
- int n = request.numberOfActions();
- metric.getSubmitted().inc(n);
- metric.getCurrentIngestNumDocs().inc(n);
- metric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes());
- }
- logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]",
- executionId,
- request.numberOfActions(),
- request.estimatedSizeInBytes(),
- l);
- }
-
- @Override
- public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
- long l = -1;
- if (metric != null) {
- metric.getCurrentIngest().dec();
- l = metric.getCurrentIngest().getCount();
- metric.getSucceeded().inc(response.getItems().length);
- }
- int n = 0;
- for (BulkItemResponse itemResponse : response.getItems()) {
- if (metric != null) {
- metric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId());
- }
- if (itemResponse.isFailed()) {
- n++;
- if (metric != null) {
- metric.getSucceeded().dec(1);
- metric.getFailed().inc(1);
- }
- }
- }
- if (metric != null) {
- logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests",
- executionId,
- metric.getSucceeded().getCount(),
- metric.getFailed().getCount(),
- response.getTook().millis(),
- l);
- }
- if (n > 0) {
- logger.error("bulk [{}] failed with {} failed items, failure message = {}",
- executionId, n, response.buildFailureMessage());
- } else {
- if (metric != null) {
- metric.getCurrentIngestNumDocs().dec(response.getItems().length);
- }
- }
- }
-
- @Override
- public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
- if (metric != null) {
- metric.getCurrentIngest().dec();
- }
- throwable = failure;
- closed = true;
- logger.error("after bulk [" + executionId + "] error", failure);
- }
- };
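-        // the bulk processor is only built when a client is available; bulk actions, concurrency and
-        // flush interval come from the configured limits, the maximum volume per request is applied only when set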
- if (this.client != null) {
- BulkProcessor.Builder builder = BulkProcessor.builder(this.client, listener)
- .setBulkActions(maxActionsPerRequest)
- .setConcurrentRequests(maxConcurrentRequests)
- .setFlushInterval(TimeValue.parseTimeValue(flushIngestInterval, "flushIngestInterval"));
- if (maxVolumePerRequest != null) {
- builder.setBulkSize(ByteSizeValue.parseBytesSizeValue(maxVolumePerRequest, "maxVolumePerRequest"));
- }
- this.bulkProcessor = builder.build();
- }
- this.closed = false;
- return this;
- }
-
- protected abstract ElasticsearchClient createClient(Settings settings) throws IOException;
-
- @Override
- public ElasticsearchClient client() {
- return client;
- }
-
- @Override
- public ClientMethods maxActionsPerRequest(int maxActionsPerRequest) {
- this.maxActionsPerRequest = maxActionsPerRequest;
- return this;
- }
-
- @Override
- public ClientMethods maxConcurrentRequests(int maxConcurrentRequests) {
- this.maxConcurrentRequests = maxConcurrentRequests;
- return this;
- }
-
- @Override
- public ClientMethods maxVolumePerRequest(String maxVolumePerRequest) {
- this.maxVolumePerRequest = maxVolumePerRequest;
- return this;
- }
-
- @Override
- public ClientMethods flushIngestInterval(String flushIngestInterval) {
- this.flushIngestInterval = flushIngestInterval;
- return this;
- }
-
- @Override
- public BulkMetric getMetric() {
- return metric;
- }
-
- public void resetSettings() {
- this.settingsBuilder = Settings.builder();
- settings = null;
- mappings = new HashMap<>();
- }
-
- public void setSettings(Settings settings) {
- this.settings = settings;
- }
-
- public void setting(String key, String value) {
- if (settingsBuilder == null) {
- settingsBuilder = Settings.builder();
- }
- settingsBuilder.put(key, value);
- }
-
- public void setting(String key, Boolean value) {
- if (settingsBuilder == null) {
- settingsBuilder = Settings.builder();
- }
- settingsBuilder.put(key, value);
- }
-
- public void setting(String key, Integer value) {
- if (settingsBuilder == null) {
- settingsBuilder = Settings.builder();
- }
- settingsBuilder.put(key, value);
- }
-
- public void setting(InputStream in) throws IOException {
- settingsBuilder = Settings.builder().loadFromStream(".json", in, true);
- }
-
- public Settings.Builder settingsBuilder() {
- return settingsBuilder != null ? settingsBuilder : Settings.builder();
- }
-
- public Settings settings() {
- if (settings != null) {
- return settings;
- }
- if (settingsBuilder == null) {
- settingsBuilder = Settings.builder();
- }
- return settingsBuilder.build();
- }
-
- @Override
- public void mapping(String type, String mapping) throws IOException {
- mappings.put(type, mapping);
- }
-
- @Override
- public void mapping(String type, InputStream in) throws IOException {
- if (type == null) {
- return;
- }
- StringWriter sw = new StringWriter();
- Streams.copy(new InputStreamReader(in, StandardCharsets.UTF_8), sw);
- mappings.put(type, sw.toString());
- }
-
- @Override
- public ClientMethods index(String index, String type, String id, boolean create, BytesReference source) {
- return indexRequest(new IndexRequest(index).type(type).id(id).create(create).source(source, XContentType.JSON));
- }
-
- @Override
- public ClientMethods index(String index, String type, String id, boolean create, String source) {
- return indexRequest(new IndexRequest(index).type(type).id(id).create(create).source(source, XContentType.JSON));
- }
-
- @Override
- public ClientMethods indexRequest(IndexRequest indexRequest) {
- if (closed) {
- throwClose();
- }
- try {
- if (metric != null) {
- metric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id());
- }
- bulkProcessor.add(indexRequest);
- } catch (Exception e) {
- throwable = e;
- closed = true;
- logger.error("bulk add of index request failed: " + e.getMessage(), e);
- }
- return this;
- }
-
- @Override
- public ClientMethods delete(String index, String type, String id) {
- return deleteRequest(new DeleteRequest(index).type(type).id(id));
- }
-
- @Override
- public ClientMethods deleteRequest(DeleteRequest deleteRequest) {
- if (closed) {
- throwClose();
- }
- try {
- if (metric != null) {
- metric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
- }
- bulkProcessor.add(deleteRequest);
- } catch (Exception e) {
- throwable = e;
- closed = true;
- logger.error("bulk add of delete failed: " + e.getMessage(), e);
- }
- return this;
- }
-
- @Override
- public ClientMethods update(String index, String type, String id, BytesReference source) {
- return updateRequest(new UpdateRequest().index(index).type(type).id(id).upsert(source, XContentType.JSON));
- }
-
- @Override
- public ClientMethods update(String index, String type, String id, String source) {
- return updateRequest(new UpdateRequest().index(index).type(type).id(id).upsert(source, XContentType.JSON));
- }
-
- @Override
- public ClientMethods updateRequest(UpdateRequest updateRequest) {
- if (closed) {
- throwClose();
- }
- try {
- if (metric != null) {
- metric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id());
- }
- bulkProcessor.add(updateRequest);
- } catch (Exception e) {
- throwable = e;
- closed = true;
- logger.error("bulk add of update request failed: " + e.getMessage(), e);
- }
- return this;
- }
-
- @Override
- public ClientMethods startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds)
- throws IOException {
- if (control == null) {
- return this;
- }
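-        // entering bulk mode registers the index with the BulkControl and sets its refresh_interval
-        // to the start value; stopBulk() later restores the stop value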
- if (!control.isBulk(index) && startRefreshIntervalSeconds > 0L && stopRefreshIntervalSeconds > 0L) {
- control.startBulk(index, startRefreshIntervalSeconds, stopRefreshIntervalSeconds);
- updateIndexSetting(index, "refresh_interval", startRefreshIntervalSeconds + "s");
- }
- return this;
- }
-
- @Override
- public ClientMethods stopBulk(String index) throws IOException {
- if (control == null) {
- return this;
- }
- if (control.isBulk(index)) {
- long secs = control.getStopBulkRefreshIntervals().get(index);
- if (secs > 0L) {
- updateIndexSetting(index, "refresh_interval", secs + "s");
- }
- control.finishBulk(index);
- }
- return this;
- }
-
- @Override
- public ClientMethods flushIngest() {
- if (closed) {
- throwClose();
- }
- logger.debug("flushing bulk processor");
- bulkProcessor.flush();
- return this;
- }
-
- @Override
- public synchronized void shutdown() throws IOException {
- if (closed) {
- throwClose();
- }
- if (bulkProcessor != null) {
- logger.info("closing bulk processor...");
- bulkProcessor.close();
- }
- if (metric != null) {
- logger.info("stopping metric");
- metric.stop();
- }
- if (control != null && control.indices() != null && !control.indices().isEmpty()) {
- logger.info("stopping bulk mode for indices {}...", control.indices());
- for (String index : control.indices()) {
- stopBulk(index);
- }
- }
- }
-
- @Override
- public ClientMethods newIndex(String index) {
- if (closed) {
- throwClose();
- }
- return newIndex(index, null, null);
- }
-
- @Override
- public ClientMethods newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException {
- resetSettings();
- setting(settings);
- mapping(type, mappings);
- return newIndex(index, settings(), this.mappings);
- }
-
- @Override
-    public ClientMethods newIndex(String index, Settings settings, Map<String, String> mappings) {
- if (closed) {
- throwClose();
- }
- if (client() == null) {
- logger.warn("no client for create index");
- return this;
- }
- if (index == null) {
- logger.warn("no index name given to create index");
- return this;
- }
- CreateIndexRequestBuilder createIndexRequestBuilder =
- new CreateIndexRequestBuilder(client(), CreateIndexAction.INSTANCE).setIndex(index);
- if (settings != null) {
- logger.info("found settings {}", settings.toString());
- createIndexRequestBuilder.setSettings(settings);
- }
- if (mappings != null) {
-            for (Map.Entry<String, String> entry : mappings.entrySet()) {
- String type = entry.getKey();
- String mapping = entry.getValue();
- logger.info("found mapping for {}", type);
- createIndexRequestBuilder.addMapping(type, mapping, XContentType.JSON);
- }
- }
- CreateIndexResponse createIndexResponse = createIndexRequestBuilder.execute().actionGet();
- logger.info("index {} created: {}", index, createIndexResponse);
- return this;
- }
-
-
- @Override
-    public ClientMethods newMapping(String index, String type, Map<String, Object> mapping) {
- PutMappingRequestBuilder putMappingRequestBuilder =
- new PutMappingRequestBuilder(client(), PutMappingAction.INSTANCE)
- .setIndices(index)
- .setType(type)
- .setSource(mapping);
- putMappingRequestBuilder.execute().actionGet();
- logger.info("mapping created for index {} and type {}", index, type);
- return this;
- }
-
- @Override
- public ClientMethods deleteIndex(String index) {
- if (closed) {
- throwClose();
- }
- if (client == null) {
- logger.warn("no client");
- return this;
- }
- if (index == null) {
- logger.warn("no index name given to delete index");
- return this;
- }
- DeleteIndexRequestBuilder deleteIndexRequestBuilder =
- new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, index);
- deleteIndexRequestBuilder.execute().actionGet();
- return this;
- }
-
- @Override
- public ClientMethods waitForResponses(String maxWaitTime) throws InterruptedException, ExecutionException {
- if (closed) {
- throwClose();
- }
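-        // wait, in intervals of maxWaitTime, for the bulk processor to finish its outstanding
-        // requests, logging a warning after each elapsed interval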
- long millis = TimeValue.parseTimeValue(maxWaitTime, "millis").getMillis();
- while (!bulkProcessor.awaitClose(millis, TimeUnit.MILLISECONDS)) {
- logger.warn("still waiting for responses");
- }
- return this;
- }
-
- public void waitForRecovery() throws IOException {
- if (client() == null) {
- return;
- }
- client().execute(RecoveryAction.INSTANCE, new RecoveryRequest()).actionGet();
- }
-
- @Override
- public int waitForRecovery(String index) throws IOException {
- if (client() == null) {
- return -1;
- }
- if (index == null) {
-            throw new IOException("unable to wait for recovery, index not set");
- }
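-        // ask the recovery API for the total shard count of the index, then block until the
-        // cluster health reports that many active shards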
- RecoveryResponse response = client().execute(RecoveryAction.INSTANCE, new RecoveryRequest(index)).actionGet();
- int shards = response.getTotalShards();
- client().execute(ClusterHealthAction.INSTANCE, new ClusterHealthRequest(index)
- .waitForActiveShards(shards)).actionGet();
- return shards;
- }
-
- @Override
- public void waitForCluster(String statusString, String timeout) throws IOException {
- if (client() == null) {
- return;
- }
- ClusterHealthStatus status = ClusterHealthStatus.fromString(statusString);
- ClusterHealthResponse healthResponse =
- client().execute(ClusterHealthAction.INSTANCE, new ClusterHealthRequest()
- .waitForStatus(status).timeout(timeout)).actionGet();
- if (healthResponse != null && healthResponse.isTimedOut()) {
- throw new IOException("cluster state is " + healthResponse.getStatus().name()
- + " and not " + status.name()
- + ", from here on, everything will fail!");
- }
- }
-
- public String fetchClusterName() {
- if (client() == null) {
- return null;
- }
- try {
- ClusterStateRequestBuilder clusterStateRequestBuilder =
- new ClusterStateRequestBuilder(client(), ClusterStateAction.INSTANCE).all();
- ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet();
- String name = clusterStateResponse.getClusterName().value();
- int nodeCount = clusterStateResponse.getState().getNodes().getSize();
- return name + " (" + nodeCount + " nodes connected)";
- } catch (ElasticsearchTimeoutException e) {
- logger.warn(e.getMessage(), e);
- return "TIMEOUT";
- } catch (NoNodeAvailableException e) {
- logger.warn(e.getMessage(), e);
- return "DISCONNECTED";
- } catch (Exception e) {
- logger.warn(e.getMessage(), e);
- return "[" + e.getMessage() + "]";
- }
- }
-
- public String healthColor() {
- if (client() == null) {
- return null;
- }
- try {
- ClusterHealthResponse healthResponse =
- client().execute(ClusterHealthAction.INSTANCE,
- new ClusterHealthRequest().timeout(TimeValue.timeValueSeconds(30))).actionGet();
- ClusterHealthStatus status = healthResponse.getStatus();
- return status.name();
- } catch (ElasticsearchTimeoutException e) {
- logger.warn(e.getMessage(), e);
- return "TIMEOUT";
- } catch (NoNodeAvailableException e) {
- logger.warn(e.getMessage(), e);
- return "DISCONNECTED";
- } catch (Exception e) {
- logger.warn(e.getMessage(), e);
- return "[" + e.getMessage() + "]";
- }
- }
-
- public int updateReplicaLevel(String index, int level) throws IOException {
-        waitForCluster("YELLOW", "30s");
- updateIndexSetting(index, "number_of_replicas", level);
- return waitForRecovery(index);
- }
-
- public void flushIndex(String index) {
- if (client() == null) {
- return;
- }
- if (index != null) {
- client().execute(FlushAction.INSTANCE, new FlushRequest(index)).actionGet();
- }
- }
-
- public void refreshIndex(String index) {
- if (client() == null) {
- return;
- }
- if (index != null) {
- client().execute(RefreshAction.INSTANCE, new RefreshRequest(index)).actionGet();
- }
- }
-
- public void putMapping(String index) {
- if (client() == null) {
- return;
- }
- if (!mappings.isEmpty()) {
-            for (Map.Entry<String, String> me : mappings.entrySet()) {
- client().execute(PutMappingAction.INSTANCE,
- new PutMappingRequest(index).type(me.getKey()).source(me.getValue(), XContentType.JSON)).actionGet();
- }
- }
- }
-
- public String resolveAlias(String alias) {
- if (client() == null) {
- return alias;
- }
- GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE);
- GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet();
- if (!getAliasesResponse.getAliases().isEmpty()) {
- return getAliasesResponse.getAliases().keys().iterator().next().value;
- }
- return alias;
- }
-
- public String resolveMostRecentIndex(String alias) {
- if (client() == null) {
- return alias;
- }
- if (alias == null) {
- return null;
- }
- GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE);
- GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet();
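-        // index names consisting of the alias followed by a numeric (e.g. timestamp) suffix are
-        // collected in reverse order, so the first entry is the most recent one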
- Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
-        Set<String> indices = new TreeSet<>(Collections.reverseOrder());
-        for (ObjectCursor<String> indexName : getAliasesResponse.getAliases().keys()) {
- Matcher m = pattern.matcher(indexName.value);
- if (m.matches() && alias.equals(m.group(1))) {
- indices.add(indexName.value);
- }
- }
- return indices.isEmpty() ? alias : indices.iterator().next();
- }
-
-    public Map<String, String> getAliasFilters(String alias) {
- GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE);
- return getFilters(getAliasesRequestBuilder.setIndices(resolveAlias(alias)).execute().actionGet());
- }
-
-    public Map<String, String> getIndexFilters(String index) {
- GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE);
- return getFilters(getAliasesRequestBuilder.setIndices(index).execute().actionGet());
- }
-
-
- @Override
-    public void switchAliases(String index, String concreteIndex, List<String> extraAliases) {
- switchAliases(index, concreteIndex, extraAliases, null);
- }
-
- @Override
-    public void switchAliases(String index, String concreteIndex,
-                              List<String> extraAliases, IndexAliasAdder adder) {
- if (client() == null) {
- return;
- }
- if (index.equals(concreteIndex)) {
- return;
- }
- // two situations: 1. there is a new alias 2. there is already an old index with the alias
- String oldIndex = resolveAlias(index);
-        final Map<String, String> oldFilterMap = oldIndex.equals(index) ? null : getIndexFilters(oldIndex);
-        final List<String> newAliases = new LinkedList<>();
-        final List<String> switchAliases = new LinkedList<>();
- IndicesAliasesRequestBuilder requestBuilder = new IndicesAliasesRequestBuilder(client(), IndicesAliasesAction.INSTANCE);
- if (oldFilterMap == null || !oldFilterMap.containsKey(index)) {
- // never apply a filter for trunk index name
- requestBuilder.addAlias(concreteIndex, index);
- newAliases.add(index);
- }
- // switch existing aliases
- if (oldFilterMap != null) {
-            for (Map.Entry<String, String> entry : oldFilterMap.entrySet()) {
- String alias = entry.getKey();
- String filter = entry.getValue();
- requestBuilder.removeAlias(oldIndex, alias);
- if (filter != null) {
- requestBuilder.addAlias(concreteIndex, alias, filter);
- } else {
- requestBuilder.addAlias(concreteIndex, alias);
- }
- switchAliases.add(alias);
- }
- }
- // a list of aliases that should be added, check if new or old
- if (extraAliases != null) {
- for (String extraAlias : extraAliases) {
- if (oldFilterMap == null || !oldFilterMap.containsKey(extraAlias)) {
- // index alias adder only active on extra aliases, and if alias is new
- if (adder != null) {
- adder.addIndexAlias(requestBuilder, concreteIndex, extraAlias);
- } else {
- requestBuilder.addAlias(concreteIndex, extraAlias);
- }
- newAliases.add(extraAlias);
- } else {
- String filter = oldFilterMap.get(extraAlias);
- requestBuilder.removeAlias(oldIndex, extraAlias);
- if (filter != null) {
- requestBuilder.addAlias(concreteIndex, extraAlias, filter);
- } else {
- requestBuilder.addAlias(concreteIndex, extraAlias);
- }
- switchAliases.add(extraAlias);
- }
- }
- }
- if (!newAliases.isEmpty() || !switchAliases.isEmpty()) {
- logger.info("new aliases = {}, switch aliases = {}", newAliases, switchAliases);
- requestBuilder.execute().actionGet();
- }
- }
-
- @Override
- public void performRetentionPolicy(String index, String concreteIndex, int timestampdiff, int mintokeep) {
- if (client() == null) {
- return;
- }
- if (index.equals(concreteIndex)) {
- return;
- }
- GetIndexRequestBuilder getIndexRequestBuilder = new GetIndexRequestBuilder(client(), GetIndexAction.INSTANCE);
- GetIndexResponse getIndexResponse = getIndexRequestBuilder.execute().actionGet();
- Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
-        Set<String> indices = new TreeSet<>();
- logger.info("{} indices", getIndexResponse.getIndices().length);
- for (String s : getIndexResponse.getIndices()) {
- Matcher m = pattern.matcher(s);
- if (m.matches() && index.equals(m.group(1)) && !s.equals(concreteIndex)) {
- indices.add(s);
- }
- }
- if (indices.isEmpty()) {
- logger.info("no indices found, retention policy skipped");
- return;
- }
- if (mintokeep > 0 && indices.size() <= mintokeep) {
- logger.info("{} indices found, not enough for retention policy ({}), skipped",
- indices.size(), mintokeep);
- return;
- } else {
- logger.info("candidates for deletion = {}", indices);
- }
-        List<String> indicesToDelete = new ArrayList<>();
- // our index
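-        // compare the numeric suffix of each candidate with the suffix of the concrete index and
-        // mark sufficiently old candidates for deletion, taking mintokeep into account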
- Matcher m1 = pattern.matcher(concreteIndex);
- if (m1.matches()) {
- Integer i1 = Integer.parseInt(m1.group(2));
- for (String s : indices) {
- Matcher m2 = pattern.matcher(s);
- if (m2.matches()) {
- Integer i2 = Integer.parseInt(m2.group(2));
- int kept = indices.size() - indicesToDelete.size();
- if ((timestampdiff == 0 || (timestampdiff > 0 && i1 - i2 > timestampdiff)) && mintokeep <= kept) {
- indicesToDelete.add(s);
- }
- }
- }
- }
- logger.info("indices to delete = {}", indicesToDelete);
- if (indicesToDelete.isEmpty()) {
- logger.info("not enough indices found to delete, retention policy complete");
- return;
- }
- String[] s = indicesToDelete.toArray(new String[indicesToDelete.size()]);
- DeleteIndexRequestBuilder requestBuilder = new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, s);
- DeleteIndexResponse response = requestBuilder.execute().actionGet();
- if (!response.isAcknowledged()) {
- logger.warn("retention delete index operation was not acknowledged");
- }
- }
-
- @Override
- public Long mostRecentDocument(String index, String timestampfieldname) {
- if (client() == null) {
- return null;
- }
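-        // search the index sorted by the timestamp field in descending order and return that
-        // field's value from the single top hit (0L if the field is missing, null if there is no hit)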
- SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client(), SearchAction.INSTANCE);
-        SortBuilder<?> sort = SortBuilders.fieldSort(timestampfieldname).order(SortOrder.DESC);
- SearchResponse searchResponse = searchRequestBuilder.setIndices(index)
- .addStoredField(timestampfieldname)
- .setSize(1)
- .addSort(sort)
- .execute().actionGet();
- if (searchResponse.getHits().getHits().length == 1) {
- SearchHit hit = searchResponse.getHits().getHits()[0];
- if (hit.getFields().get(timestampfieldname) != null) {
- return hit.getFields().get(timestampfieldname).getValue();
- } else {
- return 0L;
- }
- }
- return null;
- }
-
- @Override
- public boolean hasThrowable() {
- return throwable != null;
- }
-
- @Override
- public Throwable getThrowable() {
- return throwable;
- }
-
- protected static void throwClose() {
- throw new ElasticsearchException("client is closed");
- }
-
-
- protected void updateIndexSetting(String index, String key, Object value) throws IOException {
- if (client() == null) {
- return;
- }
- if (index == null) {
- throw new IOException("no index name given");
- }
- if (key == null) {
- throw new IOException("no key given");
- }
- if (value == null) {
- throw new IOException("no value given");
- }
- Settings.Builder updateSettingsBuilder = Settings.builder();
- updateSettingsBuilder.put(key, value.toString());
- UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(index)
- .settings(updateSettingsBuilder);
- client().execute(UpdateSettingsAction.INSTANCE, updateSettingsRequest).actionGet();
- }
-
-    private Map<String, String> getFilters(GetAliasesResponse getAliasesResponse) {
-        Map<String, String> result = new HashMap<>();
-        for (ObjectObjectCursor<String, List<AliasMetaData>> object : getAliasesResponse.getAliases()) {
-            List<AliasMetaData> aliasMetaDataList = object.value;
- for (AliasMetaData aliasMetaData : aliasMetaDataList) {
- if (aliasMetaData.filteringRequired()) {
- String metaData = new String(aliasMetaData.getFilter().uncompressed(), StandardCharsets.UTF_8);
- result.put(aliasMetaData.alias(), metaData);
- } else {
- result.put(aliasMetaData.alias(), null);
- }
- }
- }
- return result;
- }
-
- private Settings findSettings() {
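-        // fall back to localhost; if the local host name can be resolved, use it and set the
-        // default transport port 9300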
- Settings.Builder settingsBuilder = Settings.builder();
- settingsBuilder.put("host", "localhost");
- try {
- String hostname = NetworkUtils.getLocalAddress().getHostName();
- logger.debug("the hostname is {}", hostname);
- settingsBuilder.put("host", hostname)
- .put("port", 9300);
- } catch (Exception e) {
- logger.warn(e.getMessage(), e);
- }
- return settingsBuilder.build();
- }
-}
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/BulkControl.java b/common/src/main/java/org/xbib/elasticsearch/client/BulkControl.java
deleted file mode 100644
index fc9c1fd..0000000
--- a/common/src/main/java/org/xbib/elasticsearch/client/BulkControl.java
+++ /dev/null
@@ -1,19 +0,0 @@
-package org.xbib.elasticsearch.client;
-
-import java.util.Map;
-import java.util.Set;
-
-public interface BulkControl {
-
- void startBulk(String indexName, long startRefreshInterval, long stopRefreshInterval);
-
- boolean isBulk(String indexName);
-
- void finishBulk(String indexName);
-
-    Set<String> indices();
-
-    Map<String, Long> getStartBulkRefreshIntervals();
-
-    Map<String, Long> getStopBulkRefreshIntervals();
-}
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/ClientBuilder.java b/common/src/main/java/org/xbib/elasticsearch/client/ClientBuilder.java
deleted file mode 100644
index 2865266..0000000
--- a/common/src/main/java/org/xbib/elasticsearch/client/ClientBuilder.java
+++ /dev/null
@@ -1,100 +0,0 @@
-package org.xbib.elasticsearch.client;
-
-import org.elasticsearch.client.Client;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.common.unit.TimeValue;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.ServiceLoader;
-
-public final class ClientBuilder implements Parameters {
-
- private final Settings.Builder settingsBuilder;
-
-    private Map<Class<? extends ClientMethods>, ClientMethods> clientMethodsMap;
-
- private BulkMetric metric;
-
- private BulkControl control;
-
- public ClientBuilder() {
- this(Thread.currentThread().getContextClassLoader());
- }
-
- public ClientBuilder(ClassLoader classLoader) {
- this.settingsBuilder = Settings.builder();
- //settingsBuilder.put("node.name", "clientnode");
- this.clientMethodsMap = new HashMap<>();
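-        // ClientMethods implementations are discovered via the JDK ServiceLoader mechanism and
-        // registered by their concrete class, so getClient() can look them up by type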
-        ServiceLoader<ClientMethods> serviceLoader = ServiceLoader.load(ClientMethods.class,
-                classLoader != null ? classLoader : Thread.currentThread().getContextClassLoader());
- for (ClientMethods clientMethods : serviceLoader) {
- clientMethodsMap.put(clientMethods.getClass(), clientMethods);
- }
- }
-
- public static ClientBuilder builder() {
- return new ClientBuilder();
- }
-
- public ClientBuilder put(String key, String value) {
- settingsBuilder.put(key, value);
- return this;
- }
-
- public ClientBuilder put(String key, Integer value) {
- settingsBuilder.put(key, value);
- return this;
- }
-
- public ClientBuilder put(String key, Long value) {
- settingsBuilder.put(key, value);
- return this;
- }
-
- public ClientBuilder put(String key, Double value) {
- settingsBuilder.put(key, value);
- return this;
- }
-
- public ClientBuilder put(String key, ByteSizeValue value) {
- settingsBuilder.put(key, value);
- return this;
- }
-
- public ClientBuilder put(String key, TimeValue value) {
- settingsBuilder.put(key, value);
- return this;
- }
-
- public ClientBuilder put(Settings settings) {
- settingsBuilder.put(settings);
- return this;
- }
-
- public ClientBuilder setMetric(BulkMetric metric) {
- this.metric = metric;
- return this;
- }
-
- public ClientBuilder setControl(BulkControl control) {
- this.control = control;
- return this;
- }
-
- public <C extends ClientMethods> C getClient(Class<C> clientClass) {
- return getClient(null, clientClass);
- }
-
- @SuppressWarnings("unchecked")
- public <C extends ClientMethods> C getClient(Client client, Class<C> clientClass) {
- Settings settings = settingsBuilder.build();
- return (C) clientMethodsMap.get(clientClass)
- .maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST))
- .maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS))
- .maxVolumePerRequest(settings.get(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST))
- .flushIngestInterval(settings.get(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL))
- .init(client, settings, metric, control);
- }
-}
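For orientation, the removed builder was typically driven like the sketch below. This is a minimal, non-authoritative example: the BulkTransportClient class name is an assumption standing in for whichever ClientMethods implementation is registered via ServiceLoader, and the settings keys simply flow into the Settings.Builder shown above.

    // Sketch of the removed builder API; BulkTransportClient is an assumed implementation class.
    ClientMethods ingest = ClientBuilder.builder()
            .put("cluster.name", "elasticsearch")
            .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, 1000)   // constants inherited from Parameters
            .put(ClientBuilder.FLUSH_INTERVAL, "60s")
            .setControl(new SimpleBulkControl())
            .getClient(BulkTransportClient.class);
    ingest.newIndex("example");
    ingest.index("example", "doc", "1", false, "{\"key\":\"value\"}");
    ingest.flushIngest();
    ingest.waitForResponses("30s");
    ingest.shutdown();
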
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/ClientMethods.java b/common/src/main/java/org/xbib/elasticsearch/client/ClientMethods.java
deleted file mode 100644
index 4057994..0000000
--- a/common/src/main/java/org/xbib/elasticsearch/client/ClientMethods.java
+++ /dev/null
@@ -1,402 +0,0 @@
-package org.xbib.elasticsearch.client;
-
-import org.elasticsearch.action.delete.DeleteRequest;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.update.UpdateRequest;
-import org.elasticsearch.client.ElasticsearchClient;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.settings.Settings;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-
-/**
- * Interface for providing convenient administrative methods for ingesting data into Elasticsearch.
- */
-public interface ClientMethods extends Parameters {
-
- ClientMethods init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control);
-
- /**
- * Return Elasticsearch client.
- *
- * @return Elasticsearch client
- */
- ElasticsearchClient client();
-
- /**
- * Bulked index request. Each request will be added to a queue for bulking requests.
- * Submitting request will be done when bulk limits are exceeded.
- *
- * @param index the index
- * @param type the type
- * @param id the id
- * @param create true if document must be created
- * @param source the source
- * @return this
- */
- ClientMethods index(String index, String type, String id, boolean create, BytesReference source);
-
- /**
- * Bulked index request. Each request will be added to a queue for bulking requests.
- * Submitting request will be done when bulk limits are exceeded.
- *
- * @param index the index
- * @param type the type
- * @param id the id
- * @param create true if document must be created
- * @param source the source
- * @return this
- */
- ClientMethods index(String index, String type, String id, boolean create, String source);
-
- /**
- * Bulked index request. Each request will be added to a queue for bulking requests.
- * Submitting request will be done when bulk limits are exceeded.
- *
- * @param indexRequest the index request to add
- * @return this ingest
- */
- ClientMethods indexRequest(IndexRequest indexRequest);
-
- /**
- * Delete document.
- *
- * @param index the index
- * @param type the type
- * @param id the id
- * @return this ingest
- */
- ClientMethods delete(String index, String type, String id);
-
- /**
- * Bulked delete request. Each request will be added to a queue for bulking requests.
- * Submitting request will be done when bulk limits are exceeded.
- *
- * @param deleteRequest the delete request to add
- * @return this ingest
- */
- ClientMethods deleteRequest(DeleteRequest deleteRequest);
-
- /**
- * Bulked update request. Each request will be added to a queue for bulking requests.
- * Submitting request will be done when bulk limits are exceeded.
- * Note that updates only work correctly when all operations between nodes are synchronized.
- *
- * @param index the index
- * @param type the type
- * @param id the id
- * @param source the source
- * @return this
- */
- ClientMethods update(String index, String type, String id, BytesReference source);
-
- /**
- * Bulked update request. Each request will be added to a queue for bulking requests.
- * Submitting request will be done when bulk limits are exceeded.
- * Note that updates only work correctly when all operations between nodes are synchronized.
- *
- * @param index the index
- * @param type the type
- * @param id the id
- * @param source the source
- * @return this
- */
- ClientMethods update(String index, String type, String id, String source);
-
- /**
- * Bulked update request. Each request will be added to a queue for bulking requests.
- * Submitting request will be done when bulk limits are exceeded.
- * Note that updates only work correctly when all operations between nodes are synchronized.
- *
- * @param updateRequest the update request to add
- * @return this ingest
- */
- ClientMethods updateRequest(UpdateRequest updateRequest);
-
- /**
- * Set the maximum number of actions per request.
- *
- * @param maxActionsPerRequest maximum number of actions per request
- * @return this ingest
- */
- ClientMethods maxActionsPerRequest(int maxActionsPerRequest);
-
- /**
- * Set the maximum number of concurrent requests.
- *
- * @param maxConcurentRequests maximum number of concurrent ingest requests
- * @return this Ingest
- */
- ClientMethods maxConcurrentRequests(int maxConcurentRequests);
-
- /**
- * Set the maximum volume for request before flush.
- *
- * @param maxVolume maximum volume
- * @return this ingest
- */
- ClientMethods maxVolumePerRequest(String maxVolume);
-
- /**
- * Set the flush interval for automatic flushing outstanding ingest requests.
- *
- * @param flushInterval the flush interval, default is 30 seconds
- * @return this ingest
- */
- ClientMethods flushIngestInterval(String flushInterval);
-
- /**
- * Set mapping.
- *
- * @param type mapping type
- * @param in mapping definition as input stream
- * @throws IOException if mapping could not be added
- */
- void mapping(String type, InputStream in) throws IOException;
-
- /**
- * Set mapping.
- *
- * @param type mapping type
- * @param mapping mapping definition as input stream
- * @throws IOException if mapping could not be added
- */
- void mapping(String type, String mapping) throws IOException;
-
- /**
- * Put mapping.
- *
- * @param index index
- */
- void putMapping(String index);
-
- /**
- * Create a new index.
- *
- * @param index index
- * @return this ingest
- */
- ClientMethods newIndex(String index);
-
- /**
- * Create a new index.
- *
- * @param index index
- * @param type type
- * @param settings settings
- * @param mappings mappings
- * @return this ingest
- * @throws IOException if new index creation fails
- */
- ClientMethods newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException;
-
- /**
- * Create a new index.
- *
- * @param index index
- * @param settings settings
- * @param mappings mappings
- * @return this ingest
- */
- ClientMethods newIndex(String index, Settings settings, Map mappings);
-
- /**
- * Create new mapping.
- *
- * @param index index
- * @param type index type
- * @param mapping mapping
- * @return this ingest
- */
- ClientMethods newMapping(String index, String type, Map mapping);
-
- /**
- * Delete index.
- *
- * @param index index
- * @return this ingest
- */
- ClientMethods deleteIndex(String index);
-
- /**
- * Start bulk mode.
- *
- * @param index index
- * @param startRefreshIntervalSeconds refresh interval before bulk
- * @param stopRefreshIntervalSeconds refresh interval after bulk
- * @return this ingest
- * @throws IOException if bulk could not be started
- */
- ClientMethods startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds) throws IOException;
-
- /**
- * Stops bulk mode.
- *
- * @param index index
- * @return this Ingest
- * @throws IOException if bulk could not be stopped
- */
- ClientMethods stopBulk(String index) throws IOException;
-
- /**
- * Flush ingest, move all pending documents to the cluster.
- *
- * @return this
- */
- ClientMethods flushIngest();
-
- /**
- * Wait for all outstanding responses.
- *
- * @param maxWaitTime maximum wait time
- * @return this ingest
- * @throws InterruptedException if wait is interrupted
- * @throws ExecutionException if execution failed
- */
- ClientMethods waitForResponses(String maxWaitTime) throws InterruptedException, ExecutionException;
-
- /**
- * Refresh the index.
- *
- * @param index index
- */
- void refreshIndex(String index);
-
- /**
- * Flush the index.
- *
- * @param index index
- */
- void flushIndex(String index);
-
- /**
- * Update replica level.
- *
- * @param index index
- * @param level the replica level
- * @return number of shards after updating replica level
- * @throws IOException if replica could not be updated
- */
- int updateReplicaLevel(String index, int level) throws IOException;
-
- /**
- * Wait for cluster being healthy.
- *
- * @param healthColor cluster health color to wait for
- * @param timeValue time value
- * @throws IOException if wait failed
- */
- void waitForCluster(String healthColor, String timeValue) throws IOException;
-
- /**
- * Get current health color.
- *
- * @return the cluster health color
- */
- String healthColor();
-
- /**
- * Wait for index recovery (after replica change).
- *
- * @param index index
- * @return number of shards found
- * @throws IOException if wait failed
- */
- int waitForRecovery(String index) throws IOException;
-
- /**
- * Resolve alias.
- *
- * @param alias the alias
- * @return one index name behind the alias or the alias if there is no index
- */
- String resolveAlias(String alias);
-
- /**
- * Resolve alias to all connected indices, sort index names with most recent timestamp on top, return this index
- * name.
- *
- * @param alias the alias
- * @return the most recent index name pointing to the alias
- */
- String resolveMostRecentIndex(String alias);
-
- /**
- * Get all alias filters.
- *
- * @param index index
- * @return map of alias filters
- */
- Map<String, String> getAliasFilters(String index);
-
- /**
- * Switch aliases from one index to another.
- *
- * @param index the index name
- * @param concreteIndex the index name with timestamp
- * @param extraAliases a list of names that should be set as index aliases
- */
- void switchAliases(String index, String concreteIndex, List<String> extraAliases);
-
- /**
- * Switch aliases from one index to another.
- *
- * @param index the index name
- * @param concreteIndex the index name with timestamp
- * @param extraAliases a list of names that should be set as index aliases
- * @param adder an adder method to create alias term queries
- */
- void switchAliases(String index, String concreteIndex, List<String> extraAliases, IndexAliasAdder adder);
-
- /**
- * Retention policy for an index. All indices before timestampdiff should be deleted,
- * but mintokeep indices must be kept.
- *
- * @param index index name
- * @param concreteIndex index name with timestamp
- * @param timestampdiff timestamp delta (for index timestamps)
- * @param mintokeep minimum number of indices to keep
- */
- void performRetentionPolicy(String index, String concreteIndex, int timestampdiff, int mintokeep);
-
- /**
- * Find the timestamp of the most recently indexed document in the index.
- *
- * @param index the index name
- * @param timestampfieldname the timestamp field name
- * @return millis UTC millis of the most recent document
- * @throws IOException if the most recent document cannot be found
- */
- Long mostRecentDocument(String index, String timestampfieldname) throws IOException;
-
- /**
- * Get metric.
- *
- * @return metric
- */
- BulkMetric getMetric();
-
- /**
- * Returns true if a throwable exists.
- *
- * @return true if a Throwable exists
- */
- boolean hasThrowable();
-
- /**
- * Return last throwable if exists.
- *
- * @return last throwable
- */
- Throwable getThrowable();
-
- /**
- * Shut down the ingest processing.
- * @throws IOException if shutdown fails
- */
- void shutdown() throws IOException;
-}
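The startBulk/stopBulk pair documented above is meant to bracket a heavy load by relaxing the index refresh interval and restoring it afterwards. A minimal sketch of that pattern, assuming `ingest` is an already initialized ClientMethods instance:

    ingest.startBulk("example", -1L, 1L);                // -1 switches refresh off during the load
    for (int i = 0; i < 10000; i++) {
        ingest.index("example", "doc", String.valueOf(i), false, "{\"n\":" + i + "}");
    }
    ingest.flushIngest();                                // push out queued actions
    ingest.waitForResponses("60s");                      // block until outstanding bulk requests return
    ingest.stopBulk("example");                          // restores the refresh interval
    ingest.refreshIndex("example");
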
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/IndexAliasAdder.java b/common/src/main/java/org/xbib/elasticsearch/client/IndexAliasAdder.java
deleted file mode 100644
index 4c6fbb8..0000000
--- a/common/src/main/java/org/xbib/elasticsearch/client/IndexAliasAdder.java
+++ /dev/null
@@ -1,9 +0,0 @@
-package org.xbib.elasticsearch.client;
-
-import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
-
-@FunctionalInterface
-public interface IndexAliasAdder {
-
- void addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias);
-}
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/Parameters.java b/common/src/main/java/org/xbib/elasticsearch/client/Parameters.java
deleted file mode 100644
index 2146977..0000000
--- a/common/src/main/java/org/xbib/elasticsearch/client/Parameters.java
+++ /dev/null
@@ -1,20 +0,0 @@
-package org.xbib.elasticsearch.client;
-
-public interface Parameters {
-
- int DEFAULT_MAX_ACTIONS_PER_REQUEST = 1000;
-
- int DEFAULT_MAX_CONCURRENT_REQUESTS = Runtime.getRuntime().availableProcessors();
-
- String DEFAULT_MAX_VOLUME_PER_REQUEST = "10mb";
-
- String DEFAULT_FLUSH_INTERVAL = "30s";
-
- String MAX_ACTIONS_PER_REQUEST = "max_actions_per_request";
-
- String MAX_CONCURRENT_REQUESTS = "max_concurrent_requests";
-
- String MAX_VOLUME_PER_REQUEST = "max_volume_per_request";
-
- String FLUSH_INTERVAL = "flush_interval";
-}
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkControl.java b/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkControl.java
deleted file mode 100644
index c12ecc1..0000000
--- a/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkControl.java
+++ /dev/null
@@ -1,52 +0,0 @@
-package org.xbib.elasticsearch.client;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-public class SimpleBulkControl implements BulkControl {
-
- private final Set<String> indexNames = new HashSet<>();
-
- private final Map<String, Long> startBulkRefreshIntervals = new HashMap<>();
-
- private final Map<String, Long> stopBulkRefreshIntervals = new HashMap<>();
-
- @Override
- public void startBulk(String indexName, long startRefreshInterval, long stopRefreshInterval) {
- synchronized (indexNames) {
- indexNames.add(indexName);
- startBulkRefreshIntervals.put(indexName, startRefreshInterval);
- stopBulkRefreshIntervals.put(indexName, stopRefreshInterval);
- }
- }
-
- @Override
- public boolean isBulk(String indexName) {
- return indexNames.contains(indexName);
- }
-
- @Override
- public void finishBulk(String indexName) {
- synchronized (indexNames) {
- indexNames.remove(indexName);
- }
- }
-
- @Override
- public Set<String> indices() {
- return indexNames;
- }
-
- @Override
- public Map<String, Long> getStartBulkRefreshIntervals() {
- return startBulkRefreshIntervals;
- }
-
- @Override
- public Map<String, Long> getStopBulkRefreshIntervals() {
- return stopBulkRefreshIntervals;
- }
-
-}
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/package-info.java b/common/src/main/java/org/xbib/elasticsearch/client/package-info.java
deleted file mode 100644
index 941a500..0000000
--- a/common/src/main/java/org/xbib/elasticsearch/client/package-info.java
+++ /dev/null
@@ -1,4 +0,0 @@
-/**
- * Classes for Elasticsearch client.
- */
-package org.xbib.elasticsearch.client;
diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/SearchTests.java b/common/src/test/java/org/xbib/elasticsearch/client/common/SearchTests.java
deleted file mode 100644
index bd1c16d..0000000
--- a/common/src/test/java/org/xbib/elasticsearch/client/common/SearchTests.java
+++ /dev/null
@@ -1,60 +0,0 @@
-package org.xbib.elasticsearch.client.common;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.elasticsearch.action.bulk.BulkAction;
-import org.elasticsearch.action.bulk.BulkRequestBuilder;
-import org.elasticsearch.action.search.SearchRequestBuilder;
-import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.client.Requests;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.search.sort.SortOrder;
-import org.elasticsearch.testframework.ESSingleNodeTestCase;
-
-public class SearchTests extends ESSingleNodeTestCase {
-
- private static final Logger logger = LogManager.getLogger(SearchTests.class.getName());
-
- public void testSearch() throws Exception {
- long t0 = System.currentTimeMillis();
- BulkRequestBuilder builder = new BulkRequestBuilder(client(), BulkAction.INSTANCE);
- for (int i = 0; i < 1000; i++) {
- builder.add(Requests.indexRequest()
- .index("pages").type("row")
- .source(XContentFactory.jsonBuilder()
- .startObject()
- .field("user1", "kimchy")
- .field("user2", "kimchy")
- .field("user3", "kimchy")
- .field("user4", "kimchy")
- .field("user5", "kimchy")
- .field("user6", "kimchy")
- .field("user7", "kimchy")
- .field("user8", "kimchy")
- .field("user9", "kimchy")
- .field("rowcount", i)
- .field("rs", 1234)
- .endObject()));
- }
- client().bulk(builder.request()).actionGet();
- client().admin().indices().refresh(Requests.refreshRequest()).actionGet();
- long t1 = System.currentTimeMillis();
- logger.info("t1-t0 = {}", t1 - t0);
- for (int i = 0; i < 100; i++) {
- t1 = System.currentTimeMillis();
- QueryBuilder queryStringBuilder =
- QueryBuilders.queryStringQuery("rs:" + 1234);
- SearchRequestBuilder requestBuilder = client().prepareSearch()
- .setIndices("pages")
- .setTypes("row")
- .setQuery(queryStringBuilder)
- .addSort("rowcount", SortOrder.DESC)
- .setFrom(i * 10).setSize(10);
- SearchResponse response = requestBuilder.execute().actionGet();
- long t2 = System.currentTimeMillis();
- logger.info("t2-t1 = {}", t2 - t1);
- }
- }
-}
diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/WildcardTests.java b/common/src/test/java/org/xbib/elasticsearch/client/common/WildcardTests.java
deleted file mode 100644
index aeade4b..0000000
--- a/common/src/test/java/org/xbib/elasticsearch/client/common/WildcardTests.java
+++ /dev/null
@@ -1,51 +0,0 @@
-package org.xbib.elasticsearch.client.common;
-
-import org.elasticsearch.action.support.WriteRequest;
-import org.elasticsearch.client.Requests;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.testframework.ESSingleNodeTestCase;
-
-import java.io.IOException;
-
-public class WildcardTests extends ESSingleNodeTestCase {
-
- public void testWildcard() throws Exception {
- index("1", "010");
- index("2", "0*0");
- // exact
- validateCount(QueryBuilders.queryStringQuery("010").defaultField("field"), 1);
- validateCount(QueryBuilders.queryStringQuery("0\\*0").defaultField("field"), 1);
- // pattern
- validateCount(QueryBuilders.queryStringQuery("0*0").defaultField("field"), 1); // 2?
- validateCount(QueryBuilders.queryStringQuery("0?0").defaultField("field"), 1); // 2?
- validateCount(QueryBuilders.queryStringQuery("0**0").defaultField("field"), 1); // 2?
- validateCount(QueryBuilders.queryStringQuery("0??0").defaultField("field"), 0);
- validateCount(QueryBuilders.queryStringQuery("*10").defaultField("field"), 1);
- validateCount(QueryBuilders.queryStringQuery("*1*").defaultField("field"), 1);
- validateCount(QueryBuilders.queryStringQuery("*\\*0").defaultField("field"), 0); // 1?
- validateCount(QueryBuilders.queryStringQuery("*\\**").defaultField("field"), 0); // 1?
- }
-
- private void index(String id, String fieldValue) throws IOException {
- client().index(Requests.indexRequest()
- .index("index").type("type").id(id)
- .source(XContentFactory.jsonBuilder().startObject().field("field", fieldValue).endObject())
- .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE))
- .actionGet();
- }
-
- private void validateCount(QueryBuilder queryBuilder, long expectedHits) {
- final long actualHits = count(queryBuilder);
- if (actualHits != expectedHits) {
- throw new RuntimeException("actualHits=" + actualHits + ", expectedHits=" + expectedHits);
- }
- }
-
- private long count(QueryBuilder queryBuilder) {
- return client().prepareSearch("index").setTypes("type")
- .setQuery(queryBuilder)
- .execute().actionGet().getHits().getTotalHits();
- }
-}
diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/package-info.java b/common/src/test/java/org/xbib/elasticsearch/client/common/package-info.java
deleted file mode 100644
index af3209f..0000000
--- a/common/src/test/java/org/xbib/elasticsearch/client/common/package-info.java
+++ /dev/null
@@ -1,4 +0,0 @@
-/**
- * Classes to test Elasticsearch clients.
- */
-package org.xbib.elasticsearch.client.common;
diff --git a/elx-api/build.gradle b/elx-api/build.gradle
new file mode 100644
index 0000000..6ef61b9
--- /dev/null
+++ b/elx-api/build.gradle
@@ -0,0 +1,4 @@
+dependencies {
+ compile "org.xbib:metrics:${project.property('xbib-metrics.version')}"
+ compile "org.xbib.elasticsearch:elasticsearch:${rootProject.property('elasticsearch-server.version')}"
+}
\ No newline at end of file
diff --git a/elx-api/src/main/java/org/xbib/elx/api/BulkController.java b/elx-api/src/main/java/org/xbib/elx/api/BulkController.java
new file mode 100644
index 0000000..69906ca
--- /dev/null
+++ b/elx-api/src/main/java/org/xbib/elx/api/BulkController.java
@@ -0,0 +1,36 @@
+package org.xbib.elx.api;
+
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.common.settings.Settings;
+
+import java.io.Closeable;
+import java.io.Flushable;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+public interface BulkController extends Closeable, Flushable {
+
+ void init(Settings settings);
+
+ Throwable getLastBulkError();
+
+ void startBulkMode(IndexDefinition indexDefinition) throws IOException;
+
+ void startBulkMode(String indexName, long startRefreshIntervalInSeconds,
+ long stopRefreshIntervalInSeconds) throws IOException;
+
+ void index(IndexRequest indexRequest);
+
+ void delete(DeleteRequest deleteRequest);
+
+ void update(UpdateRequest updateRequest);
+
+ boolean waitForResponses(long timeout, TimeUnit timeUnit);
+
+ void stopBulkMode(IndexDefinition indexDefinition) throws IOException;
+
+ void stopBulkMode(String index, long timeout, TimeUnit timeUnit) throws IOException;
+
+}
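A sketch of how this controller is wired and driven; it mirrors the AbstractExtendedClient code later in this change. DefaultBulkMetric and DefaultBulkController are the elx-common implementations referenced there, while `extendedClient` and `settings` are assumed to exist in scope.

    BulkMetric metric = new DefaultBulkMetric();
    metric.init(settings);
    BulkController controller = new DefaultBulkController(extendedClient, metric);
    controller.init(settings);
    controller.startBulkMode("example_index", -1L, 1L);
    controller.index(new IndexRequest("example_index", "doc", "1")
            .source("{\"key\":\"value\"}", XContentType.JSON));
    controller.waitForResponses(30L, TimeUnit.SECONDS);
    controller.stopBulkMode("example_index", 30L, TimeUnit.SECONDS);
    controller.close();
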
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/BulkMetric.java b/elx-api/src/main/java/org/xbib/elx/api/BulkMetric.java
similarity index 64%
rename from common/src/main/java/org/xbib/elasticsearch/client/BulkMetric.java
rename to elx-api/src/main/java/org/xbib/elx/api/BulkMetric.java
index 8ed03bb..3a406fb 100644
--- a/common/src/main/java/org/xbib/elasticsearch/client/BulkMetric.java
+++ b/elx-api/src/main/java/org/xbib/elx/api/BulkMetric.java
@@ -1,9 +1,14 @@
-package org.xbib.elasticsearch.client;
+package org.xbib.elx.api;
+import org.elasticsearch.common.settings.Settings;
import org.xbib.metrics.Count;
import org.xbib.metrics.Metered;
-public interface BulkMetric {
+import java.io.Closeable;
+
+public interface BulkMetric extends Closeable {
+
+ void init(Settings settings);
Metered getTotalIngest();
@@ -19,9 +24,9 @@ public interface BulkMetric {
Count getFailed();
+ long elapsed();
+
void start();
void stop();
-
- long elapsed();
}
diff --git a/elx-api/src/main/java/org/xbib/elx/api/BulkProcessor.java b/elx-api/src/main/java/org/xbib/elx/api/BulkProcessor.java
new file mode 100644
index 0000000..4d38d1c
--- /dev/null
+++ b/elx-api/src/main/java/org/xbib/elx/api/BulkProcessor.java
@@ -0,0 +1,64 @@
+package org.xbib.elx.api;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+
+import java.io.Closeable;
+import java.io.Flushable;
+import java.util.concurrent.TimeUnit;
+
+public interface BulkProcessor extends Closeable, Flushable {
+
+ BulkProcessor add(ActionRequest request);
+
+ BulkProcessor add(ActionRequest request, Object payload);
+
+ boolean awaitFlush(long timeout, TimeUnit unit) throws InterruptedException;
+
+ boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException;
+
+ interface BulkRequestHandler {
+
+ void execute(BulkRequest bulkRequest, long executionId);
+
+ boolean close(long timeout, TimeUnit unit) throws InterruptedException;
+
+ }
+
+ /**
+ * A listener for the execution.
+ */
+ public interface Listener {
+
+ /**
+ * Callback before the bulk is executed.
+ *
+ * @param executionId execution ID
+ * @param request request
+ */
+ void beforeBulk(long executionId, BulkRequest request);
+
+ /**
+ * Callback after a successful execution of bulk request.
+ *
+ * @param executionId execution ID
+ * @param request request
+ * @param response response
+ */
+ void afterBulk(long executionId, BulkRequest request, BulkResponse response);
+
+ /**
+ * Callback after a failed execution of bulk request.
+ *
+ * Note that in case an instance of InterruptedException is passed, which means that request
+ * processing has been cancelled externally, the thread's interruption status has been restored
+ * prior to calling this method.
+ *
+ * @param executionId execution ID
+ * @param request request
+ * @param failure failure
+ */
+ void afterBulk(long executionId, BulkRequest request, Throwable failure);
+ }
+}
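Since the Listener contract above matches the one known from the Elasticsearch bulk processor, a logging implementation is straightforward; the following is only an illustrative sketch.

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.action.bulk.BulkResponse;
    import org.xbib.elx.api.BulkProcessor;

    public class LoggingBulkListener implements BulkProcessor.Listener {

        private static final Logger logger = LogManager.getLogger(LoggingBulkListener.class.getName());

        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            logger.debug("bulk [{}] with {} actions queued", executionId, request.numberOfActions());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            if (response.hasFailures()) {
                logger.warn("bulk [{}] had failures: {}", executionId, response.buildFailureMessage());
            }
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            logger.error("bulk [{}] failed", executionId, failure);
        }
    }
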
diff --git a/elx-api/src/main/java/org/xbib/elx/api/ExtendedClient.java b/elx-api/src/main/java/org/xbib/elx/api/ExtendedClient.java
new file mode 100644
index 0000000..e08f90a
--- /dev/null
+++ b/elx-api/src/main/java/org/xbib/elx/api/ExtendedClient.java
@@ -0,0 +1,480 @@
+package org.xbib.elx.api;
+
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+
+import java.io.Closeable;
+import java.io.Flushable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Interface for extended managing and indexing methods of an Elasticsearch client.
+ */
+public interface ExtendedClient extends Flushable, Closeable {
+
+ /**
+ * Set an Elasticsearch client to extend. May be null for TransportClient.
+ * @param client client
+ * @return this client
+ */
+ ExtendedClient setClient(ElasticsearchClient client);
+
+ /**
+ * Return Elasticsearch client.
+ *
+ * @return Elasticsearch client
+ */
+ ElasticsearchClient getClient();
+
+ /**
+ * Get bulk metric.
+ * @return the bulk metric
+ */
+ BulkMetric getBulkMetric();
+
+ /**
+ * Get bulk controller.
+ * @return the bulk controller
+ */
+ BulkController getBulkController();
+
+ /**
+ * Initialize the extended client: create the bulk metric and bulk controller
+ * instances and connect to the cluster, if required.
+ *
+ * @param settings settings
+ * @return this client
+ * @throws IOException if init fails
+ */
+ ExtendedClient init(Settings settings) throws IOException;
+
+ /**
+ * Build index definition from settings.
+ *
+ * @param index the index name
+ * @param settings the settings for the index
+ * @return index definition
+ * @throws IOException if settings/mapping URL is invalid/malformed
+ */
+ IndexDefinition buildIndexDefinitionFromSettings(String index, Settings settings) throws IOException;
+
+ /**
+ * Add index request. Each request will be added to a queue for bulking requests.
+ * Submitting request will be done when limits are exceeded.
+ *
+ * @param index the index
+ * @param id the id
+ * @param create true if document must be created
+ * @param source the source
+ * @return this
+ */
+ ExtendedClient index(String index, String id, boolean create, BytesReference source);
+
+ /**
+ * Index request. Each request will be added to a queue for bulking requests.
+ * Submitting request will be done when limits are exceeded.
+ *
+ * @param index the index
+ * @param id the id
+ * @param create true if document is to be created, false otherwise
+ * @param source the source
+ * @return this client methods
+ */
+ ExtendedClient index(String index, String id, boolean create, String source);
+
+ /**
+ * Index request. Each request will be added to a queue for bulking requests.
+ * Submitting request will be done when bulk limits are exceeded.
+ *
+ * @param indexRequest the index request to add
+ * @return this
+ */
+ ExtendedClient index(IndexRequest indexRequest);
+
+ /**
+ * Delete request.
+ *
+ * @param index the index
+ * @param id the id
+ * @return this
+ */
+ ExtendedClient delete(String index, String id);
+
+ /**
+ * Delete request. Each request will be added to a queue for bulking requests.
+ * Submitting request will be done when bulk limits are exceeded.
+ *
+ * @param deleteRequest the delete request to add
+ * @return this
+ */
+ ExtendedClient delete(DeleteRequest deleteRequest);
+
+ /**
+ * Bulked update request. Each request will be added to a queue for bulking requests.
+ * Submitting request will be done when bulk limits are exceeded.
+ * Note that updates only work correctly when all operations between nodes are synchronized.
+ *
+ * @param index the index
+ * @param id the id
+ * @param source the source
+ * @return this
+ */
+ ExtendedClient update(String index, String id, BytesReference source);
+
+ /**
+ * Update document. Use with precaution! Does not work in all cases.
+ *
+ * @param index the index
+ * @param id the id
+ * @param source the source
+ * @return this
+ */
+ ExtendedClient update(String index, String id, String source);
+
+ /**
+ * Bulked update request. Each request will be added to a queue for bulking requests.
+ * Submitting request will be done when bulk limits are exceeded.
+ * Note that updates only work correctly when all operations between nodes are synchronized.
+ *
+ * @param updateRequest the update request to add
+ * @return this
+ */
+ ExtendedClient update(UpdateRequest updateRequest);
+
+ /**
+ * Create a new index.
+ *
+ * @param index index
+ * @return this
+ * @throws IOException if new index creation fails
+ */
+ ExtendedClient newIndex(String index) throws IOException;
+
+ /**
+ * Create a new index.
+ *
+ * @param index index
+ * @param settings settings
+ * @param mapping mapping
+ * @return this
+ * @throws IOException if settings/mapping is invalid or index creation fails
+ */
+ ExtendedClient newIndex(String index, InputStream settings, InputStream mapping) throws IOException;
+
+ /**
+ * Create a new index.
+ *
+ * @param index index
+ * @param settings settings
+ * @return this
+ * @throws IOException if settings is invalid or index creation fails
+ */
+ ExtendedClient newIndex(String index, Settings settings) throws IOException;
+
+ /**
+ * Create a new index.
+ *
+ * @param index index
+ * @param settings settings
+ * @param mapping mapping
+ * @return this
+ * @throws IOException if settings/mapping is invalid or index creation fails
+ */
+ ExtendedClient newIndex(String index, Settings settings, String mapping) throws IOException;
+
+ /**
+ * Create a new index.
+ *
+ * @param index index
+ * @param settings settings
+ * @param mapping mapping
+ * @return this
+ * @throws IOException if settings/mapping is invalid or index creation fails
+ */
+ ExtendedClient newIndex(String index, Settings settings, Map<String, Object> mapping) throws IOException;
+
+ /**
+ * Create a new index.
+ * @param indexDefinition the index definition
+ * @return this
+ * @throws IOException if settings/mapping is invalid or index creation fails
+ */
+ ExtendedClient newIndex(IndexDefinition indexDefinition) throws IOException;
+
+ /**
+ * Delete an index.
+ * @param indexDefinition the index definition
+ * @return this
+ */
+ ExtendedClient deleteIndex(IndexDefinition indexDefinition);
+
+ /**
+ * Delete an index.
+ *
+ * @param index index
+ * @return this
+ */
+ ExtendedClient deleteIndex(String index);
+
+ /**
+ * Start bulk mode for indexes.
+ * @param indexDefinition index definition
+ * @return this
+ * @throws IOException if bulk could not be started
+ */
+ ExtendedClient startBulk(IndexDefinition indexDefinition) throws IOException;
+
+ /**
+ * Start bulk mode.
+ *
+ * @param index index
+ * @param startRefreshIntervalSeconds refresh interval before bulk
+ * @param stopRefreshIntervalSeconds refresh interval after bulk
+ * @return this
+ * @throws IOException if bulk could not be started
+ */
+ ExtendedClient startBulk(String index, long startRefreshIntervalSeconds,
+ long stopRefreshIntervalSeconds) throws IOException;
+
+ /**
+ * Stop bulk mode.
+ *
+ * @param indexDefinition index definition
+ * @return this
+ * @throws IOException if bulk could not be stopped
+ */
+ ExtendedClient stopBulk(IndexDefinition indexDefinition) throws IOException;
+
+ /**
+ * Stops bulk mode.
+ *
+ * @param index index
+ * @param timeout maximum wait time
+ * @param timeUnit time unit for timeout
+ * @return this
+ * @throws IOException if bulk could not be stopped
+ */
+ ExtendedClient stopBulk(String index, long timeout, TimeUnit timeUnit) throws IOException;
+
+ /**
+ * Update replica level.
+ * @param indexDefinition the index definition
+ * @param level the replica level
+ * @return this
+ * @throws IOException if replica setting could not be updated
+ */
+ ExtendedClient updateReplicaLevel(IndexDefinition indexDefinition, int level) throws IOException;
+
+ /**
+ * Update replica level.
+ *
+ * @param index index
+ * @param level the replica level
+ * @param maxWaitTime maximum wait time
+ * @param timeUnit time unit
+ * @return this
+ * @throws IOException if replica setting could not be updated
+ */
+ ExtendedClient updateReplicaLevel(String index, int level, long maxWaitTime, TimeUnit timeUnit) throws IOException;
+
+ /**
+ * Get replica level.
+ * @param indexDefinition the index definition
+ * @return the replica level of the index
+ */
+ int getReplicaLevel(IndexDefinition indexDefinition);
+
+ /**
+ * Get replica level.
+ * @param index the index name
+ * @return the replica level of the index
+ */
+ int getReplicaLevel(String index);
+
+ /**
+ * Refresh the index.
+ *
+ * @param index index
+ * @return this
+ */
+ ExtendedClient refreshIndex(String index);
+
+ /**
+ * Flush the index. The cluster clears cache and completes indexing.
+ *
+ * @param index index
+ * @return this
+ */
+ ExtendedClient flushIndex(String index);
+
+ /**
+ * Force segment merge of an index.
+ * @param indexDefinition the index definition
+ * @return true if the force merge succeeded, false otherwise
+ */
+ boolean forceMerge(IndexDefinition indexDefinition);
+
+ /**
+ * Force segment merge of an index.
+ * @param index the index
+ * @param maxWaitTime maximum wait time
+ * @param timeUnit time unit
+ * @return true if the force merge succeeded, false otherwise
+ */
+ boolean forceMerge(String index, long maxWaitTime, TimeUnit timeUnit);
+
+ /**
+ * Wait for all outstanding bulk responses.
+ *
+ * @param timeout maximum wait time
+ * @param timeUnit unit of timeout value
+ * @return true if wait succeeded, false if wait timed out
+ */
+ boolean waitForResponses(long timeout, TimeUnit timeUnit);
+
+ /**
+ * Wait for cluster being healthy.
+ *
+ * @param healthColor cluster health color to wait for
+ * @param maxWaitTime time value
+ * @param timeUnit time unit
+ * @return true if wait succeeded, false if wait timed out
+ */
+ boolean waitForCluster(String healthColor, long maxWaitTime, TimeUnit timeUnit);
+
+ /**
+ * Get current health color.
+ *
+ * @param maxWaitTime maximum wait time
+ * @param timeUnit time unit
+ * @return the cluster health color
+ */
+ String getHealthColor(long maxWaitTime, TimeUnit timeUnit);
+
+ /**
+ * Wait for index recovery (after replica change).
+ *
+ * @param index index
+ * @param maxWaitTime maximum wait time
+ * @param timeUnit time unit
+ * @return true if wait succeeded, false if wait timed out
+ */
+ boolean waitForRecovery(String index, long maxWaitTime, TimeUnit timeUnit);
+
+ /**
+ * Update index setting.
+ * @param index the index
+ * @param key the key of the value to be updated
+ * @param value the new value
+ * @param timeout timeout
+ * @param timeUnit time unit
+ * @throws IOException if update index setting failed
+ */
+ void updateIndexSetting(String index, String key, Object value, long timeout, TimeUnit timeUnit) throws IOException;
+
+ /**
+ * Resolve alias.
+ *
+ * @param alias the alias
+ * @return the index name behind the alias, or the alias if there is no index
+ */
+ String resolveAlias(String alias);
+
+ /**
+ * Resolve alias to all connected indices, sort index names with most recent timestamp on top, return this index
+ * name.
+ *
+ * @param alias the alias
+ * @return the most recent index name pointing to the alias
+ */
+ String resolveMostRecentIndex(String alias);
+
+ /**
+ * Get all index filters.
+ * @param index the index
+ * @return map of index filters
+ */
+ Map<String, String> getAliases(String index);
+
+ /**
+ * Shift from one index to another.
+ * @param indexDefinition the index definition
+ * @param additionalAliases new aliases
+ * @return the index shift result
+ */
+ IndexShiftResult shiftIndex(IndexDefinition indexDefinition, List<String> additionalAliases);
+
+ /**
+ * Shift from one index to another.
+ * @param indexDefinition the index definition
+ * @param additionalAliases new aliases
+ * @param indexAliasAdder method to add aliases
+ * @return the index shift result
+ */
+ IndexShiftResult shiftIndex(IndexDefinition indexDefinition, List<String> additionalAliases,
+ IndexAliasAdder indexAliasAdder);
+
+ /**
+ * Shift from one index to another.
+ * @param index the index name
+ * @param fullIndexName the index name with timestamp
+ * @param additionalAliases a list of names that should be set as index aliases
+ * @return the index shift result
+ */
+ IndexShiftResult shiftIndex(String index, String fullIndexName, List<String> additionalAliases);
+
+ /**
+ * Shift from one index to another.
+ * @param index the index name
+ * @param fullIndexName the index name with timestamp
+ * @param additionalAliases a list of names that should be set as index aliases
+ * @param adder an adder method to create alias term queries
+ * @return the index shift result
+ */
+ IndexShiftResult shiftIndex(String index, String fullIndexName, List<String> additionalAliases,
+ IndexAliasAdder adder);
+
+ /**
+ * Prune index.
+ * @param indexDefinition the index definition
+ * @return the index prune result
+ */
+ IndexPruneResult pruneIndex(IndexDefinition indexDefinition);
+
+ /**
+ * Apply retention policy to prune indices. All indices before delta should be deleted,
+ * but the number of mintokeep indices must be kept.
+ *
+ * @param index index name
+ * @param fullIndexName index name with timestamp
+ * @param delta timestamp delta (for index timestamps)
+ * @param mintokeep minimum number of indices to keep
+ * @param perform true if pruning should be executed, false if not
+ * @return the index prune result
+ */
+ IndexPruneResult pruneIndex(String index, String fullIndexName, int delta, int mintokeep, boolean perform);
+
+ /**
+ * Find the timestamp of the most recently indexed document in the index.
+ *
+ * @param index the index name
+ * @param timestampfieldname the timestamp field name
+ * @return millis UTC millis of the most recent document
+ * @throws IOException if the most recent document cannot be found
+ */
+ Long mostRecentDocument(String index, String timestampfieldname) throws IOException;
+
+ /**
+ * Get cluster name.
+ * @return the cluster name
+ */
+ String getClusterName();
+}
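Taken together, the interface is used roughly as in the sketch below. The concrete ExtendedNodeClient class named here is an assumption, standing in for any AbstractExtendedClient subclass that supplies createClient()/closeClient(); everything else follows the method contracts above.

    ExtendedClient client = new ExtendedNodeClient()     // assumed concrete implementation
            .init(Settings.builder().put("cluster.name", "elasticsearch").build());
    client.newIndex("example_index");
    client.index("example_index", "1", true, "{\"key\":\"value\"}");
    client.flush();
    client.waitForResponses(30L, TimeUnit.SECONDS);
    client.refreshIndex("example_index");
    client.close();
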
diff --git a/elx-api/src/main/java/org/xbib/elx/api/ExtendedClientProvider.java b/elx-api/src/main/java/org/xbib/elx/api/ExtendedClientProvider.java
new file mode 100644
index 0000000..2a8904a
--- /dev/null
+++ b/elx-api/src/main/java/org/xbib/elx/api/ExtendedClientProvider.java
@@ -0,0 +1,7 @@
+package org.xbib.elx.api;
+
+@FunctionalInterface
+public interface ExtendedClientProvider {
+
+ C getExtendedClient();
+}
diff --git a/elx-api/src/main/java/org/xbib/elx/api/IndexAliasAdder.java b/elx-api/src/main/java/org/xbib/elx/api/IndexAliasAdder.java
new file mode 100644
index 0000000..03dd6e6
--- /dev/null
+++ b/elx-api/src/main/java/org/xbib/elx/api/IndexAliasAdder.java
@@ -0,0 +1,9 @@
+package org.xbib.elx.api;
+
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+
+@FunctionalInterface
+public interface IndexAliasAdder {
+
+ void addIndexAlias(IndicesAliasesRequest request, String index, String alias);
+}
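Because the interface is functional, an adder can be supplied as a lambda. The sketch below attaches a filtered alias using the stock AliasActions builder of the Elasticsearch request API; the "catalog" field name is only an example.

    IndexAliasAdder adder = (request, index, alias) ->
            request.addAliasAction(IndicesAliasesRequest.AliasActions.add()
                    .index(index)
                    .alias(alias)
                    .filter(QueryBuilders.termQuery("catalog", alias)));
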
diff --git a/elx-api/src/main/java/org/xbib/elx/api/IndexDefinition.java b/elx-api/src/main/java/org/xbib/elx/api/IndexDefinition.java
new file mode 100644
index 0000000..49544a7
--- /dev/null
+++ b/elx-api/src/main/java/org/xbib/elx/api/IndexDefinition.java
@@ -0,0 +1,70 @@
+package org.xbib.elx.api;
+
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.concurrent.TimeUnit;
+
+public interface IndexDefinition {
+
+ IndexDefinition setIndex(String index);
+
+ String getIndex();
+
+ IndexDefinition setFullIndexName(String fullIndexName);
+
+ String getFullIndexName();
+
+ IndexDefinition setSettingsUrl(String settingsUrlString) throws MalformedURLException;
+
+ IndexDefinition setSettingsUrl(URL settingsUrl);
+
+ URL getSettingsUrl();
+
+ IndexDefinition setMappingsUrl(String mappingsUrlString) throws MalformedURLException;
+
+ IndexDefinition setMappingsUrl(URL mappingsUrl);
+
+ URL getMappingsUrl();
+
+ IndexDefinition setDateTimePattern(String timeWindow);
+
+ String getDateTimePattern();
+
+ IndexDefinition setEnabled(boolean enabled);
+
+ boolean isEnabled();
+
+ IndexDefinition setIgnoreErrors(boolean ignoreErrors);
+
+ boolean ignoreErrors();
+
+ IndexDefinition setShift(boolean shift);
+
+ boolean isShiftEnabled();
+
+ IndexDefinition setForceMerge(boolean hasForceMerge);
+
+ boolean hasForceMerge();
+
+ IndexDefinition setReplicaLevel(int replicaLevel);
+
+ int getReplicaLevel();
+
+ IndexDefinition setRetention(IndexRetention indexRetention);
+
+ IndexRetention getRetention();
+
+ IndexDefinition setMaxWaitTime(long maxWaitTime, TimeUnit timeUnit);
+
+ long getMaxWaitTime();
+
+ TimeUnit getMaxWaitTimeUnit();
+
+ IndexDefinition setStartRefreshInterval(long seconds);
+
+ long getStartRefreshInterval();
+
+ IndexDefinition setStopRefreshInterval(long seconds);
+
+ long getStopRefreshInterval();
+}
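A sketch of filling in an IndexDefinition by hand. The DefaultIndexDefinition class is an assumption (an ExtendedClient can also derive a definition via buildIndexDefinitionFromSettings), and `client` is an initialized ExtendedClient.

    IndexDefinition indexDefinition = new DefaultIndexDefinition()   // assumed implementation class
            .setIndex("example")
            .setFullIndexName("example20190101")
            .setDateTimePattern("yyyyMMdd")
            .setShift(true)
            .setForceMerge(true)
            .setReplicaLevel(1)
            .setStartRefreshInterval(-1L)
            .setStopRefreshInterval(30L)
            .setMaxWaitTime(30L, TimeUnit.SECONDS);
    client.newIndex(indexDefinition);
    client.startBulk(indexDefinition);
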
diff --git a/elx-api/src/main/java/org/xbib/elx/api/IndexPruneResult.java b/elx-api/src/main/java/org/xbib/elx/api/IndexPruneResult.java
new file mode 100644
index 0000000..0c118f8
--- /dev/null
+++ b/elx-api/src/main/java/org/xbib/elx/api/IndexPruneResult.java
@@ -0,0 +1,16 @@
+package org.xbib.elx.api;
+
+import java.util.List;
+
+public interface IndexPruneResult {
+
+ enum State { NOTHING_TO_DO, SUCCESS, NONE };
+
+ State getState();
+
+ List<String> getCandidateIndices();
+
+ List<String> getDeletedIndices();
+
+ boolean isAcknowledged();
+}
diff --git a/elx-api/src/main/java/org/xbib/elx/api/IndexRetention.java b/elx-api/src/main/java/org/xbib/elx/api/IndexRetention.java
new file mode 100644
index 0000000..44116e2
--- /dev/null
+++ b/elx-api/src/main/java/org/xbib/elx/api/IndexRetention.java
@@ -0,0 +1,13 @@
+package org.xbib.elx.api;
+
+public interface IndexRetention {
+
+ IndexRetention setDelta(int delta);
+
+ int getDelta();
+
+ IndexRetention setMinToKeep(int minToKeep);
+
+ int getMinToKeep();
+
+}
diff --git a/elx-api/src/main/java/org/xbib/elx/api/IndexShiftResult.java b/elx-api/src/main/java/org/xbib/elx/api/IndexShiftResult.java
new file mode 100644
index 0000000..02a2e8c
--- /dev/null
+++ b/elx-api/src/main/java/org/xbib/elx/api/IndexShiftResult.java
@@ -0,0 +1,10 @@
+package org.xbib.elx.api;
+
+import java.util.List;
+
+public interface IndexShiftResult {
+
+ List<String> getMovedAliases();
+
+ List<String> getNewAliases();
+}
diff --git a/elx-api/src/main/java/org/xbib/elx/api/package-info.java b/elx-api/src/main/java/org/xbib/elx/api/package-info.java
new file mode 100644
index 0000000..03fd0e3
--- /dev/null
+++ b/elx-api/src/main/java/org/xbib/elx/api/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * The API of the extended Elasticsearch clients.
+ */
+package org.xbib.elx.api;
diff --git a/elx-common/build.gradle b/elx-common/build.gradle
new file mode 100644
index 0000000..4336a23
--- /dev/null
+++ b/elx-common/build.gradle
@@ -0,0 +1,5 @@
+dependencies {
+ compile project(':elx-api')
+ testCompile "org.xbib.elasticsearch:elasticsearch-analysis-common:${rootProject.property('elasticsearch-server.version')}"
+ testCompile "org.xbib.elasticsearch:transport-netty4:${rootProject.property('elasticsearch-server.version')}"
+}
diff --git a/elx-common/src/main/java/org/xbib/elx/common/AbstractExtendedClient.java b/elx-common/src/main/java/org/xbib/elx/common/AbstractExtendedClient.java
new file mode 100644
index 0000000..78cdce0
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/AbstractExtendedClient.java
@@ -0,0 +1,1097 @@
+package org.xbib.elx.common;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushAction;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
+import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
+import org.elasticsearch.action.admin.indices.get.GetIndexAction;
+import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.AliasOrIndex;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.DeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortBuilder;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.xbib.elx.api.BulkController;
+import org.xbib.elx.api.BulkMetric;
+import org.xbib.elx.api.ExtendedClient;
+import org.xbib.elx.api.IndexAliasAdder;
+import org.xbib.elx.api.IndexDefinition;
+import org.xbib.elx.api.IndexPruneResult;
+import org.xbib.elx.api.IndexRetention;
+import org.xbib.elx.api.IndexShiftResult;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.time.LocalDate;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Consumer;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+public abstract class AbstractExtendedClient implements ExtendedClient {
+
+ private static final Logger logger = LogManager.getLogger(AbstractExtendedClient.class.getName());
+
+ /**
+ * The one and only index type name used in the extended client.
+ * Note that Elasticsearch versions < 6.2.0 do not allow a type name with a prepended "_".
+ */
+ private static final String TYPE_NAME = "doc";
+
+ /**
+ * The Elasticsearch client.
+ */
+ private ElasticsearchClient client;
+
+ private BulkMetric bulkMetric;
+
+ private BulkController bulkController;
+
+ private AtomicBoolean closed;
+
+ private static final IndexShiftResult EMPTY_INDEX_SHIFT_RESULT = new IndexShiftResult() {
+ @Override
+ public List<String> getMovedAliases() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> getNewAliases() {
+ return Collections.emptyList();
+ }
+ };
+
+ private static final IndexPruneResult EMPTY_INDEX_PRUNE_RESULT = new IndexPruneResult() {
+ @Override
+ public State getState() {
+ return State.NONE;
+ }
+
+ @Override
+ public List<String> getCandidateIndices() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public List<String> getDeletedIndices() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public boolean isAcknowledged() {
+ return false;
+ }
+ };
+
+ protected abstract ElasticsearchClient createClient(Settings settings) throws IOException;
+
+ protected abstract void closeClient() throws IOException;
+
+ protected AbstractExtendedClient() {
+ closed = new AtomicBoolean(false);
+ }
+
+ @Override
+ public AbstractExtendedClient setClient(ElasticsearchClient client) {
+ this.client = client;
+ return this;
+ }
+
+ @Override
+ public ElasticsearchClient getClient() {
+ return client;
+ }
+
+ @Override
+ public BulkMetric getBulkMetric() {
+ return bulkMetric;
+ }
+
+ @Override
+ public BulkController getBulkController() {
+ return bulkController;
+ }
+
+ @Override
+ public AbstractExtendedClient init(Settings settings) throws IOException {
+ if (client == null) {
+ client = createClient(settings);
+ }
+ if (bulkMetric == null) {
+ this.bulkMetric = new DefaultBulkMetric();
+ this.bulkMetric.init(settings);
+ }
+ if (bulkController == null) {
+ this.bulkController = new DefaultBulkController(this, bulkMetric);
+ this.bulkController.init(settings);
+ }
+ return this;
+ }
+
+ @Override
+ public void flush() throws IOException {
+ if (bulkController != null) {
+ bulkController.flush();
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ ensureActive();
+ if (closed.compareAndSet(false, true)) {
+ if (bulkMetric != null) {
+ logger.info("closing bulk metric");
+ bulkMetric.close();
+ }
+ if (bulkController != null) {
+ logger.info("closing bulk controller");
+ bulkController.close();
+ }
+ closeClient();
+ }
+ }
+
+ @Override
+ public String getClusterName() {
+ ensureActive();
+ try {
+ ClusterStateRequest clusterStateRequest = new ClusterStateRequest().all();
+ ClusterStateResponse clusterStateResponse =
+ client.execute(ClusterStateAction.INSTANCE, clusterStateRequest).actionGet();
+ return clusterStateResponse.getClusterName().value();
+ } catch (ElasticsearchTimeoutException e) {
+ logger.warn(e.getMessage(), e);
+ return "TIMEOUT";
+ } catch (NoNodeAvailableException e) {
+ logger.warn(e.getMessage(), e);
+ return "DISCONNECTED";
+ } catch (Exception e) {
+ logger.warn(e.getMessage(), e);
+ return "[" + e.getMessage() + "]";
+ }
+ }
+
+ @Override
+ public ExtendedClient newIndex(IndexDefinition indexDefinition) throws IOException {
+ ensureActive();
+ waitForCluster("YELLOW", 30L, TimeUnit.SECONDS);
+ URL indexSettings = indexDefinition.getSettingsUrl();
+ if (indexSettings == null) {
+ logger.warn("warning while creating index '{}', no settings/mappings",
+ indexDefinition.getFullIndexName());
+ newIndex(indexDefinition.getFullIndexName());
+ return this;
+ }
+ URL indexMappings = indexDefinition.getMappingsUrl();
+ if (indexMappings == null) {
+ logger.warn("warning while creating index '{}', no mappings",
+ indexDefinition.getFullIndexName());
+ newIndex(indexDefinition.getFullIndexName(), indexSettings.openStream(), null);
+ return this;
+ }
+ try (InputStream indexSettingsInput = indexSettings.openStream();
+ InputStream indexMappingsInput = indexMappings.openStream()) {
+ newIndex(indexDefinition.getFullIndexName(), indexSettingsInput, indexMappingsInput);
+ } catch (IOException e) {
+ if (indexDefinition.ignoreErrors()) {
+ logger.warn(e.getMessage(), e);
+ logger.warn("warning while creating index '{}' with settings at {} and mappings at {}",
+ indexDefinition.getFullIndexName(), indexSettings, indexMappings);
+ } else {
+ logger.error("error while creating index '{}' with settings at {} and mappings at {}",
+ indexDefinition.getFullIndexName(), indexSettings, indexMappings);
+ throw new IOException(e);
+ }
+ }
+ return this;
+ }
+
+ @Override
+ public ExtendedClient newIndex(String index) throws IOException {
+ return newIndex(index, Settings.EMPTY, (Map<String, Object>) null);
+ }
+
+ @Override
+ public ExtendedClient newIndex(String index, InputStream settings, InputStream mapping) throws IOException {
+ return newIndex(index,
+ Settings.builder().loadFromStream(".json", settings, true).build(),
+ mapping != null ? JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY,
+ DeprecationHandler.THROW_UNSUPPORTED_OPERATION, mapping).mapOrdered() : null);
+ }
+
+ @Override
+ public ExtendedClient newIndex(String index, Settings settings) throws IOException {
+ return newIndex(index, settings, (Map<String, Object>) null);
+ }
+
+ @Override
+ public ExtendedClient newIndex(String index, Settings settings, String mapping) throws IOException {
+ return newIndex(index, settings,
+ mapping != null ? JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY,
+ DeprecationHandler.THROW_UNSUPPORTED_OPERATION, mapping).mapOrdered() : null);
+ }
+
+ @Override
+ public ExtendedClient newIndex(String index, Settings settings, Map<String, Object> mapping) throws IOException {
+ ensureActive();
+ if (index == null) {
+ logger.warn("no index name given to create index");
+ return this;
+ }
+ CreateIndexRequest createIndexRequest = new CreateIndexRequest().index(index);
+ if (settings != null) {
+ createIndexRequest.settings(settings);
+ }
+ if (mapping != null) {
+ createIndexRequest.mapping(TYPE_NAME, mapping);
+ }
+ CreateIndexResponse createIndexResponse = client.execute(CreateIndexAction.INSTANCE, createIndexRequest).actionGet();
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ logger.info("index {} created: {}", index,
+ Strings.toString(createIndexResponse.toXContent(builder, ToXContent.EMPTY_PARAMS)));
+ return this;
+ }
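+
+ // Illustrative sketch (not part of the class): creating an index through the methods above.
+ // The settings keys and values here are example assumptions, not library defaults.
+ //
+ // Settings settings = Settings.builder()
+ //         .put("index.number_of_shards", 1)
+ //         .put("index.number_of_replicas", 0)
+ //         .build();
+ // client.newIndex("myindex", settings, (Map<String, Object>) null);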
+
+ @Override
+ public ExtendedClient deleteIndex(IndexDefinition indexDefinition) {
+ return deleteIndex(indexDefinition.getFullIndexName());
+ }
+
+ @Override
+ public ExtendedClient deleteIndex(String index) {
+ ensureActive();
+ if (index == null) {
+ logger.warn("no index name given to delete index");
+ return this;
+ }
+ DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest().indices(index);
+ client.execute(DeleteIndexAction.INSTANCE, deleteIndexRequest).actionGet();
+ return this;
+ }
+
+ @Override
+ public ExtendedClient startBulk(IndexDefinition indexDefinition) throws IOException {
+ startBulk(indexDefinition.getFullIndexName(), -1, 1);
+ return this;
+ }
+
+ @Override
+ public ExtendedClient startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds)
+ throws IOException {
+ if (bulkController != null) {
+ ensureActive();
+ bulkController.startBulkMode(index, startRefreshIntervalSeconds, stopRefreshIntervalSeconds);
+ }
+ return this;
+ }
+
+ @Override
+ public ExtendedClient stopBulk(IndexDefinition indexDefinition) throws IOException {
+ if (bulkController != null) {
+ ensureActive();
+ bulkController.stopBulkMode(indexDefinition);
+ }
+ return this;
+ }
+
+ @Override
+ public ExtendedClient stopBulk(String index, long timeout, TimeUnit timeUnit) throws IOException {
+ if (bulkController != null) {
+ ensureActive();
+ bulkController.stopBulkMode(index, timeout, timeUnit);
+ }
+ return this;
+ }
+
+ @Override
+ public ExtendedClient index(String index, String id, boolean create, String source) {
+ return index(new IndexRequest(index, TYPE_NAME, id).create(create)
+ .source(source.getBytes(StandardCharsets.UTF_8), XContentType.JSON));
+ }
+
+ @Override
+ public ExtendedClient index(String index, String id, boolean create, BytesReference source) {
+ return index(new IndexRequest(index, TYPE_NAME, id).create(create)
+ .source(source, XContentType.JSON));
+ }
+
+ @Override
+ public ExtendedClient index(IndexRequest indexRequest) {
+ ensureActive();
+ bulkController.index(indexRequest);
+ return this;
+ }
+
+ @Override
+ public ExtendedClient delete(String index, String id) {
+ return delete(new DeleteRequest(index, TYPE_NAME, id));
+ }
+
+ @Override
+ public ExtendedClient delete(DeleteRequest deleteRequest) {
+ ensureActive();
+ bulkController.delete(deleteRequest);
+ return this;
+ }
+
+ @Override
+ public ExtendedClient update(String index, String id, BytesReference source) {
+ return update(new UpdateRequest(index, TYPE_NAME, id)
+ .doc(source, XContentType.JSON));
+ }
+
+ @Override
+ public ExtendedClient update(String index, String id, String source) {
+ return update(new UpdateRequest(index, TYPE_NAME, id)
+ .doc(source.getBytes(StandardCharsets.UTF_8), XContentType.JSON));
+ }
+
+ @Override
+ public ExtendedClient update(UpdateRequest updateRequest) {
+ ensureActive();
+ bulkController.update(updateRequest);
+ return this;
+ }
+
+ @Override
+ public boolean waitForResponses(long timeout, TimeUnit timeUnit) {
+ ensureActive();
+ return bulkController.waitForResponses(timeout, timeUnit);
+ }
+
+ @Override
+ public boolean waitForRecovery(String index, long maxWaitTime, TimeUnit timeUnit) {
+ ensureActive();
+ ensureIndexGiven(index);
+ RecoveryRequest recoveryRequest = new RecoveryRequest();
+ recoveryRequest.indices(index);
+ recoveryRequest.activeOnly(true);
+ RecoveryResponse response = client.execute(RecoveryAction.INSTANCE, recoveryRequest).actionGet();
+ int shards = response.getTotalShards();
+ TimeValue timeout = toTimeValue(maxWaitTime, timeUnit);
+ ClusterHealthRequest clusterHealthRequest = new ClusterHealthRequest()
+ .indices(index)
+ .waitForActiveShards(shards).timeout(timeout);
+ ClusterHealthResponse healthResponse =
+ client.execute(ClusterHealthAction.INSTANCE, clusterHealthRequest).actionGet();
+ if (healthResponse != null && healthResponse.isTimedOut()) {
+ logger.error("timeout waiting for recovery");
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public boolean waitForCluster(String statusString, long maxWaitTime, TimeUnit timeUnit) {
+ ensureActive();
+ ClusterHealthStatus status = ClusterHealthStatus.fromString(statusString);
+ TimeValue timeout = toTimeValue(maxWaitTime, timeUnit);
+ ClusterHealthResponse healthResponse = client.execute(ClusterHealthAction.INSTANCE,
+ new ClusterHealthRequest().timeout(timeout).waitForStatus(status)).actionGet();
+ if (healthResponse != null && healthResponse.isTimedOut()) {
+ if (logger.isErrorEnabled()) {
+ logger.error("timeout, cluster state is " + healthResponse.getStatus().name() + " and not " + status.name());
+ }
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public String getHealthColor(long maxWaitTime, TimeUnit timeUnit) {
+ ensureActive();
+ try {
+ TimeValue timeout = toTimeValue(maxWaitTime, timeUnit);
+ ClusterHealthResponse healthResponse = client.execute(ClusterHealthAction.INSTANCE,
+ new ClusterHealthRequest().timeout(timeout)).actionGet();
+ ClusterHealthStatus status = healthResponse.getStatus();
+ return status.name();
+ } catch (ElasticsearchTimeoutException e) {
+ logger.warn(e.getMessage(), e);
+ return "TIMEOUT";
+ } catch (NoNodeAvailableException e) {
+ logger.warn(e.getMessage(), e);
+ return "DISCONNECTED";
+ } catch (Exception e) {
+ logger.warn(e.getMessage(), e);
+ return "[" + e.getMessage() + "]";
+ }
+ }
+
+ @Override
+ public ExtendedClient updateReplicaLevel(IndexDefinition indexDefinition, int level) throws IOException {
+ return updateReplicaLevel(indexDefinition.getFullIndexName(), level,
+ indexDefinition.getMaxWaitTime(), indexDefinition.getMaxWaitTimeUnit());
+ }
+
+ @Override
+ public ExtendedClient updateReplicaLevel(String index, int level, long maxWaitTime, TimeUnit timeUnit) throws IOException {
+ waitForCluster("YELLOW", maxWaitTime, timeUnit); // let cluster settle down from critical operations
+ if (level > 0) {
+ updateIndexSetting(index, "number_of_replicas", level, maxWaitTime, timeUnit);
+ waitForRecovery(index, maxWaitTime, timeUnit);
+ }
+ return this;
+ }
+
+ @Override
+ public int getReplicaLevel(IndexDefinition indexDefinition) {
+ return getReplicaLevel(indexDefinition.getFullIndexName());
+ }
+
+ @Override
+ public int getReplicaLevel(String index) {
+ GetSettingsRequest request = new GetSettingsRequest().indices(index);
+ GetSettingsResponse response = client.execute(GetSettingsAction.INSTANCE, request).actionGet();
+ int replica = -1;
+ for (ObjectObjectCursor<String, Settings> cursor : response.getIndexToSettings()) {
+ Settings settings = cursor.value;
+ if (index.equals(cursor.key)) {
+ replica = settings.getAsInt("index.number_of_replicas", null);
+ }
+ }
+ return replica;
+ }
+
+ @Override
+ public ExtendedClient flushIndex(String index) {
+ if (index != null) {
+ ensureActive();
+ client.execute(FlushAction.INSTANCE, new FlushRequest(index)).actionGet();
+ }
+ return this;
+ }
+
+ @Override
+ public ExtendedClient refreshIndex(String index) {
+ if (index != null) {
+ ensureActive();
+ client.execute(RefreshAction.INSTANCE, new RefreshRequest(index)).actionGet();
+ }
+ return this;
+ }
+
+ @Override
+ public String resolveMostRecentIndex(String alias) {
+ ensureActive();
+ if (alias == null) {
+ return null;
+ }
+ GetAliasesRequest getAliasesRequest = new GetAliasesRequest().aliases(alias);
+ GetAliasesResponse getAliasesResponse = client.execute(GetAliasesAction.INSTANCE, getAliasesRequest).actionGet();
+ Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
+ Set<String> indices = new TreeSet<>(Collections.reverseOrder());
+ for (ObjectCursor<String> indexName : getAliasesResponse.getAliases().keys()) {
+ Matcher m = pattern.matcher(indexName.value);
+ if (m.matches() && alias.equals(m.group(1))) {
+ indices.add(indexName.value);
+ }
+ }
+ return indices.isEmpty() ? alias : indices.iterator().next();
+ }
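+
+ // Worked example (hypothetical names): with concrete indices "items20190101" and "items20190102"
+ // both matching the alias "items", resolveMostRecentIndex("items") returns "items20190102",
+ // because candidates are collected into a reverse-ordered TreeSet; if no index with a numeric
+ // suffix matches, the alias itself is returned unchanged.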
+
+ @Override
+ public Map<String, String> getAliases(String index) {
+ if (index == null) {
+ return Collections.emptyMap();
+ }
+ GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices(index);
+ return getFilters(client.execute(GetAliasesAction.INSTANCE, getAliasesRequest).actionGet());
+ }
+
+ @Override
+ public String resolveAlias(String alias) {
+ ensureActive();
+ ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
+ clusterStateRequest.metaData(true);
+ ClusterStateResponse clusterStateResponse =
+ client.execute(ClusterStateAction.INSTANCE, clusterStateRequest).actionGet();
+ SortedMap<String, AliasOrIndex> map = clusterStateResponse.getState().getMetaData().getAliasAndIndexLookup();
+ AliasOrIndex aliasOrIndex = map.get(alias);
+ return aliasOrIndex != null ? aliasOrIndex.getIndices().iterator().next().getIndex().getName() : null;
+ }
+
+ @Override
+ public IndexShiftResult shiftIndex(IndexDefinition indexDefinition, List<String> additionalAliases) {
+ return shiftIndex(indexDefinition, additionalAliases, null);
+ }
+
+ @Override
+ public IndexShiftResult shiftIndex(IndexDefinition indexDefinition,
+ List<String> additionalAliases, IndexAliasAdder indexAliasAdder) {
+ if (additionalAliases == null) {
+ return EMPTY_INDEX_SHIFT_RESULT;
+ }
+ if (indexDefinition.isShiftEnabled()) {
+ return shiftIndex(indexDefinition.getIndex(),
+ indexDefinition.getFullIndexName(), additionalAliases.stream()
+ .filter(a -> a != null && !a.isEmpty())
+ .collect(Collectors.toList()), indexAliasAdder);
+ }
+ return EMPTY_INDEX_SHIFT_RESULT;
+ }
+
+ @Override
+ public IndexShiftResult shiftIndex(String index, String fullIndexName, List<String> additionalAliases) {
+ return shiftIndex(index, fullIndexName, additionalAliases, null);
+ }
+
+ @Override
+ public IndexShiftResult shiftIndex(String index, String fullIndexName,
+ List<String> additionalAliases, IndexAliasAdder adder) {
+ ensureActive();
+ if (index == null) {
+ return EMPTY_INDEX_SHIFT_RESULT; // nothing to shift to
+ }
+ if (index.equals(fullIndexName)) {
+ return EMPTY_INDEX_SHIFT_RESULT; // nothing to shift to
+ }
+ waitForCluster("YELLOW", 30L, TimeUnit.SECONDS);
+ // two situations: 1. a new alias 2. there is already an old index with the alias
+ String oldIndex = resolveAlias(index);
+ Map<String, String> oldAliasMap = index.equals(oldIndex) ? null : getAliases(oldIndex);
+ logger.debug("old index = {} old alias map = {}", oldIndex, oldAliasMap);
+ final List<String> newAliases = new ArrayList<>();
+ final List<String> moveAliases = new ArrayList<>();
+ IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
+ if (oldAliasMap == null || !oldAliasMap.containsKey(index)) {
+ indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add()
+ .index(fullIndexName).alias(index));
+ newAliases.add(index);
+ }
+ // move existing aliases
+ if (oldAliasMap != null) {
+ for (Map.Entry<String, String> entry : oldAliasMap.entrySet()) {
+ String alias = entry.getKey();
+ String filter = entry.getValue();
+ indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.remove()
+ .indices(oldIndex).alias(alias));
+ if (filter != null) {
+ indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add()
+ .index(fullIndexName).alias(alias).filter(filter));
+ } else {
+ indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add()
+ .index(fullIndexName).alias(alias));
+ }
+ moveAliases.add(alias);
+ }
+ }
+ // a list of aliases that should be added, check if new or old
+ if (additionalAliases != null) {
+ for (String additionalAlias : additionalAliases) {
+ if (oldAliasMap == null || !oldAliasMap.containsKey(additionalAlias)) {
+ // index alias adder only active on extra aliases, and if alias is new
+ if (adder != null) {
+ adder.addIndexAlias(indicesAliasesRequest, fullIndexName, additionalAlias);
+ } else {
+ indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add()
+ .index(fullIndexName).alias(additionalAlias));
+ }
+ newAliases.add(additionalAlias);
+ } else {
+ String filter = oldAliasMap.get(additionalAlias);
+ indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.remove()
+ .indices(oldIndex).alias(additionalAlias));
+ if (filter != null) {
+ indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add()
+ .index(fullIndexName).alias(additionalAlias).filter(filter));
+ } else {
+ indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add()
+ .index(fullIndexName).alias(additionalAlias));
+ }
+ moveAliases.add(additionalAlias);
+ }
+ }
+ }
+ if (!indicesAliasesRequest.getAliasActions().isEmpty()) {
+ logger.debug("indices alias request = {}", indicesAliasesRequest.getAliasActions().toString());
+ IndicesAliasesResponse indicesAliasesResponse =
+ client.execute(IndicesAliasesAction.INSTANCE, indicesAliasesRequest).actionGet();
+ logger.debug("response isAcknowledged = {} isFragment = {}",
+ indicesAliasesResponse.isAcknowledged(), indicesAliasesResponse.isFragment());
+ }
+ return new SuccessIndexShiftResult(moveAliases, newAliases);
+ }
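+
+ // Illustrative sketch (hypothetical names): shifting the alias "items" from the previous
+ // full index "items20190101" to the freshly built "items20190102", keeping one extra alias.
+ //
+ // IndexShiftResult result = client.shiftIndex("items", "items20190102",
+ //         Collections.singletonList("items_search"));
+ // result.getMovedAliases(); // aliases carried over from the old index
+ // result.getNewAliases();   // aliases created for the first time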
+
+ @Override
+ public IndexPruneResult pruneIndex(IndexDefinition indexDefinition) {
+ return pruneIndex(indexDefinition.getIndex(), indexDefinition.getFullIndexName(),
+ indexDefinition.getRetention().getDelta(), indexDefinition.getRetention().getMinToKeep(), true);
+ }
+
+ @Override
+ public IndexPruneResult pruneIndex(String index, String fullIndexName, int delta, int mintokeep, boolean perform) {
+ if (delta == 0 && mintokeep == 0) {
+ return EMPTY_INDEX_PRUNE_RESULT;
+ }
+ if (index.equals(fullIndexName)) {
+ return EMPTY_INDEX_PRUNE_RESULT;
+ }
+ ensureActive();
+ GetIndexRequestBuilder getIndexRequestBuilder = new GetIndexRequestBuilder(client, GetIndexAction.INSTANCE);
+ GetIndexResponse getIndexResponse = getIndexRequestBuilder.execute().actionGet();
+ Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
+ logger.info("{} indices", getIndexResponse.getIndices().length);
+ List<String> candidateIndices = new ArrayList<>();
+ for (String s : getIndexResponse.getIndices()) {
+ Matcher m = pattern.matcher(s);
+ if (m.matches() && index.equals(m.group(1)) && !s.equals(fullIndexName)) {
+ candidateIndices.add(s);
+ }
+ }
+ if (candidateIndices.isEmpty()) {
+ return EMPTY_INDEX_PRUNE_RESULT;
+ }
+ if (mintokeep > 0 && candidateIndices.size() <= mintokeep) {
+ return new NothingToDoPruneResult(candidateIndices, Collections.emptyList());
+ }
+ List<String> indicesToDelete = new ArrayList<>();
+ Matcher m1 = pattern.matcher(fullIndexName);
+ if (m1.matches()) {
+ Integer i1 = Integer.parseInt(m1.group(2));
+ for (String s : candidateIndices) {
+ Matcher m2 = pattern.matcher(s);
+ if (m2.matches()) {
+ Integer i2 = Integer.parseInt(m2.group(2));
+ int kept = candidateIndices.size() - indicesToDelete.size();
+ if ((delta == 0 || (delta > 0 && i1 - i2 > delta)) && mintokeep <= kept) {
+ indicesToDelete.add(s);
+ }
+ }
+ }
+ }
+ if (indicesToDelete.isEmpty()) {
+ return new NothingToDoPruneResult(candidateIndices, indicesToDelete);
+ }
+ String[] s = new String[indicesToDelete.size()];
+ DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest()
+ .indices(indicesToDelete.toArray(s));
+ DeleteIndexResponse response = client.execute(DeleteIndexAction.INSTANCE, deleteIndexRequest).actionGet();
+ return new SuccessPruneResult(candidateIndices, indicesToDelete, response);
+ }
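+
+ // Worked example (hypothetical names): with fullIndexName "items6", candidates "items1".."items5",
+ // delta = 2 and mintokeep = 2, the loop deletes "items1", "items2" and "items3" (numeric suffix
+ // difference greater than 2 while at least 2 candidates remain) and keeps "items4" and "items5".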
+
+ @Override
+ public Long mostRecentDocument(String index, String timestampfieldname) {
+ ensureActive();
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE);
+ SortBuilder<?> sort = SortBuilders.fieldSort(timestampfieldname).order(SortOrder.DESC);
+ SearchResponse searchResponse = searchRequestBuilder.setIndices(index)
+ .addStoredField(timestampfieldname)
+ .setSize(1)
+ .addSort(sort)
+ .execute().actionGet();
+ if (searchResponse.getHits().getHits().length == 1) {
+ SearchHit hit = searchResponse.getHits().getHits()[0];
+ if (hit.getFields().get(timestampfieldname) != null) {
+ return hit.getFields().get(timestampfieldname).getValue();
+ } else {
+ return 0L;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public boolean forceMerge(IndexDefinition indexDefinition) {
+ if (indexDefinition.hasForceMerge()) {
+ return forceMerge(indexDefinition.getFullIndexName(), indexDefinition.getMaxWaitTime(),
+ indexDefinition.getMaxWaitTimeUnit());
+ }
+ return false;
+ }
+
+ @Override
+ public boolean forceMerge(String index, long maxWaitTime, TimeUnit timeUnit) {
+ TimeValue timeout = toTimeValue(maxWaitTime, timeUnit);
+ ForceMergeRequest forceMergeRequest = new ForceMergeRequest();
+ forceMergeRequest.indices(index);
+ try {
+ client.execute(ForceMergeAction.INSTANCE, forceMergeRequest).get(timeout.getMillis(), TimeUnit.MILLISECONDS);
+ return true;
+ } catch (TimeoutException e) {
+ logger.error("timeout");
+ } catch (ExecutionException e) {
+ logger.error(e.getMessage(), e);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ logger.error(e.getMessage(), e);
+ }
+ return false;
+ }
+
+ @Override
+ public IndexDefinition buildIndexDefinitionFromSettings(String index, Settings settings)
+ throws IOException {
+ boolean isEnabled = settings.getAsBoolean("enabled", !(client instanceof MockExtendedClient));
+ String indexName = settings.get("name", index);
+ String fullIndexName;
+ String dateTimePattern = settings.get("dateTimePattern");
+ if (dateTimePattern != null) {
+ // check if index name with current date already exists, resolve to it
+ fullIndexName = resolveAlias(indexName + DateTimeFormatter.ofPattern(dateTimePattern)
+ .withZone(ZoneId.systemDefault()) // not GMT
+ .format(LocalDate.now()));
+ } else {
+ // check if index name already exists, resolve to it
+ fullIndexName = resolveMostRecentIndex(indexName);
+ }
+ IndexRetention indexRetention = new DefaultIndexRetention()
+ .setMinToKeep(settings.getAsInt("retention.mintokeep", 0))
+ .setDelta(settings.getAsInt("retention.delta", 0));
+ return new DefaultIndexDefinition()
+ .setEnabled(isEnabled)
+ .setIndex(indexName)
+ .setFullIndexName(fullIndexName)
+ .setSettingsUrl(settings.get("settings"))
+ .setMappingsUrl(settings.get("mapping"))
+ .setDateTimePattern(dateTimePattern)
+ .setIgnoreErrors(settings.getAsBoolean("skiperrors", false))
+ .setShift(settings.getAsBoolean("shift", true))
+ .setReplicaLevel(settings.getAsInt("replica", 0))
+ .setMaxWaitTime(settings.getAsLong("timeout", 30L), TimeUnit.SECONDS)
+ .setRetention(indexRetention)
+ .setStartRefreshInterval(settings.getAsLong("bulk.startrefreshinterval", -1L))
+ .setStopRefreshInterval(settings.getAsLong("bulk.stoprefreshinterval", -1L));
+ }
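+
+ // Illustrative sketch of the settings keys consumed above; the values are example
+ // assumptions, not defaults shipped with the library.
+ //
+ // Settings indexSettings = Settings.builder()
+ //         .put("name", "items")
+ //         .put("shift", true)
+ //         .put("replica", 1)
+ //         .put("timeout", 60L)
+ //         .put("retention.delta", 2)
+ //         .put("retention.mintokeep", 2)
+ //         .build();
+ // IndexDefinition definition = client.buildIndexDefinitionFromSettings("items", indexSettings);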
+
+ @Override
+ public void updateIndexSetting(String index, String key, Object value, long timeout, TimeUnit timeUnit) throws IOException {
+ ensureActive();
+ if (index == null) {
+ throw new IOException("no index name given");
+ }
+ if (key == null) {
+ throw new IOException("no key given");
+ }
+ if (value == null) {
+ throw new IOException("no value given");
+ }
+ Settings.Builder updateSettingsBuilder = Settings.builder();
+ updateSettingsBuilder.put(key, value.toString());
+ UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(index)
+ .settings(updateSettingsBuilder).timeout(toTimeValue(timeout, timeUnit));
+ client.execute(UpdateSettingsAction.INSTANCE, updateSettingsRequest).actionGet();
+ }
+
+ private void ensureActive() {
+ if (this instanceof MockExtendedClient) {
+ return;
+ }
+ if (client == null) {
+ throw new IllegalStateException("no client");
+ }
+ }
+
+ private void ensureIndexGiven(String index) {
+ if (index == null) {
+ throw new IllegalArgumentException("no index given");
+ }
+ }
+
+ private Map<String, String> getFilters(GetAliasesResponse getAliasesResponse) {
+ Map<String, String> result = new HashMap<>();
+ for (ObjectObjectCursor<String, List<AliasMetaData>> object : getAliasesResponse.getAliases()) {
+ List<AliasMetaData> aliasMetaDataList = object.value;
+ for (AliasMetaData aliasMetaData : aliasMetaDataList) {
+ if (aliasMetaData.filteringRequired()) {
+ result.put(aliasMetaData.alias(),
+ new String(aliasMetaData.getFilter().uncompressed(), StandardCharsets.UTF_8));
+ } else {
+ result.put(aliasMetaData.alias(), null);
+ }
+ }
+ }
+ return result;
+ }
+
+ public void checkMapping(String index) {
+ ensureActive();
+ GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices(index);
+ GetMappingsResponse getMappingsResponse = client.execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet();
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> map = getMappingsResponse.getMappings();
+ map.keys().forEach((Consumer<ObjectCursor<String>>) stringObjectCursor -> {
+ ImmutableOpenMap<String, MappingMetaData> mappings = map.get(stringObjectCursor.value);
+ for (ObjectObjectCursor<String, MappingMetaData> cursor : mappings) {
+ String mappingName = cursor.key;
+ MappingMetaData mappingMetaData = cursor.value;
+ checkMapping(index, mappingName, mappingMetaData);
+ }
+ });
+ }
+
+ private void checkMapping(String index, String type, MappingMetaData mappingMetaData) {
+ try {
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE);
+ SearchResponse searchResponse = searchRequestBuilder.setSize(0)
+ .setIndices(index)
+ .setTypes(type)
+ .setQuery(QueryBuilders.matchAllQuery())
+ .execute()
+ .actionGet();
+ long total = searchResponse.getHits().getTotalHits();
+ if (total > 0L) {
+ Map<String, Long> fields = new TreeMap<>();
+ Map<String, Object> root = mappingMetaData.getSourceAsMap();
+ checkMapping(index, type, "", "", root, fields);
+ AtomicInteger empty = new AtomicInteger();
+ Map<String, Long> map = sortByValue(fields);
+ map.forEach((key, value) -> {
+ logger.info("{} {} {}",
+ key,
+ value,
+ (double) value * 100 / total);
+ if (value == 0) {
+ empty.incrementAndGet();
+ }
+ });
+ logger.info("index={} type={} numfields={} fieldsnotused={}",
+ index, type, map.size(), empty.get());
+ }
+ } catch (Exception e) {
+ logger.error(e.getMessage(), e);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private void checkMapping(String index, String type,
+ String pathDef, String fieldName, Map<String, Object> map,
+ Map<String, Long> fields) {
+ String path = pathDef;
+ if (!path.isEmpty() && !path.endsWith(".")) {
+ path = path + ".";
+ }
+ if (!"properties".equals(fieldName)) {
+ path = path + fieldName;
+ }
+ if (map.containsKey("index")) {
+ String mode = (String) map.get("index");
+ if ("no".equals(mode)) {
+ return;
+ }
+ }
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ String key = entry.getKey();
+ Object o = entry.getValue();
+ if (o instanceof Map) {
+ Map<String, Object> child = (Map<String, Object>) o;
+ o = map.get("type");
+ String fieldType = o instanceof String ? o.toString() : null;
+ // do not recurse into our custom field mapper
+ if (!"standardnumber".equals(fieldType) && !"ref".equals(fieldType)) {
+ checkMapping(index, type, path, key, child, fields);
+ }
+ } else if ("type".equals(key)) {
+ QueryBuilder filterBuilder = QueryBuilders.existsQuery(path);
+ QueryBuilder queryBuilder = QueryBuilders.constantScoreQuery(filterBuilder);
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE);
+ SearchResponse searchResponse = searchRequestBuilder.setSize(0)
+ .setIndices(index)
+ .setTypes(type)
+ .setQuery(queryBuilder)
+ .execute()
+ .actionGet();
+ fields.put(path, searchResponse.getHits().getTotalHits());
+ }
+ }
+ }
+
+ private static <K, V extends Comparable<? super V>> Map<K, V> sortByValue(Map<K, V> map) {
+ Map<K, V> result = new LinkedHashMap<>();
+ map.entrySet().stream().sorted(Comparator.comparing(Map.Entry::getValue))
+ .forEachOrdered(e -> result.put(e.getKey(), e.getValue()));
+ return result;
+ }
+
+ private static TimeValue toTimeValue(long timeValue, TimeUnit timeUnit) {
+ switch (timeUnit) {
+ case DAYS:
+ return TimeValue.timeValueHours(24 * timeValue);
+ case HOURS:
+ return TimeValue.timeValueHours(timeValue);
+ case MINUTES:
+ return TimeValue.timeValueMinutes(timeValue);
+ case SECONDS:
+ return TimeValue.timeValueSeconds(timeValue);
+ case MILLISECONDS:
+ return TimeValue.timeValueMillis(timeValue);
+ case MICROSECONDS:
+ return TimeValue.timeValueNanos(1000 * timeValue);
+ case NANOSECONDS:
+ return TimeValue.timeValueNanos(timeValue);
+ default:
+ throw new IllegalArgumentException("unknown time unit: " + timeUnit);
+ }
+ }
+
+ private static class SuccessIndexShiftResult implements IndexShiftResult {
+
+ List<String> movedAliases;
+
+ List<String> newAliases;
+
+ SuccessIndexShiftResult(List<String> movedAliases, List<String> newAliases) {
+ this.movedAliases = movedAliases;
+ this.newAliases = newAliases;
+ }
+
+ @Override
+ public List<String> getMovedAliases() {
+ return movedAliases;
+ }
+
+ @Override
+ public List<String> getNewAliases() {
+ return newAliases;
+ }
+ }
+
+ private static class SuccessPruneResult implements IndexPruneResult {
+
+ List<String> candidateIndices;
+
+ List<String> indicesToDelete;
+
+ DeleteIndexResponse response;
+
+ SuccessPruneResult(List<String> candidateIndices, List<String> indicesToDelete,
+ DeleteIndexResponse response) {
+ this.candidateIndices = candidateIndices;
+ this.indicesToDelete = indicesToDelete;
+ this.response = response;
+ }
+
+ @Override
+ public IndexPruneResult.State getState() {
+ return IndexPruneResult.State.SUCCESS;
+ }
+
+ @Override
+ public List<String> getCandidateIndices() {
+ return candidateIndices;
+ }
+
+ @Override
+ public List<String> getDeletedIndices() {
+ return indicesToDelete;
+ }
+
+ @Override
+ public boolean isAcknowledged() {
+ return response.isAcknowledged();
+ }
+ }
+
+ private static class NothingToDoPruneResult implements IndexPruneResult {
+
+ List<String> candidateIndices;
+
+ List<String> indicesToDelete;
+
+ NothingToDoPruneResult(List<String> candidateIndices, List<String> indicesToDelete) {
+ this.candidateIndices = candidateIndices;
+ this.indicesToDelete = indicesToDelete;
+ }
+
+ @Override
+ public IndexPruneResult.State getState() {
+ return IndexPruneResult.State.SUCCESS;
+ }
+
+ @Override
+ public List<String> getCandidateIndices() {
+ return candidateIndices;
+ }
+
+ @Override
+ public List<String> getDeletedIndices() {
+ return indicesToDelete;
+ }
+
+ @Override
+ public boolean isAcknowledged() {
+ return false;
+ }
+ }
+}
diff --git a/elx-common/src/main/java/org/xbib/elx/common/ClientBuilder.java b/elx-common/src/main/java/org/xbib/elx/common/ClientBuilder.java
new file mode 100644
index 0000000..ba9150f
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/ClientBuilder.java
@@ -0,0 +1,102 @@
+package org.xbib.elx.common;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.xbib.elx.api.ExtendedClient;
+import org.xbib.elx.api.ExtendedClientProvider;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.ServiceLoader;
+
+@SuppressWarnings("rawtypes")
+public class ClientBuilder {
+
+ private final ElasticsearchClient client;
+
+ private final Settings.Builder settingsBuilder;
+
+ private Map<Class<? extends ExtendedClientProvider>, ExtendedClientProvider> providerMap;
+
+ private Class<? extends ExtendedClientProvider> provider;
+
+ public ClientBuilder() {
+ this(null);
+ }
+
+ public ClientBuilder(ElasticsearchClient client) {
+ this(client, Thread.currentThread().getContextClassLoader());
+ }
+
+ public ClientBuilder(ElasticsearchClient client, ClassLoader classLoader) {
+ this.client = client;
+ this.settingsBuilder = Settings.builder();
+ settingsBuilder.put("node.name", "elx-client-" + Version.CURRENT);
+ this.providerMap = new HashMap<>();
+ ServiceLoader<ExtendedClientProvider> serviceLoader = ServiceLoader.load(ExtendedClientProvider.class,
+ classLoader != null ? classLoader : Thread.currentThread().getContextClassLoader());
+ for (ExtendedClientProvider provider : serviceLoader) {
+ providerMap.put(provider.getClass(), provider);
+ }
+ }
+
+ public static ClientBuilder builder() {
+ return new ClientBuilder();
+ }
+
+ public static ClientBuilder builder(ElasticsearchClient client) {
+ return new ClientBuilder(client);
+ }
+
+ public ClientBuilder provider(Class<? extends ExtendedClientProvider> provider) {
+ this.provider = provider;
+ return this;
+ }
+
+ public ClientBuilder put(String key, String value) {
+ settingsBuilder.put(key, value);
+ return this;
+ }
+
+ public ClientBuilder put(String key, Integer value) {
+ settingsBuilder.put(key, value);
+ return this;
+ }
+
+ public ClientBuilder put(String key, Long value) {
+ settingsBuilder.put(key, value);
+ return this;
+ }
+
+ public ClientBuilder put(String key, Double value) {
+ settingsBuilder.put(key, value);
+ return this;
+ }
+
+ public ClientBuilder put(String key, ByteSizeValue value) {
+ settingsBuilder.put(key, value);
+ return this;
+ }
+
+ public ClientBuilder put(String key, TimeValue value) {
+ settingsBuilder.put(key, value);
+ return this;
+ }
+
+ public ClientBuilder put(Settings settings) {
+ settingsBuilder.put(settings);
+ return this;
+ }
+
+ @SuppressWarnings("unchecked")
+ public <C extends ExtendedClient> C build() throws IOException {
+ if (provider == null) {
+ throw new IllegalArgumentException("no provider");
+ }
+ return (C) providerMap.get(provider).getExtendedClient().setClient(client).init(settingsBuilder.build());
+ }
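+
+ // Illustrative usage sketch; MyExtendedClientProvider stands for a hypothetical
+ // ExtendedClientProvider implementation registered via META-INF/services.
+ //
+ // ExtendedClient client = ClientBuilder.builder()
+ //         .provider(MyExtendedClientProvider.class)
+ //         .put("cluster.name", "elasticsearch")
+ //         .build();
+ // client.newIndex("test");
+ // client.close();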
+}
diff --git a/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkController.java b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkController.java
new file mode 100644
index 0000000..30d5b52
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkController.java
@@ -0,0 +1,312 @@
+package org.xbib.elx.common;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.xbib.elx.api.BulkController;
+import org.xbib.elx.api.BulkMetric;
+import org.xbib.elx.api.BulkProcessor;
+import org.xbib.elx.api.ExtendedClient;
+import org.xbib.elx.api.IndexDefinition;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class DefaultBulkController implements BulkController {
+
+ private static final Logger logger = LogManager.getLogger(DefaultBulkController.class);
+
+ private final ExtendedClient client;
+
+ private final BulkMetric bulkMetric;
+
+ private final List<String> indexNames;
+
+ private final Map<String, Long> startBulkRefreshIntervals;
+
+ private final Map<String, Long> stopBulkRefreshIntervals;
+
+ private long maxWaitTime;
+
+ private TimeUnit maxWaitTimeUnit;
+
+ private BulkProcessor bulkProcessor;
+
+ private BulkListener bulkListener;
+
+ private AtomicBoolean active;
+
+ public DefaultBulkController(ExtendedClient client, BulkMetric bulkMetric) {
+ this.client = client;
+ this.bulkMetric = bulkMetric;
+ this.indexNames = new ArrayList<>();
+ this.active = new AtomicBoolean(false);
+ this.startBulkRefreshIntervals = new HashMap<>();
+ this.stopBulkRefreshIntervals = new HashMap<>();
+ this.maxWaitTime = 30L;
+ this.maxWaitTimeUnit = TimeUnit.SECONDS;
+ }
+
+ @Override
+ public Throwable getLastBulkError() {
+ return bulkListener.getLastBulkError();
+ }
+
+ @Override
+ public void init(Settings settings) {
+ int maxActionsPerRequest = settings.getAsInt(Parameters.MAX_ACTIONS_PER_REQUEST.name(),
+ Parameters.DEFAULT_MAX_ACTIONS_PER_REQUEST.getNum());
+ int maxConcurrentRequests = settings.getAsInt(Parameters.MAX_CONCURRENT_REQUESTS.name(),
+ Parameters.DEFAULT_MAX_CONCURRENT_REQUESTS.getNum());
+ TimeValue flushIngestInterval = settings.getAsTime(Parameters.FLUSH_INTERVAL.name(),
+ TimeValue.timeValueSeconds(Parameters.DEFAULT_FLUSH_INTERVAL.getNum()));
+ ByteSizeValue maxVolumePerRequest = settings.getAsBytesSize(Parameters.MAX_VOLUME_PER_REQUEST.name(),
+ ByteSizeValue.parseBytesSizeValue(Parameters.DEFAULT_MAX_VOLUME_PER_REQUEST.getString(),
+ "maxVolumePerRequest"));
+ if (logger.isInfoEnabled()) {
+ logger.info("bulk processor up with maxActionsPerRequest = {} maxConcurrentRequests = {} " +
+ "flushIngestInterval = {} maxVolumePerRequest = {}",
+ maxActionsPerRequest, maxConcurrentRequests, flushIngestInterval, maxVolumePerRequest);
+ }
+ this.bulkListener = new BulkListener();
+ DefaultBulkProcessor.Builder builder = DefaultBulkProcessor.builder((Client) client.getClient(), bulkListener)
+ .setBulkActions(maxActionsPerRequest)
+ .setConcurrentRequests(maxConcurrentRequests)
+ .setFlushInterval(flushIngestInterval)
+ .setBulkSize(maxVolumePerRequest);
+ this.bulkProcessor = builder.build();
+ this.active.set(true);
+ }
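+
+ // Illustrative sketch: the keys read above are the Parameters enum constant names, so the
+ // bulk behaviour could be tuned like this (the values are example assumptions):
+ //
+ // Settings settings = Settings.builder()
+ //         .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), 5000)
+ //         .put(Parameters.MAX_CONCURRENT_REQUESTS.name(), 2)
+ //         .put(Parameters.FLUSH_INTERVAL.name(), "30s")
+ //         .build();
+ // bulkController.init(settings);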
+
+ @Override
+ public void startBulkMode(IndexDefinition indexDefinition) throws IOException {
+ startBulkMode(indexDefinition.getFullIndexName(), indexDefinition.getStartRefreshInterval(),
+ indexDefinition.getStopRefreshInterval());
+ }
+
+ @Override
+ public void startBulkMode(String indexName,
+ long startRefreshIntervalInSeconds,
+ long stopRefreshIntervalInSeconds) throws IOException {
+ if (!indexNames.contains(indexName)) {
+ indexNames.add(indexName);
+ startBulkRefreshIntervals.put(indexName, startRefreshIntervalInSeconds);
+ stopBulkRefreshIntervals.put(indexName, stopRefreshIntervalInSeconds);
+ if (startRefreshIntervalInSeconds != 0L) {
+ client.updateIndexSetting(indexName, "refresh_interval", startRefreshIntervalInSeconds + "s",
+ 30L, TimeUnit.SECONDS);
+ }
+ }
+ }
+
+ @Override
+ public void index(IndexRequest indexRequest) {
+ if (!active.get()) {
+ throw new IllegalStateException("inactive");
+ }
+ try {
+ if (bulkMetric != null) {
+ bulkMetric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id());
+ }
+ bulkProcessor.add(indexRequest);
+ } catch (Exception e) {
+ bulkListener.lastBulkError = e;
+ active.set(false);
+ if (logger.isErrorEnabled()) {
+ logger.error("bulk add of index failed: " + e.getMessage(), e);
+ }
+ }
+ }
+
+ @Override
+ public void delete(DeleteRequest deleteRequest) {
+ if (!active.get()) {
+ throw new IllegalStateException("inactive");
+ }
+ try {
+ if (bulkMetric != null) {
+ bulkMetric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
+ }
+ bulkProcessor.add(deleteRequest);
+ } catch (Exception e) {
+ bulkListener.lastBulkError = e;
+ active.set(false);
+ if (logger.isErrorEnabled()) {
+ logger.error("bulk add of delete failed: " + e.getMessage(), e);
+ }
+ }
+ }
+
+ @Override
+ public void update(UpdateRequest updateRequest) {
+ if (!active.get()) {
+ throw new IllegalStateException("inactive");
+ }
+ try {
+ if (bulkMetric != null) {
+ bulkMetric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id());
+ }
+ bulkProcessor.add(updateRequest);
+ } catch (Exception e) {
+ bulkListener.lastBulkError = e;
+ active.set(false);
+ if (logger.isErrorEnabled()) {
+ logger.error("bulk add of update failed: " + e.getMessage(), e);
+ }
+ }
+ }
+
+ @Override
+ public boolean waitForResponses(long timeout, TimeUnit timeUnit) {
+ try {
+ return bulkProcessor.awaitFlush(timeout, timeUnit);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ logger.error("interrupted");
+ return false;
+ }
+ }
+
+ @Override
+ public void stopBulkMode(IndexDefinition indexDefinition) throws IOException {
+ stopBulkMode(indexDefinition.getFullIndexName(),
+ indexDefinition.getMaxWaitTime(), indexDefinition.getMaxWaitTimeUnit());
+ }
+
+ @Override
+ public void stopBulkMode(String index, long timeout, TimeUnit timeUnit) throws IOException {
+ flush();
+ if (waitForResponses(timeout, timeUnit)) {
+ if (indexNames.contains(index)) {
+ Long secs = stopBulkRefreshIntervals.get(index);
+ if (secs != null && secs != 0L) {
+ client.updateIndexSetting(index, "refresh_interval", secs + "s",
+ 30L, TimeUnit.SECONDS);
+ }
+ indexNames.remove(index);
+ }
+ }
+ }
+
+ @Override
+ public void flush() throws IOException {
+ if (bulkProcessor != null) {
+ bulkProcessor.flush();
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ flush();
+ if (client.waitForResponses(maxWaitTime, maxWaitTimeUnit)) {
+ for (String index : indexNames) {
+ Long secs = stopBulkRefreshIntervals.get(index);
+ if (secs != null && secs != 0L) {
+ client.updateIndexSetting(index, "refresh_interval", secs + "s",
+ 30L, TimeUnit.SECONDS);
+ }
+ }
+ indexNames.clear();
+ }
+ if (bulkProcessor != null) {
+ bulkProcessor.close();
+ }
+ }
+
+ private class BulkListener implements DefaultBulkProcessor.Listener {
+
+ private final Logger logger = LogManager.getLogger("org.xbib.elx.BulkProcessor.Listener");
+
+ private Throwable lastBulkError = null;
+
+ @Override
+ public void beforeBulk(long executionId, BulkRequest request) {
+ long l = 0;
+ if (bulkMetric != null) {
+ l = bulkMetric.getCurrentIngest().getCount();
+ bulkMetric.getCurrentIngest().inc();
+ int n = request.numberOfActions();
+ bulkMetric.getSubmitted().inc(n);
+ bulkMetric.getCurrentIngestNumDocs().inc(n);
+ bulkMetric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes());
+ }
+ if (logger.isDebugEnabled()) {
+ logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]",
+ executionId,
+ request.numberOfActions(),
+ request.estimatedSizeInBytes(),
+ l);
+ }
+ }
+
+ @Override
+ public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
+ long l = 0;
+ if (bulkMetric != null) {
+ l = bulkMetric.getCurrentIngest().getCount();
+ bulkMetric.getCurrentIngest().dec();
+ bulkMetric.getSucceeded().inc(response.getItems().length);
+ }
+ int n = 0;
+ for (BulkItemResponse itemResponse : response.getItems()) {
+ if (bulkMetric != null) {
+ bulkMetric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId());
+ }
+ if (itemResponse.isFailed()) {
+ n++;
+ if (bulkMetric != null) {
+ bulkMetric.getSucceeded().dec(1);
+ bulkMetric.getFailed().inc(1);
+ }
+ }
+ }
+ if (bulkMetric != null && logger.isDebugEnabled()) {
+ logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests",
+ executionId,
+ bulkMetric.getSucceeded().getCount(),
+ bulkMetric.getFailed().getCount(),
+ response.getTook().millis(),
+ l);
+ }
+ if (n > 0) {
+ if (logger.isErrorEnabled()) {
+ logger.error("bulk [{}] failed with {} failed items, failure message = {}",
+ executionId, n, response.buildFailureMessage());
+ }
+ } else {
+ if (bulkMetric != null) {
+ bulkMetric.getCurrentIngestNumDocs().dec(response.getItems().length);
+ }
+ }
+ }
+
+ @Override
+ public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
+ if (bulkMetric != null) {
+ bulkMetric.getCurrentIngest().dec();
+ }
+ lastBulkError = failure;
+ active.set(false);
+ if (logger.isErrorEnabled()) {
+ logger.error("after bulk [" + executionId + "] error", failure);
+ }
+ }
+
+ Throwable getLastBulkError() {
+ return lastBulkError;
+ }
+ }
+}
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkMetric.java b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkMetric.java
similarity index 74%
rename from common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkMetric.java
rename to elx-common/src/main/java/org/xbib/elx/common/DefaultBulkMetric.java
index 1a181cb..a956c4d 100644
--- a/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkMetric.java
+++ b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkMetric.java
@@ -1,16 +1,15 @@
-package org.xbib.elasticsearch.client;
+package org.xbib.elx.common;
+import org.elasticsearch.common.settings.Settings;
+import org.xbib.elx.api.BulkMetric;
import org.xbib.metrics.Count;
import org.xbib.metrics.CountMetric;
import org.xbib.metrics.Meter;
import org.xbib.metrics.Metered;
import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-public class SimpleBulkMetric implements BulkMetric {
-
- private final ScheduledExecutorService executorService;
+public class DefaultBulkMetric implements BulkMetric {
private final Meter totalIngest;
@@ -30,13 +29,8 @@ public class SimpleBulkMetric implements BulkMetric {
private Long stopped;
- public SimpleBulkMetric() {
- this(Executors.newSingleThreadScheduledExecutor());
- }
-
- public SimpleBulkMetric(ScheduledExecutorService executorService) {
- this.executorService = executorService;
- totalIngest = new Meter(executorService);
+ public DefaultBulkMetric() {
+ totalIngest = new Meter(Executors.newSingleThreadScheduledExecutor());
totalIngestSizeInBytes = new CountMetric();
currentIngest = new CountMetric();
currentIngestNumDocs = new CountMetric();
@@ -45,6 +39,11 @@ public class SimpleBulkMetric implements BulkMetric {
failed = new CountMetric();
}
+ @Override
+ public void init(Settings settings) {
+ start();
+ }
+
@Override
public Metered getTotalIngest() {
return totalIngest;
@@ -80,6 +79,11 @@ public class SimpleBulkMetric implements BulkMetric {
return failed;
}
+ @Override
+ public long elapsed() {
+ return started != null ? ((stopped != null ? stopped : System.nanoTime()) - started) : -1L;
+ }
+
@Override
public void start() {
this.started = System.nanoTime();
@@ -90,12 +94,11 @@ public class SimpleBulkMetric implements BulkMetric {
public void stop() {
this.stopped = System.nanoTime();
totalIngest.stop();
- executorService.shutdownNow();
}
@Override
- public long elapsed() {
- return (stopped != null ? stopped : System.nanoTime()) - started;
+ public void close() {
+ stop();
+ totalIngest.shutdown();
}
-
}
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/BulkProcessor.java b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkProcessor.java
similarity index 64%
rename from common/src/main/java/org/xbib/elasticsearch/client/BulkProcessor.java
rename to elx-common/src/main/java/org/xbib/elx/common/DefaultBulkProcessor.java
index 59ea5b2..99a72ec 100644
--- a/common/src/main/java/org/xbib/elasticsearch/client/BulkProcessor.java
+++ b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkProcessor.java
@@ -1,19 +1,21 @@
-package org.xbib.elasticsearch.client;
+package org.xbib.elx.common;
import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
-import org.elasticsearch.client.ElasticsearchClient;
-import org.elasticsearch.common.Nullable;
+import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.xbib.elx.api.BulkProcessor;
-import java.io.Closeable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
@@ -27,36 +29,38 @@ import java.util.concurrent.atomic.AtomicLong;
* requests allowed to be executed in parallel.
* In order to create a new bulk processor, use the {@link Builder}.
*/
-public class BulkProcessor implements Closeable {
+public class DefaultBulkProcessor implements BulkProcessor {
- private final int maximumBulkActionsPerRequest;
+ private final int bulkActions;
- private final long maximumBulkRequestByteSize;
+ private final long bulkSize;
private final ScheduledThreadPoolExecutor scheduler;
private final ScheduledFuture<?> scheduledFuture;
- private final AtomicLong executionIdGen = new AtomicLong();
+ private final AtomicLong executionIdGen;
- private final BulkExecutor bulkExecutor;
+ private final BulkRequestHandler bulkRequestHandler;
private BulkRequest bulkRequest;
- private volatile boolean closed = false;
+ private volatile boolean closed;
- private BulkProcessor(ElasticsearchClient client, Listener listener, int maximumConcurrentBulkRequests,
- int maximumBulkActionsPerRequest, ByteSizeValue maximumBulkRequestByteSize,
- @Nullable TimeValue flushInterval) {
- this.maximumBulkActionsPerRequest = maximumBulkActionsPerRequest;
- this.maximumBulkRequestByteSize = maximumBulkRequestByteSize.getBytes();
+ private DefaultBulkProcessor(Client client, Listener listener, String name, int concurrentRequests,
+ int bulkActions, ByteSizeValue bulkSize, TimeValue flushInterval) {
+ this.executionIdGen = new AtomicLong();
+ this.closed = false;
+ this.bulkActions = bulkActions;
+ this.bulkSize = bulkSize.getBytes();
this.bulkRequest = new BulkRequest();
- this.bulkExecutor = maximumConcurrentBulkRequests == 0 ?
- new SyncBulkExecutor(client, listener) :
- new AsyncBulkExecutor(client, listener, maximumConcurrentBulkRequests);
-
+ this.bulkRequestHandler = concurrentRequests == 0 ?
+ new SyncBulkRequestHandler(client, listener) :
+ new AsyncBulkRequestHandler(client, listener, concurrentRequests);
if (flushInterval != null) {
- this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1);
+ this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
+ EsExecutors.daemonThreadFactory(client.settings(),
+ name != null ? "[" + name + "]" : "" + "bulk_processor"));
this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(),
@@ -67,7 +71,7 @@ public class BulkProcessor implements Closeable {
}
}
- public static Builder builder(ElasticsearchClient client, Listener listener) {
+ public static Builder builder(Client client, Listener listener) {
if (client == null) {
throw new NullPointerException("The client you specified while building a BulkProcessor is null");
}
@@ -75,20 +79,28 @@ public class BulkProcessor implements Closeable {
}
/**
- * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are flushed.
+ * Flushes any pending actions and waits for all outstanding bulk responses.
+ * @param timeout the timeout value
+ * @param unit the timeout unit
+ * @return true if the flush was successful, false if the timeout elapsed
+ * @throws InterruptedException if interrupted while waiting
*/
@Override
- public void close() {
- try {
- awaitClose(0, TimeUnit.NANOSECONDS);
- } catch (InterruptedException exc) {
- Thread.currentThread().interrupt();
+ public synchronized boolean awaitFlush(long timeout, TimeUnit unit) throws InterruptedException {
+ if (closed) {
+ return true;
}
+ // flush
+ if (bulkRequest.numberOfActions() > 0) {
+ execute();
+ }
+ // wait for all bulk responses
+ return this.bulkRequestHandler.close(timeout, unit);
}
/**
- * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are
- * flushed.
+ * Closes the processor. Any remaining bulk actions are flushed before the processor is closed. This method can
+ * only be called once, as the last action on a bulk processor.
*
* If concurrent requests are not enabled, returns {@code true} immediately.
* If concurrent requests are enabled, waits for up to the specified timeout for all bulk requests to complete then
@@ -101,98 +113,50 @@ public class BulkProcessor implements Closeable {
* bulk requests completed
* @throws InterruptedException If the current thread is interrupted
*/
+ @Override
public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
if (closed) {
return true;
}
closed = true;
if (this.scheduledFuture != null) {
- this.scheduledFuture.cancel(false);
+ FutureUtils.cancel(this.scheduledFuture);
this.scheduler.shutdown();
}
if (bulkRequest.numberOfActions() > 0) {
execute();
}
- return bulkExecutor.awaitClose(timeout, unit);
+ return this.bulkRequestHandler.close(timeout, unit);
}
/**
- * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest}
- * (for example, if no id is provided, one will be generated, or usage of the create flag).
+ * Adds either a delete or an index request.
*
* @param request request
* @return this bulk processor
*/
- public synchronized BulkProcessor add(IndexRequest request) {
- if (request == null) {
- return this;
- }
- ensureOpen();
- bulkRequest.add(request);
- if (isOverTheLimit()) {
- execute();
- }
- return this;
+ @Override
+ public DefaultBulkProcessor add(ActionRequest request) {
+ return add(request, null);
}
/**
- * Adds an {@link DeleteRequest} to the list of actions to execute.
+ * Adds either a delete or an index request with a payload.
*
* @param request request
+ * @param payload payload
* @return this bulk processor
*/
- public synchronized BulkProcessor add(DeleteRequest request) {
- if (request == null) {
- return this;
- }
- ensureOpen();
- bulkRequest.add(request);
- if (isOverTheLimit()) {
- execute();
- }
+ @Override
+ public DefaultBulkProcessor add(ActionRequest request, Object payload) {
+ internalAdd(request, payload);
return this;
}
- /**
- * Adds an {@link UpdateRequest} to the list of actions to execute.
- *
- * @param request request
- * @return his bulk processor
- */
- public synchronized BulkProcessor add(UpdateRequest request) {
- if (request == null) {
- return this;
- }
- ensureOpen();
- bulkRequest.add(request);
- if (isOverTheLimit()) {
- execute();
- }
- return this;
- }
-
- private void ensureOpen() {
- if (closed) {
- throw new IllegalStateException("bulk process already closed");
- }
- }
-
- private boolean isOverTheLimit() {
- final int count = bulkRequest.numberOfActions();
- return count > 0 &&
- (maximumBulkActionsPerRequest != -1 && count >= maximumBulkActionsPerRequest) ||
- (maximumBulkRequestByteSize != -1 && bulkRequest.estimatedSizeInBytes() >= maximumBulkRequestByteSize);
- }
-
- private void execute() {
- final BulkRequest myBulkRequest = this.bulkRequest;
- bulkExecutor.execute(myBulkRequest, executionIdGen.incrementAndGet());
- this.bulkRequest = new BulkRequest();
- }
-
/**
* Flush pending delete or index requests.
*/
+ @Override
public synchronized void flush() {
ensureOpen();
if (bulkRequest.numberOfActions() > 0) {
@@ -201,39 +165,58 @@ public class BulkProcessor implements Closeable {
}
/**
- * A listener for the execution.
+ * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are flushed.
*/
- public interface Listener {
+ @Override
+ public void close() {
+ try {
+ // 0 = immediate close
+ awaitClose(0, TimeUnit.NANOSECONDS);
+ } catch (InterruptedException exc) {
+ Thread.currentThread().interrupt();
+ }
+ }
- /**
- * Callback before the bulk is executed.
- *
- * @param executionId execution ID
- * @param request request
- */
- void beforeBulk(long executionId, BulkRequest request);
+ private void ensureOpen() {
+ if (closed) {
+ throw new IllegalStateException("bulk processor already closed");
+ }
+ }
- /**
- * Callback after a successful execution of bulk request.
- *
- * @param executionId execution ID
- * @param request request
- * @param response response
- */
- void afterBulk(long executionId, BulkRequest request, BulkResponse response);
+ private synchronized void internalAdd(ActionRequest request, Object payload) {
+ ensureOpen();
+ if (request instanceof IndexRequest) {
+ bulkRequest.add((IndexRequest) request, payload);
+ } else if (request instanceof DeleteRequest) {
+ bulkRequest.add((DeleteRequest) request, payload);
+ } else if (request instanceof UpdateRequest) {
+ bulkRequest.add((UpdateRequest) request, payload);
+ } else {
+ throw new UnsupportedOperationException();
+ }
+ executeIfNeeded();
+ }
- /**
- * Callback after a failed execution of bulk request.
- *
- * Note that in case an instance of <code>InterruptedException</code> is passed, which means that request
- * processing has been
- * cancelled externally, the thread's interruption status has been restored prior to calling this method.
- *
- * @param executionId execution ID
- * @param request request
- * @param failure failure
- */
- void afterBulk(long executionId, BulkRequest request, Throwable failure);
+ private void executeIfNeeded() {
+ ensureOpen();
+ if (!isOverTheLimit()) {
+ return;
+ }
+ execute();
+ }
+
+ private void execute() {
+ final BulkRequest myBulkRequest = this.bulkRequest;
+ final long executionId = executionIdGen.incrementAndGet();
+ this.bulkRequest = new BulkRequest();
+ this.bulkRequestHandler.execute(myBulkRequest, executionId);
+ }
+
+ private boolean isOverTheLimit() {
+ return bulkActions != -1 &&
+ bulkRequest.numberOfActions() >= bulkActions ||
+ bulkSize != -1 &&
+ bulkRequest.estimatedSizeInBytes() >= bulkSize;
}
/**
@@ -241,11 +224,18 @@ public class BulkProcessor implements Closeable {
*/
public static class Builder {
- private final ElasticsearchClient client;
+ private final Client client;
+
private final Listener listener;
+
+ private String name;
+
private int concurrentRequests = 1;
+
private int bulkActions = 1000;
- private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
+
+ private ByteSizeValue bulkSize = new ByteSizeValue(10, ByteSizeUnit.MB);
+
private TimeValue flushInterval = null;
/**
@@ -255,11 +245,22 @@ public class BulkProcessor implements Closeable {
* @param client the client
* @param listener the listener
*/
- Builder(ElasticsearchClient client, Listener listener) {
+ Builder(Client client, Listener listener) {
this.client = client;
this.listener = listener;
}
+ /**
+ * Sets an optional name to identify this bulk processor.
+ *
+ * @param name name
+ * @return this builder
+ */
+ public Builder setName(String name) {
+ this.name = name;
+ return this;
+ }
+
/**
* Sets the number of concurrent requests allowed to be executed. A value of 0 means that only a single
* request will be allowed to be executed. A value of 1 means 1 concurrent request is allowed to be executed
@@ -277,7 +278,7 @@ public class BulkProcessor implements Closeable {
* Sets when to flush a new bulk request based on the number of actions currently added. Defaults to
* {@code 1000}. Can be set to {@code -1} to disable it.
*
- * @param bulkActions mbulk actions
+ * @param bulkActions bulk actions
* @return this builder
*/
public Builder setBulkActions(int bulkActions) {
@@ -299,7 +300,7 @@ public class BulkProcessor implements Closeable {
/**
* Sets a flush interval flushing *any* bulk actions pending if the interval passes. Defaults to not set.
- * Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(ByteSizeValue)}
+ * Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(org.elasticsearch.common.unit.ByteSizeValue)}
* can be set to {@code -1} with the flush interval set allowing for complete async processing of bulk actions.
*
* @param flushInterval flush interval
@@ -315,8 +316,8 @@ public class BulkProcessor implements Closeable {
*
* @return a bulk processor
*/
- public BulkProcessor build() {
- return new BulkProcessor(client, listener, concurrentRequests, bulkActions, bulkSize, flushInterval);
+ public DefaultBulkProcessor build() {
+ return new DefaultBulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
}
}
@@ -324,32 +325,25 @@ public class BulkProcessor implements Closeable {
@Override
public void run() {
- synchronized (BulkProcessor.this) {
+ synchronized (DefaultBulkProcessor.this) {
if (closed) {
return;
}
- if (bulkRequest.numberOfActions() > 0) {
- execute();
+ if (bulkRequest.numberOfActions() == 0) {
+ return;
}
+ execute();
}
}
}
- interface BulkExecutor {
+ private static class SyncBulkRequestHandler implements BulkRequestHandler {
- void execute(BulkRequest bulkRequest, long executionId);
+ private final Client client;
- boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException;
+ private final DefaultBulkProcessor.Listener listener;
- }
-
- private static class SyncBulkExecutor implements BulkExecutor {
-
- private final ElasticsearchClient client;
-
- private final BulkProcessor.Listener listener;
-
- SyncBulkExecutor(ElasticsearchClient client, BulkProcessor.Listener listener) {
+ SyncBulkRequestHandler(Client client, DefaultBulkProcessor.Listener listener) {
this.client = client;
this.listener = listener;
}
@@ -370,22 +364,22 @@ public class BulkProcessor implements Closeable {
}
@Override
- public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
+ public boolean close(long timeout, TimeUnit unit) {
return true;
}
}
- private static class AsyncBulkExecutor implements BulkExecutor {
+ private static class AsyncBulkRequestHandler implements BulkRequestHandler {
- private final ElasticsearchClient client;
+ private final Client client;
- private final BulkProcessor.Listener listener;
+ private final DefaultBulkProcessor.Listener listener;
private final Semaphore semaphore;
private final int concurrentRequests;
- private AsyncBulkExecutor(ElasticsearchClient client, BulkProcessor.Listener listener, int concurrentRequests) {
+ private AsyncBulkRequestHandler(Client client, DefaultBulkProcessor.Listener listener, int concurrentRequests) {
this.client = client;
this.listener = listener;
this.concurrentRequests = concurrentRequests;
@@ -400,7 +394,7 @@ public class BulkProcessor implements Closeable {
listener.beforeBulk(executionId, bulkRequest);
semaphore.acquire();
acquired = true;
- client.execute(BulkAction.INSTANCE, bulkRequest, new ActionListener() {
+ client.execute(BulkAction.INSTANCE, bulkRequest, new ActionListener<>() {
@Override
public void onResponse(BulkResponse response) {
try {
@@ -433,9 +427,9 @@ public class BulkProcessor implements Closeable {
}
@Override
- public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
- if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
- semaphore.release(this.concurrentRequests);
+ public boolean close(long timeout, TimeUnit unit) throws InterruptedException {
+ if (semaphore.tryAcquire(concurrentRequests, timeout, unit)) {
+ semaphore.release(concurrentRequests);
return true;
}
return false;
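
For orientation, a usage sketch of the renamed processor and its builder (fragment, imports elided). It assumes a static DefaultBulkProcessor.builder(Client, Listener) entry point analogous to the former BulkProcessor.builder, and "client" and "listener" are placeholders:

    // Sketch only: builder(...) and add(...) are assumed to mirror the former BulkProcessor API.
    DefaultBulkProcessor bulkProcessor = DefaultBulkProcessor.builder(client, listener)
            .setName("my-bulk")
            .setBulkActions(1000)                                // flush after 1000 actions
            .setBulkSize(new ByteSizeValue(10, ByteSizeUnit.MB)) // or after 10 MB of payload
            .setFlushInterval(TimeValue.timeValueSeconds(30))    // or every 30 seconds at the latest
            .setConcurrentRequests(1)                            // one async bulk in flight besides the caller
            .build();
    bulkProcessor.add(new IndexRequest("my-index", "doc", "1")
            .source("{\"field\":\"value\"}", XContentType.JSON));
    bulkProcessor.close();                                       // Closeable: flushes pending actions
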
diff --git a/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexDefinition.java b/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexDefinition.java
new file mode 100644
index 0000000..52127e1
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexDefinition.java
@@ -0,0 +1,214 @@
+package org.xbib.elx.common;
+
+import org.xbib.elx.api.IndexDefinition;
+import org.xbib.elx.api.IndexRetention;
+
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.concurrent.TimeUnit;
+
+public class DefaultIndexDefinition implements IndexDefinition {
+
+ private String index;
+
+ private String fullIndexName;
+
+ private String dateTimePattern;
+
+ private URL settingsUrl;
+
+ private URL mappingsUrl;
+
+ private boolean enabled;
+
+ private boolean ignoreErrors;
+
+ private boolean switchAliases;
+
+ private boolean hasForceMerge;
+
+ private int replicaLevel;
+
+ private IndexRetention indexRetention;
+
+ private long maxWaitTime;
+
+ private TimeUnit maxWaitTimeUnit;
+
+ private long startRefreshInterval;
+
+ private long stopRefreshInterval;
+
+ @Override
+ public IndexDefinition setIndex(String index) {
+ this.index = index;
+ return this;
+ }
+
+ @Override
+ public String getIndex() {
+ return index;
+ }
+
+ @Override
+ public IndexDefinition setFullIndexName(String fullIndexName) {
+ this.fullIndexName = fullIndexName;
+ return this;
+ }
+
+ @Override
+ public String getFullIndexName() {
+ return fullIndexName;
+ }
+
+ @Override
+ public IndexDefinition setSettingsUrl(String settingsUrlString) throws MalformedURLException {
+ this.settingsUrl = settingsUrlString != null ? new URL(settingsUrlString) : null;
+ return this;
+ }
+
+ @Override
+ public IndexDefinition setSettingsUrl(URL settingsUrl) {
+ this.settingsUrl = settingsUrl;
+ return this;
+ }
+
+ @Override
+ public URL getSettingsUrl() {
+ return settingsUrl;
+ }
+
+ @Override
+ public IndexDefinition setMappingsUrl(String mappingsUrlString) throws MalformedURLException {
+ this.mappingsUrl = mappingsUrlString != null ? new URL(mappingsUrlString) : null;
+ return this;
+ }
+
+ @Override
+ public IndexDefinition setMappingsUrl(URL mappingsUrl) {
+ this.mappingsUrl = mappingsUrl;
+ return this;
+ }
+
+ @Override
+ public URL getMappingsUrl() {
+ return mappingsUrl;
+ }
+
+ @Override
+ public IndexDefinition setDateTimePattern(String timeWindow) {
+ this.dateTimePattern = timeWindow;
+ return this;
+ }
+
+ @Override
+ public String getDateTimePattern() {
+ return dateTimePattern;
+ }
+
+ @Override
+ public IndexDefinition setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ return this;
+ }
+
+ @Override
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ @Override
+ public IndexDefinition setIgnoreErrors(boolean ignoreErrors) {
+ this.ignoreErrors = ignoreErrors;
+ return this;
+ }
+
+ @Override
+ public boolean ignoreErrors() {
+ return ignoreErrors;
+ }
+
+ @Override
+ public IndexDefinition setShift(boolean switchAliases) {
+ this.switchAliases = switchAliases;
+ return this;
+ }
+
+ @Override
+ public boolean isShiftEnabled() {
+ return switchAliases;
+ }
+
+ @Override
+ public IndexDefinition setForceMerge(boolean hasForceMerge) {
+ this.hasForceMerge = hasForceMerge;
+ return this;
+ }
+
+ @Override
+ public boolean hasForceMerge() {
+ return hasForceMerge;
+ }
+
+ @Override
+ public IndexDefinition setReplicaLevel(int replicaLevel) {
+ this.replicaLevel = replicaLevel;
+ return this;
+ }
+
+ @Override
+ public int getReplicaLevel() {
+ return replicaLevel;
+ }
+
+ @Override
+ public IndexDefinition setRetention(IndexRetention indexRetention) {
+ this.indexRetention = indexRetention;
+ return this;
+ }
+
+ @Override
+ public IndexRetention getRetention() {
+ return indexRetention;
+ }
+
+ @Override
+ public IndexDefinition setMaxWaitTime(long maxWaitTime, TimeUnit timeUnit) {
+ this.maxWaitTime = maxWaitTime;
+ this.maxWaitTimeUnit = timeUnit;
+ return this;
+ }
+
+ @Override
+ public long getMaxWaitTime() {
+ return maxWaitTime;
+ }
+
+ @Override
+ public TimeUnit getMaxWaitTimeUnit() {
+ return maxWaitTimeUnit;
+ }
+
+ @Override
+ public IndexDefinition setStartRefreshInterval(long seconds) {
+ this.startRefreshInterval = seconds;
+ return this;
+ }
+
+ @Override
+ public long getStartRefreshInterval() {
+ return startRefreshInterval;
+ }
+
+ @Override
+ public IndexDefinition setStopRefreshInterval(long seconds) {
+ this.stopRefreshInterval = seconds;
+ return this;
+ }
+
+ @Override
+ public long getStopRefreshInterval() {
+ return stopRefreshInterval;
+ }
+
+}
diff --git a/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexRetention.java b/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexRetention.java
new file mode 100644
index 0000000..4e49be3
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexRetention.java
@@ -0,0 +1,32 @@
+package org.xbib.elx.common;
+
+import org.xbib.elx.api.IndexRetention;
+
+public class DefaultIndexRetention implements IndexRetention {
+
+ private int delta;
+
+ private int minToKeep;
+
+ @Override
+ public IndexRetention setDelta(int delta) {
+ this.delta = delta;
+ return this;
+ }
+
+ @Override
+ public int getDelta() {
+ return delta;
+ }
+
+ @Override
+ public IndexRetention setMinToKeep(int minToKeep) {
+ this.minToKeep = minToKeep;
+ return this;
+ }
+
+ @Override
+ public int getMinToKeep() {
+ return minToKeep;
+ }
+}
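
A minimal sketch of wiring the two defaults together (fragment; index names, resource names, and values are illustrative, and the classpath: URLs rely on the stream handler added further down in this change):

    // setSettingsUrl(String) declares MalformedURLException, so this must run where it is handled.
    IndexDefinition indexDefinition = new DefaultIndexDefinition()
            .setIndex("test")
            .setFullIndexName("test20190101")
            .setDateTimePattern("yyyyMMdd")
            .setSettingsUrl("classpath:test-settings.json")   // hypothetical resource names
            .setMappingsUrl("classpath:test-mapping.json")
            .setEnabled(true)
            .setShift(true)                                    // switch aliases after indexing
            .setForceMerge(true)
            .setReplicaLevel(1)
            .setMaxWaitTime(30, TimeUnit.SECONDS)
            .setRetention(new DefaultIndexRetention().setDelta(2).setMinToKeep(2));
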
diff --git a/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClient.java b/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClient.java
new file mode 100644
index 0000000..647894b
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClient.java
@@ -0,0 +1,129 @@
+package org.xbib.elx.common;
+
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A mocked client that does not perform any actions on a cluster. Useful for testing.
+ */
+public class MockExtendedClient extends AbstractExtendedClient {
+
+ @Override
+ public ElasticsearchClient getClient() {
+ return null;
+ }
+
+ @Override
+ public MockExtendedClient init(Settings settings) {
+ return this;
+ }
+
+ @Override
+ protected ElasticsearchClient createClient(Settings settings) {
+ return null;
+ }
+
+ @Override
+ protected void closeClient() {
+ }
+
+ @Override
+ public MockExtendedClient index(String index, String id, boolean create, String source) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient delete(String index, String id) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient update(String index, String id, String source) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient index(IndexRequest indexRequest) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient delete(DeleteRequest deleteRequest) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient update(UpdateRequest updateRequest) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient startBulk(String index, long startRefreshInterval, long stopRefreshInterval) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient stopBulk(String index, long maxWaitTime, TimeUnit timeUnit) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient newIndex(String index) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient deleteIndex(String index) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient refreshIndex(String index) {
+ return this;
+ }
+
+ @Override
+ public MockExtendedClient flushIndex(String index) {
+ return this;
+ }
+
+ @Override
+ public boolean forceMerge(String index, long maxWaitTime, TimeUnit timeUnit) {
+ return true;
+ }
+
+ @Override
+ public boolean waitForCluster(String healthColor, long timeValue, TimeUnit timeUnit) {
+ return true;
+ }
+
+ @Override
+ public boolean waitForResponses(long maxWaitTime, TimeUnit timeUnit) {
+ return true;
+ }
+
+ @Override
+ public boolean waitForRecovery(String index, long maxWaitTime, TimeUnit timeUnit) {
+ return true;
+ }
+
+ @Override
+ public MockExtendedClient updateReplicaLevel(String index, int level, long maxWaitTime, TimeUnit timeUnit) {
+ return this;
+ }
+
+ @Override
+ public void flush() {
+ // nothing to do
+ }
+
+ @Override
+ public void close() {
+ // nothing to do
+ }
+}
diff --git a/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClientProvider.java b/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClientProvider.java
new file mode 100644
index 0000000..87e65cc
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClientProvider.java
@@ -0,0 +1,10 @@
+package org.xbib.elx.common;
+
+import org.xbib.elx.api.ExtendedClientProvider;
+
+public class MockExtendedClientProvider implements ExtendedClientProvider {
+ @Override
+ public MockExtendedClient getExtendedClient() {
+ return new MockExtendedClient();
+ }
+}
diff --git a/elx-common/src/main/java/org/xbib/elx/common/Parameters.java b/elx-common/src/main/java/org/xbib/elx/common/Parameters.java
new file mode 100644
index 0000000..28d10d7
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/Parameters.java
@@ -0,0 +1,40 @@
+package org.xbib.elx.common;
+
+public enum Parameters {
+
+ DEFAULT_MAX_ACTIONS_PER_REQUEST(1000),
+
+ DEFAULT_MAX_CONCURRENT_REQUESTS(Runtime.getRuntime().availableProcessors()),
+
+ DEFAULT_MAX_VOLUME_PER_REQUEST("10mb"),
+
+ DEFAULT_FLUSH_INTERVAL(30),
+
+ MAX_ACTIONS_PER_REQUEST("max_actions_per_request"),
+
+ MAX_CONCURRENT_REQUESTS("max_concurrent_requests"),
+
+ MAX_VOLUME_PER_REQUEST("max_volume_per_request"),
+
+ FLUSH_INTERVAL("flush_interval");
+
+ int num;
+
+ String string;
+
+ Parameters(int num) {
+ this.num = num;
+ }
+
+ Parameters(String string) {
+ this.string = string;
+ }
+
+ int getNum() {
+ return num;
+ }
+
+ String getString() {
+ return string;
+ }
+}
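
A sketch of how these keys and defaults might be consumed when reading bulk settings (fragment; since the accessors are package-private, the call site would live in the same package, and the surrounding code is illustrative):

    Settings settings = Settings.EMPTY;   // normally handed in by the caller
    int maxActionsPerRequest = settings.getAsInt(
            Parameters.MAX_ACTIONS_PER_REQUEST.getString(),
            Parameters.DEFAULT_MAX_ACTIONS_PER_REQUEST.getNum());
    int maxConcurrentRequests = settings.getAsInt(
            Parameters.MAX_CONCURRENT_REQUESTS.getString(),
            Parameters.DEFAULT_MAX_CONCURRENT_REQUESTS.getNum());
    ByteSizeValue maxVolumePerRequest = ByteSizeValue.parseBytesSizeValue(
            settings.get(Parameters.MAX_VOLUME_PER_REQUEST.getString(),
                    Parameters.DEFAULT_MAX_VOLUME_PER_REQUEST.getString()),
            Parameters.MAX_VOLUME_PER_REQUEST.getString());
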
diff --git a/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandler.java b/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandler.java
new file mode 100644
index 0000000..e7d8727
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandler.java
@@ -0,0 +1,25 @@
+package org.xbib.elx.common.io;
+
+import java.io.IOException;
+import java.net.URL;
+import java.net.URLConnection;
+import java.net.URLStreamHandler;
+
+public class ClasspathURLStreamHandler extends URLStreamHandler {
+
+ private final ClassLoader classLoader;
+
+ public ClasspathURLStreamHandler() {
+ this.classLoader = getClass().getClassLoader();
+ }
+
+ public ClasspathURLStreamHandler(ClassLoader classLoader) {
+ this.classLoader = classLoader;
+ }
+
+ @Override
+ protected URLConnection openConnection(URL u) throws IOException {
+ final URL resourceUrl = classLoader.getResource(u.getPath());
+ return resourceUrl != null ? resourceUrl.openConnection() : null;
+ }
+}
diff --git a/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandlerFactory.java b/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandlerFactory.java
new file mode 100644
index 0000000..00c7c83
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandlerFactory.java
@@ -0,0 +1,12 @@
+package org.xbib.elx.common.io;
+
+import java.net.URLStreamHandler;
+import java.net.URLStreamHandlerFactory;
+
+public class ClasspathURLStreamHandlerFactory implements URLStreamHandlerFactory {
+
+ @Override
+ public URLStreamHandler createURLStreamHandler(String protocol) {
+ return "classpath".equals(protocol) ? new ClasspathURLStreamHandler() : null;
+ }
+}
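
Two ways the classpath: scheme can be used (sketch; the resource path is hypothetical, and URL.setURLStreamHandlerFactory may be called only once per JVM):

    // Per-URL: pass the handler explicitly, no global registration needed.
    URL url = new URL(null, "classpath:org/xbib/elx/common/index-settings.json",
            new ClasspathURLStreamHandler());
    try (InputStream in = url.openStream()) {
        // read index settings or mappings from the classpath
    }

    // Process-wide: register the factory once, after which plain new URL("classpath:...") resolves.
    // The META-INF/services entry added below presumably lets the library wire this up itself.
    URL.setURLStreamHandlerFactory(new ClasspathURLStreamHandlerFactory());
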
diff --git a/elx-common/src/main/java/org/xbib/elx/common/io/package-info.java b/elx-common/src/main/java/org/xbib/elx/common/io/package-info.java
new file mode 100644
index 0000000..492a3e2
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/io/package-info.java
@@ -0,0 +1,4 @@
+/**
+ *
+ */
+package org.xbib.elx.common.io;
diff --git a/elx-common/src/main/java/org/xbib/elx/common/package-info.java b/elx-common/src/main/java/org/xbib/elx/common/package-info.java
new file mode 100644
index 0000000..ced4357
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/package-info.java
@@ -0,0 +1,4 @@
+/**
+ *
+ */
+package org.xbib.elx.common;
diff --git a/common/src/main/java/org/xbib/elasticsearch/client/NetworkUtils.java b/elx-common/src/main/java/org/xbib/elx/common/util/NetworkUtils.java
similarity index 95%
rename from common/src/main/java/org/xbib/elasticsearch/client/NetworkUtils.java
rename to elx-common/src/main/java/org/xbib/elx/common/util/NetworkUtils.java
index 63a6fdf..11dd014 100644
--- a/common/src/main/java/org/xbib/elasticsearch/client/NetworkUtils.java
+++ b/elx-common/src/main/java/org/xbib/elx/common/util/NetworkUtils.java
@@ -1,4 +1,4 @@
-package org.xbib.elasticsearch.client;
+package org.xbib.elx.common.util;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -16,6 +16,9 @@ import java.util.Enumeration;
import java.util.List;
import java.util.Locale;
+/**
+ *
+ */
public class NetworkUtils {
private static final Logger logger = LogManager.getLogger(NetworkUtils.class.getName());
@@ -100,10 +103,8 @@ public class NetworkUtils {
NetworkInterface networkInterface = interfaces.nextElement();
allInterfaces.add(networkInterface);
Enumeration<NetworkInterface> subInterfaces = networkInterface.getSubInterfaces();
- if (subInterfaces.hasMoreElements()) {
- while (subInterfaces.hasMoreElements()) {
- allInterfaces.add(subInterfaces.nextElement());
- }
+ while (subInterfaces.hasMoreElements()) {
+ allInterfaces.add(subInterfaces.nextElement());
}
}
sortInterfaces(allInterfaces);
@@ -221,10 +222,8 @@ public class NetworkUtils {
NetworkInterface networkInterface = interfaces.nextElement();
networkInterfaces.add(networkInterface);
Enumeration<NetworkInterface> subInterfaces = networkInterface.getSubInterfaces();
- if (subInterfaces.hasMoreElements()) {
- while (subInterfaces.hasMoreElements()) {
- networkInterfaces.add(subInterfaces.nextElement());
- }
+ while (subInterfaces.hasMoreElements()) {
+ networkInterfaces.add(subInterfaces.nextElement());
}
}
sortInterfaces(networkInterfaces);
@@ -250,6 +249,9 @@ public class NetworkUtils {
return left.length - right.length;
}
+ /**
+ *
+ */
public enum ProtocolVersion {
IPV4, IPV6, IPV46, NONE
}
diff --git a/elx-common/src/main/java/org/xbib/elx/common/util/package-info.java b/elx-common/src/main/java/org/xbib/elx/common/util/package-info.java
new file mode 100644
index 0000000..20a7cbb
--- /dev/null
+++ b/elx-common/src/main/java/org/xbib/elx/common/util/package-info.java
@@ -0,0 +1,4 @@
+/**
+ *
+ */
+package org.xbib.elx.common.util;
diff --git a/elx-common/src/main/resources/META-INF/services/java.net.URLStreamHandlerFactory b/elx-common/src/main/resources/META-INF/services/java.net.URLStreamHandlerFactory
new file mode 100644
index 0000000..bb6d620
--- /dev/null
+++ b/elx-common/src/main/resources/META-INF/services/java.net.URLStreamHandlerFactory
@@ -0,0 +1 @@
+org.xbib.elx.common.io.ClasspathURLStreamHandlerFactory
\ No newline at end of file
diff --git a/elx-common/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider b/elx-common/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider
new file mode 100644
index 0000000..9729b83
--- /dev/null
+++ b/elx-common/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider
@@ -0,0 +1 @@
+org.xbib.elx.common.MockExtendedClientProvider
\ No newline at end of file
diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/AliasTests.java b/elx-common/src/test/java/org/xbib/elx/common/test/AliasTest.java
similarity index 56%
rename from common/src/test/java/org/xbib/elasticsearch/client/common/AliasTests.java
rename to elx-common/src/test/java/org/xbib/elx/common/test/AliasTest.java
index e0ef8d5..9fa4a40 100644
--- a/common/src/test/java/org/xbib/elasticsearch/client/common/AliasTests.java
+++ b/elx-common/src/test/java/org/xbib/elx/common/test/AliasTest.java
@@ -1,4 +1,7 @@
-package org.xbib.elasticsearch.client.common;
+package org.xbib.elx.common.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.LogManager;
@@ -9,8 +12,9 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.testframework.ESSingleNodeTestCase;
+import org.junit.Test;
import java.util.Collections;
import java.util.Iterator;
@@ -19,58 +23,72 @@ import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-public class AliasTests extends ESSingleNodeTestCase {
+public class AliasTest extends TestBase {
- private static final Logger logger = LogManager.getLogger(AliasTests.class.getName());
+ private static final Logger logger = LogManager.getLogger(AliasTest.class.getName());
+ @Test
public void testAlias() {
+ Client client = client("1");
CreateIndexRequest indexRequest = new CreateIndexRequest("test");
- client().admin().indices().create(indexRequest).actionGet();
+ client.admin().indices().create(indexRequest).actionGet();
// put alias
IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
- indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add()
- .index("test").alias("test_alias")
- );
- client().admin().indices().aliases(indicesAliasesRequest).actionGet();
+ String[] indices = new String[]{"test"};
+ String[] aliases = new String[]{"test_alias"};
+ IndicesAliasesRequest.AliasActions aliasAction =
+ new IndicesAliasesRequest.AliasActions(IndicesAliasesRequest.AliasActions.Type.ADD)
+ .indices(indices)
+ .aliases(aliases);
+ indicesAliasesRequest.addAliasAction(aliasAction);
+ client.admin().indices().aliases(indicesAliasesRequest).actionGet();
// get alias
GetAliasesRequest getAliasesRequest = new GetAliasesRequest(Strings.EMPTY_ARRAY);
long t0 = System.nanoTime();
- GetAliasesResponse getAliasesResponse = client().admin().indices().getAliases(getAliasesRequest).actionGet();
+ GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(getAliasesRequest).actionGet();
long t1 = (System.nanoTime() - t0) / 1000000;
logger.info("{} time(ms) = {}", getAliasesResponse.getAliases(), t1);
assertTrue(t1 >= 0);
}
+ @Test
public void testMostRecentIndex() {
+ Client client = client("1");
String alias = "test";
CreateIndexRequest indexRequest = new CreateIndexRequest("test20160101");
- client().admin().indices().create(indexRequest).actionGet();
+ client.admin().indices().create(indexRequest).actionGet();
indexRequest = new CreateIndexRequest("test20160102");
- client().admin().indices().create(indexRequest).actionGet();
+ client.admin().indices().create(indexRequest).actionGet();
indexRequest = new CreateIndexRequest("test20160103");
- client().admin().indices().create(indexRequest).actionGet();
+ client.admin().indices().create(indexRequest).actionGet();
IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest();
- indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add()
- .indices("test20160101", "test20160102", "test20160103")
- .alias(alias)
- );
- client().admin().indices().aliases(indicesAliasesRequest).actionGet();
+ String[] indices = new String[]{"test20160101", "test20160102", "test20160103"};
+ String[] aliases = new String[]{alias};
+ IndicesAliasesRequest.AliasActions aliasAction =
+ new IndicesAliasesRequest.AliasActions(IndicesAliasesRequest.AliasActions.Type.ADD)
+ .indices(indices)
+ .aliases(aliases);
+ indicesAliasesRequest.addAliasAction(aliasAction);
+ client.admin().indices().aliases(indicesAliasesRequest).actionGet();
- GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(),
+ GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client,
GetAliasesAction.INSTANCE);
GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet();
Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
Set<String> result = new TreeSet<>(Collections.reverseOrder());
for (ObjectCursor<String> indexName : getAliasesResponse.getAliases().keys()) {
Matcher m = pattern.matcher(indexName.value);
- if (m.matches() && alias.equals(m.group(1))) {
- result.add(indexName.value);
+ if (m.matches()) {
+ if (alias.equals(m.group(1))) {
+ result.add(indexName.value);
+ }
}
}
Iterator<String> it = result.iterator();
assertEquals("test20160103", it.next());
assertEquals("test20160102", it.next());
assertEquals("test20160101", it.next());
- logger.info("result={}", result);
+ logger.info("success: result={}", result);
}
+
}
diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/ClusterBlockTest.java b/elx-common/src/test/java/org/xbib/elx/common/test/ClusterBlockTest.java
new file mode 100644
index 0000000..243ed93
--- /dev/null
+++ b/elx-common/src/test/java/org/xbib/elx/common/test/ClusterBlockTest.java
@@ -0,0 +1,50 @@
+package org.xbib.elx.common.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+@Ignore
+public class ClusterBlockTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger("test");
+
+ @Before
+ public void startNodes() {
+ try {
+ setClusterName("test-cluster");
+ startNode("1");
+ // do not wait for green health state
+ logger.info("ready");
+ } catch (Throwable t) {
+ logger.error("startNodes failed", t);
+ }
+ }
+
+ @Override
+ protected Settings getNodeSettings() {
+ return Settings.builder()
+ .put(super.getNodeSettings())
+ .put("discovery.zen.minimum_master_nodes", 2) // block until we have two nodes
+ .build();
+ }
+
+ @Test(expected = ClusterBlockException.class)
+ public void testClusterBlock() throws Exception {
+ Client client = client("1");
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("field1", "value1").endObject();
+ IndexRequestBuilder irb = client.prepareIndex("test", "test", "1").setSource(builder);
+ BulkRequestBuilder brb = client.prepareBulk();
+ brb.add(irb);
+ brb.execute().actionGet();
+ }
+}
diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/MockExtendedClientProviderTest.java b/elx-common/src/test/java/org/xbib/elx/common/test/MockExtendedClientProviderTest.java
new file mode 100644
index 0000000..cbe7972
--- /dev/null
+++ b/elx-common/src/test/java/org/xbib/elx/common/test/MockExtendedClientProviderTest.java
@@ -0,0 +1,19 @@
+package org.xbib.elx.common.test;
+
+import org.junit.Test;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.common.MockExtendedClient;
+import org.xbib.elx.common.MockExtendedClientProvider;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertNotNull;
+
+public class MockExtendedClientProviderTest {
+
+ @Test
+ public void testMockExtendedProvider() throws IOException {
+ MockExtendedClient client = ClientBuilder.builder().provider(MockExtendedClientProvider.class).build();
+ assertNotNull(client);
+ }
+}
diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/MockNode.java b/elx-common/src/test/java/org/xbib/elx/common/test/MockNode.java
new file mode 100644
index 0000000..b83aa8a
--- /dev/null
+++ b/elx-common/src/test/java/org/xbib/elx/common/test/MockNode.java
@@ -0,0 +1,15 @@
+package org.xbib.elx.common.test;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.InternalSettingsPreparer;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.plugins.Plugin;
+
+import java.util.List;
+
+public class MockNode extends Node {
+
+ public MockNode(Settings settings, List<Class<? extends Plugin>> classpathPlugins) {
+ super(InternalSettingsPreparer.prepareEnvironment(settings, null), classpathPlugins);
+ }
+}
diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/NetworkTest.java b/elx-common/src/test/java/org/xbib/elx/common/test/NetworkTest.java
similarity index 81%
rename from common/src/test/java/org/xbib/elasticsearch/client/common/NetworkTest.java
rename to elx-common/src/test/java/org/xbib/elx/common/test/NetworkTest.java
index 0ed4fc8..7933343 100644
--- a/common/src/test/java/org/xbib/elasticsearch/client/common/NetworkTest.java
+++ b/elx-common/src/test/java/org/xbib/elx/common/test/NetworkTest.java
@@ -1,4 +1,4 @@
-package org.xbib.elasticsearch.client.common;
+package org.xbib.elx.common.test;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -13,17 +13,12 @@ public class NetworkTest {
private static final Logger logger = LogManager.getLogger(NetworkTest.class);
- /**
- * Demonstrates the slowness oj Java network interface lookup on certain environments.
- * May be a killer for ES node startup - so avoid automatic traversal of NICs at all costs.
- *
- * @throws Exception if test fails
- */
@Test
public void testNetwork() throws Exception {
+ // walk over all found interfaces (this is slow - multicast/pings are performed)
Enumeration<NetworkInterface> nets = NetworkInterface.getNetworkInterfaces();
for (NetworkInterface netint : Collections.list(nets)) {
- logger.info("checking network interface = " + netint.getName());
+ System.out.println("checking network interface = " + netint.getName());
Enumeration<InetAddress> inetAddresses = netint.getInetAddresses();
for (InetAddress addr : Collections.list(inetAddresses)) {
logger.info("found address = " + addr.getHostAddress()
diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/SearchTest.java b/elx-common/src/test/java/org/xbib/elx/common/test/SearchTest.java
new file mode 100644
index 0000000..5d7420a
--- /dev/null
+++ b/elx-common/src/test/java/org/xbib/elx/common/test/SearchTest.java
@@ -0,0 +1,58 @@
+package org.xbib.elx.common.test;
+
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.bulk.BulkAction;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+public class SearchTest extends TestBase {
+
+ @Test
+ public void testSearch() throws Exception {
+ Client client = client("1");
+ BulkRequestBuilder builder = new BulkRequestBuilder(client, BulkAction.INSTANCE);
+ for (int i = 0; i < 1000; i++) {
+ IndexRequest indexRequest = new IndexRequest("pages", "row")
+ .source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("user1", "joerg")
+ .field("user2", "joerg")
+ .field("user3", "joerg")
+ .field("user4", "joerg")
+ .field("user5", "joerg")
+ .field("user6", "joerg")
+ .field("user7", "joerg")
+ .field("user8", "joerg")
+ .field("user9", "joerg")
+ .field("rowcount", i)
+ .field("rs", 1234)
+ .endObject()
+ );
+ builder.add(indexRequest);
+ }
+ client.bulk(builder.request()).actionGet();
+ client.admin().indices().refresh(new RefreshRequest()).actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ QueryBuilder queryStringBuilder = QueryBuilders.queryStringQuery("rs:" + 1234);
+ SearchRequestBuilder requestBuilder = client.prepareSearch()
+ .setIndices("pages")
+ .setTypes("row")
+ .setQuery(queryStringBuilder)
+ .addSort("rowcount", SortOrder.DESC)
+ .setFrom(i * 10).setSize(10);
+ SearchResponse searchResponse = requestBuilder.execute().actionGet();
+ assertTrue(searchResponse.getHits().getTotalHits() > 0);
+ }
+ }
+}
diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/SimpleTests.java b/elx-common/src/test/java/org/xbib/elx/common/test/SimpleTest.java
similarity index 56%
rename from common/src/test/java/org/xbib/elasticsearch/client/common/SimpleTests.java
rename to elx-common/src/test/java/org/xbib/elx/common/test/SimpleTest.java
index 6e2dd8a..ba20e63 100644
--- a/common/src/test/java/org/xbib/elasticsearch/client/common/SimpleTests.java
+++ b/elx-common/src/test/java/org/xbib/elx/common/test/SimpleTest.java
@@ -1,53 +1,53 @@
-package org.xbib.elasticsearch.client.common;
+package org.xbib.elx.common.test;
+
+import static org.junit.Assert.assertEquals;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequestBuilder;
-import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.testframework.ESSingleNodeTestCase;
+import org.junit.Test;
-public class SimpleTests extends ESSingleNodeTestCase {
-
- private static final Logger logger = LogManager.getLogger(SimpleTests.class.getName());
+public class SimpleTest extends TestBase {
+ @Test
public void test() throws Exception {
try {
DeleteIndexRequestBuilder deleteIndexRequestBuilder =
- new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, "test");
+ new DeleteIndexRequestBuilder(client("1"), DeleteIndexAction.INSTANCE, "test");
deleteIndexRequestBuilder.execute().actionGet();
- } catch (Exception e) {
- logger.warn(e.getMessage(), e);
+ } catch (IndexNotFoundException e) {
+ // ignore if index not found
}
- CreateIndexRequestBuilder createIndexRequestBuilder = new CreateIndexRequestBuilder(client(),
- CreateIndexAction.INSTANCE)
- .setIndex("test")
- .setSettings(Settings.builder()
- .put("index.analysis.analyzer.default.filter.0", "lowercase")
- // where is the trim token filter???
- //.put("index.analysis.analyzer.default.filter.1", "trim")
- .put("index.analysis.analyzer.default.tokenizer", "keyword")
- .build());
- createIndexRequestBuilder.execute().actionGet();
+ Settings indexSettings = Settings.builder()
+ .put("index.analysis.analyzer.default.filter.0", "lowercase")
+ .put("index.analysis.analyzer.default.filter.1", "trim")
+ .put("index.analysis.analyzer.default.tokenizer", "keyword")
+ .build();
+ CreateIndexRequestBuilder createIndexRequestBuilder = new CreateIndexRequestBuilder(client("1"), CreateIndexAction.INSTANCE);
+ createIndexRequestBuilder.setIndex("test")
+ .setSettings(indexSettings).execute().actionGet();
- IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(client(), IndexAction.INSTANCE);
+ IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(client("1"), IndexAction.INSTANCE);
indexRequestBuilder
.setIndex("test")
.setType("test")
.setId("1")
.setSource(XContentFactory.jsonBuilder().startObject().field("field",
"1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8").endObject())
- .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.execute()
.actionGet();
- String doc = client().prepareSearch("test")
+ RefreshRequestBuilder refreshRequestBuilder = new RefreshRequestBuilder(client("1"), RefreshAction.INSTANCE);
+ refreshRequestBuilder.setIndices("test").execute().actionGet();
+ String doc = client("1").prepareSearch("test")
.setTypes("test")
.setQuery(QueryBuilders.matchQuery("field",
"1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8"))
diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/TestBase.java b/elx-common/src/test/java/org/xbib/elx/common/test/TestBase.java
new file mode 100644
index 0000000..f2a290f
--- /dev/null
+++ b/elx-common/src/test/java/org/xbib/elx/common/test/TestBase.java
@@ -0,0 +1,206 @@
+package org.xbib.elx.common.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
+import org.elasticsearch.client.support.AbstractClient;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.transport.netty4.Netty4Plugin;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+public class TestBase {
+
+ private static final Logger logger = LogManager.getLogger("test");
+
+ private static final Random random = new Random();
+
+ private static final char[] numbersAndLetters = ("0123456789abcdefghijklmnopqrstuvwxyz").toCharArray();
+
+ private Map<String, Node> nodes = new HashMap<>();
+
+ private Map<String, AbstractClient> clients = new HashMap<>();
+
+ private String cluster;
+
+ private String host;
+
+ private int port;
+
+ @Before
+ public void startNodes() {
+ try {
+ logger.info("starting");
+ setClusterName("test-cluster");
+ startNode("1");
+ findNodeAddress();
+ try {
+ ClusterHealthResponse healthResponse = client("1").execute(ClusterHealthAction.INSTANCE,
+ new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN)
+ .timeout(TimeValue.timeValueSeconds(30))).actionGet();
+ if (healthResponse != null && healthResponse.isTimedOut()) {
+ throw new IOException("cluster state is " + healthResponse.getStatus().name()
+ + ", from here on, everything will fail!");
+ }
+ } catch (ElasticsearchTimeoutException e) {
+ throw new IOException("cluster does not respond to health request, cowardly refusing to continue");
+ }
+ ClusterStateRequestBuilder clusterStateRequestBuilder =
+ new ClusterStateRequestBuilder(client("1"), ClusterStateAction.INSTANCE).all();
+ ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet();
+ logger.info("cluster name = {}", clusterStateResponse.getClusterName().value());
+ logger.info("host = {} port = {}", host, port);
+ } catch (Throwable t) {
+ logger.error(t.getMessage(), t);
+ }
+ }
+
+ @After
+ public void stopNodes() {
+ try {
+ closeNodes();
+ } catch (Exception e) {
+ logger.error("can not close nodes", e);
+ } finally {
+ try {
+ deleteFiles();
+ logger.info("data files wiped");
+ Thread.sleep(2000L); // let OS commit changes
+ } catch (IOException e) {
+ logger.error(e.getMessage(), e);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ }
+
+ protected Settings getTransportSettings() {
+ return Settings.builder()
+ .put("host", host)
+ .put("port", port)
+ .put("cluster.name", cluster)
+ .put("path.home", getHome())
+ .build();
+ }
+
+ protected Settings getNodeSettings() {
+ return Settings.builder()
+ .put("cluster.name", cluster)
+ .put("transport.type", Netty4Plugin.NETTY_TRANSPORT_NAME)
+ .put("path.home", getHome())
+ .build();
+ }
+
+ protected static String getHome() {
+ return System.getProperty("path.home", System.getProperty("user.dir"));
+ }
+
+ protected void startNode(String id) throws NodeValidationException {
+ buildNode(id).start();
+ }
+
+ protected AbstractClient client(String id) {
+ return clients.get(id);
+ }
+
+ protected void setClusterName(String cluster) {
+ this.cluster = cluster;
+ }
+
+ protected String getClusterName() {
+ return cluster;
+ }
+
+ protected String randomString(int len) {
+ final char[] buf = new char[len];
+ final int n = numbersAndLetters.length - 1;
+ for (int i = 0; i < buf.length; i++) {
+ buf[i] = numbersAndLetters[random.nextInt(n)];
+ }
+ return new String(buf);
+ }
+
+ private void closeNodes() throws IOException {
+ logger.info("closing all clients");
+ for (AbstractClient client : clients.values()) {
+ client.close();
+ }
+ clients.clear();
+ logger.info("closing all nodes");
+ for (Node node : nodes.values()) {
+ if (node != null) {
+ node.close();
+ }
+ }
+ nodes.clear();
+ logger.info("all nodes closed");
+ }
+
+ private void findNodeAddress() {
+ NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().transport(true);
+ NodesInfoResponse response = client("1").admin().cluster().nodesInfo(nodesInfoRequest).actionGet();
+ TransportAddress address = response.getNodes().iterator().next().getTransport().getAddress()
+ .publishAddress();
+ host = address.address().getHostName();
+ port = address.address().getPort();
+ }
+
+ private Node buildNode(String id) {
+ Settings nodeSettings = Settings.builder()
+ .put(getNodeSettings())
+ .put("node.name", id)
+ .build();
+ List<Class<? extends Plugin>> plugins = Arrays.asList(CommonAnalysisPlugin.class, Netty4Plugin.class);
+ Node node = new MockNode(nodeSettings, plugins);
+ AbstractClient client = (AbstractClient) node.client();
+ nodes.put(id, node);
+ clients.put(id, client);
+ logger.info("clients={}", clients);
+ return node;
+ }
+
+ private static void deleteFiles() throws IOException {
+ Path directory = Paths.get(getHome() + "/data");
+ Files.walkFileTree(directory, new SimpleFileVisitor<>() {
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ Files.delete(file);
+ return FileVisitResult.CONTINUE;
+ }
+
+ @Override
+ public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
+ Files.delete(dir);
+ return FileVisitResult.CONTINUE;
+ }
+ });
+ }
+}
diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/WildcardTest.java b/elx-common/src/test/java/org/xbib/elx/common/test/WildcardTest.java
new file mode 100644
index 0000000..d3d2b95
--- /dev/null
+++ b/elx-common/src/test/java/org/xbib/elx/common/test/WildcardTest.java
@@ -0,0 +1,52 @@
+package org.xbib.elx.common.test;
+
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class WildcardTest extends TestBase {
+
+ @Test
+ public void testWildcard() throws Exception {
+ index(client("1"), "1", "010");
+ index(client("1"), "2", "0*0");
+ // exact
+ validateCount(client("1"), QueryBuilders.queryStringQuery("010").defaultField("field"), 1);
+ validateCount(client("1"), QueryBuilders.queryStringQuery("0\\*0").defaultField("field"), 1);
+ // pattern
+ validateCount(client("1"), QueryBuilders.queryStringQuery("0*0").defaultField("field"), 1); // 2?
+ validateCount(client("1"), QueryBuilders.queryStringQuery("0?0").defaultField("field"), 1); // 2?
+ validateCount(client("1"), QueryBuilders.queryStringQuery("0**0").defaultField("field"), 1); // 2?
+ validateCount(client("1"), QueryBuilders.queryStringQuery("0??0").defaultField("field"), 0);
+ validateCount(client("1"), QueryBuilders.queryStringQuery("*10").defaultField("field"), 1);
+ validateCount(client("1"), QueryBuilders.queryStringQuery("*1*").defaultField("field"), 1);
+ validateCount(client("1"), QueryBuilders.queryStringQuery("*\\*0").defaultField("field"), 0); // 1?
+ validateCount(client("1"), QueryBuilders.queryStringQuery("*\\**").defaultField("field"), 0); // 1?
+ }
+
+ private void index(Client client, String id, String fieldValue) throws IOException {
+ client.index(new IndexRequest("index", "type", id)
+ .source(XContentFactory.jsonBuilder().startObject().field("field", fieldValue).endObject()))
+ .actionGet();
+ client.admin().indices().refresh(new RefreshRequest()).actionGet();
+ }
+
+ private long count(Client client, QueryBuilder queryBuilder) {
+ return client.prepareSearch("index").setTypes("type")
+ .setQuery(queryBuilder)
+ .execute().actionGet().getHits().getTotalHits();
+ }
+
+ private void validateCount(Client client, QueryBuilder queryBuilder, long expectedHits) {
+ final long actualHits = count(client, queryBuilder);
+ if (actualHits != expectedHits) {
+ throw new RuntimeException("actualHits=" + actualHits + ", expectedHits=" + expectedHits);
+ }
+ }
+}
diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/package-info.java b/elx-common/src/test/java/org/xbib/elx/common/test/package-info.java
new file mode 100644
index 0000000..9d006c1
--- /dev/null
+++ b/elx-common/src/test/java/org/xbib/elx/common/test/package-info.java
@@ -0,0 +1,4 @@
+/**
+ *
+ */
+package org.xbib.elx.common.test;
diff --git a/common/src/test/resources/log4j2.xml b/elx-common/src/test/resources/log4j2.xml
similarity index 72%
rename from common/src/test/resources/log4j2.xml
rename to elx-common/src/test/resources/log4j2.xml
index b175dfc..6c323f8 100644
--- a/common/src/test/resources/log4j2.xml
+++ b/elx-common/src/test/resources/log4j2.xml
@@ -2,11 +2,11 @@
-
+
-
+
diff --git a/elx-http/build.gradle b/elx-http/build.gradle
new file mode 100644
index 0000000..39534d0
--- /dev/null
+++ b/elx-http/build.gradle
@@ -0,0 +1,6 @@
+dependencies {
+ compile project(':elx-common')
+ compile "org.xbib.elasticsearch:transport-netty4:${rootProject.property('elasticsearch-server.version')}"
+ compile "org.xbib:netty-http-client:${project.property('xbib-netty-http.version')}"
+ testCompile "org.xbib.elasticsearch:elasticsearch-analysis-common:${rootProject.property('elasticsearch-server.version')}"
+}
diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpClient.java b/elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClient.java
similarity index 50%
rename from http/src/main/java/org/xbib/elasticsearch/client/http/HttpClient.java
rename to elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClient.java
index 31e5d95..c8faf35 100644
--- a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpClient.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClient.java
@@ -1,4 +1,4 @@
-package org.xbib.elasticsearch.client.http;
+package org.xbib.elx.http;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -13,21 +13,16 @@ import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
-import org.elasticsearch.node.Node;
import org.elasticsearch.threadpool.ThreadPool;
-import org.xbib.elasticsearch.client.AbstractClient;
-import org.xbib.elasticsearch.client.BulkControl;
-import org.xbib.elasticsearch.client.BulkMetric;
+import org.xbib.elx.common.AbstractExtendedClient;
import org.xbib.netty.http.client.Client;
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.ServiceLoader;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -35,52 +30,44 @@ import java.util.stream.Stream;
/**
* Elasticsearch HTTP client.
*/
-public class HttpClient extends AbstractClient implements ElasticsearchClient {
+public class ExtendedHttpClient extends AbstractExtendedClient implements ElasticsearchClient {
- private static final Logger logger = LogManager.getLogger(HttpClient.class);
+ private static final Logger logger = LogManager.getLogger(ExtendedHttpClient.class);
- private Client client;
+ private Client nettyHttpClient;
- private NamedXContentRegistry registry;
+ private final ClassLoader classLoader;
+
+ private final NamedXContentRegistry registry;
@SuppressWarnings("rawtypes")
- private Map actionMap;
+ private final Map actionMap;
- private List urls;
+ private String url;
- //private ThreadPool threadPool;
-
- @Override
- public HttpClient init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control) {
- init(client, settings, metric, control, null, Collections.emptyList());
- return this;
+ public ExtendedHttpClient(List<NamedXContentRegistry.Entry> namedXContentEntries, ClassLoader classLoader) {
+ this.registry = new NamedXContentRegistry(Stream.of(getNamedXContents().stream(),
+ namedXContentEntries.stream()).flatMap(Function.identity()).collect(Collectors.toList()));
+ this.classLoader = classLoader != null ? classLoader : Thread.currentThread().getContextClassLoader();
+ this.actionMap = new HashMap<>();
}
+ @Override
@SuppressWarnings({"unchecked", "rawtypes"})
- private void init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control,
- ClassLoader classLoader, List namedXContentEntries) {
- //super.init(client, settings, metric, control);
- this.urls = settings.getAsList("urls");
- if (urls.isEmpty()) {
- throw new IllegalArgumentException("no urls given");
+ public ExtendedHttpClient init(Settings settings) throws IOException {
+ super.init(settings);
+ if (settings == null) {
+ return null;
}
- this.registry = new NamedXContentRegistry(Stream.of(getNamedXContents().stream(),
- namedXContentEntries.stream()
- ).flatMap(Function.identity()).collect(Collectors.toList()));
- this.actionMap = new HashMap<>();
- ServiceLoader httpActionServiceLoader = ServiceLoader.load(HttpAction.class,
- classLoader != null ? classLoader : Thread.currentThread().getContextClassLoader());
+ this.url = settings.get("url");
+ ServiceLoader httpActionServiceLoader = ServiceLoader.load(HttpAction.class, classLoader);
for (HttpAction<? extends ActionRequest, ? extends ActionResponse> httpAction : httpActionServiceLoader) {
httpAction.setSettings(settings);
actionMap.put(httpAction.getActionInstance(), httpAction);
}
- this.client = Client.builder().enableDebug().build();
- Settings threadPoolsettings = Settings.builder()
- .put(settings)
- .put(Node.NODE_NAME_SETTING.getKey(), "httpclient")
- .build();
- //this.threadPool = threadPool != null ? threadPool : new ThreadPool(threadPoolsettings);
- logger.info("HTTP client initialized with {} actions", actionMap.size());
+ this.nettyHttpClient = Client.builder().enableDebug().build();
+ logger.info("extended HTTP client initialized with {} actions", actionMap.size());
+ return this;
}
private static List<NamedXContentRegistry.Entry> getNamedXContents() {
@@ -91,28 +78,23 @@ public class HttpClient extends AbstractClient implements ElasticsearchClient {
return registry;
}
- public static Builder builder() {
- return new Builder();
- }
-
public Client internalClient() {
- return client;
+ return nettyHttpClient;
}
@Override
- public ElasticsearchClient client() {
+ public ElasticsearchClient getClient() {
return this;
}
@Override
- protected ElasticsearchClient createClient(Settings settings) throws IOException {
+ protected ElasticsearchClient createClient(Settings settings) {
return this;
}
@Override
- public void shutdown() throws IOException {
- client.shutdownGracefully();
- //threadPool.close();
+ protected void closeClient() throws IOException {
+ nettyHttpClient.shutdownGracefully();
}
@Override
@@ -142,68 +124,22 @@ public class HttpClient extends AbstractClient implements ElasticsearchClient {
@Override
public ThreadPool threadPool() {
logger.info("returning null for threadPool() request");
- return null; //threadPool;
+ return null;
}
@SuppressWarnings({"unchecked", "rawtypes"})
- public <R extends ActionRequest, T extends ActionResponse, B extends ActionRequestBuilder<R, T, B>>
+ private <R extends ActionRequest, T extends ActionResponse, B extends ActionRequestBuilder<R, T, B>>
void doExecute(Action<R, T, B> action, R request, ActionListener<T> listener) {
HttpAction httpAction = actionMap.get(action);
if (httpAction == null) {
throw new IllegalStateException("failed to find http action [" + action + "] to execute");
}
- logger.info("http action = " + httpAction);
- String url = urls.get(0); // TODO
try {
- logger.info("submitting to URL {}", url);
HttpActionContext httpActionContext = new HttpActionContext(this, request, url);
httpAction.execute(httpActionContext, listener);
- logger.info("submitted to URL {}", url);
+ logger.debug("submitted to URL {}", url);
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
-
- /**
- * The Builder for HTTP client.
- */
- public static class Builder {
-
- private final Settings.Builder settingsBuilder = Settings.builder();
-
- private ClassLoader classLoader;
-
- private List namedXContentEntries;
-
- private ThreadPool threadPool = null;
-
- public Builder settings(Settings settings) {
- this.settingsBuilder.put(settings);
- return this;
- }
-
- public Builder classLoader(ClassLoader classLoader) {
- this.classLoader = classLoader;
- return this;
- }
-
- public Builder namedXContentEntries(List namedXContentEntries) {
- this.namedXContentEntries = namedXContentEntries;
- return this;
- }
-
- public Builder threadPool(ThreadPool threadPool) {
- this.threadPool = threadPool;
- return this;
- }
-
- @SuppressWarnings({"unchecked", "rawtypes"})
- public HttpClient build() {
- Settings settings = settingsBuilder.build();
- HttpClient httpClient = new HttpClient();
- httpClient.init(null, settings, null, null,
- classLoader, namedXContentEntries);
- return httpClient;
- }
- }
}
diff --git a/elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClientProvider.java b/elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClientProvider.java
new file mode 100644
index 0000000..628ba4f
--- /dev/null
+++ b/elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClientProvider.java
@@ -0,0 +1,12 @@
+package org.xbib.elx.http;
+
+import org.xbib.elx.api.ExtendedClientProvider;
+
+import java.util.Collections;
+
+public class ExtendedHttpClientProvider implements ExtendedClientProvider {
+ @Override
+ public ExtendedHttpClient getExtendedClient() {
+ return new ExtendedHttpClient(Collections.emptyList(), Thread.currentThread().getContextClassLoader());
+ }
+}
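
A usage sketch in the style of the mock-provider test earlier in this change; the "url" key matches what ExtendedHttpClient.init(Settings) reads above, but the ClientBuilder.put(String, String) setter shown here is an assumption:

    // Sketch: ClientBuilder.put(...) is assumed; the other calls mirror the extended client API.
    ExtendedHttpClient client = ClientBuilder.builder()
            .provider(ExtendedHttpClientProvider.class)
            .put("url", "http://localhost:9200")
            .build();
    client.newIndex("test");
    client.index("test", "1", true, "{\"field\":\"value\"}");
    client.flush();
    client.close();
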
diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpAction.java b/elx-http/src/main/java/org/xbib/elx/http/HttpAction.java
similarity index 95%
rename from http/src/main/java/org/xbib/elasticsearch/client/http/HttpAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/HttpAction.java
index 674ee6d..844dae3 100644
--- a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/HttpAction.java
@@ -1,4 +1,4 @@
-package org.xbib.elasticsearch.client.http;
+package org.xbib.elx.http;
import io.netty.buffer.ByteBuf;
import io.netty.handler.codec.http.HttpHeaderNames;
@@ -68,7 +68,7 @@ public abstract class HttpAction {
- private final HttpClient httpClient;
+ private final ExtendedHttpClient extendedHttpClient;
private final R request;
@@ -23,14 +23,14 @@ public class HttpActionContext extends BaseFuture implements ActionFuture, ActionListener {
private Transport httpClientTransport;
diff --git a/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/health/HttpClusterHealthAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/health/HttpClusterHealthAction.java
new file mode 100644
index 0000000..5ed27be
--- /dev/null
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/health/HttpClusterHealthAction.java
@@ -0,0 +1,135 @@
+package org.xbib.elx.http.action.admin.cluster.health;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.health.ClusterIndexHealth;
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.xbib.elx.http.HttpAction;
+import org.xbib.netty.http.client.RequestBuilder;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.emptyMap;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+public class HttpClusterHealthAction extends HttpAction<ClusterHealthRequest, ClusterHealthResponse> {
+
+ @Override
+ public ClusterHealthAction getActionInstance() {
+ return ClusterHealthAction.INSTANCE;
+ }
+
+ @Override
+ protected RequestBuilder createHttpRequest(String url, ClusterHealthRequest request) {
+ return newPutRequest(url, "/_cluster/health");
+ }
+
+ @Override
+ protected CheckedFunction<XContentParser, ClusterHealthResponse, IOException> entityParser() {
+ throw new UnsupportedOperationException();
+ }
+
+ private static final String CLUSTER_NAME = "cluster_name";
+ private static final String STATUS = "status";
+ private static final String TIMED_OUT = "timed_out";
+ private static final String NUMBER_OF_NODES = "number_of_nodes";
+ private static final String NUMBER_OF_DATA_NODES = "number_of_data_nodes";
+ private static final String NUMBER_OF_PENDING_TASKS = "number_of_pending_tasks";
+ private static final String NUMBER_OF_IN_FLIGHT_FETCH = "number_of_in_flight_fetch";
+ private static final String DELAYED_UNASSIGNED_SHARDS = "delayed_unassigned_shards";
+ private static final String TASK_MAX_WAIT_TIME_IN_QUEUE = "task_max_waiting_in_queue";
+ private static final String TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS = "task_max_waiting_in_queue_millis";
+ private static final String ACTIVE_SHARDS_PERCENT_AS_NUMBER = "active_shards_percent_as_number";
+ private static final String ACTIVE_SHARDS_PERCENT = "active_shards_percent";
+ private static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards";
+ private static final String ACTIVE_SHARDS = "active_shards";
+ private static final String RELOCATING_SHARDS = "relocating_shards";
+ private static final String INITIALIZING_SHARDS = "initializing_shards";
+ private static final String UNASSIGNED_SHARDS = "unassigned_shards";
+ private static final String INDICES = "indices";
+
+ private static final ConstructingObjectParser<ClusterHealthResponse, Void> PARSER =
+ new ConstructingObjectParser<>("cluster_health_response", true,
+ parsedObjects -> {
+ int i = 0;
+ // ClusterStateHealth fields
+ int numberOfNodes = (int) parsedObjects[i++];
+ int numberOfDataNodes = (int) parsedObjects[i++];
+ int activeShards = (int) parsedObjects[i++];
+ int relocatingShards = (int) parsedObjects[i++];
+ int activePrimaryShards = (int) parsedObjects[i++];
+ int initializingShards = (int) parsedObjects[i++];
+ int unassignedShards = (int) parsedObjects[i++];
+ double activeShardsPercent = (double) parsedObjects[i++];
+ String statusStr = (String) parsedObjects[i++];
+ ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr);
+ @SuppressWarnings("unchecked") List<ClusterIndexHealth> indexList =
+ (List<ClusterIndexHealth>) parsedObjects[i++];
+ final Map<String, ClusterIndexHealth> indices;
+ if (indexList == null || indexList.isEmpty()) {
+ indices = emptyMap();
+ } else {
+ indices = new HashMap<>(indexList.size());
+ for (ClusterIndexHealth indexHealth : indexList) {
+ indices.put(indexHealth.getIndex(), indexHealth);
+ }
+ }
+ /*ClusterStateHealth stateHealth = new ClusterStateHealth(activePrimaryShards, activeShards, relocatingShards,
+ initializingShards, unassignedShards, numberOfNodes, numberOfDataNodes, activeShardsPercent, status,
+ indices);*/
+ //ClusterState clusterState = new ClusterState();
+ //ClusterStateHealth clusterStateHealth = new ClusterStateHealth(clusterState, concreteIndices);
+
+ // ClusterHealthResponse fields
+ String clusterName = (String) parsedObjects[i++];
+ int numberOfPendingTasks = (int) parsedObjects[i++];
+ int numberOfInFlightFetch = (int) parsedObjects[i++];
+ int delayedUnassignedShards = (int) parsedObjects[i++];
+ long taskMaxWaitingTimeMillis = (long) parsedObjects[i++];
+ boolean timedOut = (boolean) parsedObjects[i];
+
+ return new ClusterHealthResponse(clusterName, null, null, numberOfPendingTasks,
+ numberOfInFlightFetch, delayedUnassignedShards,
+ TimeValue.timeValueMillis(taskMaxWaitingTimeMillis));
+ /*return new ClusterHealthResponse(clusterName, numberOfPendingTasks, numberOfInFlightFetch,
+ delayedUnassignedShards,
+ TimeValue.timeValueMillis(taskMaxWaitingTimeMillis), timedOut, stateHealth);*/
+ });
+
+
+ // private static final ObjectParser.NamedObjectParser INDEX_PARSER =
+ // (XContentParser parser, Void context, String index) -> ClusterIndexHealth.innerFromXContent(parser, index);
+
+ static {
+ // ClusterStateHealth fields
+ PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_NODES));
+ PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_DATA_NODES));
+ PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS));
+ PARSER.declareInt(constructorArg(), new ParseField(RELOCATING_SHARDS));
+ PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_PRIMARY_SHARDS));
+ PARSER.declareInt(constructorArg(), new ParseField(INITIALIZING_SHARDS));
+ PARSER.declareInt(constructorArg(), new ParseField(UNASSIGNED_SHARDS));
+ PARSER.declareDouble(constructorArg(), new ParseField(ACTIVE_SHARDS_PERCENT_AS_NUMBER));
+ PARSER.declareString(constructorArg(), new ParseField(STATUS));
+ // Can be absent if LEVEL == 'cluster'
+ //PARSER.declareNamedObjects(optionalConstructorArg(), INDEX_PARSER, new ParseField(INDICES));
+
+ // ClusterHealthResponse fields
+ PARSER.declareString(constructorArg(), new ParseField(CLUSTER_NAME));
+ PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_PENDING_TASKS));
+ PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_IN_FLIGHT_FETCH));
+ PARSER.declareInt(constructorArg(), new ParseField(DELAYED_UNASSIGNED_SHARDS));
+ PARSER.declareLong(constructorArg(), new ParseField(TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS));
+ PARSER.declareBoolean(constructorArg(), new ParseField(TIMED_OUT));
+ }
+
+}
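
entityParser() above throws UnsupportedOperationException even though a PARSER is declared; a hypothetical wiring, assuming the ConstructingObjectParser is meant to back it, could look like this (sketch only, not part of the change):

```java
// Sketch only: inside HttpClusterHealthAction, the declared PARSER could back
// entityParser() instead of throwing (assumption, not shown in this change).
@Override
protected CheckedFunction<XContentParser, ClusterHealthResponse, IOException> entityParser() {
    return parser -> PARSER.apply(parser, null);
}
```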
diff --git a/http/src/main/java/org/elasticsearch/action/admin/cluster/node/info/HttpNodesInfoAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/node/info/HttpNodesInfoAction.java
similarity index 87%
rename from http/src/main/java/org/elasticsearch/action/admin/cluster/node/info/HttpNodesInfoAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/node/info/HttpNodesInfoAction.java
index e50358b..619f80a 100644
--- a/http/src/main/java/org/elasticsearch/action/admin/cluster/node/info/HttpNodesInfoAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/node/info/HttpNodesInfoAction.java
@@ -1,8 +1,13 @@
-package org.elasticsearch.action.admin.cluster.node.info;
+package org.xbib.elx.http.action.admin.cluster.node.info;
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.CheckedFunction;
@@ -16,8 +21,8 @@ import org.elasticsearch.monitor.os.OsInfo;
import org.elasticsearch.monitor.process.ProcessInfo;
import org.elasticsearch.threadpool.ThreadPoolInfo;
import org.elasticsearch.transport.TransportInfo;
-import org.xbib.elasticsearch.client.http.HttpAction;
-import org.xbib.elasticsearch.client.http.HttpActionContext;
+import org.xbib.elx.http.HttpAction;
+import org.xbib.elx.http.HttpActionContext;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
@@ -106,11 +111,11 @@ public class HttpNodesInfoAction extends HttpAction<NodesInfoRequest, NodesInfoResponse
Map<String, Object> map2 = (Map<String, Object>) entry.getValue();
- String nodeName = (String)map2.get("name");
- String hostName = (String)map2.get("host");
- String hostAddress = (String)map2.get("ip");
+ String nodeName = (String) map2.get("name");
+ String hostName = (String) map2.get("host");
+ String hostAddress = (String) map2.get("ip");
// [/][:]
- String transportAddressString = (String)map2.get("transport_address");
+ String transportAddressString = (String) map2.get("transport_address");
int pos = transportAddressString.indexOf(':');
String host = pos > 0 ? transportAddressString.substring(0, pos) : transportAddressString;
int port = Integer.parseInt(pos > 0 ? transportAddressString.substring(pos + 1) : "0");
@@ -121,8 +126,8 @@ public class HttpNodesInfoAction extends HttpAction<NodesInfoRequest, NodesInfoResponse
Map<String, String> attributes = Collections.emptyMap();
Set<DiscoveryNode.Role> roles = new HashSet<>();
Version version = Version.fromString((String) map2.get("version"));
diff --git a/http/src/main/java/org/elasticsearch/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java
similarity index 79%
rename from http/src/main/java/org/elasticsearch/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java
index b66675c..f5d9631 100644
--- a/http/src/main/java/org/elasticsearch/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java
@@ -1,11 +1,14 @@
-package org.elasticsearch.action.admin.cluster.settings;
+package org.xbib.elx.http.action.admin.cluster.settings;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
@@ -13,9 +16,6 @@ import java.io.UncheckedIOException;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-/**
- *
- */
public class HttpClusterUpdateSettingsAction extends HttpAction<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse> {
@Override
@@ -41,9 +41,6 @@ public class HttpClusterUpdateSettingsAction extends HttpAction<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse
protected CheckedFunction<XContentParser, ClusterUpdateSettingsResponse, IOException> entityParser() {
- return parser -> {
- // TODO(jprante)
- return new ClusterUpdateSettingsResponse();
- };
+ return ClusterUpdateSettingsResponse::fromXContent;
}
}
diff --git a/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/state/HttpClusterStateAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/state/HttpClusterStateAction.java
new file mode 100644
index 0000000..5adf01c
--- /dev/null
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/state/HttpClusterStateAction.java
@@ -0,0 +1,29 @@
+package org.xbib.elx.http.action.admin.cluster.state;
+
+import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.xbib.elx.http.HttpAction;
+import org.xbib.netty.http.client.RequestBuilder;
+
+import java.io.IOException;
+
+public class HttpClusterStateAction extends HttpAction<ClusterStateRequest, ClusterStateResponse> {
+
+ @Override
+ public ClusterStateAction getActionInstance() {
+ return ClusterStateAction.INSTANCE;
+ }
+
+ @Override
+ protected RequestBuilder createHttpRequest(String url, ClusterStateRequest request) {
+ return newPutRequest(url, "/_cluster/state");
+ }
+
+ @Override
+ protected CheckedFunction<XContentParser, ClusterStateResponse, IOException> entityParser() {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/http/src/main/java/org/elasticsearch/action/admin/indices/create/HttpCreateIndexAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/create/HttpCreateIndexAction.java
similarity index 75%
rename from http/src/main/java/org/elasticsearch/action/admin/indices/create/HttpCreateIndexAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/create/HttpCreateIndexAction.java
index da64f8b..1b9410b 100644
--- a/http/src/main/java/org/elasticsearch/action/admin/indices/create/HttpCreateIndexAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/create/HttpCreateIndexAction.java
@@ -1,12 +1,15 @@
-package org.elasticsearch.action.admin.indices.create;
+package org.xbib.elx.http.action.admin.indices.create;
+import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
@@ -27,9 +30,6 @@ public class HttpCreateIndexAction extends HttpAction<CreateIndexRequest, CreateIndexResponse
protected CheckedFunction<XContentParser, CreateIndexResponse, IOException> entityParser() {
- return parser -> {
- // TODO(jprante) build real create index response
- return new CreateIndexResponse();
- };
+ return CreateIndexResponse::fromXContent;
}
}
diff --git a/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/delete/HttpDeleteIndexAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/delete/HttpDeleteIndexAction.java
new file mode 100644
index 0000000..c791444
--- /dev/null
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/delete/HttpDeleteIndexAction.java
@@ -0,0 +1,29 @@
+package org.xbib.elx.http.action.admin.indices.delete;
+
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.xbib.elx.http.HttpAction;
+import org.xbib.netty.http.client.RequestBuilder;
+
+import java.io.IOException;
+
+public class HttpDeleteIndexAction extends HttpAction<DeleteIndexRequest, DeleteIndexResponse> {
+
+ @Override
+ public DeleteIndexAction getActionInstance() {
+ return DeleteIndexAction.INSTANCE;
+ }
+
+ @Override
+ protected RequestBuilder createHttpRequest(String url, DeleteIndexRequest deleteIndexRequest) {
+ return newPutRequest(url, "/" + String.join(",", deleteIndexRequest.indices()));
+ }
+
+ @Override
+ protected CheckedFunction<XContentParser, DeleteIndexResponse, IOException> entityParser() {
+ return DeleteIndexResponse::fromXContent;
+ }
+}
diff --git a/http/src/main/java/org/elasticsearch/action/admin/indices/refresh/HttpRefreshIndexAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/refresh/HttpRefreshIndexAction.java
similarity index 68%
rename from http/src/main/java/org/elasticsearch/action/admin/indices/refresh/HttpRefreshIndexAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/refresh/HttpRefreshIndexAction.java
index 88f76ea..a6e37c5 100644
--- a/http/src/main/java/org/elasticsearch/action/admin/indices/refresh/HttpRefreshIndexAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/refresh/HttpRefreshIndexAction.java
@@ -1,15 +1,15 @@
-package org.elasticsearch.action.admin.indices.refresh;
+package org.xbib.elx.http.action.admin.indices.refresh;
+import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
-/**
- *
- */
public class HttpRefreshIndexAction extends HttpAction<RefreshRequest, RefreshResponse> {
@Override
@@ -25,6 +25,6 @@ public class HttpRefreshIndexAction extends HttpAction<RefreshRequest, RefreshResponse
protected CheckedFunction<XContentParser, RefreshResponse, IOException> entityParser() {
- return parser -> new RefreshResponse();
+ return RefreshResponse::fromXContent;
}
}
diff --git a/http/src/main/java/org/elasticsearch/action/admin/indices/settings/put/HttpUpdateSettingsAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/settings/put/HttpUpdateSettingsAction.java
similarity index 71%
rename from http/src/main/java/org/elasticsearch/action/admin/indices/settings/put/HttpUpdateSettingsAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/settings/put/HttpUpdateSettingsAction.java
index b8facce..f6dc7e8 100644
--- a/http/src/main/java/org/elasticsearch/action/admin/indices/settings/put/HttpUpdateSettingsAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/settings/put/HttpUpdateSettingsAction.java
@@ -1,21 +1,21 @@
-package org.elasticsearch.action.admin.indices.settings.put;
+package org.xbib.elx.http.action.admin.indices.settings.put;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
import java.io.UncheckedIOException;
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-/**
- *
- */
public class HttpUpdateSettingsAction extends HttpAction<UpdateSettingsRequest, UpdateSettingsResponse> {
@Override
@@ -26,7 +26,7 @@ public class HttpUpdateSettingsAction extends HttpAction<UpdateSettingsRequest, UpdateSettingsResponse
protected CheckedFunction<XContentParser, UpdateSettingsResponse, IOException> entityParser() {
- return parser -> new UpdateSettingsResponse();
+ return UpdateSettingsResponse::fromXContent;
}
}
diff --git a/http/src/main/java/org/elasticsearch/action/bulk/HttpBulkAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/bulk/HttpBulkAction.java
similarity index 93%
rename from http/src/main/java/org/elasticsearch/action/bulk/HttpBulkAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/bulk/HttpBulkAction.java
index 050d608..6a07321 100644
--- a/http/src/main/java/org/elasticsearch/action/bulk/HttpBulkAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/bulk/HttpBulkAction.java
@@ -1,18 +1,18 @@
-package org.elasticsearch.action.bulk;
+package org.xbib.elx.http.action.bulk;
import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.bulk.BulkAction;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
-/**
- *
- */
public class HttpBulkAction extends HttpAction<BulkRequest, BulkResponse> {
@Override
diff --git a/http/src/main/java/org/elasticsearch/action/get/HttpExistsAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/get/HttpExistsAction.java
similarity index 78%
rename from http/src/main/java/org/elasticsearch/action/get/HttpExistsAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/get/HttpExistsAction.java
index fd2443e..bb1d5df 100644
--- a/http/src/main/java/org/elasticsearch/action/get/HttpExistsAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/get/HttpExistsAction.java
@@ -1,9 +1,12 @@
-package org.elasticsearch.action.get;
+package org.xbib.elx.http.action.get;
import org.elasticsearch.action.GenericAction;
+import org.elasticsearch.action.get.GetAction;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
diff --git a/http/src/main/java/org/elasticsearch/action/get/HttpGetAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/get/HttpGetAction.java
similarity index 78%
rename from http/src/main/java/org/elasticsearch/action/get/HttpGetAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/get/HttpGetAction.java
index 3a72116..b700961 100644
--- a/http/src/main/java/org/elasticsearch/action/get/HttpGetAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/get/HttpGetAction.java
@@ -1,9 +1,12 @@
-package org.elasticsearch.action.get;
+package org.xbib.elx.http.action.get;
import org.elasticsearch.action.GenericAction;
+import org.elasticsearch.action.get.GetAction;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
diff --git a/http/src/main/java/org/elasticsearch/action/index/HttpIndexAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/index/HttpIndexAction.java
similarity index 78%
rename from http/src/main/java/org/elasticsearch/action/index/HttpIndexAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/index/HttpIndexAction.java
index 5352682..be7aba2 100644
--- a/http/src/main/java/org/elasticsearch/action/index/HttpIndexAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/index/HttpIndexAction.java
@@ -1,9 +1,12 @@
-package org.elasticsearch.action.index;
+package org.xbib.elx.http.action.index;
import org.elasticsearch.action.GenericAction;
+import org.elasticsearch.action.index.IndexAction;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
diff --git a/http/src/main/java/org/elasticsearch/action/main/HttpMainAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/main/HttpMainAction.java
similarity index 76%
rename from http/src/main/java/org/elasticsearch/action/main/HttpMainAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/main/HttpMainAction.java
index ee5dc8c..0ee995b 100644
--- a/http/src/main/java/org/elasticsearch/action/main/HttpMainAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/main/HttpMainAction.java
@@ -1,15 +1,16 @@
-package org.elasticsearch.action.main;
+package org.xbib.elx.http.action.main;
import org.elasticsearch.action.GenericAction;
+import org.elasticsearch.action.main.MainAction;
+import org.elasticsearch.action.main.MainRequest;
+import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
-/**
- */
public class HttpMainAction extends HttpAction<MainRequest, MainResponse> {
@Override
diff --git a/http/src/main/java/org/elasticsearch/action/search/HttpSearchAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/search/HttpSearchAction.java
similarity index 71%
rename from http/src/main/java/org/elasticsearch/action/search/HttpSearchAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/search/HttpSearchAction.java
index 4c637b7..0cd6a15 100644
--- a/http/src/main/java/org/elasticsearch/action/search/HttpSearchAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/search/HttpSearchAction.java
@@ -1,15 +1,15 @@
-package org.elasticsearch.action.search;
+package org.xbib.elx.http.action.search;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
-/**
- *
- */
public class HttpSearchAction extends HttpAction<SearchRequest, SearchResponse> {
@Override
@@ -20,14 +20,11 @@ public class HttpSearchAction extends HttpAction
@Override
protected RequestBuilder createHttpRequest(String url, SearchRequest request) {
String index = request.indices() != null ? "/" + String.join(",", request.indices()) : "";
- return newPostRequest(url, index + "/_search", request.source().toString() );
+ return newPostRequest(url, index + "/_search", request.source().toString());
}
@Override
protected CheckedFunction<XContentParser, SearchResponse, IOException> entityParser() {
- return parser -> {
- // TODO(jprante) build search response
- return new SearchResponse();
- };
+ return SearchResponse::fromXContent;
}
}
diff --git a/http/src/main/java/org/elasticsearch/action/update/HttpUpdateAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/update/HttpUpdateAction.java
similarity index 85%
rename from http/src/main/java/org/elasticsearch/action/update/HttpUpdateAction.java
rename to elx-http/src/main/java/org/xbib/elx/http/action/update/HttpUpdateAction.java
index c703075..134dbb8 100644
--- a/http/src/main/java/org/elasticsearch/action/update/HttpUpdateAction.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/action/update/HttpUpdateAction.java
@@ -1,13 +1,16 @@
-package org.elasticsearch.action.update;
+package org.xbib.elx.http.action.update;
import org.elasticsearch.action.GenericAction;
+import org.elasticsearch.action.update.UpdateAction;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
-import org.xbib.elasticsearch.client.http.HttpAction;
+import org.xbib.elx.http.HttpAction;
import org.xbib.netty.http.client.RequestBuilder;
import java.io.IOException;
@@ -35,8 +38,8 @@ public class HttpUpdateAction extends HttpAction
if (updateRequest.upsertRequest() != null) {
XContentType upsertContentType = updateRequest.upsertRequest().getContentType();
if ((xContentType != null) && (xContentType != upsertContentType)) {
- throw new IllegalStateException("update request cannot have different content types for doc [" + xContentType + "]" +
- " and upsert [" + upsertContentType + "] documents");
+ throw new IllegalStateException("update request cannot have different content types for doc [" +
+ xContentType + "]" + " and upsert [" + upsertContentType + "] documents");
} else {
xContentType = upsertContentType;
}
diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/package-info.java b/elx-http/src/main/java/org/xbib/elx/http/package-info.java
similarity index 53%
rename from http/src/main/java/org/xbib/elasticsearch/client/http/package-info.java
rename to elx-http/src/main/java/org/xbib/elx/http/package-info.java
index a9c3ded..ef5876c 100644
--- a/http/src/main/java/org/xbib/elasticsearch/client/http/package-info.java
+++ b/elx-http/src/main/java/org/xbib/elx/http/package-info.java
@@ -1,4 +1,4 @@
/**
* Classes for Elasticsearch HTTP client.
*/
-package org.xbib.elasticsearch.client.http;
+package org.xbib.elx.http;
diff --git a/elx-http/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider b/elx-http/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider
new file mode 100644
index 0000000..0c75f14
--- /dev/null
+++ b/elx-http/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider
@@ -0,0 +1 @@
+org.xbib.elx.http.ExtendedHttpClientProvider
\ No newline at end of file
diff --git a/elx-http/src/main/resources/META-INF/services/org.xbib.elx.http.HttpAction b/elx-http/src/main/resources/META-INF/services/org.xbib.elx.http.HttpAction
new file mode 100644
index 0000000..4d35ec6
--- /dev/null
+++ b/elx-http/src/main/resources/META-INF/services/org.xbib.elx.http.HttpAction
@@ -0,0 +1,12 @@
+org.xbib.elx.http.action.admin.cluster.node.info.HttpNodesInfoAction
+org.xbib.elx.http.action.admin.cluster.settings.HttpClusterUpdateSettingsAction
+org.xbib.elx.http.action.admin.indices.create.HttpCreateIndexAction
+org.xbib.elx.http.action.admin.indices.delete.HttpDeleteIndexAction
+org.xbib.elx.http.action.admin.indices.refresh.HttpRefreshIndexAction
+org.xbib.elx.http.action.admin.indices.settings.put.HttpUpdateSettingsAction
+org.xbib.elx.http.action.bulk.HttpBulkAction
+org.xbib.elx.http.action.index.HttpIndexAction
+org.xbib.elx.http.action.search.HttpSearchAction
+org.xbib.elx.http.action.main.HttpMainAction
+org.xbib.elx.http.action.get.HttpExistsAction
+org.xbib.elx.http.action.get.HttpGetAction
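
The registry file above lists one HttpAction implementation per line; the usual way such a file is consumed is java.util.ServiceLoader. The lookup code in ExtendedHttpClient is not shown in this excerpt, so the following is an assumed sketch of that pattern, keyed by getActionInstance():

```java
import java.util.HashMap;
import java.util.Map;
import java.util.ServiceLoader;

import org.elasticsearch.action.GenericAction;
import org.xbib.elx.http.HttpAction;

// Assumed sketch of ServiceLoader-based discovery for the registry file above;
// the real lookup inside ExtendedHttpClient is not part of this excerpt.
final class HttpActionRegistrySketch {

    @SuppressWarnings("rawtypes")
    static Map<GenericAction, HttpAction> loadActions(ClassLoader classLoader) {
        Map<GenericAction, HttpAction> actions = new HashMap<>();
        for (HttpAction httpAction : ServiceLoader.load(HttpAction.class, classLoader)) {
            // each entry maps the Elasticsearch action singleton to its HTTP implementation
            actions.put(httpAction.getActionInstance(), httpAction);
        }
        return actions;
    }
}
```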
diff --git a/http/src/main/resources/extra-security.policy b/elx-http/src/main/resources/extra-security.policy
similarity index 100%
rename from http/src/main/resources/extra-security.policy
rename to elx-http/src/main/resources/extra-security.policy
diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/ClientTest.java b/elx-http/src/test/java/org/xbib/elx/http/test/ClientTest.java
new file mode 100644
index 0000000..920dd5e
--- /dev/null
+++ b/elx-http/src/test/java/org/xbib/elx/http/test/ClientTest.java
@@ -0,0 +1,207 @@
+package org.xbib.elx.http.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.common.Parameters;
+import org.xbib.elx.http.ExtendedHttpClient;
+import org.xbib.elx.http.ExtendedHttpClientProvider;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+@Ignore
+public class ClientTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger(ClientTest.class.getSimpleName());
+
+ private static final Long ACTIONS = 25000L;
+
+ private static final Long MAX_ACTIONS_PER_REQUEST = 1000L;
+
+ @Before
+ public void startNodes() {
+ try {
+ super.startNodes();
+ startNode("2");
+ } catch (Throwable t) {
+ logger.error("startNodes failed", t);
+ }
+ }
+
+ @Test
+ public void testSingleDoc() throws Exception {
+ final ExtendedHttpClient client = ClientBuilder.builder()
+ .provider(ExtendedHttpClientProvider.class)
+ .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST)
+ .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(30))
+ .build();
+ try {
+ client.newIndex("test");
+ client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ assertEquals(1, client.getBulkMetric().getSucceeded().getCount());
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ client.close();
+ }
+ }
+
+ @Test
+ public void testNewIndex() throws Exception {
+ final ExtendedHttpClient client = ClientBuilder.builder()
+ .provider(ExtendedHttpClientProvider.class)
+ .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5))
+ .build();
+ client.newIndex("test");
+ client.close();
+ }
+
+ @Test
+ public void testMapping() throws Exception {
+ final ExtendedHttpClient client = ClientBuilder.builder()
+ .provider(ExtendedHttpClientProvider.class)
+ .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5))
+ .build();
+ XContentBuilder builder = jsonBuilder()
+ .startObject()
+ .startObject("doc")
+ .startObject("properties")
+ .startObject("location")
+ .field("type", "geo_point")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ client.newIndex("test", Settings.EMPTY, Strings.toString(builder));
+ GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test");
+ GetMappingsResponse getMappingsResponse =
+ client.getClient().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet();
+ logger.info("mappings={}", getMappingsResponse.getMappings());
+ assertTrue(getMappingsResponse.getMappings().get("test").containsKey("doc"));
+ client.close();
+ }
+
+ @Test
+ public void testRandomDocs() throws Exception {
+ long numactions = ACTIONS;
+ final ExtendedHttpClient client = ClientBuilder.builder()
+ .provider(ExtendedHttpClientProvider.class)
+ .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST)
+ .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60))
+ .build();
+ try {
+ client.newIndex("test");
+ for (int i = 0; i < ACTIONS; i++) {
+ client.index("test", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } catch (Exception e) {
+ logger.error(e.getMessage(), e);
+ } finally {
+ assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount());
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ client.refreshIndex("test");
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
+ .setQuery(QueryBuilders.matchAllQuery()).setSize(0);
+ assertEquals(numactions,
+ searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
+ client.close();
+ }
+ }
+
+ @Test
+ public void testThreadedRandomDocs() throws Exception {
+ int maxthreads = Runtime.getRuntime().availableProcessors();
+ Long maxActionsPerRequest = MAX_ACTIONS_PER_REQUEST;
+ final Long actions = ACTIONS;
+ logger.info("maxthreads={} maxactions={} maxloop={}", maxthreads, maxActionsPerRequest, actions);
+ final ExtendedHttpClient client = ClientBuilder.builder()
+ .provider(ExtendedHttpClientProvider.class)
+ .put(Parameters.MAX_CONCURRENT_REQUESTS.name(), maxthreads * 2)
+ .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), maxActionsPerRequest)
+ .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60))
+ .build();
+ try {
+ Settings settings = Settings.builder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .build();
+ client.newIndex("test", settings, (String)null)
+ .startBulk("test", 0, 1000);
+ logger.info("index created");
+ ExecutorService executorService = Executors.newFixedThreadPool(maxthreads);
+ final CountDownLatch latch = new CountDownLatch(maxthreads);
+ for (int i = 0; i < maxthreads; i++) {
+ executorService.execute(() -> {
+ for (int i1 = 0; i1 < actions; i1++) {
+ client.index("test", null, false,"{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ latch.countDown();
+ });
+ }
+ logger.info("waiting for latch...");
+ if (latch.await(60L, TimeUnit.SECONDS)) {
+ logger.info("flush...");
+ client.flush();
+ client.waitForResponses(60L, TimeUnit.SECONDS);
+ logger.info("got all responses, executor service shutdown...");
+ executorService.shutdown();
+ executorService.awaitTermination(60L, TimeUnit.SECONDS);
+ logger.info("pool is shut down");
+ } else {
+ logger.warn("latch timeout");
+ }
+ client.stopBulk("test", 30L, TimeUnit.SECONDS);
+ assertEquals(maxthreads * actions, client.getBulkMetric().getSucceeded().getCount());
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } catch (Exception e) {
+ logger.error(e.getMessage(), e);
+ } finally {
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ client.refreshIndex("test");
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
+ .setQuery(QueryBuilders.matchAllQuery()).setSize(0);
+ assertEquals(maxthreads * actions,
+ searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
+ client.close();
+ }
+ }
+}
diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/DuplicateIDTest.java b/elx-http/src/test/java/org/xbib/elx/http/test/DuplicateIDTest.java
new file mode 100644
index 0000000..65745ce
--- /dev/null
+++ b/elx-http/src/test/java/org/xbib/elx/http/test/DuplicateIDTest.java
@@ -0,0 +1,64 @@
+package org.xbib.elx.http.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.common.Parameters;
+import org.xbib.elx.http.ExtendedHttpClient;
+import org.xbib.elx.http.ExtendedHttpClientProvider;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+@Ignore
+public class DuplicateIDTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger(DuplicateIDTest.class.getSimpleName());
+
+ private static final Long MAX_ACTIONS_PER_REQUEST = 1000L;
+
+ private static final Long ACTIONS = 12345L;
+
+ @Test
+ public void testDuplicateDocIDs() throws Exception {
+ long numactions = ACTIONS;
+ final ExtendedHttpClient client = ClientBuilder.builder()
+ .provider(ExtendedHttpClientProvider.class)
+ .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST)
+ .build();
+ try {
+ client.newIndex("test");
+ for (int i = 0; i < ACTIONS; i++) {
+ client.index("test", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+ client.refreshIndex("test");
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
+ .setIndices("test")
+ .setTypes("test")
+ .setQuery(matchAllQuery());
+ long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits();
+ logger.info("hits = {}", hits);
+ assertTrue(hits < ACTIONS);
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ client.close();
+ assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount());
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ }
+ }
+}
diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/IndexShiftTest.java b/elx-http/src/test/java/org/xbib/elx/http/test/IndexShiftTest.java
new file mode 100644
index 0000000..7317978
--- /dev/null
+++ b/elx-http/src/test/java/org/xbib/elx/http/test/IndexShiftTest.java
@@ -0,0 +1,111 @@
+package org.xbib.elx.http.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.xbib.elx.api.IndexShiftResult;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.http.ExtendedHttpClient;
+import org.xbib.elx.http.ExtendedHttpClientProvider;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+@Ignore
+public class IndexShiftTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger(IndexShiftTest.class.getSimpleName());
+
+ @Test
+ public void testIndexShift() throws Exception {
+ final ExtendedHttpClient client = ClientBuilder.builder()
+ .provider(ExtendedHttpClientProvider.class)
+ .build();
+ try {
+ Settings settings = Settings.builder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .build();
+ client.newIndex("test1234", settings);
+ for (int i = 0; i < 1; i++) {
+ client.index("test1234", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+
+ IndexShiftResult indexShiftResult =
+ client.shiftIndex("test", "test1234", Arrays.asList("a", "b", "c"));
+
+ assertTrue(indexShiftResult.getNewAliases().contains("a"));
+ assertTrue(indexShiftResult.getNewAliases().contains("b"));
+ assertTrue(indexShiftResult.getNewAliases().contains("c"));
+ assertTrue(indexShiftResult.getMovedAliases().isEmpty());
+
+ Map aliases = client.getAliases("test1234");
+ assertTrue(aliases.containsKey("a"));
+ assertTrue(aliases.containsKey("b"));
+ assertTrue(aliases.containsKey("c"));
+ assertTrue(aliases.containsKey("test"));
+
+ String resolved = client.resolveAlias("test");
+ aliases = client.getAliases(resolved);
+ assertTrue(aliases.containsKey("a"));
+ assertTrue(aliases.containsKey("b"));
+ assertTrue(aliases.containsKey("c"));
+ assertTrue(aliases.containsKey("test"));
+
+ client.newIndex("test5678", settings);
+ for (int i = 0; i < 1; i++) {
+ client.index("test5678", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+
+ indexShiftResult = client.shiftIndex("test", "test5678", Arrays.asList("d", "e", "f"),
+ (request, index, alias) -> request.addAliasAction(IndicesAliasesRequest.AliasActions.add()
+ .index(index).alias(alias).filter(QueryBuilders.termQuery("my_key", alias)))
+ );
+ assertTrue(indexShiftResult.getNewAliases().contains("d"));
+ assertTrue(indexShiftResult.getNewAliases().contains("e"));
+ assertTrue(indexShiftResult.getNewAliases().contains("f"));
+ assertTrue(indexShiftResult.getMovedAliases().contains("a"));
+ assertTrue(indexShiftResult.getMovedAliases().contains("b"));
+ assertTrue(indexShiftResult.getMovedAliases().contains("c"));
+
+ aliases = client.getAliases("test5678");
+ assertTrue(aliases.containsKey("a"));
+ assertTrue(aliases.containsKey("b"));
+ assertTrue(aliases.containsKey("c"));
+ assertTrue(aliases.containsKey("d"));
+ assertTrue(aliases.containsKey("e"));
+ assertTrue(aliases.containsKey("f"));
+
+ resolved = client.resolveAlias("test");
+ aliases = client.getAliases(resolved);
+ assertTrue(aliases.containsKey("a"));
+ assertTrue(aliases.containsKey("b"));
+ assertTrue(aliases.containsKey("c"));
+ assertTrue(aliases.containsKey("d"));
+ assertTrue(aliases.containsKey("e"));
+ assertTrue(aliases.containsKey("f"));
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ client.close();
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ }
+ }
+}
diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/MockNode.java b/elx-http/src/test/java/org/xbib/elx/http/test/MockNode.java
new file mode 100644
index 0000000..fc62993
--- /dev/null
+++ b/elx-http/src/test/java/org/xbib/elx/http/test/MockNode.java
@@ -0,0 +1,15 @@
+package org.xbib.elx.http.test;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.InternalSettingsPreparer;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.plugins.Plugin;
+
+import java.util.List;
+
+public class MockNode extends Node {
+
+ public MockNode(Settings settings, List<Class<? extends Plugin>> classpathPlugins) {
+ super(InternalSettingsPreparer.prepareEnvironment(settings, null), classpathPlugins);
+ }
+}
diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/ReplicaTest.java b/elx-http/src/test/java/org/xbib/elx/http/test/ReplicaTest.java
new file mode 100644
index 0000000..c9037ca
--- /dev/null
+++ b/elx-http/src/test/java/org/xbib/elx/http/test/ReplicaTest.java
@@ -0,0 +1,151 @@
+package org.xbib.elx.http.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.admin.indices.stats.CommonStats;
+import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.IndexingStats;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.http.ExtendedHttpClient;
+import org.xbib.elx.http.ExtendedHttpClientProvider;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+@Ignore
+public class ReplicaTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger(ReplicaTest.class.getSimpleName());
+
+ @Test
+ public void testReplicaLevel() throws Exception {
+
+ // we need nodes for replica levels
+ startNode("2");
+ startNode("3");
+ startNode("4");
+
+ Settings settingsTest1 = Settings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 3)
+ .build();
+
+ Settings settingsTest2 = Settings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build();
+
+ final ExtendedHttpClient client = ClientBuilder.builder()
+ .provider(ExtendedHttpClientProvider.class)
+ .build();
+
+ try {
+ client.newIndex("test1", settingsTest1, new HashMap<>())
+ .newIndex("test2", settingsTest2, new HashMap<>());
+ client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS);
+ for (int i = 0; i < 1234; i++) {
+ client.index("test1", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ for (int i = 0; i < 1234; i++) {
+ client.index("test2", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ logger.info("refreshing");
+ client.refreshIndex("test1");
+ client.refreshIndex("test2");
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
+ .setIndices("test1", "test2")
+ .setQuery(matchAllQuery());
+ long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits();
+ logger.info("query total hits={}", hits);
+ assertEquals(2468, hits);
+ IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.getClient(), IndicesStatsAction.INSTANCE)
+ .all();
+ IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet();
+ for (Map.Entry<String, IndexStats> m : response.getIndices().entrySet()) {
+ IndexStats indexStats = m.getValue();
+ CommonStats commonStats = indexStats.getTotal();
+ IndexingStats indexingStats = commonStats.getIndexing();
+ IndexingStats.Stats stats = indexingStats.getTotal();
+ logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount());
+ for (Map.Entry<Integer, IndexShardStats> me : indexStats.getIndexShards().entrySet()) {
+ IndexShardStats indexShardStats = me.getValue();
+ CommonStats commonShardStats = indexShardStats.getTotal();
+ logger.info("shard {} count = {}", me.getKey(),
+ commonShardStats.getIndexing().getTotal().getIndexCount());
+ }
+ }
+ try {
+ client.deleteIndex("test1")
+ .deleteIndex("test2");
+ } catch (Exception e) {
+ logger.error("delete index failed, ignored. Reason:", e);
+ }
+ client.close();
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ }
+ }
+
+ @Test
+ public void testUpdateReplicaLevel() throws Exception {
+
+ long numberOfShards = 2;
+ int replicaLevel = 3;
+
+ // we need 3 nodes for replica level 3
+ startNode("2");
+ startNode("3");
+
+ Settings settings = Settings.builder()
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", 0)
+ .build();
+
+ final ExtendedHttpClient client = ClientBuilder.builder()
+ .provider(ExtendedHttpClientProvider.class)
+ .build();
+
+ try {
+ client.newIndex("replicatest", settings, new HashMap<>());
+ client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS);
+ for (int i = 0; i < 12345; i++) {
+ client.index("replicatest",null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+ client.updateReplicaLevel("replicatest", replicaLevel, 30L, TimeUnit.SECONDS);
+ assertEquals(replicaLevel, client.getReplicaLevel("replicatest"));
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ client.close();
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ }
+ }
+
+}
diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/SmokeTest.java b/elx-http/src/test/java/org/xbib/elx/http/test/SmokeTest.java
new file mode 100644
index 0000000..30dc44a
--- /dev/null
+++ b/elx-http/src/test/java/org/xbib/elx/http/test/SmokeTest.java
@@ -0,0 +1,71 @@
+package org.xbib.elx.http.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.common.settings.Settings;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.xbib.elx.api.IndexDefinition;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.http.ExtendedHttpClient;
+import org.xbib.elx.http.ExtendedHttpClientProvider;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+@Ignore
+public class SmokeTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger(SmokeTest.class.getSimpleName());
+
+ @Test
+ public void smokeTest() throws Exception {
+ final ExtendedHttpClient client = ClientBuilder.builder()
+ .provider(ExtendedHttpClientProvider.class)
+ .build();
+ try {
+ client.newIndex("test");
+ client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest
+ client.flush();
+ client.waitForResponses(30, TimeUnit.SECONDS);
+
+ assertEquals(getClusterName(), client.getClusterName());
+
+ client.checkMapping("test");
+
+ client.update("test", "1", "{ \"name\" : \"Another name\"}");
+ client.flush();
+
+ client.waitForRecovery("test", 10L, TimeUnit.SECONDS);
+
+ client.delete("test", "1");
+ client.deleteIndex("test");
+
+ IndexDefinition indexDefinition = client.buildIndexDefinitionFromSettings("test", Settings.builder()
+ .build());
+ assertEquals(0, indexDefinition.getReplicaLevel());
+ client.newIndex(indexDefinition);
+ client.index(indexDefinition.getFullIndexName(), "1", true, "{ \"name\" : \"Hello World\"}");
+ client.flush();
+ client.updateReplicaLevel(indexDefinition, 2);
+
+ int replica = client.getReplicaLevel(indexDefinition);
+ assertEquals(2, replica);
+
+ client.deleteIndex(indexDefinition);
+ assertEquals(0, client.getBulkMetric().getFailed().getCount());
+ assertEquals(4, client.getBulkMetric().getSucceeded().getCount());
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ client.close();
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ }
+ }
+}
diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/TestBase.java b/elx-http/src/test/java/org/xbib/elx/http/test/TestBase.java
new file mode 100644
index 0000000..d15a279
--- /dev/null
+++ b/elx-http/src/test/java/org/xbib/elx/http/test/TestBase.java
@@ -0,0 +1,198 @@
+package org.xbib.elx.http.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
+import org.elasticsearch.client.support.AbstractClient;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.transport.netty4.Netty4Plugin;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class TestBase {
+
+ private static final Logger logger = LogManager.getLogger("test");
+
+ private static final Random random = new Random();
+
+ private static final char[] numbersAndLetters = ("0123456789abcdefghijklmnopqrstuvwxyz").toCharArray();
+
+ private Map<String, Node> nodes = new HashMap<>();
+
+ private Map<String, AbstractClient> clients = new HashMap<>();
+
+ private AtomicInteger counter = new AtomicInteger();
+
+ private String cluster;
+
+ private String host;
+
+ private int port;
+
+ @Before
+ public void startNodes() {
+ try {
+ logger.info("starting");
+ this.cluster = "test-helper-cluster-" + counter.incrementAndGet();
+ startNode("1");
+ findNodeAddress();
+ try {
+ ClusterHealthResponse healthResponse = client("1").execute(ClusterHealthAction.INSTANCE,
+ new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN)
+ .timeout(TimeValue.timeValueSeconds(30))).actionGet();
+ if (healthResponse != null && healthResponse.isTimedOut()) {
+ throw new IOException("cluster state is " + healthResponse.getStatus().name()
+ + ", from here on, everything will fail!");
+ }
+ } catch (ElasticsearchTimeoutException e) {
+ throw new IOException("cluster does not respond to health request, cowardly refusing to continue");
+ }
+ } catch (Throwable t) {
+ logger.error("startNodes failed", t);
+ }
+ }
+
+ @After
+ public void stopNodes() {
+ try {
+ closeNodes();
+ } catch (Exception e) {
+ logger.error("can not close nodes", e);
+ } finally {
+ try {
+ deleteFiles();
+ logger.info("data files wiped");
+ Thread.sleep(2000L); // let OS commit changes
+ } catch (IOException e) {
+ logger.error(e.getMessage(), e);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ }
+
+ protected Settings getTransportSettings() {
+ return Settings.builder()
+ .put("host", host)
+ .put("port", port)
+ .put("cluster.name", cluster)
+ .put("path.home", getHome())
+ .build();
+ }
+
+ protected Settings getNodeSettings() {
+ return Settings.builder()
+ .put("cluster.name", cluster)
+ .put("discovery.zen.minimum_master_nodes", "1")
+ .put("transport.type", Netty4Plugin.NETTY_TRANSPORT_NAME)
+ .put("node.max_local_storage_nodes", 10) // allow many nodes to initialize here
+ .put("path.home", getHome())
+ .build();
+ }
+
+ protected static String getHome() {
+ return System.getProperty("path.home", System.getProperty("user.dir"));
+ }
+
+ protected void startNode(String id) throws NodeValidationException {
+ buildNode(id).start();
+ }
+
+ protected AbstractClient client(String id) {
+ return clients.get(id);
+ }
+
+ protected String getClusterName() {
+ return cluster;
+ }
+
+ protected String randomString(int len) {
+ final char[] buf = new char[len];
+ final int n = numbersAndLetters.length - 1;
+ for (int i = 0; i < buf.length; i++) {
+ buf[i] = numbersAndLetters[random.nextInt(n)];
+ }
+ return new String(buf);
+ }
+
+ private void closeNodes() throws IOException {
+ logger.info("closing all clients");
+ for (AbstractClient client : clients.values()) {
+ client.close();
+ }
+ clients.clear();
+ logger.info("closing all nodes");
+ for (Node node : nodes.values()) {
+ if (node != null) {
+ node.close();
+ }
+ }
+ nodes.clear();
+ logger.info("all nodes closed");
+ }
+
+ private void findNodeAddress() {
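+ // ask the running node for its published transport address so tests know which host and port to use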
+ NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().transport(true);
+ NodesInfoResponse response = client("1").admin().cluster().nodesInfo(nodesInfoRequest).actionGet();
+ TransportAddress address = response.getNodes().iterator().next().getTransport().getAddress()
+ .publishAddress();
+ host = address.address().getHostName();
+ port = address.address().getPort();
+ }
+
+ private Node buildNode(String id) {
+ Settings nodeSettings = Settings.builder()
+ .put(getNodeSettings())
+ .put("node.name", id)
+ .build();
+ List<Class<? extends Plugin>> plugins = Arrays.asList(CommonAnalysisPlugin.class, Netty4Plugin.class);
+ Node node = new MockNode(nodeSettings, plugins);
+ AbstractClient client = (AbstractClient) node.client();
+ nodes.put(id, node);
+ clients.put(id, client);
+ return node;
+ }
+
+ private static void deleteFiles() throws IOException {
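+ // walk the data directory bottom-up, deleting files first and their parent directories afterwards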
+ Path directory = Paths.get(getHome() + "/data");
+ Files.walkFileTree(directory, new SimpleFileVisitor<>() {
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ Files.delete(file);
+ return FileVisitResult.CONTINUE;
+ }
+
+ @Override
+ public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
+ Files.delete(dir);
+ return FileVisitResult.CONTINUE;
+ }
+ });
+ }
+}
diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/package-info.java b/elx-http/src/test/java/org/xbib/elx/http/test/package-info.java
new file mode 100644
index 0000000..2bb05c9
--- /dev/null
+++ b/elx-http/src/test/java/org/xbib/elx/http/test/package-info.java
@@ -0,0 +1,4 @@
+/**
+ *
+ */
+package org.xbib.elx.http.test;
diff --git a/node/src/test/resources/log4j2.xml b/elx-http/src/test/resources/log4j2.xml
similarity index 72%
rename from node/src/test/resources/log4j2.xml
rename to elx-http/src/test/resources/log4j2.xml
index b175dfc..6c323f8 100644
--- a/node/src/test/resources/log4j2.xml
+++ b/elx-http/src/test/resources/log4j2.xml
@@ -2,11 +2,11 @@
-
+
-
+
diff --git a/elx-node/build.gradle b/elx-node/build.gradle
new file mode 100644
index 0000000..6f6191c
--- /dev/null
+++ b/elx-node/build.gradle
@@ -0,0 +1,5 @@
+dependencies {
+ compile project(':elx-common')
+ compile "org.xbib.elasticsearch:transport-netty4:${rootProject.property('elasticsearch-server.version')}"
+ testCompile "org.xbib.elasticsearch:elasticsearch-analysis-common:${rootProject.property('elasticsearch-server.version')}"
+}
diff --git a/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClient.java b/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClient.java
new file mode 100644
index 0000000..d6e4963
--- /dev/null
+++ b/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClient.java
@@ -0,0 +1,71 @@
+package org.xbib.elx.node;
+
+import io.netty.util.Version;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.transport.netty4.Netty4Utils;
+import org.xbib.elx.common.AbstractExtendedClient;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+
+public class ExtendedNodeClient extends AbstractExtendedClient {
+
+ private static final Logger logger = LogManager.getLogger(ExtendedNodeClient.class.getName());
+
+ private Node node;
+
+ @Override
+ protected ElasticsearchClient createClient(Settings settings) throws IOException {
+ if (settings == null) {
+ return null;
+ }
+ String version = System.getProperty("os.name")
+ + " " + System.getProperty("java.vm.name")
+ + " " + System.getProperty("java.vm.vendor")
+ + " " + System.getProperty("java.runtime.version")
+ + " " + System.getProperty("java.vm.version");
+ Settings effectiveSettings = Settings.builder().put(settings)
+ .put("node.client", true)
+ .put("node.master", false)
+ .put("node.data", false)
+ .build();
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ effectiveSettings.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("flat_settings", "true")));
+ logger.info("creating node client on {} with effective settings {}",
+ version, Strings.toString(builder));
+ Collection<Class<? extends Plugin>> plugins = Collections.emptyList();
+ this.node = new BulkNode(new Environment(effectiveSettings, null), plugins);
+ try {
+ node.start();
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ return node.client();
+ }
+
+ @Override
+ protected void closeClient() throws IOException {
+ if (node != null) {
+ logger.debug("closing node client");
+ node.close();
+ }
+ }
+
+ private static class BulkNode extends Node {
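+ // minimal Node subclass used only to reach the protected constructor that takes classpath plugins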
+
+ BulkNode(Environment env, Collection<Class<? extends Plugin>> classpathPlugins) {
+ super(env, classpathPlugins);
+ }
+ }
+}
diff --git a/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClientProvider.java b/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClientProvider.java
new file mode 100644
index 0000000..46a4e9a
--- /dev/null
+++ b/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClientProvider.java
@@ -0,0 +1,10 @@
+package org.xbib.elx.node;
+
+import org.xbib.elx.api.ExtendedClientProvider;
+
+public class ExtendedNodeClientProvider implements ExtendedClientProvider {
+ @Override
+ public ExtendedNodeClient getExtendedClient() {
+ return new ExtendedNodeClient();
+ }
+}
diff --git a/elx-node/src/main/java/org/xbib/elx/node/package-info.java b/elx-node/src/main/java/org/xbib/elx/node/package-info.java
new file mode 100644
index 0000000..1216a48
--- /dev/null
+++ b/elx-node/src/main/java/org/xbib/elx/node/package-info.java
@@ -0,0 +1,4 @@
+/**
+ *
+ */
+package org.xbib.elx.node;
diff --git a/elx-node/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider b/elx-node/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider
new file mode 100644
index 0000000..372aaad
--- /dev/null
+++ b/elx-node/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider
@@ -0,0 +1 @@
+org.xbib.elx.node.ExtendedNodeClientProvider
\ No newline at end of file
diff --git a/elx-node/src/test/java/org/xbib/elx/node/test/ClientTest.java b/elx-node/src/test/java/org/xbib/elx/node/test/ClientTest.java
new file mode 100644
index 0000000..8e14e21
--- /dev/null
+++ b/elx-node/src/test/java/org/xbib/elx/node/test/ClientTest.java
@@ -0,0 +1,205 @@
+package org.xbib.elx.node.test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.junit.Before;
+import org.junit.Test;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.common.Parameters;
+import org.xbib.elx.node.ExtendedNodeClient;
+import org.xbib.elx.node.ExtendedNodeClientProvider;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+public class ClientTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger(ClientTest.class.getSimpleName());
+
+ private static final Long ACTIONS = 25000L;
+
+ private static final Long MAX_ACTIONS_PER_REQUEST = 1000L;
+
+ @Before
+ public void startNodes() {
+ try {
+ super.startNodes();
+ startNode("2");
+ } catch (Throwable t) {
+ logger.error("startNodes failed", t);
+ }
+ }
+
+ @Test
+ public void testSingleDoc() throws Exception {
+ final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+ .provider(ExtendedNodeClientProvider.class)
+ .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST)
+ .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(30))
+ .build();
+ try {
+ client.newIndex("test");
+ client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ assertEquals(1, client.getBulkMetric().getSucceeded().getCount());
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ client.close();
+ }
+ }
+
+ @Test
+ public void testNewIndex() throws Exception {
+ final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+ .provider(ExtendedNodeClientProvider.class)
+ .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5))
+ .build();
+ client.newIndex("test");
+ client.close();
+ }
+
+ @Test
+ public void testMapping() throws Exception {
+ final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+ .provider(ExtendedNodeClientProvider.class)
+ .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5))
+ .build();
+ XContentBuilder builder = jsonBuilder()
+ .startObject()
+ .startObject("doc")
+ .startObject("properties")
+ .startObject("location")
+ .field("type", "geo_point")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ client.newIndex("test", Settings.EMPTY, Strings.toString(builder));
+ GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test");
+ GetMappingsResponse getMappingsResponse =
+ client.getClient().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet();
+ logger.info("mappings={}", getMappingsResponse.getMappings());
+ assertTrue(getMappingsResponse.getMappings().get("test").containsKey("doc"));
+ client.close();
+ }
+
+ @Test
+ public void testRandomDocs() throws Exception {
+ long numactions = ACTIONS;
+ final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+ .provider(ExtendedNodeClientProvider.class)
+ .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST)
+ .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60))
+ .build();
+ try {
+ client.newIndex("test");
+ for (int i = 0; i < ACTIONS; i++) {
+ client.index("test", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } catch (Exception e) {
+ logger.error(e.getMessage(), e);
+ } finally {
+ assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount());
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ client.refreshIndex("test");
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
+ .setQuery(QueryBuilders.matchAllQuery()).setSize(0);
+ assertEquals(numactions,
+ searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
+ client.close();
+ }
+ }
+
+ @Test
+ public void testThreadedRandomDocs() throws Exception {
+ int maxthreads = Runtime.getRuntime().availableProcessors();
+ Long maxActionsPerRequest = MAX_ACTIONS_PER_REQUEST;
+ final Long actions = ACTIONS;
+ logger.info("maxthreads={} maxactions={} maxloop={}", maxthreads, maxActionsPerRequest, actions);
+ final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+ .provider(ExtendedNodeClientProvider.class)
+ .put(Parameters.MAX_CONCURRENT_REQUESTS.name(), maxthreads * 2)
+ .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), maxActionsPerRequest)
+ .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60))
+ .build();
+ try {
+ Settings settings = Settings.builder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .build();
+ client.newIndex("test", settings, (String)null)
+ .startBulk("test", 0, 1000);
+ logger.info("index created");
+ ExecutorService executorService = Executors.newFixedThreadPool(maxthreads);
+ final CountDownLatch latch = new CountDownLatch(maxthreads);
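+ // each worker thread indexes its own batch of documents and counts the latch down when it is done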
+ for (int i = 0; i < maxthreads; i++) {
+ executorService.execute(() -> {
+ for (int i1 = 0; i1 < actions; i1++) {
+ client.index("test", null, false,"{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ latch.countDown();
+ });
+ }
+ logger.info("waiting for latch...");
+ if (latch.await(60L, TimeUnit.SECONDS)) {
+ logger.info("flush...");
+ client.flush();
+ client.waitForResponses(60L, TimeUnit.SECONDS);
+ logger.info("got all responses, executor service shutdown...");
+ executorService.shutdown();
+ executorService.awaitTermination(60L, TimeUnit.SECONDS);
+ logger.info("pool is shut down");
+ } else {
+ logger.warn("latch timeout");
+ }
+ client.stopBulk("test", 30L, TimeUnit.SECONDS);
+ assertEquals(maxthreads * actions, client.getBulkMetric().getSucceeded().getCount());
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } catch (Exception e) {
+ logger.error(e.getMessage(), e);
+ } finally {
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ client.refreshIndex("test");
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
+ .setQuery(QueryBuilders.matchAllQuery()).setSize(0);
+ assertEquals(maxthreads * actions,
+ searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
+ client.close();
+ }
+ }
+}
diff --git a/elx-node/src/test/java/org/xbib/elx/node/test/DuplicateIDTest.java b/elx-node/src/test/java/org/xbib/elx/node/test/DuplicateIDTest.java
new file mode 100644
index 0000000..43d74fa
--- /dev/null
+++ b/elx-node/src/test/java/org/xbib/elx/node/test/DuplicateIDTest.java
@@ -0,0 +1,60 @@
+package org.xbib.elx.node.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.junit.Test;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.common.Parameters;
+import org.xbib.elx.node.ExtendedNodeClient;
+import org.xbib.elx.node.ExtendedNodeClientProvider;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.junit.Assert.*;
+
+public class DuplicateIDTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger(DuplicateIDTest.class.getSimpleName());
+
+ private static final Long MAX_ACTIONS_PER_REQUEST = 1000L;
+
+ private static final Long ACTIONS = 12345L;
+
+ @Test
+ public void testDuplicateDocIDs() throws Exception {
+ long numactions = ACTIONS;
+ final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+ .provider(ExtendedNodeClientProvider.class)
+ .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST)
+ .build();
+ try {
+ client.newIndex("test");
+ for (int i = 0; i < ACTIONS; i++) {
+ client.index("test", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+ client.refreshIndex("test");
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
+ .setIndices("test")
+ .setTypes("test")
+ .setQuery(matchAllQuery());
+ long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits();
+ logger.info("hits = {}", hits);
+ assertTrue(hits < ACTIONS);
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ client.close();
+ assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount());
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ }
+ }
+}
diff --git a/elx-node/src/test/java/org/xbib/elx/node/test/IndexShiftTest.java b/elx-node/src/test/java/org/xbib/elx/node/test/IndexShiftTest.java
new file mode 100644
index 0000000..48cb3e8
--- /dev/null
+++ b/elx-node/src/test/java/org/xbib/elx/node/test/IndexShiftTest.java
@@ -0,0 +1,109 @@
+package org.xbib.elx.node.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.junit.Test;
+import org.xbib.elx.api.IndexShiftResult;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.node.ExtendedNodeClient;
+import org.xbib.elx.node.ExtendedNodeClientProvider;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+public class IndexShiftTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger(IndexShiftTest.class.getSimpleName());
+
+ @Test
+ public void testIndexShift() throws Exception {
+ final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+ .provider(ExtendedNodeClientProvider.class)
+ .build();
+ try {
+ Settings settings = Settings.builder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .build();
+ client.newIndex("test1234", settings);
+ for (int i = 0; i < 1; i++) {
+ client.index("test1234", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+
+ IndexShiftResult indexShiftResult =
+ client.shiftIndex("test", "test1234", Arrays.asList("a", "b", "c"));
+
+ assertTrue(indexShiftResult.getNewAliases().contains("a"));
+ assertTrue(indexShiftResult.getNewAliases().contains("b"));
+ assertTrue(indexShiftResult.getNewAliases().contains("c"));
+ assertTrue(indexShiftResult.getMovedAliases().isEmpty());
+
+ Map<String, String> aliases = client.getAliases("test1234");
+ assertTrue(aliases.containsKey("a"));
+ assertTrue(aliases.containsKey("b"));
+ assertTrue(aliases.containsKey("c"));
+ assertTrue(aliases.containsKey("test"));
+
+ String resolved = client.resolveAlias("test");
+ aliases = client.getAliases(resolved);
+ assertTrue(aliases.containsKey("a"));
+ assertTrue(aliases.containsKey("b"));
+ assertTrue(aliases.containsKey("c"));
+ assertTrue(aliases.containsKey("test"));
+
+ client.newIndex("test5678", settings);
+ for (int i = 0; i < 1; i++) {
+ client.index("test5678", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+
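+ // shift again, this time attaching a term filter on "my_key" to every alias that gets added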
+ indexShiftResult = client.shiftIndex("test", "test5678", Arrays.asList("d", "e", "f"),
+ (request, index, alias) -> request.addAliasAction(IndicesAliasesRequest.AliasActions.add()
+ .index(index).alias(alias).filter(QueryBuilders.termQuery("my_key", alias)))
+ );
+ assertTrue(indexShiftResult.getNewAliases().contains("d"));
+ assertTrue(indexShiftResult.getNewAliases().contains("e"));
+ assertTrue(indexShiftResult.getNewAliases().contains("f"));
+ assertTrue(indexShiftResult.getMovedAliases().contains("a"));
+ assertTrue(indexShiftResult.getMovedAliases().contains("b"));
+ assertTrue(indexShiftResult.getMovedAliases().contains("c"));
+
+ aliases = client.getAliases("test5678");
+ assertTrue(aliases.containsKey("a"));
+ assertTrue(aliases.containsKey("b"));
+ assertTrue(aliases.containsKey("c"));
+ assertTrue(aliases.containsKey("d"));
+ assertTrue(aliases.containsKey("e"));
+ assertTrue(aliases.containsKey("f"));
+
+ resolved = client.resolveAlias("test");
+ aliases = client.getAliases(resolved);
+ assertTrue(aliases.containsKey("a"));
+ assertTrue(aliases.containsKey("b"));
+ assertTrue(aliases.containsKey("c"));
+ assertTrue(aliases.containsKey("d"));
+ assertTrue(aliases.containsKey("e"));
+ assertTrue(aliases.containsKey("f"));
+
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ client.close();
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ }
+ }
+}
diff --git a/elx-node/src/test/java/org/xbib/elx/node/test/MockNode.java b/elx-node/src/test/java/org/xbib/elx/node/test/MockNode.java
new file mode 100644
index 0000000..0d0568a
--- /dev/null
+++ b/elx-node/src/test/java/org/xbib/elx/node/test/MockNode.java
@@ -0,0 +1,15 @@
+package org.xbib.elx.node.test;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.InternalSettingsPreparer;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.plugins.Plugin;
+
+import java.util.List;
+
+public class MockNode extends Node {
+
+ public MockNode(Settings settings, List<Class<? extends Plugin>> classpathPlugins) {
+ super(InternalSettingsPreparer.prepareEnvironment(settings, null), classpathPlugins);
+ }
+}
diff --git a/elx-node/src/test/java/org/xbib/elx/node/test/ReplicaTest.java b/elx-node/src/test/java/org/xbib/elx/node/test/ReplicaTest.java
new file mode 100644
index 0000000..a2de18c
--- /dev/null
+++ b/elx-node/src/test/java/org/xbib/elx/node/test/ReplicaTest.java
@@ -0,0 +1,151 @@
+package org.xbib.elx.node.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.admin.indices.stats.CommonStats;
+import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.IndexingStats;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.node.ExtendedNodeClient;
+import org.xbib.elx.node.ExtendedNodeClientProvider;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+@Ignore
+public class ReplicaTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger(ReplicaTest.class.getSimpleName());
+
+ @Test
+ public void testReplicaLevel() throws Exception {
+
+ // we need additional nodes so the replicas can be allocated
+ startNode("2");
+ startNode("3");
+ startNode("4");
+
+ Settings settingsTest1 = Settings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 3)
+ .build();
+
+ Settings settingsTest2 = Settings.builder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build();
+
+ final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+ .provider(ExtendedNodeClientProvider.class)
+ .build();
+
+ try {
+ client.newIndex("test1", settingsTest1, new HashMap<>())
+ .newIndex("test2", settingsTest2, new HashMap<>());
+ client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS);
+ for (int i = 0; i < 1234; i++) {
+ client.index("test1", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ for (int i = 0; i < 1234; i++) {
+ client.index("test2", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ logger.info("refreshing");
+ client.refreshIndex("test1");
+ client.refreshIndex("test2");
+ SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
+ .setIndices("test1", "test2")
+ .setQuery(matchAllQuery());
+ long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits();
+ logger.info("query total hits={}", hits);
+ assertEquals(2468, hits);
+ IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.getClient(), IndicesStatsAction.INSTANCE)
+ .all();
+ IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet();
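+ // log per-index and per-shard indexing counts so the distribution across shards and replicas is visible in the test output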
+ for (Map.Entry<String, IndexStats> m : response.getIndices().entrySet()) {
+ IndexStats indexStats = m.getValue();
+ CommonStats commonStats = indexStats.getTotal();
+ IndexingStats indexingStats = commonStats.getIndexing();
+ IndexingStats.Stats stats = indexingStats.getTotal();
+ logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount());
+ for (Map.Entry<Integer, IndexShardStats> me : indexStats.getIndexShards().entrySet()) {
+ IndexShardStats indexShardStats = me.getValue();
+ CommonStats commonShardStats = indexShardStats.getTotal();
+ logger.info("shard {} count = {}", me.getKey(),
+ commonShardStats.getIndexing().getTotal().getIndexCount());
+ }
+ }
+ try {
+ client.deleteIndex("test1")
+ .deleteIndex("test2");
+ } catch (Exception e) {
+ logger.error("delete index failed, ignored. Reason:", e);
+ }
+ client.close();
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ }
+ }
+
+ @Test
+ public void testUpdateReplicaLevel() throws Exception {
+
+ long numberOfShards = 2;
+ int replicaLevel = 3;
+
+ // we need 3 nodes for replica level 3
+ startNode("2");
+ startNode("3");
+
+ Settings settings = Settings.builder()
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", 0)
+ .build();
+
+ final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+ .provider(ExtendedNodeClientProvider.class)
+ .build();
+
+ try {
+ client.newIndex("replicatest", settings, new HashMap<>());
+ client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS);
+ for (int i = 0; i < 12345; i++) {
+ client.index("replicatest",null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
+ }
+ client.flush();
+ client.waitForResponses(30L, TimeUnit.SECONDS);
+ client.updateReplicaLevel("replicatest", replicaLevel, 30L, TimeUnit.SECONDS);
+ assertEquals(replicaLevel, client.getReplicaLevel("replicatest"));
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ client.close();
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ }
+ }
+
+}
diff --git a/elx-node/src/test/java/org/xbib/elx/node/test/SmokeTest.java b/elx-node/src/test/java/org/xbib/elx/node/test/SmokeTest.java
new file mode 100644
index 0000000..aa548f7
--- /dev/null
+++ b/elx-node/src/test/java/org/xbib/elx/node/test/SmokeTest.java
@@ -0,0 +1,69 @@
+package org.xbib.elx.node.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.common.settings.Settings;
+import org.junit.Test;
+import org.xbib.elx.common.ClientBuilder;
+import org.xbib.elx.api.IndexDefinition;
+import org.xbib.elx.node.ExtendedNodeClient;
+import org.xbib.elx.node.ExtendedNodeClientProvider;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+public class SmokeTest extends TestBase {
+
+ private static final Logger logger = LogManager.getLogger(SmokeTest.class.getSimpleName());
+
+ @Test
+ public void smokeTest() throws Exception {
+ final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+ .provider(ExtendedNodeClientProvider.class)
+ .build();
+ try {
+ client.newIndex("test");
+ client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest
+ client.flush();
+ client.waitForResponses(30, TimeUnit.SECONDS);
+
+ assertEquals(getClusterName(), client.getClusterName());
+
+ client.checkMapping("test");
+
+ client.update("test", "1", "{ \"name\" : \"Another name\"}");
+ client.flush();
+
+ client.waitForRecovery("test", 10L, TimeUnit.SECONDS);
+
+ client.delete("test", "1");
+ client.deleteIndex("test");
+
+ IndexDefinition indexDefinition = client.buildIndexDefinitionFromSettings("test", Settings.builder()
+ .build());
+ assertEquals(0, indexDefinition.getReplicaLevel());
+ client.newIndex(indexDefinition);
+ client.index(indexDefinition.getFullIndexName(), "1", true, "{ \"name\" : \"Hello World\"}");
+ client.flush();
+ client.updateReplicaLevel(indexDefinition, 2);
+
+ int replica = client.getReplicaLevel(indexDefinition);
+ assertEquals(2, replica);
+
+ client.deleteIndex(indexDefinition);
+ assertEquals(0, client.getBulkMetric().getFailed().getCount());
+ assertEquals(4, client.getBulkMetric().getSucceeded().getCount());
+ } catch (NoNodeAvailableException e) {
+ logger.warn("skipping, no node available");
+ } finally {
+ client.close();
+ if (client.getBulkController().getLastBulkError() != null) {
+ logger.error("error", client.getBulkController().getLastBulkError());
+ }
+ assertNull(client.getBulkController().getLastBulkError());
+ }
+ }
+}
diff --git a/elx-node/src/test/java/org/xbib/elx/node/test/TestBase.java b/elx-node/src/test/java/org/xbib/elx/node/test/TestBase.java
new file mode 100644
index 0000000..86d511f
--- /dev/null
+++ b/elx-node/src/test/java/org/xbib/elx/node/test/TestBase.java
@@ -0,0 +1,206 @@
+package org.xbib.elx.node.test;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
+import org.elasticsearch.client.support.AbstractClient;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.transport.netty4.Netty4Plugin;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class TestBase {
+
+ private static final Logger logger = LogManager.getLogger("test");
+
+ private static final Random random = new Random();
+
+ private static final char[] numbersAndLetters = ("0123456789abcdefghijklmnopqrstuvwxyz").toCharArray();
+
+ private Map<String, Node> nodes = new HashMap<>();
+
+ private Map