diff --git a/.gitignore b/.gitignore
index bf3e9b4..b92da43 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,9 +5,10 @@
 /target
 .DS_Store
 *.iml
+*~
 /.settings
 /.classpath
 /.project
 /.gradle
-/build
-/plugins
\ No newline at end of file
+build
+plugins
diff --git a/.travis.yml b/.travis.yml
index a830350..ee1dfd1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,8 +5,3 @@ jdk:
 cache:
   directories:
     - $HOME/.m2
-after_success:
-  - ./gradlew sonarqube -Dsonar.host.url=https://sonarqube.com -Dsonar.login=$SONAR_TOKEN
-env:
-  global:
-    secure: n1Ai4q/yMLn/Pg5pA4lTavoJoe7mQYB1PSKnZAqwbgyla94ySzK6iyBCBiNs/foMPisB/x+DHvmUXTsjvquw9Ay48ZITCV3xhcWzD0eZM2TMoG19CpRAEe8L8LNuYiti9k89ijDdUGZ5ifsvQNTGNHksouayAuApC3PrTUejJfR6SYrp1ZsQTbsMlr+4XU3p7QknK5rGgOwATIMP28F+bVnB05WJtlJA3b0SeucCurn3wJ4FGBQXRYmdlT7bQhNE4QgZM1VzcUFD/K0TBxzzq/otb/lNRSifyoekktDmJwQnaT9uQ4R8R6KdQ2Kb38Rvgjur+TKm5i1G8qS2+6LnIxQJG1aw3JvKK6W0wWCgnAVVRrXaCLday9NuY59tuh1mfjQ10UcsMNKcTdcKEMrLow506wSETcXc7L/LEnneWQyJJeV4vhPqR7KJfsBbeqgz3yIfsCn1GZVWFlfegzYCN52YTl0Y0uRD2Z+TnzQu+Bf4DzaWXLge1rz31xkhyeNNspub4h024+XqBjcMm6M9mlMzmmK8t2DIwPy/BlQbFBUyhrxziuR/5/2NEDPyHltvWkRb4AUIa25WJqkV0gTBegbMadZ9DyOo6Ea7aoVFBae2WGR08F1kzABsWrd1S7UJmWxW35iyMEtoAIayXphIK98qO5aCutwZ+3iOQazxbAs=
diff --git a/README.adoc b/README.adoc
index 852224a..c5cdd92 100644
--- a/README.adoc
+++ b/README.adoc
@@ -1,4 +1,4 @@
-# Elasticsearch Extras - Client
+# Elasticsearch Clients
 
 image:https://api.travis-ci.org/xbib/content.svg[title="Build status", link="https://travis-ci.org/jprante/elasticsearch-extras-client/"]
 image:https://img.shields.io/sonar/http/nemo.sonarqube.com/org.xbib%3Aelasticsearch-extras-client/coverage.svg?style=flat-square[title="Coverage", link="https://sonarqube.com/dashboard/index?id=org.xbib%3Aelasticsearch-extras-client"]
@@ -99,7 +99,7 @@ You will need Java 8, although Elasticsearch 2.x requires Java 7. Java 7 is not
 ## Dependencies
 
 This project depends only on https://github.com/xbib/metrics which is a slim version of Coda Hale's metrics library,
-and Elasticsearch.
+Elasticsearch, and Log4j2 API.
## How to decode the Elasticsearch version diff --git a/api/build.gradle b/api/build.gradle new file mode 100644 index 0000000..61be444 --- /dev/null +++ b/api/build.gradle @@ -0,0 +1,18 @@ + +dependencies { + compile("org.elasticsearch.client:transport:${rootProject.property('elasticsearch.version')}") { + exclude group: 'org.elasticsearch', module: 'securesm' + exclude group: 'org.elasticsearch.plugin', module: 'transport-netty3-client' + exclude group: 'org.elasticsearch.plugin', module: 'reindex-client' + exclude group: 'org.elasticsearch.plugin', module: 'percolator-client' + exclude group: 'org.elasticsearch.plugin', module: 'lang-mustache-client' + } + // we try to override the Elasticsearch netty by our netty version which is more recent + compile "io.netty:netty-buffer:${rootProject.property('netty.version')}" + compile "io.netty:netty-codec-http:${rootProject.property('netty.version')}" + compile "io.netty:netty-handler:${rootProject.property('netty.version')}" +} + +jar { + baseName "${rootProject.name}-api" +} diff --git a/api/config/checkstyle/checkstyle.xml b/api/config/checkstyle/checkstyle.xml new file mode 100644 index 0000000..8cb4438 --- /dev/null +++ b/api/config/checkstyle/checkstyle.xml @@ -0,0 +1,321 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/api/src/docs/asciidoc/css/foundation.css b/api/src/docs/asciidoc/css/foundation.css new file mode 100644 index 0000000..27be611 --- /dev/null +++ b/api/src/docs/asciidoc/css/foundation.css @@ -0,0 +1,684 @@ +/*! normalize.css v2.1.2 | MIT License | git.io/normalize */ +/* ========================================================================== HTML5 display definitions ========================================================================== */ +/** Correct `block` display not defined in IE 8/9. */ +article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; } + +/** Correct `inline-block` display not defined in IE 8/9. */ +audio, canvas, video { display: inline-block; } + +/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */ +audio:not([controls]) { display: none; height: 0; } + +/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */ +[hidden], template { display: none; } + +script { display: none !important; } + +/* ========================================================================== Base ========================================================================== */ +/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */ +html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ } + +/** Remove default margin. */ +body { margin: 0; } + +/* ========================================================================== Links ========================================================================== */ +/** Remove the gray background color from active links in IE 10. 
*/ +a { background: transparent; } + +/** Address `outline` inconsistency between Chrome and other browsers. */ +a:focus { outline: thin dotted; } + +/** Improve readability when focused and also mouse hovered in all browsers. */ +a:active, a:hover { outline: 0; } + +/* ========================================================================== Typography ========================================================================== */ +/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */ +h1 { font-size: 2em; margin: 0.67em 0; } + +/** Address styling not present in IE 8/9, Safari 5, and Chrome. */ +abbr[title] { border-bottom: 1px dotted; } + +/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ +b, strong { font-weight: bold; } + +/** Address styling not present in Safari 5 and Chrome. */ +dfn { font-style: italic; } + +/** Address differences between Firefox and other browsers. */ +hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; } + +/** Address styling not present in IE 8/9. */ +mark { background: #ff0; color: #000; } + +/** Correct font family set oddly in Safari 5 and Chrome. */ +code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; } + +/** Improve readability of pre-formatted text in all browsers. */ +pre { white-space: pre-wrap; } + +/** Set consistent quote types. */ +q { quotes: "\201C" "\201D" "\2018" "\2019"; } + +/** Address inconsistent and variable font size in all browsers. */ +small { font-size: 80%; } + +/** Prevent `sub` and `sup` affecting `line-height` in all browsers. */ +sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } + +sup { top: -0.5em; } + +sub { bottom: -0.25em; } + +/* ========================================================================== Embedded content ========================================================================== */ +/** Remove border when inside `a` element in IE 8/9. */ +img { border: 0; } + +/** Correct overflow displayed oddly in IE 9. */ +svg:not(:root) { overflow: hidden; } + +/* ========================================================================== Figures ========================================================================== */ +/** Address margin not present in IE 8/9 and Safari 5. */ +figure { margin: 0; } + +/* ========================================================================== Forms ========================================================================== */ +/** Define consistent border, margin, and padding. */ +fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } + +/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */ +legend { border: 0; /* 1 */ padding: 0; /* 2 */ } + +/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ +button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ } + +/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ +button, input { line-height: normal; } + +/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. 
Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */ +button, select { text-transform: none; } + +/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */ +button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ } + +/** Re-set default cursor for disabled elements. */ +button[disabled], html input[disabled] { cursor: default; } + +/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ +input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } + +/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */ +input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; } + +/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ +input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } + +/** Remove inner padding and border in Firefox 4+. */ +button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } + +/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */ +textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ } + +/* ========================================================================== Tables ========================================================================== */ +/** Remove most spacing between table cells. 
*/ +table { border-collapse: collapse; border-spacing: 0; } + +meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; } + +meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; } + +meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; } + +*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } + +html, body { font-size: 100%; } + +body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; } + +a:hover { cursor: pointer; } + +img, object, embed { max-width: 100%; height: auto; } + +object, embed { height: 100%; } + +img { -ms-interpolation-mode: bicubic; } + +#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; } + +.left { float: left !important; } + +.right { float: right !important; } + +.text-left { text-align: left !important; } + +.text-right { text-align: right !important; } + +.text-center { text-align: center !important; } + +.text-justify { text-align: justify !important; } + +.hide { display: none; } + +.antialiased { -webkit-font-smoothing: antialiased; } + +img { display: inline-block; vertical-align: middle; } + +textarea { height: auto; min-height: 50px; } + +select { width: 100%; } + +object, svg { display: inline-block; vertical-align: middle; } + +.center { margin-left: auto; margin-right: auto; } + +.spread { width: 100%; } + +p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; } + +.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; } + +/* Typography resets */ +div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; } + +/* Default Link Styles */ +a { color: #2ba6cb; text-decoration: none; line-height: inherit; } +a:hover, a:focus { color: #2795b6; } +a img { border: none; } + +/* Default paragraph styles */ +p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } +p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } + +/* Default header styles */ +h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; } +h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; } + +h1 { font-size: 2.125em; } + +h2 { font-size: 1.6875em; } + +h3, #toctitle, .sidebarblock > .content > 
.title { font-size: 1.375em; } + +h4 { font-size: 1.125em; } + +h5 { font-size: 1.125em; } + +h6 { font-size: 1em; } + +hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; } + +/* Helpful Typography Defaults */ +em, i { font-style: italic; line-height: inherit; } + +strong, b { font-weight: bold; line-height: inherit; } + +small { font-size: 60%; line-height: inherit; } + +code { font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; } + +/* Lists */ +ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } + +ul, ol { margin-left: 1.5em; } +ul.no-bullet, ol.no-bullet { margin-left: 1.5em; } + +/* Unordered Lists */ +ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ } +ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; } +ul.square { list-style-type: square; } +ul.circle { list-style-type: circle; } +ul.disc { list-style-type: disc; } +ul.no-bullet { list-style: none; } + +/* Ordered Lists */ +ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } + +/* Definition Lists */ +dl dt { margin-bottom: 0.3125em; font-weight: bold; } +dl dd { margin-bottom: 1.25em; } + +/* Abbreviations */ +abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; } + +abbr { text-transform: none; } + +/* Blockquotes */ +blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; } +blockquote cite { display: block; font-size: 0.8125em; color: #555555; } +blockquote cite:before { content: "\2014 \0020"; } +blockquote cite a, blockquote cite a:visited { color: #555555; } + +blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; } + +/* Microformats */ +.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; } +.vcard li { margin: 0; display: block; } +.vcard .fn { font-weight: bold; font-size: 0.9375em; } + +.vevent .summary { font-weight: bold; } +.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; } + +@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } + h1 { font-size: 2.75em; } + h2 { font-size: 2.3125em; } + h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } + h4 { font-size: 1.4375em; } } +/* Tables */ +table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; } +table thead, table tfoot { background: whitesmoke; font-weight: bold; } +table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; } +table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; } +table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; } +table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; } + +body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; } + +h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } + +.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; } +.clearfix:after, 
.float-group:after { clear: both; } + +*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; } +*:not(pre) > code.nobreak { word-wrap: normal; } +*:not(pre) > code.nowrap { white-space: nowrap; } + +pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; } + +em em { font-style: normal; } + +strong strong { font-weight: normal; } + +.keyseq { color: #555555; } + +kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } + +.keyseq kbd:first-child { margin-left: 0; } + +.keyseq kbd:last-child { margin-right: 0; } + +.menuseq, .menu { color: #090909; } + +b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; } + +b.button:before { content: "["; padding: 0 3px 0 2px; } + +b.button:after { content: "]"; padding: 0 2px 0 3px; } + +#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } +#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; } +#header:after, #content:after, #footnotes:after, #footer:after { clear: both; } + +#content { margin-top: 1.25em; } + +#content:before { content: none; } + +#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; } +#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; } +#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; } +#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; } +#header .details span:first-child { margin-left: -0.125em; } +#header .details span.email a { color: #6f6f6f; } +#header .details br { display: none; } +#header .details br + span:before { content: "\00a0\2013\00a0"; } +#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; } +#header .details br + span#revremark:before { content: "\00a0|\00a0"; } +#header #revnumber { text-transform: capitalize; } +#header #revnumber:after { content: "\00a0"; } + +#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } + +#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; } +#toc > ul { margin-left: 0.125em; } +#toc ul.sectlevel0 > li > a { font-style: italic; } +#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } +#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; } +#toc li { line-height: 1.3334; margin-top: 0.3334em; } +#toc a { text-decoration: 
none; } +#toc a:active { text-decoration: underline; } + +#toctitle { color: #6f6f6f; font-size: 1.2em; } + +@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } + body.toc2 { padding-left: 15em; padding-right: 0; } + #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } + #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } + #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } + #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } + #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } + body.toc2.toc-right { padding-left: 0; padding-right: 15em; } + body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } } +@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } + #toc.toc2 { width: 20em; } + #toc.toc2 #toctitle { font-size: 1.375em; } + #toc.toc2 > ul { font-size: 0.95em; } + #toc.toc2 ul ul { padding-left: 1.25em; } + body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } +#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } +#content #toc > :first-child { margin-top: 0; } +#content #toc > :last-child { margin-bottom: 0; } + +#footer { max-width: 100%; background-color: #222222; padding: 1.25em; } + +#footer-text { color: #dddddd; line-height: 1.44; } + +.sect1 { padding-bottom: 0.625em; } + +@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } } +.sect1 + .sect1 { border-top: 1px solid #dddddd; } + +#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; } +#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } +#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } +#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; } +#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; } + +.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } + 
+.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; } + +table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; } + +.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; } + +table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; } + +.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } +.admonitionblock > table td.icon { text-align: center; width: 80px; } +.admonitionblock > table td.icon img { max-width: initial; } +.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; } +.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; } +.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } + +.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; } +.exampleblock > .content > :first-child { margin-top: 0; } +.exampleblock > .content > :last-child { margin-bottom: 0; } + +.sidebarblock { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } +.sidebarblock > :first-child { margin-top: 0; } +.sidebarblock > :last-child { margin-bottom: 0; } +.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; } + +.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } + +.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; } +.sidebarblock .literalblock pre, .sidebarblock .listingblock pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; } + +.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; } +.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; } +@media only screen and 
(min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } } +@media only screen and (min-width: 1280px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } } + +.literalblock.output pre { color: #eeeeee; background-color: black; } + +.listingblock pre.highlightjs { padding: 0; } +.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; } + +.listingblock > .content { position: relative; } + +.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; } + +.listingblock:hover code[data-lang]:before { display: block; } + +.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; } + +.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; } + +table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } + +table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; } + +table.pyhltable td.code { padding-left: .75em; padding-right: 0; } + +pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; } + +pre.pygments .lineno { display: inline-block; margin-right: .25em; } + +table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; } + +.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } +.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; } +.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } +.quoteblock blockquote { margin: 0; padding: 0; border: 0; } +.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); } +.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; } +.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; } +.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; } +.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; } +.quoteblock .quoteblock blockquote:before { display: none; } + +.verseblock { margin: 0 1em 1.25em 1em; } +.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; } +.verseblock pre strong { font-weight: 400; } +.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } + +.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; } +.quoteblock .attribution br, .verseblock .attribution br { display: none; } +.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; } + +.quoteblock.abstract { margin: 0 0 1.25em 0; display: block; } +.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; } +.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; } + 
+table.tableblock { max-width: 100%; border-collapse: separate; } +table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; } + +table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; } + +table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; } + +table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; } + +table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; } + +table.grid-all * > tr > .tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; } + +table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; } + +table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; } + +table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; } + +table.frame-all { border-width: 1px; } + +table.frame-sides { border-width: 0 1px; } + +table.frame-topbot { border-width: 1px 0; } + +th.halign-left, td.halign-left { text-align: left; } + +th.halign-right, td.halign-right { text-align: right; } + +th.halign-center, td.halign-center { text-align: center; } + +th.valign-top, td.valign-top { vertical-align: top; } + +th.valign-bottom, td.valign-bottom { vertical-align: bottom; } + +th.valign-middle, td.valign-middle { vertical-align: middle; } + +table thead th, table tfoot th { font-weight: bold; } + +tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; } + +tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; } + +p.tableblock > code:only-child { background: none; padding: 0; } + +p.tableblock { font-size: 1em; } + +td > div.verse { white-space: pre; } + +ol { margin-left: 1.75em; } + +ul li ol { margin-left: 1.5em; } + +dl dd { margin-left: 1.125em; } + +dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } + +ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } + +ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; } + +ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; } + +ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; } + +ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; } + +ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; } +ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; } +ul.inline > li > * { display: block; } + +.unstyled dl dt { font-weight: normal; font-style: normal; } + +ol.arabic { list-style-type: decimal; } + +ol.decimal { list-style-type: decimal-leading-zero; } + +ol.loweralpha { list-style-type: lower-alpha; } + +ol.upperalpha { list-style-type: upper-alpha; } + +ol.lowerroman { list-style-type: lower-roman; } + +ol.upperroman { list-style-type: upper-roman; } + 
+ol.lowergreek { list-style-type: lower-greek; } + +.hdlist > table, .colist > table { border: 0; background: none; } +.hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; } + +td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } + +td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } + +.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } + +.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; } +.colist > table tr > td:first-of-type img { max-width: initial; } +.colist > table tr > td:last-of-type { padding: 0.25em 0; } + +.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; } + +.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; } +.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; } +.imageblock > .title { margin-bottom: 0; } +.imageblock.thumb, .imageblock.th { border-width: 6px; } +.imageblock.thumb > .title, .imageblock.th > .title { padding: 0 0.125em; } + +.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } +.image.left { margin-right: 0.625em; } +.image.right { margin-left: 0.625em; } + +a.image { text-decoration: none; display: inline-block; } +a.image object { pointer-events: none; } + +sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } +sup.footnote a, sup.footnoteref a { text-decoration: none; } +sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } + +#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } +#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; } +#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; } +#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; } +#footnotes .footnote:last-of-type { margin-bottom: 0; } +#content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } + +.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; } +.gist .file-data > table td.line-data { width: 99%; } + +div.unbreakable { page-break-inside: avoid; } + +.big { font-size: larger; } + +.small { font-size: smaller; } + +.underline { text-decoration: underline; } + +.overline { text-decoration: overline; } + +.line-through { text-decoration: line-through; } + +.aqua { color: #00bfbf; } + +.aqua-background { background-color: #00fafa; } + +.black { color: black; } + +.black-background { background-color: black; } + +.blue { color: #0000bf; } + +.blue-background { background-color: #0000fa; } + +.fuchsia { color: #bf00bf; } + +.fuchsia-background { background-color: #fa00fa; } + +.gray { color: #606060; } + +.gray-background { background-color: #7d7d7d; } + +.green { color: #006000; } + +.green-background { background-color: #007d00; } + +.lime { color: #00bf00; } + +.lime-background { background-color: #00fa00; } + +.maroon { color: #600000; } + +.maroon-background { background-color: #7d0000; } + +.navy { color: #000060; } + +.navy-background { background-color: #00007d; } + +.olive { color: #606000; } + +.olive-background { background-color: #7d7d00; } + +.purple { color: #600060; } + +.purple-background { background-color: #7d007d; } + +.red 
{ color: #bf0000; } + +.red-background { background-color: #fa0000; } + +.silver { color: #909090; } + +.silver-background { background-color: #bcbcbc; } + +.teal { color: #006060; } + +.teal-background { background-color: #007d7d; } + +.white { color: #bfbfbf; } + +.white-background { background-color: #fafafa; } + +.yellow { color: #bfbf00; } + +.yellow-background { background-color: #fafa00; } + +span.icon > .fa { cursor: default; } + +.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } +.admonitionblock td.icon .icon-note:before { content: "\f05a"; color: #207c98; } +.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } +.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; } +.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; } +.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; } + +.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } +.conum[data-value] * { color: #fff !important; } +.conum[data-value] + b { display: none; } +.conum[data-value]:after { content: attr(data-value); } +pre .conum[data-value] { position: relative; top: -0.125em; } + +b.conum * { color: inherit !important; } + +.conum:not([data-value]):empty { display: none; } + +.literalblock pre, .listingblock pre { background: #eeeeee; } diff --git a/api/src/docs/asciidoclet/overview.adoc b/api/src/docs/asciidoclet/overview.adoc new file mode 100644 index 0000000..7947331 --- /dev/null +++ b/api/src/docs/asciidoclet/overview.adoc @@ -0,0 +1,4 @@ += Elasticsearch Java client +Jörg Prante +Version 5.4.0.0 + diff --git a/backup/XbibTransportService.java b/backup/XbibTransportService.java new file mode 100644 index 0000000..c2dc502 --- /dev/null +++ b/backup/XbibTransportService.java @@ -0,0 +1,1047 @@ +package org.xbib.elasticsearch.client.transport; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import 
org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.tasks.TaskManager;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.ActionNotFoundTransportException;
+import org.elasticsearch.transport.ConnectTransportException;
+import org.elasticsearch.transport.FutureTransportResponseHandler;
+import org.elasticsearch.transport.NodeDisconnectedException;
+import org.elasticsearch.transport.NodeNotConnectedException;
+import org.elasticsearch.transport.PlainTransportFuture;
+import org.elasticsearch.transport.ReceiveTimeoutTransportException;
+import org.elasticsearch.transport.RemoteTransportException;
+import org.elasticsearch.transport.RequestHandlerRegistry;
+import org.elasticsearch.transport.ResponseHandlerFailureTransportException;
+import org.elasticsearch.transport.SendRequestTransportException;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportFuture;
+import org.elasticsearch.transport.TransportInterceptor;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseHandler;
+import org.elasticsearch.transport.TransportResponseOptions;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ScheduledFuture;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+/**
+ *
+ */
+public class XbibTransportService extends AbstractLifecycleComponent {
+
+    private static final String HANDSHAKE_ACTION_NAME = "internal:transport/handshake";
+
+    private static final Setting<List<String>> TRACE_LOG_INCLUDE_SETTING =
+            Setting.listSetting("transport.tracer.include", Collections.emptyList(), Function.identity(),
+                    Property.Dynamic, Property.NodeScope);
+
+    private static final Setting<List<String>> TRACE_LOG_EXCLUDE_SETTING =
+            Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*",
+                    TransportLivenessAction.NAME), Function.identity(), Property.Dynamic, Property.NodeScope);
+
+    private final CountDownLatch blockIncomingRequestsLatch = new CountDownLatch(1);
+
+    private final Transport transport;
+
+    private final ThreadPool threadPool;
+
+    private final ClusterName clusterName;
+
+    private final TaskManager taskManager;
+
+    private final TransportInterceptor.AsyncSender asyncSender;
+
+    private final Function<BoundTransportAddress, DiscoveryNode> localNodeFactory;
+
+    private volatile Map<String, RequestHandlerRegistry<?>> requestHandlers = Collections.emptyMap();
+
+    private final Object requestHandlerMutex = new Object();
+
+    private final ConcurrentMapLong<RequestHolder<?>> clientHandlers =
+            ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();
+
+    private final TransportInterceptor interceptor;
+
+    // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they
+    // do show up, we can print more descriptive information about them
+    private final Map<Long, TimeoutInfoHolder> timeoutInfoHandlers =
+            Collections.synchronizedMap(new LinkedHashMap<Long, TimeoutInfoHolder>(100, .75F, true) {
+                private static final long serialVersionUID = 9174428975922394994L;
+
+                @Override
+                protected boolean removeEldestEntry(Map.Entry<Long, TimeoutInfoHolder> eldest) {
+                    return size() > 100;
+                }
+            });
+
+    private final Logger tracerLog;
+
+    private volatile String[] tracerLogInclude;
+
+    private volatile String[] tracerLogExclude;
+
+    private volatile DiscoveryNode localNode = null;
+
+    private final Transport.Connection localNodeConnection = new Transport.Connection() {
+        @Override
+        public DiscoveryNode getNode() {
+            return localNode;
+        }
+
+        @Override
+        public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options)
+                throws IOException, TransportException {
+            sendLocalRequest(requestId, action, request, options);
+        }
+
+        @Override
+        public void close() throws IOException {
+        }
+    };
+
+    /**
+     * Build the service.
+     *
+     * @param clusterSettings if non null the {@linkplain XbibTransportService} will register
+     *                        with the {@link ClusterSettings} for settings updates for
+     *                        {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}.
+     */
+    XbibTransportService(Settings settings, Transport transport, ThreadPool threadPool,
+                         TransportInterceptor transportInterceptor,
+                         Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
+                         @Nullable ClusterSettings clusterSettings) {
+        super(settings);
+        this.transport = transport;
+        this.threadPool = threadPool;
+        this.localNodeFactory = localNodeFactory;
+        this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
+        setTracerLogInclude(TRACE_LOG_INCLUDE_SETTING.get(settings));
+        setTracerLogExclude(TRACE_LOG_EXCLUDE_SETTING.get(settings));
+        tracerLog = Loggers.getLogger(logger, ".tracer");
+        taskManager = createTaskManager();
+        this.interceptor = transportInterceptor;
+        this.asyncSender = interceptor.interceptSender(this::sendRequestInternal);
+        if (clusterSettings != null) {
+            clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude);
+            clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude);
+        }
+    }
+
+    private TaskManager createTaskManager() {
+        return new TaskManager(settings);
+    }
+
+    private void setTracerLogInclude(List<String> tracerLogInclude) {
+        this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY);
+    }
+
+    private void setTracerLogExclude(List<String> tracerLogExclude) {
+        this.tracerLogExclude = tracerLogExclude.toArray(Strings.EMPTY_ARRAY);
+    }
+
+    @Override
+    protected void doStart() {
+        rxMetric.clear();
+        txMetric.clear();
+        transport.setTransportService(this);
+        transport.start();
+        if (transport.boundAddress() != null && logger.isInfoEnabled()) {
+            logger.info("{}", transport.boundAddress());
+            for (Map.Entry<String, BoundTransportAddress> entry : transport.profileBoundAddresses().entrySet()) {
+                logger.info("profile [{}]: {}", entry.getKey(), entry.getValue());
+            }
+        }
+        localNode = localNodeFactory.apply(transport.boundAddress());
+        registerRequestHandler(HANDSHAKE_ACTION_NAME,
+                () -> HandshakeRequest.INSTANCE,
+                ThreadPool.Names.SAME,
+                (request, channel) -> channel.sendResponse(new HandshakeResponse(localNode, clusterName,
+                        localNode.getVersion())));
+    }
+
+    @Override
+    protected void doStop() {
+        try {
+            transport.stop();
+        } finally {
+            // in case the transport is not connected to our local node (thus cleaned on node disconnect)
+            // make sure to clean any leftover on going handles
+            for (Map.Entry<Long, RequestHolder<?>> entry :
clientHandlers.entrySet()) {
+                final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey());
+                if (holderToNotify != null) {
+                    // callback that an exception happened, but on a different thread since we don't
+                    // want handlers to worry about stack overflows
+                    threadPool.generic().execute(new AbstractRunnable() {
+                        @Override
+                        public void onRejection(Exception e) {
+                            // if we get rejected during node shutdown we don't wanna bubble it up
+                            logger.debug((Supplier) () -> new ParameterizedMessage(
+                                    "failed to notify response handler on rejection, action: {}",
+                                    holderToNotify.action()),
+                                    e);
+                        }
+                        @Override
+                        public void onFailure(Exception e) {
+                            logger.warn((Supplier) () -> new ParameterizedMessage(
+                                    "failed to notify response handler on exception, action: {}",
+                                    holderToNotify.action()),
+                                    e);
+                        }
+                        @Override
+                        public void doRun() {
+                            TransportException ex = new TransportException("transport stopped, action: " +
+                                    holderToNotify.action());
+                            holderToNotify.handler().handleException(ex);
+                        }
+                    });
+                }
+            }
+        }
+    }
+
+    @Override
+    protected void doClose() {
+        transport.close();
+    }
+
+    /**
+     * Start accepting incoming requests.
+     * when the transport layer starts up it will block any incoming requests until
+     * this method is called
+     */
+    final void acceptIncomingRequests() {
+        blockIncomingRequestsLatch.countDown();
+    }
+
+    /**
+     * Returns true iff the given node is already connected.
+     */
+    boolean nodeConnected(DiscoveryNode node) {
+        return isLocalNode(node) || transport.nodeConnected(node);
+    }
+
+    /**
+     * Connect to the specified node.
+     *
+     * @param node the node to connect to
+     */
+    void connectToNode(final DiscoveryNode node) {
+        if (isLocalNode(node)) {
+            return;
+        }
+        transport.connectToNode(node, null, (newConnection, actualProfile) ->
+                handshake(newConnection, actualProfile.getHandshakeTimeout().millis()));
+    }
+
+    /**
+     * Executes a high-level handshake using the given connection
+     * and returns the discovery node of the node the connection
+     * was established with. The handshake will fail if the cluster
+     * name on the target node mismatches the local cluster name.
+     *
+     * @param connection the connection to a specific node
+     * @param handshakeTimeout handshake timeout
+     * @return the connected node
+     * @throws ConnectTransportException if the connection failed
+     * @throws IllegalStateException if the handshake failed
+     */
+    private DiscoveryNode handshake(final Transport.Connection connection,
+                                    final long handshakeTimeout) throws ConnectTransportException {
+        return handshake(connection, handshakeTimeout, clusterName::equals);
+    }
+
+    /**
+     * Executes a high-level handshake using the given connection
+     * and returns the discovery node of the node the connection
+     * was established with. The handshake will fail if the cluster
+     * name on the target node doesn't match the local cluster name.
+     *
+     * @param connection the connection to a specific node
+     * @param handshakeTimeout handshake timeout
+     * @param clusterNamePredicate cluster name validation predicate
+     * @return the connected node
+     * @throws ConnectTransportException if the connection failed
+     * @throws IllegalStateException if the handshake failed
+     */
+    private DiscoveryNode handshake(final Transport.Connection connection,
+                                    final long handshakeTimeout, Predicate<ClusterName> clusterNamePredicate)
+            throws ConnectTransportException {
+        final HandshakeResponse response;
+        final DiscoveryNode node = connection.getNode();
+        try {
+            PlainTransportFuture<HandshakeResponse> futureHandler = new PlainTransportFuture<>(
+                    new FutureTransportResponseHandler<HandshakeResponse>() {
+                        @Override
+                        public HandshakeResponse newInstance() {
+                            return new HandshakeResponse();
+                        }
+                    });
+            sendRequest(connection, HANDSHAKE_ACTION_NAME, HandshakeRequest.INSTANCE,
+                    TransportRequestOptions.builder().withTimeout(handshakeTimeout).build(), futureHandler);
+            response = futureHandler.txGet();
+        } catch (Exception e) {
+            throw new IllegalStateException("handshake failed with " + node, e);
+        }
+        if (!clusterNamePredicate.test(response.clusterName)) {
+            throw new IllegalStateException("handshake failed, mismatched cluster name [" +
+                    response.clusterName + "] - " + node);
+        } else if (!response.version.isCompatible(localNode.getVersion())) {
+            throw new IllegalStateException("handshake failed, incompatible version [" +
+                    response.version + "] - " + node);
+        }
+        return response.discoveryNode;
+    }
+
+    void disconnectFromNode(DiscoveryNode node) {
+        if (isLocalNode(node)) {
+            return;
+        }
+        transport.disconnectFromNode(node);
+    }
+
+    <T extends TransportResponse> TransportFuture<T> submitRequest(DiscoveryNode node, String action,
+                                                                   TransportRequest request,
+                                                                   TransportRequestOptions options,
+                                                                   TransportResponseHandler<T> handler)
+            throws TransportException {
+        PlainTransportFuture<T> futureHandler = new PlainTransportFuture<>(handler);
+        try {
+            Transport.Connection connection = getConnection(node);
+            sendRequest(connection, action, request, options, futureHandler);
+        } catch (NodeNotConnectedException ex) {
+            futureHandler.handleException(ex);
+        }
+        return futureHandler;
+    }
+
+    final <T extends TransportResponse> void sendRequest(final DiscoveryNode node, final String action,
+                                                         final TransportRequest request,
+                                                         final TransportRequestOptions options,
+                                                         TransportResponseHandler<T> handler) {
+        try {
+            Transport.Connection connection = getConnection(node);
+            sendRequest(connection, action, request, options, handler);
+        } catch (NodeNotConnectedException ex) {
+            handler.handleException(ex);
+        }
+    }
+
+    private <T extends TransportResponse> void sendRequest(final Transport.Connection connection, final String action,
+                                                           final TransportRequest request,
+                                                           final TransportRequestOptions options,
+                                                           TransportResponseHandler<T> handler) {
+
+        asyncSender.sendRequest(connection, action, request, options, handler);
+    }
+
+    /**
+     * Returns either a real transport connection or a local node connection
+     * if we are using the local node optimization.
+     * @throws NodeNotConnectedException if the given node is not connected
+     */
+    private Transport.Connection getConnection(DiscoveryNode node) {
+        if (isLocalNode(node)) {
+            return localNodeConnection;
+        } else {
+            return transport.getConnection(node);
+        }
+    }
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    private <T extends TransportResponse> void sendRequestInternal(final Transport.Connection connection,
+                                                                   final String action,
+                                                                   final TransportRequest request,
+                                                                   final TransportRequestOptions options,
+                                                                   TransportResponseHandler<T> handler) {
+        if (connection == null) {
+            throw new IllegalStateException("can't send request to a null connection");
+        }
+        DiscoveryNode node = connection.getNode();
+        final long requestId = transport.newRequestId();
+        final TimeoutHandler timeoutHandler;
+        try {
+            if (options.timeout() == null) {
+                timeoutHandler = null;
+            } else {
+                timeoutHandler = new TimeoutHandler(requestId);
+            }
+            Supplier<ThreadContext.StoredContext> storedContextSupplier =
+                    threadPool.getThreadContext().newRestorableContext(true);
+            TransportResponseHandler<T> responseHandler =
+                    new ContextRestoreResponseHandler<>(storedContextSupplier, handler);
+            clientHandlers.put(requestId,
+                    new RequestHolder(responseHandler, connection.getNode(), action, timeoutHandler));
+            if (lifecycle.stoppedOrClosed()) {
+                // if we are not started the exception handling will remove the RequestHolder again
+                // and calls the handler to notify the caller. It will only notify if the toStop code
+                // hasn't done the work yet.
+                throw new TransportException("TransportService is closed stopped can't send request");
+            }
+            if (timeoutHandler != null) {
+                assert options.timeout() != null;
+                timeoutHandler.future = threadPool.schedule(options.timeout(), ThreadPool.Names.GENERIC, timeoutHandler);
+            }
+            connection.sendRequest(requestId, action, request, options);
+        } catch (final Exception e) {
+            // usually happen either because we failed to connect to the node
+            // or because we failed serializing the message
+            final RequestHolder holderToNotify = clientHandlers.remove(requestId);
+            // If holderToNotify == null then handler has already been taken care of.
+ if (holderToNotify != null) { + holderToNotify.cancelTimeout(); + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows + final SendRequestTransportException sendRequestException = + new SendRequestTransportException(node, action, e); + threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() { + @Override + public void onRejection(Exception e) { + // if we get rejected during node shutdown we don't wanna bubble it up + logger.debug((Supplier) () -> new ParameterizedMessage( + "failed to notify response handler on rejection, action: {}", + holderToNotify.action()), e); + } + @Override + public void onFailure(Exception e) { + logger.warn((Supplier) () -> new ParameterizedMessage( + "failed to notify response handler on exception, action: {}", + holderToNotify.action()), e); + } + @Override + protected void doRun() throws Exception { + holderToNotify.handler().handleException(sendRequestException); + } + }); + } else { + logger.debug("Exception while sending request, handler likely already notified due to timeout", e); + } + } + } + + private void sendLocalRequest(long requestId, final String action, final TransportRequest request, + TransportRequestOptions options) { + final DirectResponseChannel channel = new DirectResponseChannel(logger, localNode, action, requestId, adapter, + threadPool); + try { + adapter.onRequestSent(localNode, requestId, action, request, options); + adapter.onRequestReceived(requestId, action); + final RequestHandlerRegistry reg = adapter.getRequestHandler(action); + if (reg == null) { + throw new ActionNotFoundTransportException("Action [" + action + "] not found"); + } + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + reg.processMessageReceived(request, channel); + } else { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + reg.processMessageReceived(request, channel); + } + + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } + + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn((Supplier) () -> + new ParameterizedMessage("failed to notify channel of error message for action [{}]", + action), inner); + } + } + }); + } + + } catch (Exception e) { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify channel of error message for action [{}]", action), inner); + } + } + } + + private boolean shouldTraceAction(String action) { + if (tracerLogInclude.length > 0) { + if (!Regex.simpleMatch(tracerLogInclude, action)) { + return false; + } + } + return tracerLogExclude.length <= 0 || !Regex.simpleMatch(tracerLogExclude, action); + } + + /** + * Registers a new request handler. 
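+     * The supplied handler is first passed through the configured transport interceptor
+     * before it is registered.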
+     *
+     * @param action the action the request handler is associated with
+     * @param request the request class that will be used to construct new instances for streaming
+     * @param executor the executor the request handling will be executed on
+     * @param handler the handler itself that implements the request handling
+     */
+    private <Request extends TransportRequest> void registerRequestHandler(String action, Supplier<Request> request,
+                                                                            String executor,
+                                                                            TransportRequestHandler<Request> handler) {
+        handler = interceptor.interceptHandler(action, executor, false, handler);
+        RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>(
+                action, request, taskManager, handler, executor, false, false);
+        registerRequestHandler(reg);
+    }
+
+    @SuppressWarnings("unchecked")
+    private <Request extends TransportRequest> void registerRequestHandler(RequestHandlerRegistry<Request> reg) {
+        synchronized (requestHandlerMutex) {
+            if (requestHandlers.containsKey(reg.getAction())) {
+                throw new IllegalArgumentException("transport handlers for action " +
+                        reg.getAction() + " are already registered");
+            }
+            requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(),
+                    (RequestHandlerRegistry) reg).immutableMap();
+        }
+    }
+
+    private boolean isLocalNode(DiscoveryNode discoveryNode) {
+        return Objects.requireNonNull(discoveryNode, "discovery node must not be null").equals(localNode);
+    }
+
+    static class HandshakeRequest extends TransportRequest {
+
+        static final HandshakeRequest INSTANCE = new HandshakeRequest();
+
+        private HandshakeRequest() {
+        }
+
+    }
+
+    /**
+     * Handshake response carrying the remote discovery node, its cluster name, and its version.
+     */
+    public static class HandshakeResponse extends TransportResponse {
+
+        private DiscoveryNode discoveryNode;
+
+        private ClusterName clusterName;
+
+        private Version version;
+
+        /**
+         * For external construction.
+         */
+        public HandshakeResponse() {
+        }
+
+        HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, Version version) {
+            this.discoveryNode = discoveryNode;
+            this.version = version;
+            this.clusterName = clusterName;
+        }
+
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            super.readFrom(in);
+            discoveryNode = in.readOptionalWriteable(DiscoveryNode::new);
+            clusterName = new ClusterName(in);
+            version = Version.readVersion(in);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeOptionalWriteable(discoveryNode);
+            clusterName.writeTo(out);
+            Version.writeVersion(version, out);
+        }
+    }
+
+    private final class Adapter implements TransportServiceAdapter {
+
+        final MeanMetric rxMetric = new MeanMetric();
+
+        final MeanMetric txMetric = new MeanMetric();
+
+        @Override
+        public void addBytesReceived(long size) {
+            rxMetric.inc(size);
+        }
+
+        @Override
+        public void addBytesSent(long size) {
+            txMetric.inc(size);
+        }
+
+        @Override
+        public void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request,
+                                  TransportRequestOptions options) {
+            if (traceEnabled() && shouldTraceAction(action)) {
+                traceRequestSent(node, requestId, action, options);
+            }
+        }
+
+        boolean traceEnabled() {
+            return tracerLog.isTraceEnabled();
+        }
+
+        @Override
+        public void onResponseSent(long requestId, String action, TransportResponse response,
+                                   TransportResponseOptions options) {
+            if (traceEnabled() && shouldTraceAction(action)) {
+                traceResponseSent(requestId, action);
+            }
+        }
+
+        @Override
+        public void onResponseSent(long requestId, String action, Exception e) {
+            if (traceEnabled() && shouldTraceAction(action)) {
+                traceResponseSent(requestId, action, e);
+            }
+        }
+
+        void traceResponseSent(long requestId, String action, Exception e) {
+            tracerLog.trace(
+                    (org.apache.logging.log4j.util.Supplier<?>)
+                            () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e);
+        }
+
+        @Override
+        public void onRequestReceived(long requestId, String action) {
+            try {
+                blockIncomingRequestsLatch.await();
+            } catch (InterruptedException e) {
+                logger.trace("interrupted while waiting for incoming requests block to be removed");
+            }
+            if (traceEnabled() && shouldTraceAction(action)) {
+                traceReceivedRequest(requestId, action);
+            }
+        }
+
+        @Override
+        public RequestHandlerRegistry getRequestHandler(String action) {
+            return requestHandlers.get(action);
+        }
+
+        @Override
+        public TransportResponseHandler onResponseReceived(final long requestId) {
+            RequestHolder holder = clientHandlers.remove(requestId);
+            if (holder == null) {
+                checkForTimeout(requestId);
+                return null;
+            }
+            holder.cancelTimeout();
+            if (traceEnabled() && shouldTraceAction(holder.action())) {
+                traceReceivedResponse(requestId, holder.node(), holder.action());
+            }
+            return holder.handler();
+        }
+
+        void checkForTimeout(long requestId) {
+            // let's see if it's in the timeout holder, but sync on mutex to make sure any ongoing timeout
+            // handling has finished
+            final DiscoveryNode sourceNode;
+            final String action;
+            if (clientHandlers.get(requestId) != null) {
+                throw new IllegalStateException();
+            }
+            TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId);
+            if (timeoutInfoHolder != null) {
+                long time = System.currentTimeMillis();
+                logger.warn("Received response for a request that has timed out, sent [{}ms] ago, timed out [{}ms] ago, " +
+                        "action [{}], node [{}], id [{}]", time - timeoutInfoHolder.sentTime(),
+                        time - timeoutInfoHolder.timeoutTime(),
+                        timeoutInfoHolder.action(), timeoutInfoHolder.node(), requestId);
+                action = timeoutInfoHolder.action();
+                sourceNode = timeoutInfoHolder.node();
+            } else {
+                logger.warn("Transport response handler not found for id [{}]", requestId);
+                action = null;
+                sourceNode = null;
+            }
+            // call tracer out of lock
+            if (!traceEnabled()) {
+                return;
+            }
+            if (action == null) {
+                assert sourceNode == null;
+                traceUnresolvedResponse(requestId);
+            } else if (shouldTraceAction(action)) {
+                traceReceivedResponse(requestId, sourceNode, action);
+            }
+        }
+
+        @Override
+        public void onNodeConnected(final DiscoveryNode node) {
+        }
+
+        @Override
+        public void onConnectionOpened(DiscoveryNode node) {
+        }
+
+        @Override
+        public void onNodeDisconnected(final DiscoveryNode node) {
+            try {
+                for (Map.Entry<Long, RequestHolder> entry : clientHandlers.entrySet()) {
+                    RequestHolder holder = entry.getValue();
+                    if (holder.node().equals(node)) {
+                        final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey());
+                        if (holderToNotify != null) {
+                            // callback that an exception happened, but on a different thread since we don't
+                            // want handlers to worry about stack overflows
+                            threadPool.generic().execute(() -> holderToNotify.handler()
+                                    .handleException(new NodeDisconnectedException(node,
+                                            holderToNotify.action())));
+                        }
+                    }
+                }
+            } catch (EsRejectedExecutionException ex) {
+                logger.debug("Rejected execution on NodeDisconnected", ex);
+            }
+        }
+
+        void traceReceivedRequest(long requestId, String action) {
+            tracerLog.trace("[{}][{}] received request", requestId, action);
+        }
+
+        void traceResponseSent(long requestId, String action) {
+            tracerLog.trace("[{}][{}] sent response", requestId, action);
+        }
+
+        void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String
action) { + tracerLog.trace("[{}][{}] received response from [{}]", requestId, action, sourceNode); + } + + void traceUnresolvedResponse(long requestId) { + tracerLog.trace("[{}] received response but can't resolve it to a request", requestId); + } + + void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { + tracerLog.trace("[{}][{}] sent to [{}] (timeout: [{}])", requestId, action, node, options.timeout()); + } + } + + private final class TimeoutHandler implements Runnable { + + private final long requestId; + + private final long sentTime = System.currentTimeMillis(); + + volatile ScheduledFuture future; + + TimeoutHandler(long requestId) { + this.requestId = requestId; + } + + @Override + public void run() { + // we get first to make sure we only add the TimeoutInfoHandler if needed. + final RequestHolder holder = clientHandlers.get(requestId); + if (holder != null) { + // add it to the timeout information holder, in case we are going to get a response later + long timeoutTime = System.currentTimeMillis(); + timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(holder.node(), holder.action(), sentTime, + timeoutTime)); + // now that we have the information visible via timeoutInfoHandlers, we try to remove the request id + final RequestHolder removedHolder = clientHandlers.remove(requestId); + if (removedHolder != null) { + assert removedHolder == holder : "two different holder instances for request [" + requestId + "]"; + removedHolder.handler().handleException( + new ReceiveTimeoutTransportException(holder.node(), holder.action(), + "request_id [" + requestId + "] timed out after [" + (timeoutTime - sentTime) + "ms]")); + } else { + // response was processed, remove timeout info. + timeoutInfoHandlers.remove(requestId); + } + } + } + + /** + * Cancels timeout handling. This is a best effort only to avoid running it. + * Remove the requestId from {@link #clientHandlers} to make sure this doesn't run. 
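+         * If the request id is still present in {@link #clientHandlers}, this method throws an
+         * {@link IllegalStateException} instead of cancelling the scheduled timeout task.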
+ */ + void cancel() { + if (clientHandlers.get(requestId) != null) { + throw new IllegalStateException("cancel must be called after the requestId [" + + requestId + "] has been removed from clientHandlers"); + } + FutureUtils.cancel(future); + } + } + + private static class TimeoutInfoHolder { + + private final DiscoveryNode node; + private final String action; + private final long sentTime; + private final long timeoutTime; + + TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) { + this.node = node; + this.action = action; + this.sentTime = sentTime; + this.timeoutTime = timeoutTime; + } + + public DiscoveryNode node() { + return node; + } + + String action() { + return action; + } + + long sentTime() { + return sentTime; + } + + long timeoutTime() { + return timeoutTime; + } + } + + private static class RequestHolder { + + private final TransportResponseHandler handler; + + private final DiscoveryNode node; + + private final String action; + + private final TimeoutHandler timeoutHandler; + + RequestHolder(TransportResponseHandler handler, DiscoveryNode node, String action, + TimeoutHandler timeoutHandler) { + this.handler = handler; + this.node = node; + this.action = action; + this.timeoutHandler = timeoutHandler; + } + + TransportResponseHandler handler() { + return handler; + } + + public DiscoveryNode node() { + return this.node; + } + + String action() { + return this.action; + } + + void cancelTimeout() { + if (timeoutHandler != null) { + timeoutHandler.cancel(); + } + } + } + + /** + * This handler wrapper ensures that the response thread executes with the correct thread context. + * Before any of the handle methods are invoked we restore the context. + * @param thr transport response type + */ + public static final class ContextRestoreResponseHandler + implements TransportResponseHandler { + + private final TransportResponseHandler delegate; + + private final Supplier contextSupplier; + + ContextRestoreResponseHandler(Supplier contextSupplier, + TransportResponseHandler delegate) { + this.delegate = delegate; + this.contextSupplier = contextSupplier; + } + + @Override + public T newInstance() { + return delegate.newInstance(); + } + + @SuppressWarnings("try") + @Override + public void handleResponse(T response) { + try (ThreadContext.StoredContext ignore = contextSupplier.get()) { + delegate.handleResponse(response); + } + } + + @SuppressWarnings("try") + @Override + public void handleException(TransportException exp) { + try (ThreadContext.StoredContext ignore = contextSupplier.get()) { + delegate.handleException(exp); + } + } + + @Override + public String executor() { + return delegate.executor(); + } + + @Override + public String toString() { + return getClass().getName() + "/" + delegate.toString(); + } + + } + + static class DirectResponseChannel implements TransportChannel { + + private static final String DIRECT_RESPONSE_PROFILE = ".direct"; + + private final Logger logger; + + private final DiscoveryNode localNode; + + private final String action; + + private final long requestId; + + private final TransportServiceAdapter adapter; + + private final ThreadPool threadPool; + + DirectResponseChannel(Logger logger, DiscoveryNode localNode, String action, long requestId, + TransportServiceAdapter adapter, ThreadPool threadPool) { + this.logger = logger; + this.localNode = localNode; + this.action = action; + this.requestId = requestId; + this.adapter = adapter; + this.threadPool = threadPool; + } + + @Override + public String action() { + 
return action; + } + + @Override + public String getProfileName() { + return DIRECT_RESPONSE_PROFILE; + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + sendResponse(response, TransportResponseOptions.EMPTY); + } + + @SuppressWarnings("unchecked") + @Override + public void sendResponse(final TransportResponse response, TransportResponseOptions options) + throws IOException { + adapter.onResponseSent(requestId, action, response, options); + final TransportResponseHandler handler = adapter.onResponseReceived(requestId); + if (handler != null) { + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + processResponse(handler, response); + } else { + threadPool.executor(executor).execute(() -> processResponse(handler, response)); + } + } + } + + void processResponse(TransportResponseHandler handler, TransportResponse response) { + try { + handler.handleResponse(response); + } catch (Exception e) { + processException(handler, wrapInRemote(new ResponseHandlerFailureTransportException(e))); + } + } + + @SuppressWarnings("unchecked") + @Override + public void sendResponse(Exception exception) throws IOException { + adapter.onResponseSent(requestId, action, exception); + final TransportResponseHandler handler = adapter.onResponseReceived(requestId); + if (handler != null) { + final RemoteTransportException rtx = wrapInRemote(exception); + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + processException(handler, rtx); + } else { + threadPool.executor(handler.executor()).execute(() -> processException(handler, rtx)); + } + } + } + + RemoteTransportException wrapInRemote(Exception e) { + if (e instanceof RemoteTransportException) { + return (RemoteTransportException) e; + } + return new RemoteTransportException(localNode.getName(), localNode.getAddress(), action, e); + } + + void processException(final TransportResponseHandler handler, final RemoteTransportException rtx) { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage( + "failed to handle exception for action [{}], handler [{}]", action, handler), e); + } + } + + @Override + public long getRequestId() { + return requestId; + } + + @Override + public String getChannelType() { + return "direct"; + } + + @Override + public Version getVersion() { + return localNode.getVersion(); + } + } +} diff --git a/build.gradle b/build.gradle index 2f8478a..49dfe7d 100644 --- a/build.gradle +++ b/build.gradle @@ -1,10 +1,27 @@ +import java.time.ZonedDateTime +import java.time.format.DateTimeFormatter + +buildscript { + repositories { + jcenter() + maven { + url 'http://xbib.org/repository' + } + } + dependencies { + classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4" + } +} plugins { - id "org.sonarqube" version "2.2" + id "org.sonarqube" version "2.6.1" + id "io.codearte.nexus-staging" version "0.11.0" + id "org.xbib.gradle.plugin.asciidoctor" version "1.6.0.1" } -printf "Host: %s\nOS: %s %s %s\nJVM: %s %s %s %s\nGroovy: %s\nGradle: %s\n" + +printf "Date: %s\nHost: %s\nOS: %s %s %s\nJava: %s %s %s %s\nGradle: %s Groovy: %s Java: %s\n" + "Build: group: ${project.group} name: ${project.name} version: ${project.version}\n", + ZonedDateTime.now().format(DateTimeFormatter.ISO_DATE_TIME), InetAddress.getLocalHost(), System.getProperty("os.name"), System.getProperty("os.arch"), @@ -13,102 +30,121 @@ printf "Host: %s\nOS: %s %s %s\nJVM: %s %s %s 
%s\nGroovy: %s\nGradle: %s\n" + System.getProperty("java.vm.version"), System.getProperty("java.vm.vendor"), System.getProperty("java.vm.name"), - GroovySystem.getVersion(), - gradle.gradleVersion - -apply plugin: 'java' -apply plugin: 'maven' -apply plugin: 'signing' -apply plugin: 'findbugs' -apply plugin: 'pmd' -apply plugin: 'checkstyle' -apply plugin: "jacoco" - -apply from: 'gradle/ext.gradle' - -sourceSets { - integrationTest { - java { - srcDir file('src/integration-test/java') - compileClasspath += main.output - compileClasspath += test.output - } - resources { - srcDir file('src/integration-test/resources') - } - } -} + gradle.gradleVersion, GroovySystem.getVersion(), JavaVersion.current() -configurations { - wagon - integrationTestCompile.extendsFrom testCompile - integrationTestRuntime.extendsFrom testRuntime + +apply plugin: "io.codearte.nexus-staging" +apply plugin: 'org.xbib.gradle.plugin.asciidoctor' + +ext { + user = 'jprante' + name = 'elx' + description = 'Elasticsearch extensions' + scmUrl = 'https://github.com/' + user + '/' + name + scmConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git' + scmDeveloperConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git' } -dependencies { - compile "org.xbib:metrics:1.0.0" - compile("org.elasticsearch:elasticsearch:2.2.1") { - exclude module: "securesm" +subprojects { + apply plugin: 'java' + apply plugin: 'maven' + apply plugin: 'signing' + + configurations { + wagon + alpnagent + asciidoclet } - testCompile "net.java.dev.jna:jna:4.1.0" - testCompile "junit:junit:4.12" - testCompile "org.apache.logging.log4j:log4j-core:2.7" - testCompile "org.apache.logging.log4j:log4j-slf4j-impl:2.7" - wagon 'org.apache.maven.wagon:wagon-ssh-external:2.10' -} -sourceCompatibility = JavaVersion.VERSION_1_8 -targetCompatibility = JavaVersion.VERSION_1_8 + dependencies { + alpnagent "org.mortbay.jetty.alpn:jetty-alpn-agent:${project.property('alpnagent.version')}" + asciidoclet "org.xbib:asciidoclet:${project.property('asciidoclet.version')}" + wagon "org.apache.maven.wagon:wagon-ssh:${project.property('wagon.version')}" + } -[compileJava, compileTestJava]*.options*.encoding = 'UTF-8' -tasks.withType(JavaCompile) { - options.compilerArgs << "-Xlint:all" -} + compileJava { + sourceCompatibility = JavaVersion.VERSION_11 + targetCompatibility = JavaVersion.VERSION_11 + } + compileTestJava { + sourceCompatibility = JavaVersion.VERSION_11 + targetCompatibility = JavaVersion.VERSION_11 + } -task integrationTest(type: Test) { - include '**/MiscTestSuite.class' - include '**/BulkNodeTestSuite.class' - include '**/BulkTransportTestSuite.class' - testClassesDir = sourceSets.integrationTest.output.classesDir - classpath = configurations.integrationTestCompile - classpath += configurations.integrationTestRuntime - classpath += sourceSets.main.output - classpath += sourceSets.test.output - classpath += sourceSets.integrationTest.output - outputs.upToDateWhen { false } - systemProperty 'path.home', projectDir.absolutePath - testLogging.showStandardStreams = true -} + jar { + baseName "${rootProject.name}-${project.name}" + } -integrationTest.mustRunAfter test -check.dependsOn integrationTest + javadoc { + options.docletpath = configurations.asciidoclet.files.asType(List) + options.doclet = 'org.xbib.asciidoclet.Asciidoclet' + options.overview = "src/docs/asciidoclet/overview.adoc" + options.addStringOption "-base-dir", "${projectDir}" + options.addStringOption "-attribute", + 
"name=${project.name},version=${project.version},title-link=https://github.com/jprante/${project.name}" + configure(options) { + noTimestamp = true + } + } -clean { - delete "plugins" - delete "logs" -} + /*task javadocJar(type: Jar, dependsOn: classes) { + baseName "${rootProject.name}-${project.name}" + from javadoc + into "build/tmp" + classifier 'javadoc' + } -task javadocJar(type: Jar, dependsOn: classes) { - from javadoc - into "build/tmp" - classifier 'javadoc' -} + task sourcesJar(type: Jar, dependsOn: classes) { + baseName "${rootProject.name}-${project.name}" + from sourceSets.main.allSource + into "build/tmp" + classifier 'sources' + } -task sourcesJar(type: Jar, dependsOn: classes) { - from sourceSets.main.allSource - into "build/tmp" - classifier 'sources' -} + artifacts { + archives javadocJar, sourcesJar + }*/ + + if (project.hasProperty('signing.keyId')) { + signing { + sign configurations.archives + } + } + + apply from: "${rootProject.projectDir}/gradle/ext.gradle" + apply from: "${rootProject.projectDir}/gradle/publish.gradle" + //apply from: "${rootProject.projectDir}/gradle/sonarqube.gradle" -artifacts { - archives javadocJar, sourcesJar } -if (project.hasProperty('signing.keyId')) { - signing { - sign configurations.archives +/*asciidoctor { + attributes toc: 'left', + doctype: 'book', + icons: 'font', + encoding: 'utf-8', + sectlink: true, + sectanchors: true, + linkattrs: true, + imagesdir: 'img', + 'source-highlighter': 'coderay' +}*/ + +/* +task aggregatedJavadoc(type: Javadoc) { + group = 'aggregation' + description = 'Generates aggregated Javadoc API documentation.' + title = "$description $version API" + destinationDir = file("$buildDir/docs/javadoc") + def sourceProjects = subprojects.findAll { + it.plugins.hasPlugin('java') || it.plugins.hasPlugin('groovy') + } + source sourceProjects.collect { + it.sourceSets.main.allJava } + classpath = files(sourceProjects.collect { + it.sourceSets.main.runtimeClasspath + }) + //options.overview = 'gradle/api/overview.html' + options.showFromProtected() } - -apply from: 'gradle/publish.gradle' -apply from: 'gradle/sonarqube.gradle' +*/ diff --git a/common/build.gradle b/common/build.gradle new file mode 100644 index 0000000..7e0f3cb --- /dev/null +++ b/common/build.gradle @@ -0,0 +1,65 @@ +buildscript { + repositories { + jcenter() + maven { + url 'http://xbib.org/repository' + } + } + dependencies { + classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4" + } +} + +apply plugin: 'org.xbib.gradle.plugin.elasticsearch.build' + +configurations { + main + tests +} + +dependencies { + compile project(':api') + compile "org.xbib:metrics:${project.property('xbib-metrics.version')}" + compileOnly "org.apache.logging.log4j:log4j-api:${project.property('log4j.version')}" + testCompile "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" + testRuntime "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" +} + +jar { + baseName "${rootProject.name}-common" +} + +/* +task testJar(type: Jar, dependsOn: testClasses) { + baseName = "${project.archivesBaseName}-tests" + from sourceSets.test.output +} +*/ + +artifacts { + main jar + tests testJar + archives sourcesJar, javadocJar +} + +test { + enabled = false + jvmArgs "-javaagent:" + configurations.alpnagent.asPath + systemProperty 'path.home', project.buildDir.absolutePath + testLogging { + showStandardStreams = true + exceptionFormat = 'full' + } +} + 
+randomizedTest {
+    enabled = false
+}
+
+esTest {
+    // test with the jars, not the classes, for security manager
+    // classpath = files(configurations.testRuntime) + configurations.main.artifacts.files + configurations.tests.artifacts.files
+    systemProperty 'tests.security.manager', 'true'
+}
+esTest.dependsOn jar, testJar
+
diff --git a/common/config/checkstyle/checkstyle.xml
new file mode 100644
index 0000000..8cb4438
--- /dev/null
+++ b/common/config/checkstyle/checkstyle.xml
@@ -0,0 +1,321 @@
diff --git a/common/src/docs/asciidoc/css/foundation.css
new file mode 100644
index 0000000..27be611
--- /dev/null
+++ b/common/src/docs/asciidoc/css/foundation.css
@@ -0,0 +1,684 @@
+/*! normalize.css v2.1.2 | MIT License | git.io/normalize */
+/* ========================================================================== HTML5 display definitions ========================================================================== */
+/** Correct `block` display not defined in IE 8/9. */
+article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; }
+
+/** Correct `inline-block` display not defined in IE 8/9. */
+audio, canvas, video { display: inline-block; }
+
+/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */
+audio:not([controls]) { display: none; height: 0; }
+
+/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */
+[hidden], template { display: none; }
+
+script { display: none !important; }
+
+/* ========================================================================== Base ========================================================================== */
+/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */
+html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ }
+
+/** Remove default margin. */
+body { margin: 0; }
+
+/* ========================================================================== Links ========================================================================== */
+/** Remove the gray background color from active links in IE 10. */
+a { background: transparent; }
+
+/** Address `outline` inconsistency between Chrome and other browsers. */
+a:focus { outline: thin dotted; }
+
+/** Improve readability when focused and also mouse hovered in all browsers. */
+a:active, a:hover { outline: 0; }
+
+/* ========================================================================== Typography ========================================================================== */
+/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */
+h1 { font-size: 2em; margin: 0.67em 0; }
+
+/** Address styling not present in IE 8/9, Safari 5, and Chrome.
*/ +abbr[title] { border-bottom: 1px dotted; } + +/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ +b, strong { font-weight: bold; } + +/** Address styling not present in Safari 5 and Chrome. */ +dfn { font-style: italic; } + +/** Address differences between Firefox and other browsers. */ +hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; } + +/** Address styling not present in IE 8/9. */ +mark { background: #ff0; color: #000; } + +/** Correct font family set oddly in Safari 5 and Chrome. */ +code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; } + +/** Improve readability of pre-formatted text in all browsers. */ +pre { white-space: pre-wrap; } + +/** Set consistent quote types. */ +q { quotes: "\201C" "\201D" "\2018" "\2019"; } + +/** Address inconsistent and variable font size in all browsers. */ +small { font-size: 80%; } + +/** Prevent `sub` and `sup` affecting `line-height` in all browsers. */ +sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } + +sup { top: -0.5em; } + +sub { bottom: -0.25em; } + +/* ========================================================================== Embedded content ========================================================================== */ +/** Remove border when inside `a` element in IE 8/9. */ +img { border: 0; } + +/** Correct overflow displayed oddly in IE 9. */ +svg:not(:root) { overflow: hidden; } + +/* ========================================================================== Figures ========================================================================== */ +/** Address margin not present in IE 8/9 and Safari 5. */ +figure { margin: 0; } + +/* ========================================================================== Forms ========================================================================== */ +/** Define consistent border, margin, and padding. */ +fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } + +/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */ +legend { border: 0; /* 1 */ padding: 0; /* 2 */ } + +/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ +button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ } + +/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ +button, input { line-height: normal; } + +/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */ +button, select { text-transform: none; } + +/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */ +button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ } + +/** Re-set default cursor for disabled elements. */ +button[disabled], html input[disabled] { cursor: default; } + +/** 1. 
Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ +input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } + +/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */ +input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; } + +/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ +input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } + +/** Remove inner padding and border in Firefox 4+. */ +button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } + +/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */ +textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ } + +/* ========================================================================== Tables ========================================================================== */ +/** Remove most spacing between table cells. */ +table { border-collapse: collapse; border-spacing: 0; } + +meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; } + +meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; } + +meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; } + +*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } + +html, body { font-size: 100%; } + +body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; } + +a:hover { cursor: pointer; } + +img, object, embed { max-width: 100%; height: auto; } + +object, embed { height: 100%; } + +img { -ms-interpolation-mode: bicubic; } + +#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; } + +.left { float: left !important; } + +.right { float: right !important; } + +.text-left { text-align: left !important; } + +.text-right { text-align: right !important; } + +.text-center { text-align: center !important; } + +.text-justify { text-align: justify !important; } + +.hide { display: none; } + +.antialiased { -webkit-font-smoothing: antialiased; } + +img { display: inline-block; vertical-align: middle; } + +textarea { height: auto; min-height: 50px; } + +select { width: 100%; } + +object, svg { display: inline-block; vertical-align: middle; } + +.center { margin-left: auto; margin-right: auto; } + +.spread { width: 100%; } + +p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; } + +.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; 
font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; } + +/* Typography resets */ +div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; } + +/* Default Link Styles */ +a { color: #2ba6cb; text-decoration: none; line-height: inherit; } +a:hover, a:focus { color: #2795b6; } +a img { border: none; } + +/* Default paragraph styles */ +p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } +p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } + +/* Default header styles */ +h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; } +h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; } + +h1 { font-size: 2.125em; } + +h2 { font-size: 1.6875em; } + +h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.375em; } + +h4 { font-size: 1.125em; } + +h5 { font-size: 1.125em; } + +h6 { font-size: 1em; } + +hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; } + +/* Helpful Typography Defaults */ +em, i { font-style: italic; line-height: inherit; } + +strong, b { font-weight: bold; line-height: inherit; } + +small { font-size: 60%; line-height: inherit; } + +code { font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; } + +/* Lists */ +ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } + +ul, ol { margin-left: 1.5em; } +ul.no-bullet, ol.no-bullet { margin-left: 1.5em; } + +/* Unordered Lists */ +ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ } +ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; } +ul.square { list-style-type: square; } +ul.circle { list-style-type: circle; } +ul.disc { list-style-type: disc; } +ul.no-bullet { list-style: none; } + +/* Ordered Lists */ +ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } + +/* Definition Lists */ +dl dt { margin-bottom: 0.3125em; font-weight: bold; } +dl dd { margin-bottom: 1.25em; } + +/* Abbreviations */ +abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; } + +abbr { text-transform: none; } + +/* Blockquotes */ +blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; } +blockquote cite { display: block; font-size: 0.8125em; color: #555555; } +blockquote cite:before { content: "\2014 \0020"; } +blockquote cite a, blockquote cite a:visited { color: #555555; } + +blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; } + +/* Microformats */ +.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; } +.vcard li { margin: 0; display: block; } +.vcard .fn { font-weight: bold; font-size: 0.9375em; } + +.vevent .summary { font-weight: bold; } +.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 
0.0625em; } + +@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } + h1 { font-size: 2.75em; } + h2 { font-size: 2.3125em; } + h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } + h4 { font-size: 1.4375em; } } +/* Tables */ +table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; } +table thead, table tfoot { background: whitesmoke; font-weight: bold; } +table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; } +table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; } +table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; } +table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; } + +body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; } + +h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } + +.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; } +.clearfix:after, .float-group:after { clear: both; } + +*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; } +*:not(pre) > code.nobreak { word-wrap: normal; } +*:not(pre) > code.nowrap { white-space: nowrap; } + +pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; } + +em em { font-style: normal; } + +strong strong { font-weight: normal; } + +.keyseq { color: #555555; } + +kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } + +.keyseq kbd:first-child { margin-left: 0; } + +.keyseq kbd:last-child { margin-right: 0; } + +.menuseq, .menu { color: #090909; } + +b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; } + +b.button:before { content: "["; padding: 0 3px 0 2px; } + +b.button:after { content: "]"; padding: 0 2px 0 3px; } + +#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } +#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; } +#header:after, #content:after, #footnotes:after, #footer:after { clear: both; } + +#content { margin-top: 1.25em; } + +#content:before { content: none; } + +#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; } +#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; } +#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; } +#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; 
padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; } +#header .details span:first-child { margin-left: -0.125em; } +#header .details span.email a { color: #6f6f6f; } +#header .details br { display: none; } +#header .details br + span:before { content: "\00a0\2013\00a0"; } +#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; } +#header .details br + span#revremark:before { content: "\00a0|\00a0"; } +#header #revnumber { text-transform: capitalize; } +#header #revnumber:after { content: "\00a0"; } + +#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } + +#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; } +#toc > ul { margin-left: 0.125em; } +#toc ul.sectlevel0 > li > a { font-style: italic; } +#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } +#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; } +#toc li { line-height: 1.3334; margin-top: 0.3334em; } +#toc a { text-decoration: none; } +#toc a:active { text-decoration: underline; } + +#toctitle { color: #6f6f6f; font-size: 1.2em; } + +@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } + body.toc2 { padding-left: 15em; padding-right: 0; } + #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } + #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } + #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } + #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } + #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } + body.toc2.toc-right { padding-left: 0; padding-right: 15em; } + body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } } +@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } + #toc.toc2 { width: 20em; } + #toc.toc2 #toctitle { font-size: 1.375em; } + #toc.toc2 > ul { font-size: 0.95em; } + #toc.toc2 ul ul { padding-left: 1.25em; } + body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } +#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } +#content #toc > :first-child { margin-top: 0; } +#content #toc > :last-child { margin-bottom: 0; } + +#footer { max-width: 100%; background-color: #222222; padding: 1.25em; } + +#footer-text { color: #dddddd; line-height: 1.44; } + +.sect1 { padding-bottom: 0.625em; } + +@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } } +.sect1 + .sect1 { border-top: 1px solid #dddddd; } + +#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; } 
+#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } +#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } +#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; } +#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; } + +.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } + +.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; } + +table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; } + +.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; } + +table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; } + +.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } +.admonitionblock > table td.icon { text-align: center; width: 80px; } +.admonitionblock > table td.icon img { max-width: initial; } +.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; } +.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; } +.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } + +.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; } +.exampleblock > .content > :first-child { margin-top: 0; } +.exampleblock > .content > :last-child { margin-bottom: 0; } + +.sidebarblock { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } +.sidebarblock > :first-child { margin-top: 0; } +.sidebarblock > :last-child { margin-bottom: 0; } +.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; } + +.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > 
li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } + +.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; } +.sidebarblock .literalblock pre, .sidebarblock .listingblock pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; } + +.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; } +.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; } +@media only screen and (min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } } +@media only screen and (min-width: 1280px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } } + +.literalblock.output pre { color: #eeeeee; background-color: black; } + +.listingblock pre.highlightjs { padding: 0; } +.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; } + +.listingblock > .content { position: relative; } + +.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; } + +.listingblock:hover code[data-lang]:before { display: block; } + +.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; } + +.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; } + +table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } + +table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; } + +table.pyhltable td.code { padding-left: .75em; padding-right: 0; } + +pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; } + +pre.pygments .lineno { display: inline-block; margin-right: .25em; } + +table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; } + +.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } +.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; } +.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } +.quoteblock blockquote { margin: 0; padding: 0; border: 0; } +.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); } +.quoteblock blockquote > .paragraph:last-child p { 
margin-bottom: 0; } +.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; } +.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; } +.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; } +.quoteblock .quoteblock blockquote:before { display: none; } + +.verseblock { margin: 0 1em 1.25em 1em; } +.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; } +.verseblock pre strong { font-weight: 400; } +.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } + +.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; } +.quoteblock .attribution br, .verseblock .attribution br { display: none; } +.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; } + +.quoteblock.abstract { margin: 0 0 1.25em 0; display: block; } +.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; } +.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; } + +table.tableblock { max-width: 100%; border-collapse: separate; } +table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; } + +table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; } + +table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; } + +table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; } + +table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; } + +table.grid-all * > tr > .tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; } + +table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; } + +table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; } + +table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; } + +table.frame-all { border-width: 1px; } + +table.frame-sides { border-width: 0 1px; } + +table.frame-topbot { border-width: 1px 0; } + +th.halign-left, td.halign-left { text-align: left; } + +th.halign-right, td.halign-right { text-align: right; } + +th.halign-center, td.halign-center { text-align: center; } + +th.valign-top, td.valign-top { vertical-align: top; } + +th.valign-bottom, td.valign-bottom { vertical-align: bottom; } + +th.valign-middle, td.valign-middle { vertical-align: middle; } + +table thead th, table tfoot th { font-weight: bold; } + +tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; } + +tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; } + +p.tableblock > code:only-child { background: none; padding: 0; } + +p.tableblock { font-size: 1em; } + +td > div.verse { white-space: pre; } + +ol { margin-left: 1.75em; } + +ul li ol { margin-left: 1.5em; } + +dl dd { margin-left: 
1.125em; } + +dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } + +ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } + +ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; } + +ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; } + +ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; } + +ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; } + +ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; } +ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; } +ul.inline > li > * { display: block; } + +.unstyled dl dt { font-weight: normal; font-style: normal; } + +ol.arabic { list-style-type: decimal; } + +ol.decimal { list-style-type: decimal-leading-zero; } + +ol.loweralpha { list-style-type: lower-alpha; } + +ol.upperalpha { list-style-type: upper-alpha; } + +ol.lowerroman { list-style-type: lower-roman; } + +ol.upperroman { list-style-type: upper-roman; } + +ol.lowergreek { list-style-type: lower-greek; } + +.hdlist > table, .colist > table { border: 0; background: none; } +.hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; } + +td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } + +td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } + +.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } + +.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; } +.colist > table tr > td:first-of-type img { max-width: initial; } +.colist > table tr > td:last-of-type { padding: 0.25em 0; } + +.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; } + +.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; } +.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; } +.imageblock > .title { margin-bottom: 0; } +.imageblock.thumb, .imageblock.th { border-width: 6px; } +.imageblock.thumb > .title, .imageblock.th > .title { padding: 0 0.125em; } + +.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } +.image.left { margin-right: 0.625em; } +.image.right { margin-left: 0.625em; } + +a.image { text-decoration: none; display: inline-block; } +a.image object { pointer-events: none; } + +sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } +sup.footnote a, sup.footnoteref a { text-decoration: none; } +sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } + +#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } +#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; } +#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; } +#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; } +#footnotes .footnote:last-of-type { margin-bottom: 0; } +#content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } + +.gist .file-data > table { 
border: 0; background: #fff; width: 100%; margin-bottom: 0; } +.gist .file-data > table td.line-data { width: 99%; } + +div.unbreakable { page-break-inside: avoid; } + +.big { font-size: larger; } + +.small { font-size: smaller; } + +.underline { text-decoration: underline; } + +.overline { text-decoration: overline; } + +.line-through { text-decoration: line-through; } + +.aqua { color: #00bfbf; } + +.aqua-background { background-color: #00fafa; } + +.black { color: black; } + +.black-background { background-color: black; } + +.blue { color: #0000bf; } + +.blue-background { background-color: #0000fa; } + +.fuchsia { color: #bf00bf; } + +.fuchsia-background { background-color: #fa00fa; } + +.gray { color: #606060; } + +.gray-background { background-color: #7d7d7d; } + +.green { color: #006000; } + +.green-background { background-color: #007d00; } + +.lime { color: #00bf00; } + +.lime-background { background-color: #00fa00; } + +.maroon { color: #600000; } + +.maroon-background { background-color: #7d0000; } + +.navy { color: #000060; } + +.navy-background { background-color: #00007d; } + +.olive { color: #606000; } + +.olive-background { background-color: #7d7d00; } + +.purple { color: #600060; } + +.purple-background { background-color: #7d007d; } + +.red { color: #bf0000; } + +.red-background { background-color: #fa0000; } + +.silver { color: #909090; } + +.silver-background { background-color: #bcbcbc; } + +.teal { color: #006060; } + +.teal-background { background-color: #007d7d; } + +.white { color: #bfbfbf; } + +.white-background { background-color: #fafafa; } + +.yellow { color: #bfbf00; } + +.yellow-background { background-color: #fafa00; } + +span.icon > .fa { cursor: default; } + +.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } +.admonitionblock td.icon .icon-note:before { content: "\f05a"; color: #207c98; } +.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } +.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; } +.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; } +.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; } + +.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } +.conum[data-value] * { color: #fff !important; } +.conum[data-value] + b { display: none; } +.conum[data-value]:after { content: attr(data-value); } +pre .conum[data-value] { position: relative; top: -0.125em; } + +b.conum * { color: inherit !important; } + +.conum:not([data-value]):empty { display: none; } + +.literalblock pre, .listingblock pre { background: #eeeeee; } diff --git a/common/src/docs/asciidoclet/overview.adoc b/common/src/docs/asciidoclet/overview.adoc new file mode 100644 index 0000000..7947331 --- /dev/null +++ b/common/src/docs/asciidoclet/overview.adoc @@ -0,0 +1,4 @@ += Elasticsearch Java client +Jörg Prante +Version 5.4.0.0 + diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/AbstractClient.java b/common/src/main/java/org/xbib/elasticsearch/client/AbstractClient.java similarity index 52% rename from 
src/main/java/org/xbib/elasticsearch/extras/client/AbstractClient.java rename to common/src/main/java/org/xbib/elasticsearch/client/AbstractClient.java index aed7be0..79a9336 100644 --- a/src/main/java/org/xbib/elasticsearch/extras/client/AbstractClient.java +++ b/common/src/main/java/org/xbib/elasticsearch/client/AbstractClient.java @@ -1,7 +1,10 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; @@ -14,6 +17,9 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; @@ -24,6 +30,7 @@ import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; @@ -31,18 +38,25 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.client.transport.NoNodeAvailableException; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; 
import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortBuilders; @@ -52,6 +66,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.StringWriter; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -60,34 +75,188 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * */ -public abstract class AbstractClient { +public abstract class AbstractClient implements ClientMethods { - private static final ESLogger logger = ESLoggerFactory.getLogger(AbstractClient.class.getName()); + private static final Logger logger = LogManager.getLogger(AbstractClient.class.getName()); private Settings.Builder settingsBuilder; private Settings settings; - private Map mappings = new HashMap<>(); + private Map mappings; - public abstract ElasticsearchClient client(); + private ElasticsearchClient client; - protected abstract void createClient(Settings settings) throws IOException; + protected BulkProcessor bulkProcessor; - public abstract void shutdown(); + protected BulkMetric metric; - public Settings.Builder getSettingsBuilder() { - return settingsBuilder(); + protected BulkControl control; + + protected Throwable throwable; + + protected boolean closed; + + protected int maxActionsPerRequest = DEFAULT_MAX_ACTIONS_PER_REQUEST; + + protected int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS; + + protected ByteSizeValue maxVolume = DEFAULT_MAX_VOLUME_PER_REQUEST; + + protected TimeValue flushInterval = DEFAULT_FLUSH_INTERVAL; + + @Override + public AbstractClient init(ElasticsearchClient client, Settings settings, + final BulkMetric metric, final BulkControl control) { + this.client = client; + this.mappings = new HashMap<>(); + if (settings == null) { + settings = findSettings(); + } + if (client == null && settings != null) { + try { + this.client = createClient(settings); + } catch (IOException e) { + logger.error(e.getMessage(), e); + } + } + this.metric = metric; + this.control = control; + if (metric != null) { + metric.start(); + } + resetSettings(); + BulkProcessor.Listener listener = new BulkProcessor.Listener() { + + private final Logger logger = LogManager.getLogger(getClass().getName() + ".Listener"); + + @Override + public void beforeBulk(long executionId, BulkRequest request) { + long l = -1; + if (metric != null) { + metric.getCurrentIngest().inc(); + l = metric.getCurrentIngest().getCount(); + int n = request.numberOfActions(); + metric.getSubmitted().inc(n); + metric.getCurrentIngestNumDocs().inc(n); + metric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes()); + } + logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]", + executionId, + request.numberOfActions(), + request.estimatedSizeInBytes(), + l); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + long l = -1; + if (metric != null) { + metric.getCurrentIngest().dec(); + l = metric.getCurrentIngest().getCount(); + 
metric.getSucceeded().inc(response.getItems().length); + } + int n = 0; + for (BulkItemResponse itemResponse : response.getItems()) { + if (metric != null) { + metric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId()); + } + if (itemResponse.isFailed()) { + n++; + if (metric != null) { + metric.getSucceeded().dec(1); + metric.getFailed().inc(1); + } + } + } + if (metric != null) { + logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests", + executionId, + metric.getSucceeded().getCount(), + metric.getFailed().getCount(), + response.getTook().millis(), + l); + } + if (n > 0) { + logger.error("bulk [{}] failed with {} failed items, failure message = {}", + executionId, n, response.buildFailureMessage()); + } else { + if (metric != null) { + metric.getCurrentIngestNumDocs().dec(response.getItems().length); + } + } + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + if (metric != null) { + metric.getCurrentIngest().dec(); + } + throwable = failure; + closed = true; + logger.error("after bulk [" + executionId + "] error", failure); + } + }; + if (this.client != null) { + BulkProcessor.Builder builder = BulkProcessor.builder(this.client, listener) + .setBulkActions(maxActionsPerRequest) + .setConcurrentRequests(maxConcurrentRequests) + .setFlushInterval(flushInterval); + if (maxVolume != null) { + builder.setBulkSize(maxVolume); + } + this.bulkProcessor = builder.build(); + } + this.closed = false; + return this; + } + + protected abstract ElasticsearchClient createClient(Settings settings) throws IOException; + + @Override + public ElasticsearchClient client() { + return client; + } + + @Override + public ClientMethods maxActionsPerRequest(int maxActionsPerRequest) { + this.maxActionsPerRequest = maxActionsPerRequest; + return this; + } + + @Override + public ClientMethods maxConcurrentRequests(int maxConcurrentRequests) { + this.maxConcurrentRequests = maxConcurrentRequests; + return this; + } + + @Override + public ClientMethods maxVolumePerRequest(ByteSizeValue maxVolume) { + this.maxVolume = maxVolume; + return this; + } + + @Override + public ClientMethods flushIngestInterval(TimeValue flushInterval) { + this.flushInterval = flushInterval; + return this; + } + + @Override + public BulkMetric getMetric() { + return metric; } public void resetSettings() { - this.settingsBuilder = Settings.settingsBuilder(); + this.settingsBuilder = Settings.builder(); settings = null; mappings = new HashMap<>(); } @@ -98,31 +267,31 @@ public abstract class AbstractClient { public void setting(String key, String value) { if (settingsBuilder == null) { - settingsBuilder = Settings.settingsBuilder(); + settingsBuilder = Settings.builder(); } settingsBuilder.put(key, value); } public void setting(String key, Boolean value) { if (settingsBuilder == null) { - settingsBuilder = Settings.settingsBuilder(); + settingsBuilder = Settings.builder(); } settingsBuilder.put(key, value); } public void setting(String key, Integer value) { if (settingsBuilder == null) { - settingsBuilder = Settings.settingsBuilder(); + settingsBuilder = Settings.builder(); } settingsBuilder.put(key, value); } public void setting(InputStream in) throws IOException { - settingsBuilder = Settings.settingsBuilder().loadFromStream(".json", in); + settingsBuilder = Settings.builder().loadFromStream(".json", in, true); } public Settings.Builder settingsBuilder() { - return settingsBuilder != null ? 
settingsBuilder : Settings.settingsBuilder(); + return settingsBuilder != null ? settingsBuilder : Settings.builder(); } public Settings settings() { @@ -130,47 +299,253 @@ public abstract class AbstractClient { return settings; } if (settingsBuilder == null) { - settingsBuilder = Settings.settingsBuilder(); + settingsBuilder = Settings.builder(); } return settingsBuilder.build(); } + @Override public void mapping(String type, String mapping) throws IOException { mappings.put(type, mapping); } + @Override public void mapping(String type, InputStream in) throws IOException { if (type == null) { return; } StringWriter sw = new StringWriter(); - Streams.copy(new InputStreamReader(in), sw); + Streams.copy(new InputStreamReader(in, StandardCharsets.UTF_8), sw); mappings.put(type, sw.toString()); } - public Map mappings() { - return mappings.isEmpty() ? null : mappings; + @Override + public ClientMethods index(String index, String type, String id, boolean create, BytesReference source) { + return indexRequest(new IndexRequest(index).type(type).id(id).create(create).source(source, XContentType.JSON)); + } + + @Override + public ClientMethods index(String index, String type, String id, boolean create, String source) { + return indexRequest(new IndexRequest(index).type(type).id(id).create(create).source(source, XContentType.JSON)); + } + + @Override + public ClientMethods indexRequest(IndexRequest indexRequest) { + if (closed) { + throwClose(); + } + try { + if (metric != null) { + metric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id()); + } + bulkProcessor.add(indexRequest); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of index request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public ClientMethods delete(String index, String type, String id) { + return deleteRequest(new DeleteRequest(index).type(type).id(id)); + } + + @Override + public ClientMethods deleteRequest(DeleteRequest deleteRequest) { + if (closed) { + throwClose(); + } + try { + if (metric != null) { + metric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); + } + bulkProcessor.add(deleteRequest); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of delete failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public ClientMethods update(String index, String type, String id, BytesReference source) { + return updateRequest(new UpdateRequest().index(index).type(type).id(id).upsert(source, XContentType.JSON)); + } + + @Override + public ClientMethods update(String index, String type, String id, String source) { + return updateRequest(new UpdateRequest().index(index).type(type).id(id).upsert(source, XContentType.JSON)); + } + + @Override + public ClientMethods updateRequest(UpdateRequest updateRequest) { + if (closed) { + throwClose(); + } + try { + if (metric != null) { + metric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id()); + } + bulkProcessor.add(updateRequest); + } catch (Exception e) { + throwable = e; + closed = true; + logger.error("bulk add of update request failed: " + e.getMessage(), e); + } + return this; + } + + @Override + public ClientMethods startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds) + throws IOException { + if (control == null) { + return this; + } + if (!control.isBulk(index) && startRefreshIntervalSeconds > 0L && stopRefreshIntervalSeconds > 
0L) { + control.startBulk(index, startRefreshIntervalSeconds, stopRefreshIntervalSeconds); + updateIndexSetting(index, "refresh_interval", startRefreshIntervalSeconds + "s"); + } + return this; + } + + @Override + public ClientMethods stopBulk(String index) throws IOException { + if (control == null) { + return this; + } + if (control.isBulk(index)) { + long secs = control.getStopBulkRefreshIntervals().get(index); + if (secs > 0L) { + updateIndexSetting(index, "refresh_interval", secs + "s"); + } + control.finishBulk(index); + } + return this; } + @Override + public ClientMethods flushIngest() { + if (closed) { + throwClose(); + } + logger.debug("flushing bulk processor"); + bulkProcessor.flush(); + return this; + } - public void updateIndexSetting(String index, String key, Object value) throws IOException { + @Override + public synchronized void shutdown() throws IOException { + if (closed) { + throwClose(); + } + if (bulkProcessor != null) { + logger.info("closing bulk processor..."); + bulkProcessor.close(); + } + if (metric != null) { + logger.info("stopping metric"); + metric.stop(); + } + if (control != null && control.indices() != null && !control.indices().isEmpty()) { + logger.info("stopping bulk mode for indices {}...", control.indices()); + for (String index : control.indices()) { + stopBulk(index); + } + } + } + + @Override + public ClientMethods newIndex(String index) { + if (closed) { + throwClose(); + } + return newIndex(index, null, null); + } + + @Override + public ClientMethods newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException { + resetSettings(); + setting(settings); + mapping(type, mappings); + return newIndex(index, settings(), this.mappings); + } + + @Override + public ClientMethods newIndex(String index, Settings settings, Map mappings) { + if (closed) { + throwClose(); + } if (client() == null) { - return; + logger.warn("no client for create index"); + return this; } if (index == null) { - throw new IOException("no index name given"); + logger.warn("no index name given to create index"); + return this; } - if (key == null) { - throw new IOException("no key given"); + CreateIndexRequestBuilder createIndexRequestBuilder = + new CreateIndexRequestBuilder(client(), CreateIndexAction.INSTANCE).setIndex(index); + if (settings != null) { + logger.info("found settings {}", settings.toString()); + createIndexRequestBuilder.setSettings(settings); + } + if (mappings != null) { + for (Map.Entry entry : mappings.entrySet()) { + String type = entry.getKey(); + String mapping = entry.getValue(); + logger.info("found mapping for {}", type); + createIndexRequestBuilder.addMapping(type, mapping, XContentType.JSON); + } } - if (value == null) { - throw new IOException("no value given"); + CreateIndexResponse createIndexResponse = createIndexRequestBuilder.execute().actionGet(); + logger.info("index {} created: {}", index, createIndexResponse); + return this; + } + + + @Override + public ClientMethods newMapping(String index, String type, Map mapping) { + PutMappingRequestBuilder putMappingRequestBuilder = + new PutMappingRequestBuilder(client(), PutMappingAction.INSTANCE) + .setIndices(index) + .setType(type) + .setSource(mapping); + putMappingRequestBuilder.execute().actionGet(); + logger.info("mapping created for index {} and type {}", index, type); + return this; + } + + @Override + public ClientMethods deleteIndex(String index) { + if (closed) { + throwClose(); } - Settings.Builder updateSettingsBuilder = Settings.settingsBuilder(); 
- updateSettingsBuilder.put(key, value.toString()); - UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(index) - .settings(updateSettingsBuilder); - client().execute(UpdateSettingsAction.INSTANCE, updateSettingsRequest).actionGet(); + if (client == null) { + logger.warn("no client"); + return this; + } + if (index == null) { + logger.warn("no index name given to delete index"); + return this; + } + DeleteIndexRequestBuilder deleteIndexRequestBuilder = + new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, index); + deleteIndexRequestBuilder.execute().actionGet(); + return this; + } + + @Override + public ClientMethods waitForResponses(TimeValue maxWaitTime) throws InterruptedException, ExecutionException { + if (closed) { + throwClose(); + } + while (!bulkProcessor.awaitClose(maxWaitTime.getMillis(), TimeUnit.MILLISECONDS)) { + logger.warn("still waiting for responses"); + } + return this; } public void waitForRecovery() throws IOException { @@ -180,6 +555,7 @@ public abstract class AbstractClient { client().execute(RecoveryAction.INSTANCE, new RecoveryRequest()).actionGet(); } + @Override public int waitForRecovery(String index) throws IOException { if (client() == null) { return -1; @@ -194,7 +570,8 @@ public abstract class AbstractClient { return shards; } - public void waitForCluster(String statusString, String timeout) throws IOException { + @Override + public void waitForCluster(String statusString, TimeValue timeout) throws IOException { if (client() == null) { return; } @@ -218,7 +595,7 @@ public abstract class AbstractClient { new ClusterStateRequestBuilder(client(), ClusterStateAction.INSTANCE).all(); ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet(); String name = clusterStateResponse.getClusterName().value(); - int nodeCount = clusterStateResponse.getState().getNodes().size(); + int nodeCount = clusterStateResponse.getState().getNodes().getSize(); return name + " (" + nodeCount + " nodes connected)"; } catch (ElasticsearchTimeoutException e) { logger.warn(e.getMessage(), e); @@ -255,7 +632,7 @@ public abstract class AbstractClient { } public int updateReplicaLevel(String index, int level) throws IOException { - waitForCluster("YELLOW", "30s"); + waitForCluster("YELLOW", TimeValue.timeValueSeconds(30)); updateIndexSetting(index, "number_of_replicas", level); return waitForRecovery(index); } @@ -282,10 +659,10 @@ public abstract class AbstractClient { if (client() == null) { return; } - if (!mappings().isEmpty()) { - for (Map.Entry me : mappings().entrySet()) { + if (!mappings.isEmpty()) { + for (Map.Entry me : mappings.entrySet()) { client().execute(PutMappingAction.INSTANCE, - new PutMappingRequest(index).type(me.getKey()).source(me.getValue())).actionGet(); + new PutMappingRequest(index).type(me.getKey()).source(me.getValue(), XContentType.JSON)).actionGet(); } } } @@ -332,25 +709,13 @@ public abstract class AbstractClient { return getFilters(getAliasesRequestBuilder.setIndices(index).execute().actionGet()); } - private Map getFilters(GetAliasesResponse getAliasesResponse) { - Map result = new HashMap<>(); - for (ObjectObjectCursor> object : getAliasesResponse.getAliases()) { - List aliasMetaDataList = object.value; - for (AliasMetaData aliasMetaData : aliasMetaDataList) { - if (aliasMetaData.filteringRequired()) { - result.put(aliasMetaData.alias(), new String(aliasMetaData.getFilter().uncompressed())); - } else { - result.put(aliasMetaData.alias(), null); - } - } - } - return result; - } + 
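// Editor's note (illustration only, not part of the patch): a minimal sketch of how the
// bulk-ingest methods defined above in AbstractClient are meant to be driven, assuming a
// hypothetical concrete subclass MyBulkClient that implements createClient(Settings):
//
//   ClientMethods ingest = new MyBulkClient()                          // hypothetical AbstractClient subclass
//           .maxActionsPerRequest(1000)                                // bulk limit before auto-submit
//           .init(null, Settings.builder()
//                           .put("host", "localhost").put("port", 9300).build(),
//                 new SimpleBulkMetric(), new SimpleBulkControl());    // client == null -> createClient(settings)
//   ingest.newIndex("docs");                                           // create the target index
//   ingest.index("docs", "doc", "1", true, "{\"title\":\"Hello\"}");   // queued on the internal BulkProcessor
//   ingest.flushIngest();                                              // push any pending actions
//   ingest.waitForResponses(TimeValue.timeValueSeconds(30));           // wait for outstanding bulk responses
//   ingest.shutdown();                                                 // close processor, stop metric, end bulk mode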
@Override public void switchAliases(String index, String concreteIndex, List extraAliases) { switchAliases(index, concreteIndex, extraAliases, null); } + @Override public void switchAliases(String index, String concreteIndex, List extraAliases, IndexAliasAdder adder) { if (client() == null) { @@ -413,6 +778,7 @@ public abstract class AbstractClient { } } + @Override public void performRetentionPolicy(String index, String concreteIndex, int timestampdiff, int mintokeep) { if (client() == null) { return; @@ -471,14 +837,15 @@ public abstract class AbstractClient { } } + @Override public Long mostRecentDocument(String index, String timestampfieldname) { if (client() == null) { return null; } SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client(), SearchAction.INSTANCE); - SortBuilder sort = SortBuilders.fieldSort(timestampfieldname).order(SortOrder.DESC); + SortBuilder sort = SortBuilders.fieldSort(timestampfieldname).order(SortOrder.DESC); SearchResponse searchResponse = searchRequestBuilder.setIndices(index) - .addField(timestampfieldname) + .addStoredField(timestampfieldname) .setSize(1) .addSort(sort) .execute().actionGet(); @@ -493,4 +860,68 @@ public abstract class AbstractClient { return null; } + @Override + public boolean hasThrowable() { + return throwable != null; + } + + @Override + public Throwable getThrowable() { + return throwable; + } + + protected static void throwClose() { + throw new ElasticsearchException("client is closed"); + } + + + protected void updateIndexSetting(String index, String key, Object value) throws IOException { + if (client() == null) { + return; + } + if (index == null) { + throw new IOException("no index name given"); + } + if (key == null) { + throw new IOException("no key given"); + } + if (value == null) { + throw new IOException("no value given"); + } + Settings.Builder updateSettingsBuilder = Settings.builder(); + updateSettingsBuilder.put(key, value.toString()); + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(index) + .settings(updateSettingsBuilder); + client().execute(UpdateSettingsAction.INSTANCE, updateSettingsRequest).actionGet(); + } + + private Map getFilters(GetAliasesResponse getAliasesResponse) { + Map result = new HashMap<>(); + for (ObjectObjectCursor> object : getAliasesResponse.getAliases()) { + List aliasMetaDataList = object.value; + for (AliasMetaData aliasMetaData : aliasMetaDataList) { + if (aliasMetaData.filteringRequired()) { + String metaData = new String(aliasMetaData.getFilter().uncompressed(), StandardCharsets.UTF_8); + result.put(aliasMetaData.alias(), metaData); + } else { + result.put(aliasMetaData.alias(), null); + } + } + } + return result; + } + + private Settings findSettings() { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put("host", "localhost"); + try { + String hostname = NetworkUtils.getLocalAddress().getHostName(); + logger.debug("the hostname is {}", hostname); + settingsBuilder.put("host", hostname) + .put("port", 9300); + } catch (Exception e) { + logger.warn(e.getMessage(), e); + } + return settingsBuilder.build(); + } } diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/BulkControl.java b/common/src/main/java/org/xbib/elasticsearch/client/BulkControl.java similarity index 89% rename from src/main/java/org/xbib/elasticsearch/extras/client/BulkControl.java rename to common/src/main/java/org/xbib/elasticsearch/client/BulkControl.java index 910f2f2..5fe6311 100644 --- 
a/src/main/java/org/xbib/elasticsearch/extras/client/BulkControl.java +++ b/common/src/main/java/org/xbib/elasticsearch/client/BulkControl.java @@ -1,4 +1,4 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client; import java.util.Map; import java.util.Set; @@ -18,5 +18,4 @@ public interface BulkControl { Map getStartBulkRefreshIntervals(); Map getStopBulkRefreshIntervals(); - } diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/BulkMetric.java b/common/src/main/java/org/xbib/elasticsearch/client/BulkMetric.java similarity index 89% rename from src/main/java/org/xbib/elasticsearch/extras/client/BulkMetric.java rename to common/src/main/java/org/xbib/elasticsearch/client/BulkMetric.java index a45e9c2..e7a60d2 100644 --- a/src/main/java/org/xbib/elasticsearch/extras/client/BulkMetric.java +++ b/common/src/main/java/org/xbib/elasticsearch/client/BulkMetric.java @@ -1,4 +1,4 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client; import org.xbib.metrics.Count; import org.xbib.metrics.Metered; @@ -27,5 +27,4 @@ public interface BulkMetric { void stop(); long elapsed(); - } diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/BulkProcessor.java b/common/src/main/java/org/xbib/elasticsearch/client/BulkProcessor.java similarity index 77% rename from src/main/java/org/xbib/elasticsearch/extras/client/BulkProcessor.java rename to common/src/main/java/org/xbib/elasticsearch/client/BulkProcessor.java index b32637e..7de2a3b 100644 --- a/src/main/java/org/xbib/elasticsearch/extras/client/BulkProcessor.java +++ b/common/src/main/java/org/xbib/elasticsearch/client/BulkProcessor.java @@ -1,18 +1,17 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.FutureUtils; import java.io.Closeable; import java.util.concurrent.Executors; @@ -30,9 +29,9 @@ import java.util.concurrent.atomic.AtomicLong; */ public class BulkProcessor implements Closeable { - private final int bulkActions; + private final int maximumBulkActionsPerRequest; - private final long bulkSize; + private final long maximumBulkRequestByteSize; private final ScheduledThreadPoolExecutor scheduler; @@ -40,26 +39,24 @@ public class BulkProcessor implements Closeable { private final AtomicLong executionIdGen = new AtomicLong(); - private final BulkRequestHandler bulkRequestHandler; + private final BulkExecutor bulkExecutor; private BulkRequest bulkRequest; private volatile boolean closed = false; - private BulkProcessor(Client client, Listener listener, String name, int concurrentRequests, - int bulkActions, ByteSizeValue bulkSize, TimeValue flushInterval) { - this.bulkActions = bulkActions; - this.bulkSize = bulkSize.bytes(); - + 
private BulkProcessor(ElasticsearchClient client, Listener listener, int maximumConcurrentBulkRequests, + int maximumBulkActionsPerRequest, ByteSizeValue maximumBulkRequestByteSize, + @Nullable TimeValue flushInterval) { + this.maximumBulkActionsPerRequest = maximumBulkActionsPerRequest; + this.maximumBulkRequestByteSize = maximumBulkRequestByteSize.getBytes(); this.bulkRequest = new BulkRequest(); - this.bulkRequestHandler = concurrentRequests == 0 ? - new SyncBulkRequestHandler(client, listener) : - new AsyncBulkRequestHandler(client, listener, concurrentRequests); + this.bulkExecutor = maximumConcurrentBulkRequests == 0 ? + new SyncBulkExecutor(client, listener) : + new AsyncBulkExecutor(client, listener, maximumConcurrentBulkRequests); if (flushInterval != null) { - this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, - EsExecutors.daemonThreadFactory(client.settings(), - name != null ? "[" + name + "]" : "" + "bulk_processor")); + this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1); this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false); this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(), @@ -70,7 +67,7 @@ public class BulkProcessor implements Closeable { } } - public static Builder builder(Client client, Listener listener) { + public static Builder builder(ElasticsearchClient client, Listener listener) { if (client == null) { throw new NullPointerException("The client you specified while building a BulkProcessor is null"); } @@ -110,13 +107,13 @@ public class BulkProcessor implements Closeable { } closed = true; if (this.scheduledFuture != null) { - FutureUtils.cancel(this.scheduledFuture); + this.scheduledFuture.cancel(false); this.scheduler.shutdown(); } if (bulkRequest.numberOfActions() > 0) { execute(); } - return this.bulkRequestHandler.awaitClose(timeout, unit); + return bulkExecutor.awaitClose(timeout, unit); } /** @@ -126,8 +123,16 @@ public class BulkProcessor implements Closeable { * @param request request * @return his bulk processor */ - public BulkProcessor add(IndexRequest request) { - return add((ActionRequest) request); + public synchronized BulkProcessor add(IndexRequest request) { + if (request == null) { + return this; + } + ensureOpen(); + bulkRequest.add(request); + if (isOverTheLimit()) { + execute(); + } + return this; } /** @@ -136,29 +141,33 @@ public class BulkProcessor implements Closeable { * @param request request * @return his bulk processor */ - public BulkProcessor add(DeleteRequest request) { - return add((ActionRequest) request); - } - - /** - * Adds either a delete or an index request. - * - * @param request request - * @return his bulk processor - */ - public BulkProcessor add(ActionRequest request) { - return add(request, null); + public synchronized BulkProcessor add(DeleteRequest request) { + if (request == null) { + return this; + } + ensureOpen(); + bulkRequest.add(request); + if (isOverTheLimit()) { + execute(); + } + return this; } /** - * Adds either a delete or an index request with a payload. + * Adds an {@link UpdateRequest} to the list of actions to execute. 
* * @param request request - * @param payload payload * @return his bulk processor */ - public BulkProcessor add(ActionRequest request, Object payload) { - internalAdd(request, payload); + public synchronized BulkProcessor add(UpdateRequest request) { + if (request == null) { + return this; + } + ensureOpen(); + bulkRequest.add(request); + if (isOverTheLimit()) { + execute(); + } return this; } @@ -168,32 +177,17 @@ public class BulkProcessor implements Closeable { } } - private synchronized void internalAdd(ActionRequest request, Object payload) { - ensureOpen(); - bulkRequest.add(request, payload); - executeIfNeeded(); - } - - private void executeIfNeeded() { - ensureOpen(); - if (!isOverTheLimit()) { - return; - } - execute(); + private boolean isOverTheLimit() { + final int count = bulkRequest.numberOfActions(); + return count > 0 && + (maximumBulkActionsPerRequest != -1 && count >= maximumBulkActionsPerRequest) || + (maximumBulkRequestByteSize != -1 && bulkRequest.estimatedSizeInBytes() >= maximumBulkRequestByteSize); } private void execute() { final BulkRequest myBulkRequest = this.bulkRequest; - final long executionId = executionIdGen.incrementAndGet(); + bulkExecutor.execute(myBulkRequest, executionIdGen.incrementAndGet()); this.bulkRequest = new BulkRequest(); - this.bulkRequestHandler.execute(myBulkRequest, executionId); - } - - private boolean isOverTheLimit() { - return bulkActions != -1 && - bulkRequest.numberOfActions() >= bulkActions || - bulkSize != -1 && - bulkRequest.estimatedSizeInBytes() >= bulkSize; } /** @@ -247,9 +241,8 @@ public class BulkProcessor implements Closeable { */ public static class Builder { - private final Client client; + private final ElasticsearchClient client; private final Listener listener; - private String name; private int concurrentRequests = 1; private int bulkActions = 1000; private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); @@ -262,22 +255,11 @@ public class BulkProcessor implements Closeable { * @param client the client * @param listener the listener */ - Builder(Client client, Listener listener) { + Builder(ElasticsearchClient client, Listener listener) { this.client = client; this.listener = listener; } - /** - * Sets an optional name to identify this bulk processor. - * - * @param name name - * @return this builder - */ - public Builder setName(String name) { - this.name = name; - return this; - } - /** * Sets the number of concurrent requests allowed to be executed. A value of 0 means that only a single * request will be allowed to be executed. A value of 1 means 1 concurrent request is allowed to be executed @@ -334,7 +316,7 @@ public class BulkProcessor implements Closeable { * @return a bulk processor */ public BulkProcessor build() { - return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval); + return new BulkProcessor(client, listener, concurrentRequests, bulkActions, bulkSize, flushInterval); } } @@ -346,18 +328,14 @@ public class BulkProcessor implements Closeable { if (closed) { return; } - if (bulkRequest.numberOfActions() == 0) { - return; + if (bulkRequest.numberOfActions() > 0) { + execute(); } - execute(); } } } - /** - * Abstracts the low-level details of bulk request handling. 
- */ - interface BulkRequestHandler { + interface BulkExecutor { void execute(BulkRequest bulkRequest, long executionId); @@ -365,11 +343,13 @@ public class BulkProcessor implements Closeable { } - private class SyncBulkRequestHandler implements BulkRequestHandler { - private final Client client; + private static class SyncBulkExecutor implements BulkExecutor { + + private final ElasticsearchClient client; + private final BulkProcessor.Listener listener; - SyncBulkRequestHandler(Client client, BulkProcessor.Listener listener) { + SyncBulkExecutor(ElasticsearchClient client, BulkProcessor.Listener listener) { this.client = client; this.listener = listener; } @@ -395,13 +375,17 @@ public class BulkProcessor implements Closeable { } } - private class AsyncBulkRequestHandler implements BulkRequestHandler { - private final Client client; + private static class AsyncBulkExecutor implements BulkExecutor { + + private final ElasticsearchClient client; + private final BulkProcessor.Listener listener; + private final Semaphore semaphore; + private final int concurrentRequests; - private AsyncBulkRequestHandler(Client client, BulkProcessor.Listener listener, int concurrentRequests) { + private AsyncBulkExecutor(ElasticsearchClient client, BulkProcessor.Listener listener, int concurrentRequests) { this.client = client; this.listener = listener; this.concurrentRequests = concurrentRequests; @@ -427,7 +411,7 @@ public class BulkProcessor implements Closeable { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { try { listener.afterBulk(executionId, bulkRequest, e); } finally { diff --git a/common/src/main/java/org/xbib/elasticsearch/client/ClientBuilder.java b/common/src/main/java/org/xbib/elasticsearch/client/ClientBuilder.java new file mode 100644 index 0000000..441c8e2 --- /dev/null +++ b/common/src/main/java/org/xbib/elasticsearch/client/ClientBuilder.java @@ -0,0 +1,104 @@ +package org.xbib.elasticsearch.client; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.ServiceLoader; + +/** + * + */ +public final class ClientBuilder implements Parameters { + + private final Settings.Builder settingsBuilder; + + private Map, ClientMethods> clientMethodsMap; + + private BulkMetric metric; + + private BulkControl control; + + public ClientBuilder() { + this(Thread.currentThread().getContextClassLoader()); + } + + public ClientBuilder(ClassLoader classLoader) { + this.settingsBuilder = Settings.builder(); + //settingsBuilder.put("node.name", "clientnode"); + this.clientMethodsMap = new HashMap<>(); + ServiceLoader serviceLoader = ServiceLoader.load(ClientMethods.class, + classLoader != null ? 
classLoader : Thread.currentThread().getContextClassLoader()); + for (ClientMethods clientMethods : serviceLoader) { + clientMethodsMap.put(clientMethods.getClass(), clientMethods); + } + } + + public static ClientBuilder builder() { + return new ClientBuilder(); + } + + public ClientBuilder put(String key, String value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, Integer value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, Long value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, Double value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, ByteSizeValue value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, TimeValue value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(Settings settings) { + settingsBuilder.put(settings); + return this; + } + + public ClientBuilder setMetric(BulkMetric metric) { + this.metric = metric; + return this; + } + + public ClientBuilder setControl(BulkControl control) { + this.control = control; + return this; + } + + public C getClient(Class clientClass) throws IOException { + return getClient(null, clientClass); + } + + @SuppressWarnings("unchecked") + public C getClient(Client client, Class clientClass) throws IOException { + Settings settings = settingsBuilder.build(); + return (C) clientMethodsMap.get(clientClass) + .maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST)) + .maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS)) + .maxVolumePerRequest(settings.getAsBytesSize(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST)) + .flushIngestInterval(settings.getAsTime(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL)) + .init(client, settings, metric, control); + } +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/ClientMethods.java b/common/src/main/java/org/xbib/elasticsearch/client/ClientMethods.java similarity index 83% rename from src/main/java/org/xbib/elasticsearch/extras/client/ClientMethods.java rename to common/src/main/java/org/xbib/elasticsearch/client/ClientMethods.java index a683b63..df199ce 100644 --- a/src/main/java/org/xbib/elasticsearch/extras/client/ClientMethods.java +++ b/common/src/main/java/org/xbib/elasticsearch/client/ClientMethods.java @@ -1,9 +1,10 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import java.io.IOException; @@ -17,45 +18,49 @@ import java.util.concurrent.ExecutionException; */ public interface ClientMethods extends Parameters { - /** - * Initialize new ingest client, wrap an existing Elasticsearch client, and set up metrics. 
- * - * @param client the Elasticsearch client - * @param metric metric - * @param control control - * @return this ingest - * @throws IOException if client could not get created - */ - ClientMethods init(ElasticsearchClient client, BulkMetric metric, BulkControl control) throws IOException; + ClientMethods init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control); /** - * Initialize, create new ingest client, and set up metrics. + * Return Elasticsearch client. * - * @param settings settings - * @param metric metric - * @param control control - * @return this ingest - * @throws IOException if client could not get created + * @return Elasticsearch client */ - ClientMethods init(Settings settings, BulkMetric metric, BulkControl control) throws IOException; + ElasticsearchClient client(); /** - * Return Elasticsearch client. + * Bulked index request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. * - * @return Elasticsearch client + * @param index the index + * @param type the type + * @param id the id + * @param create true if document must be created + * @param source the source + * @return this */ - ElasticsearchClient client(); + ClientMethods index(String index, String type, String id, boolean create, BytesReference source); /** - * Index document. + * Bulked index request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. * * @param index the index * @param type the type * @param id the id + * @param create true if document must be created * @param source the source * @return this */ - ClientMethods index(String index, String type, String id, String source); + ClientMethods index(String index, String type, String id, boolean create, String source); + + /** + * Bulked index request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * + * @param indexRequest the index request to add + * @return this ingest + */ + ClientMethods indexRequest(IndexRequest indexRequest); /** * Delete document. @@ -68,7 +73,31 @@ public interface ClientMethods extends Parameters { ClientMethods delete(String index, String type, String id); /** - * Update document. Use with precaution! Does not work in all cases. + * Bulked delete request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * + * @param deleteRequest the delete request to add + * @return this ingest + */ + ClientMethods deleteRequest(DeleteRequest deleteRequest); + + /** + * Bulked update request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * Note that updates only work correctly when all operations between nodes are synchronized. + * + * @param index the index + * @param type the type + * @param id the id + * @param source the source + * @return this + */ + ClientMethods update(String index, String type, String id, BytesReference source); + + /** + * Bulked update request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * Note that updates only work correctly when all operations between nodes are synchronized. 
* * @param index the index * @param type the type @@ -78,6 +107,16 @@ public interface ClientMethods extends Parameters { */ ClientMethods update(String index, String type, String id, String source); + /** + * Bulked update request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * Note that updates only work correctly when all operations between nodes are synchronized. + * + * @param updateRequest the update request to add + * @return this ingest + */ + ClientMethods updateRequest(UpdateRequest updateRequest); + /** * Set the maximum number of actions per request. * @@ -203,34 +242,6 @@ public interface ClientMethods extends Parameters { */ ClientMethods stopBulk(String index) throws IOException; - /** - * Bulked index request. Each request will be added to a queue for bulking requests. - * Submitting request will be done when bulk limits are exceeded. - * - * @param indexRequest the index request to add - * @return this ingest - */ - ClientMethods bulkIndex(IndexRequest indexRequest); - - /** - * Bulked delete request. Each request will be added to a queue for bulking requests. - * Submitting request will be done when bulk limits are exceeded. - * - * @param deleteRequest the delete request to add - * @return this ingest - */ - ClientMethods bulkDelete(DeleteRequest deleteRequest); - - /** - * Bulked update request. Each request will be added to a queue for bulking requests. - * Submitting request will be done when bulk limits are exceeded. - * Note that updates only work correctly when all operations between nodes are synchronized! - * - * @param updateRequest the update request to add - * @return this ingest - */ - ClientMethods bulkUpdate(UpdateRequest updateRequest); - /** * Flush ingest, move all pending documents to the cluster. * @@ -386,5 +397,5 @@ public interface ClientMethods extends Parameters { /** * Shutdown the ingesting. 
*/ - void shutdown(); + void shutdown() throws IOException; } diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/IndexAliasAdder.java b/common/src/main/java/org/xbib/elasticsearch/client/IndexAliasAdder.java similarity index 84% rename from src/main/java/org/xbib/elasticsearch/extras/client/IndexAliasAdder.java rename to common/src/main/java/org/xbib/elasticsearch/client/IndexAliasAdder.java index a659ab4..7c93d22 100644 --- a/src/main/java/org/xbib/elasticsearch/extras/client/IndexAliasAdder.java +++ b/common/src/main/java/org/xbib/elasticsearch/client/IndexAliasAdder.java @@ -1,4 +1,4 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/NetworkUtils.java b/common/src/main/java/org/xbib/elasticsearch/client/NetworkUtils.java similarity index 96% rename from src/main/java/org/xbib/elasticsearch/extras/client/NetworkUtils.java rename to common/src/main/java/org/xbib/elasticsearch/client/NetworkUtils.java index 9c5ffc2..847e414 100644 --- a/src/main/java/org/xbib/elasticsearch/extras/client/NetworkUtils.java +++ b/common/src/main/java/org/xbib/elasticsearch/client/NetworkUtils.java @@ -1,7 +1,7 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.io.IOException; import java.net.Inet4Address; @@ -11,6 +11,7 @@ import java.net.NetworkInterface; import java.net.SocketException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.Enumeration; import java.util.List; import java.util.Locale; @@ -20,7 +21,7 @@ import java.util.Locale; */ public class NetworkUtils { - private static final ESLogger logger = ESLoggerFactory.getLogger(NetworkUtils.class.getName()); + private static final Logger logger = LogManager.getLogger(NetworkUtils.class.getName()); private static final String IPV4_SETTING = "java.net.preferIPv4Stack"; @@ -234,7 +235,7 @@ public class NetworkUtils { } private static void sortInterfaces(List interfaces) { - Collections.sort(interfaces, (o1, o2) -> Integer.compare(o1.getIndex(), o2.getIndex())); + Collections.sort(interfaces, Comparator.comparingInt(NetworkInterface::getIndex)); } private static void sortAddresses(List addressList) { diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/Parameters.java b/common/src/main/java/org/xbib/elasticsearch/client/Parameters.java similarity index 91% rename from src/main/java/org/xbib/elasticsearch/extras/client/Parameters.java rename to common/src/main/java/org/xbib/elasticsearch/client/Parameters.java index d77ce24..a9dd7c8 100644 --- a/src/main/java/org/xbib/elasticsearch/extras/client/Parameters.java +++ b/common/src/main/java/org/xbib/elasticsearch/client/Parameters.java @@ -1,4 +1,4 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client; /** * @@ -20,5 +20,4 @@ public interface Parameters { String MAX_VOLUME_PER_REQUEST = "max_volume_per_request"; String FLUSH_INTERVAL = "flush_interval"; - } diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkControl.java b/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkControl.java similarity index 96% rename from 
src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkControl.java rename to common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkControl.java index b9a92d6..b8257f7 100644 --- a/src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkControl.java +++ b/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkControl.java @@ -1,4 +1,4 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client; import java.util.HashMap; import java.util.HashSet; diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkMetric.java b/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkMetric.java similarity index 53% rename from src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkMetric.java rename to common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkMetric.java index e836816..9b82444 100644 --- a/src/main/java/org/xbib/elasticsearch/extras/client/SimpleBulkMetric.java +++ b/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkMetric.java @@ -1,32 +1,53 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client; import org.xbib.metrics.Count; import org.xbib.metrics.CountMetric; import org.xbib.metrics.Meter; import org.xbib.metrics.Metered; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; + /** * */ public class SimpleBulkMetric implements BulkMetric { - private final Meter totalIngest = new Meter(); + private final ScheduledExecutorService executorService; - private final Count totalIngestSizeInBytes = new CountMetric(); + private final Meter totalIngest; - private final Count currentIngest = new CountMetric(); + private final Count totalIngestSizeInBytes; - private final Count currentIngestNumDocs = new CountMetric(); + private final Count currentIngest; - private final Count submitted = new CountMetric(); + private final Count currentIngestNumDocs; - private final Count succeeded = new CountMetric(); + private final Count submitted; - private final Count failed = new CountMetric(); + private final Count succeeded; + + private final Count failed; private Long started; private Long stopped; + public SimpleBulkMetric() { + this(Executors.newSingleThreadScheduledExecutor()); + } + + public SimpleBulkMetric(ScheduledExecutorService executorService) { + this.executorService = executorService; + totalIngest = new Meter(executorService); + totalIngestSizeInBytes = new CountMetric(); + currentIngest = new CountMetric(); + currentIngestNumDocs = new CountMetric(); + submitted = new CountMetric(); + succeeded = new CountMetric(); + failed = new CountMetric(); + } + @Override public Metered getTotalIngest() { return totalIngest; @@ -65,13 +86,14 @@ public class SimpleBulkMetric implements BulkMetric { @Override public void start() { this.started = System.nanoTime(); - totalIngest.spawn(5L); + totalIngest.start(5L); } @Override public void stop() { this.stopped = System.nanoTime(); totalIngest.stop(); + executorService.shutdownNow(); } @Override diff --git a/common/src/main/java/org/xbib/elasticsearch/client/package-info.java b/common/src/main/java/org/xbib/elasticsearch/client/package-info.java new file mode 100644 index 0000000..941a500 --- /dev/null +++ b/common/src/main/java/org/xbib/elasticsearch/client/package-info.java @@ -0,0 +1,4 @@ +/** + * Classes for Elasticsearch client. 
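(Aside, not part of the patch: a short sketch of wiring the reworked SimpleBulkMetric above to a shared scheduler; only the constructors, start() and stop() are taken from this hunk, the rest is illustrative.)

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

import org.xbib.elasticsearch.client.SimpleBulkMetric;

public final class BulkMetricSketch {

    public static void main(String[] args) {
        // The no-arg constructor creates its own single-threaded scheduler; here one is passed in explicitly.
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        SimpleBulkMetric metric = new SimpleBulkMetric(scheduler);

        metric.start();  // records the start timestamp and starts the total-ingest meter
        // ... hand "metric" to the bulk-processing client here ...
        metric.stop();   // records the stop timestamp, stops the meter and shuts the scheduler down
    }
}

Note that stop() calls shutdownNow() on whichever scheduler was supplied, so a shared executor should not be reused after the metric is stopped.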
+ */ +package org.xbib.elasticsearch.client; diff --git a/src/integration-test/java/org/xbib/elasticsearch/AliasTest.java b/common/src/test/java/org/xbib/elasticsearch/client/common/AliasTests.java similarity index 53% rename from src/integration-test/java/org/xbib/elasticsearch/AliasTest.java rename to common/src/test/java/org/xbib/elasticsearch/client/common/AliasTests.java index 970268e..37db0de 100644 --- a/src/integration-test/java/org/xbib/elasticsearch/AliasTest.java +++ b/common/src/test/java/org/xbib/elasticsearch/client/common/AliasTests.java @@ -1,22 +1,17 @@ -package org.xbib.elasticsearch; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +package org.xbib.elasticsearch.client.common; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.junit.Test; +import org.elasticsearch.test.ESSingleNodeTestCase; -import java.io.IOException; import java.util.Collections; import java.util.Iterator; import java.util.Set; @@ -24,62 +19,52 @@ import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; -/** - * - */ -public class AliasTest extends NodeTestUtils { +public class AliasTests extends ESSingleNodeTestCase { - private static final ESLogger logger = ESLoggerFactory.getLogger(AliasTest.class.getName()); + private static final Logger logger = LogManager.getLogger(AliasTests.class.getName()); - @Test - public void testAlias() throws IOException { + public void testAlias() { CreateIndexRequest indexRequest = new CreateIndexRequest("test"); - client("1").admin().indices().create(indexRequest).actionGet(); + client().admin().indices().create(indexRequest).actionGet(); // put alias IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - String[] indices = new String[]{"test"}; - String[] aliases = new String[]{"test_alias"}; - IndicesAliasesRequest.AliasActions aliasAction = - new IndicesAliasesRequest.AliasActions(AliasAction.Type.ADD, indices, aliases); - indicesAliasesRequest.addAliasAction(aliasAction); - client("1").admin().indices().aliases(indicesAliasesRequest).actionGet(); + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .index("test").alias("test_alias") + ); + client().admin().indices().aliases(indicesAliasesRequest).actionGet(); // get alias GetAliasesRequest getAliasesRequest = new GetAliasesRequest(Strings.EMPTY_ARRAY); long t0 = System.nanoTime(); - GetAliasesResponse getAliasesResponse = client("1").admin().indices().getAliases(getAliasesRequest).actionGet(); + GetAliasesResponse getAliasesResponse = client().admin().indices().getAliases(getAliasesRequest).actionGet(); long t1 = (System.nanoTime() - t0) / 1000000; logger.info("{} time(ms) = {}", getAliasesResponse.getAliases(), t1); assertTrue(t1 >= 0); } - 
@Test - public void testMostRecentIndex() throws IOException { + public void testMostRecentIndex() { String alias = "test"; CreateIndexRequest indexRequest = new CreateIndexRequest("test20160101"); - client("1").admin().indices().create(indexRequest).actionGet(); + client().admin().indices().create(indexRequest).actionGet(); indexRequest = new CreateIndexRequest("test20160102"); - client("1").admin().indices().create(indexRequest).actionGet(); + client().admin().indices().create(indexRequest).actionGet(); indexRequest = new CreateIndexRequest("test20160103"); - client("1").admin().indices().create(indexRequest).actionGet(); + client().admin().indices().create(indexRequest).actionGet(); IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - String[] indices = new String[]{"test20160101", "test20160102", "test20160103"}; - String[] aliases = new String[]{alias}; - IndicesAliasesRequest.AliasActions aliasAction = - new IndicesAliasesRequest.AliasActions(AliasAction.Type.ADD, indices, aliases); - indicesAliasesRequest.addAliasAction(aliasAction); - client("1").admin().indices().aliases(indicesAliasesRequest).actionGet(); + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .indices("test20160101", "test20160102", "test20160103") + .alias(alias) + ); + client().admin().indices().aliases(indicesAliasesRequest).actionGet(); - GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client("1"), + GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE); GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet(); Pattern pattern = Pattern.compile("^(.*?)(\\d+)$"); Set<String> result = new TreeSet<>(Collections.reverseOrder()); for (ObjectCursor<String> indexName : getAliasesResponse.getAliases().keys()) { Matcher m = pattern.matcher(indexName.value); - if (m.matches()) { - if (alias.equals(m.group(1))) { - result.add(indexName.value); - } + if (m.matches() && alias.equals(m.group(1))) { + result.add(indexName.value); } } Iterator<String> it = result.iterator(); @@ -88,5 +73,4 @@ public class AliasTest extends NodeTestUtils { assertEquals("test20160101", it.next()); logger.info("result={}", result); } - } diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/NetworkTest.java b/common/src/test/java/org/xbib/elasticsearch/client/common/NetworkTest.java similarity index 81% rename from src/integration-test/java/org/xbib/elasticsearch/extras/client/NetworkTest.java rename to common/src/test/java/org/xbib/elasticsearch/client/common/NetworkTest.java index b9e7a87..0ed4fc8 100644 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/NetworkTest.java +++ b/common/src/test/java/org/xbib/elasticsearch/client/common/NetworkTest.java @@ -1,4 +1,4 @@ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client.common; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -9,18 +9,21 @@ import java.net.NetworkInterface; import java.util.Collections; import java.util.Enumeration; -/** - * - */ public class NetworkTest { private static final Logger logger = LogManager.getLogger(NetworkTest.class); + /** + * Demonstrates the slowness of Java network interface lookup in certain environments. + * May be a killer for ES node startup - so avoid automatic traversal of NICs at all costs.
+ * + * @throws Exception if test fails + */ @Test public void testNetwork() throws Exception { Enumeration<NetworkInterface> nets = NetworkInterface.getNetworkInterfaces(); for (NetworkInterface netint : Collections.list(nets)) { - System.out.println("checking network interface = " + netint.getName()); + logger.info("checking network interface = " + netint.getName()); Enumeration<InetAddress> inetAddresses = netint.getInetAddresses(); for (InetAddress addr : Collections.list(inetAddresses)) { logger.info("found address = " + addr.getHostAddress() diff --git a/src/integration-test/java/org/xbib/elasticsearch/SearchTest.java b/common/src/test/java/org/xbib/elasticsearch/client/common/SearchTests.java similarity index 67% rename from src/integration-test/java/org/xbib/elasticsearch/SearchTest.java rename to common/src/test/java/org/xbib/elasticsearch/client/common/SearchTests.java index 8d1276a..09771c5 100644 --- a/src/integration-test/java/org/xbib/elasticsearch/SearchTest.java +++ b/common/src/test/java/org/xbib/elasticsearch/client/common/SearchTests.java @@ -1,37 +1,29 @@ -package org.xbib.elasticsearch; - -import static org.elasticsearch.client.Requests.indexRequest; -import static org.elasticsearch.client.Requests.refreshRequest; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +package org.xbib.elasticsearch.client.common; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.sort.SortOrder; -import org.junit.Test; +import org.elasticsearch.test.ESSingleNodeTestCase; -/** - * - */ -public class SearchTest extends NodeTestUtils { +public class SearchTests extends ESSingleNodeTestCase { - private static final ESLogger logger = ESLoggerFactory.getLogger("test"); + private static final Logger logger = LogManager.getLogger(SearchTests.class.getName()); - @Test public void testSearch() throws Exception { - Client client = client("1"); long t0 = System.currentTimeMillis(); - BulkRequestBuilder builder = new BulkRequestBuilder(client, BulkAction.INSTANCE); + BulkRequestBuilder builder = new BulkRequestBuilder(client(), BulkAction.INSTANCE); for (int i = 0; i < 1000; i++) { - builder.add(indexRequest() + builder.add(Requests.indexRequest() .index("pages").type("row") - .source(jsonBuilder() + .source(XContentFactory.jsonBuilder() .startObject() .field("user1", "kimchy") .field("user2", "kimchy") .field("user3", "kimchy") .field("user4", "kimchy") .field("user5", "kimchy") .field("user6", "kimchy") .field("user7", "kimchy")
System.currentTimeMillis(); QueryBuilder queryStringBuilder = QueryBuilders.queryStringQuery("rs:" + 1234); - SearchRequestBuilder requestBuilder = client.prepareSearch() + SearchRequestBuilder requestBuilder = client().prepareSearch() .setIndices("pages") .setTypes("row") .setQuery(queryStringBuilder) diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/SimpleTests.java b/common/src/test/java/org/xbib/elasticsearch/client/common/SimpleTests.java new file mode 100644 index 0000000..8b89df6 --- /dev/null +++ b/common/src/test/java/org/xbib/elasticsearch/client/common/SimpleTests.java @@ -0,0 +1,61 @@ +package org.xbib.elasticsearch.client.common; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.ESSingleNodeTestCase; + +public class SimpleTests extends ESSingleNodeTestCase { + + private static final Logger logger = LogManager.getLogger(SimpleTests.class.getName()); + + public void test() throws Exception { + try { + DeleteIndexRequestBuilder deleteIndexRequestBuilder = + new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, "test"); + deleteIndexRequestBuilder.execute().actionGet(); + } catch (Exception e) { + logger.warn(e.getMessage(), e); + } + CreateIndexRequestBuilder createIndexRequestBuilder = new CreateIndexRequestBuilder(client(), + CreateIndexAction.INSTANCE) + .setIndex("test") + .setSettings(Settings.builder() + .put("index.analysis.analyzer.default.filter.0", "lowercase") + // where is the trim token filter??? 
+ //.put("index.analysis.analyzer.default.filter.1", "trim") + .put("index.analysis.analyzer.default.tokenizer", "keyword") + .build()); + createIndexRequestBuilder.execute().actionGet(); + + IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(client(), IndexAction.INSTANCE); + indexRequestBuilder + .setIndex("test") + .setType("test") + .setId("1") + .setSource(XContentFactory.jsonBuilder().startObject().field("field", + "1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8").endObject()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .execute() + .actionGet(); + String doc = client().prepareSearch("test") + .setTypes("test") + .setQuery(QueryBuilders.matchQuery("field", + "1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8")) + .execute() + .actionGet() + .getHits().getAt(0).getSourceAsString(); + + assertEquals(doc, + "{\"field\":\"1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8\"}"); + } +} diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/WildcardTests.java b/common/src/test/java/org/xbib/elasticsearch/client/common/WildcardTests.java new file mode 100644 index 0000000..bb84bb9 --- /dev/null +++ b/common/src/test/java/org/xbib/elasticsearch/client/common/WildcardTests.java @@ -0,0 +1,51 @@ +package org.xbib.elasticsearch.client.common; + +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import java.io.IOException; + +public class WildcardTests extends ESSingleNodeTestCase { + + public void testWildcard() throws Exception { + index("1", "010"); + index("2", "0*0"); + // exact + validateCount(QueryBuilders.queryStringQuery("010").defaultField("field"), 1); + validateCount(QueryBuilders.queryStringQuery("0\\*0").defaultField("field"), 1); + // pattern + validateCount(QueryBuilders.queryStringQuery("0*0").defaultField("field"), 1); // 2? + validateCount(QueryBuilders.queryStringQuery("0?0").defaultField("field"), 1); // 2? + validateCount(QueryBuilders.queryStringQuery("0**0").defaultField("field"), 1); // 2? + validateCount(QueryBuilders.queryStringQuery("0??0").defaultField("field"), 0); + validateCount(QueryBuilders.queryStringQuery("*10").defaultField("field"), 1); + validateCount(QueryBuilders.queryStringQuery("*1*").defaultField("field"), 1); + validateCount(QueryBuilders.queryStringQuery("*\\*0").defaultField("field"), 0); // 1? + validateCount(QueryBuilders.queryStringQuery("*\\**").defaultField("field"), 0); // 1? 
+ } + + private void index(String id, String fieldValue) throws IOException { + client().index(Requests.indexRequest() + .index("index").type("type").id(id) + .source(XContentFactory.jsonBuilder().startObject().field("field", fieldValue).endObject()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)) + .actionGet(); + } + + private void validateCount(QueryBuilder queryBuilder, long expectedHits) { + final long actualHits = count(queryBuilder); + if (actualHits != expectedHits) { + throw new RuntimeException("actualHits=" + actualHits + ", expectedHits=" + expectedHits); + } + } + + private long count(QueryBuilder queryBuilder) { + return client().prepareSearch("index").setTypes("type") + .setQuery(queryBuilder) + .execute().actionGet().getHits().getTotalHits(); + } +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/package-info.java b/common/src/test/java/org/xbib/elasticsearch/client/common/package-info.java similarity index 52% rename from src/integration-test/java/org/xbib/elasticsearch/extras/client/package-info.java rename to common/src/test/java/org/xbib/elasticsearch/client/common/package-info.java index 2bfc45c..af3209f 100644 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/package-info.java +++ b/common/src/test/java/org/xbib/elasticsearch/client/common/package-info.java @@ -1,4 +1,4 @@ /** * Classes to test Elasticsearch clients. */ -package org.xbib.elasticsearch.extras.client; +package org.xbib.elasticsearch.client.common; diff --git a/src/integration-test/resources/log4j2.xml b/common/src/test/resources/log4j2.xml similarity index 100% rename from src/integration-test/resources/log4j2.xml rename to common/src/test/resources/log4j2.xml diff --git a/gradle.properties b/gradle.properties index 3f32f32..24bd424 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,3 +1,30 @@ -group = org.xbib -name = elasticsearch-extras-client -version = 2.2.1.2 +group = org.xbib.elasticsearch +name = elasticsearch-client +version = 6.3.2.0 +profile = default +release = 0 + +elasticsearch.version = 6.3.2 +lucene.version = 7.3.1 + +netty.version = 4.1.29.Final +tcnative.version = 2.0.15.Final +alpnagent.version = 2.0.7 +xbib-netty-http-client.version = 4.1.29.0 +xbib-metrics.version = 1.1.0 + +# elasticsearch build plugin +elasticsearch-libs.version = 6.3.2.1 +elasticsearch-devkit.version = 6.3.2.4 +spatial4j.version = 0.7 +jts.version = 1.15.1 +jna.version = 4.5.2 +log4j.version = 2.11.1 +checkstyle.version = 8.13 + +# test +junit.version = 4.12 +wagon.version = 3.0.0 +asciidoclet.version = 1.6.0.0 + +org.gradle.warning.mode=all diff --git a/gradle/ext.gradle b/gradle/ext.gradle index 7bb7c73..e69de29 100644 --- a/gradle/ext.gradle +++ b/gradle/ext.gradle @@ -1,8 +0,0 @@ -ext { - user = 'xbib' - name = 'elasticsearch-extras-client' - description = 'Some extras implemented for using Elasticsearch clients (node and transport)' - scmUrl = 'https://github.com/' + user + '/' + name - scmConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git' - scmDeveloperConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git' -} diff --git a/gradle/publish.gradle b/gradle/publish.gradle index 0337849..c05a223 100644 --- a/gradle/publish.gradle +++ b/gradle/publish.gradle @@ -6,7 +6,7 @@ task xbibUpload(type: Upload) { if (project.hasProperty("xbibUsername")) { mavenDeployer { configuration = configurations.wagon - repository(url: 'scpexe://xbib.org/repository') { + repository(url: 'sftp://xbib.org/repository') { 
authentication(userName: xbibUsername, privateKey: xbibPrivateKey) } } diff --git a/gradle/sonarqube.gradle b/gradle/sonarqube.gradle index 5de408d..d759e4c 100644 --- a/gradle/sonarqube.gradle +++ b/gradle/sonarqube.gradle @@ -1,8 +1,8 @@ tasks.withType(FindBugs) { ignoreFailures = true reports { - xml.enabled = true - html.enabled = false + xml.enabled = false + html.enabled = true } } tasks.withType(Pmd) { @@ -20,22 +20,11 @@ tasks.withType(Checkstyle) { } } -jacocoTestReport { - reports { - xml.enabled true - csv.enabled false - xml.destination "${buildDir}/reports/jacoco-xml" - html.destination "${buildDir}/reports/jacoco-html" - } -} - sonarqube { properties { property "sonar.projectName", "${project.group} ${project.name}" property "sonar.sourceEncoding", "UTF-8" - property "sonar.tests", "src/integration-test/java" property "sonar.scm.provider", "git" - property "sonar.java.coveragePlugin", "jacoco" property "sonar.junit.reportsPath", "build/test-results/test/" } } diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 27b5466..6b3851a 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,5 @@ -#Tue Jan 03 14:13:22 CET 2017 distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-5.1-bin.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-3.2.1-all.zip diff --git a/gradlew b/gradlew index 4453cce..2c74879 100755 --- a/gradlew +++ b/gradlew @@ -28,16 +28,16 @@ APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS="" +DEFAULT_JVM_OPTS='"-Xmx64m"' # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD="maximum" -warn ( ) { +warn () { echo "$*" } -die ( ) { +die () { echo echo "$*" echo @@ -155,7 +155,7 @@ if $cygwin ; then fi # Escape application args -save ( ) { +save () { for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done echo " " } diff --git a/gradlew.bat b/gradlew.bat index e95643d..0f8d593 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -14,7 +14,7 @@ set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-set DEFAULT_JVM_OPTS= +set DEFAULT_JVM_OPTS="-Xmx64m" @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome diff --git a/http/build.gradle b/http/build.gradle new file mode 100644 index 0000000..f5d73a1 --- /dev/null +++ b/http/build.gradle @@ -0,0 +1,65 @@ +buildscript { + repositories { + jcenter() + maven { + url 'http://xbib.org/repository' + } + } + dependencies { + classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4" + } +} + +apply plugin: 'org.xbib.gradle.plugin.elasticsearch.build' + +configurations { + main + tests +} + +dependencies { + compile project(':common') + compile "org.xbib:netty-http-client:${project.property('xbib-netty-http-client.version')}" + testCompile "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" + testRuntime "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" +} + +jar { + baseName "${rootProject.name}-common" +} + +/* +task testJar(type: Jar, dependsOn: testClasses) { + baseName = "${project.archivesBaseName}-tests" + from sourceSets.test.output +} +*/ + +artifacts { + main jar + tests testJar + archives sourcesJar, javadocJar +} + +test { + enabled = true + include '**/SimpleTest.*' + testLogging { + showStandardStreams = true + exceptionFormat = 'full' + } +} + +randomizedTest { + enabled = false +} + +esTest { + enabled = true + // test with the jars, not the classes, for security manager + // classpath = files(configurations.testRuntime) + configurations.main.artifacts.files + configurations.tests.artifacts.files + systemProperty 'tests.security.manager', 'true' + // maybe we like some extra security policy for our code + systemProperty 'tests.security.policy', '/extra-security.policy' +} +esTest.dependsOn jar, testJar diff --git a/http/config/checkstyle/checkstyle.xml b/http/config/checkstyle/checkstyle.xml new file mode 100644 index 0000000..7af8d6d --- /dev/null +++ b/http/config/checkstyle/checkstyle.xml @@ -0,0 +1,323 @@ [323 added lines of Checkstyle rule definitions; the XML markup was stripped when this diff was rendered] diff --git a/http/src/docs/asciidoc/css/foundation.css b/http/src/docs/asciidoc/css/foundation.css new file mode 100644 index 0000000..27be611 --- /dev/null +++ b/http/src/docs/asciidoc/css/foundation.css @@ -0,0 +1,684 @@ +/*! normalize.css v2.1.2 | MIT License | git.io/normalize */ +/* ========================================================================== HTML5 display definitions ========================================================================== */ +/** Correct `block` display not defined in IE 8/9. */ +article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; } + +/** Correct `inline-block` display not defined in IE 8/9. */ +audio, canvas, video { display: inline-block; } + +/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */ +audio:not([controls]) { display: none; height: 0; } + +/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22.
*/ +[hidden], template { display: none; } + +script { display: none !important; } + +/* ========================================================================== Base ========================================================================== */ +/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */ +html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ } + +/** Remove default margin. */ +body { margin: 0; } + +/* ========================================================================== Links ========================================================================== */ +/** Remove the gray background color from active links in IE 10. */ +a { background: transparent; } + +/** Address `outline` inconsistency between Chrome and other browsers. */ +a:focus { outline: thin dotted; } + +/** Improve readability when focused and also mouse hovered in all browsers. */ +a:active, a:hover { outline: 0; } + +/* ========================================================================== Typography ========================================================================== */ +/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */ +h1 { font-size: 2em; margin: 0.67em 0; } + +/** Address styling not present in IE 8/9, Safari 5, and Chrome. */ +abbr[title] { border-bottom: 1px dotted; } + +/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ +b, strong { font-weight: bold; } + +/** Address styling not present in Safari 5 and Chrome. */ +dfn { font-style: italic; } + +/** Address differences between Firefox and other browsers. */ +hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; } + +/** Address styling not present in IE 8/9. */ +mark { background: #ff0; color: #000; } + +/** Correct font family set oddly in Safari 5 and Chrome. */ +code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; } + +/** Improve readability of pre-formatted text in all browsers. */ +pre { white-space: pre-wrap; } + +/** Set consistent quote types. */ +q { quotes: "\201C" "\201D" "\2018" "\2019"; } + +/** Address inconsistent and variable font size in all browsers. */ +small { font-size: 80%; } + +/** Prevent `sub` and `sup` affecting `line-height` in all browsers. */ +sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } + +sup { top: -0.5em; } + +sub { bottom: -0.25em; } + +/* ========================================================================== Embedded content ========================================================================== */ +/** Remove border when inside `a` element in IE 8/9. */ +img { border: 0; } + +/** Correct overflow displayed oddly in IE 9. */ +svg:not(:root) { overflow: hidden; } + +/* ========================================================================== Figures ========================================================================== */ +/** Address margin not present in IE 8/9 and Safari 5. */ +figure { margin: 0; } + +/* ========================================================================== Forms ========================================================================== */ +/** Define consistent border, margin, and padding. */ +fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } + +/** 1. Correct `color` not being inherited in IE 8/9. 2. 
Remove padding so people aren't caught out if they zero out fieldsets. */ +legend { border: 0; /* 1 */ padding: 0; /* 2 */ } + +/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ +button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ } + +/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ +button, input { line-height: normal; } + +/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */ +button, select { text-transform: none; } + +/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */ +button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ } + +/** Re-set default cursor for disabled elements. */ +button[disabled], html input[disabled] { cursor: default; } + +/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ +input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } + +/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */ +input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; } + +/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ +input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } + +/** Remove inner padding and border in Firefox 4+. */ +button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } + +/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */ +textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ } + +/* ========================================================================== Tables ========================================================================== */ +/** Remove most spacing between table cells. 
*/ +table { border-collapse: collapse; border-spacing: 0; } + +meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; } + +meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; } + +meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; } + +*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } + +html, body { font-size: 100%; } + +body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; } + +a:hover { cursor: pointer; } + +img, object, embed { max-width: 100%; height: auto; } + +object, embed { height: 100%; } + +img { -ms-interpolation-mode: bicubic; } + +#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; } + +.left { float: left !important; } + +.right { float: right !important; } + +.text-left { text-align: left !important; } + +.text-right { text-align: right !important; } + +.text-center { text-align: center !important; } + +.text-justify { text-align: justify !important; } + +.hide { display: none; } + +.antialiased { -webkit-font-smoothing: antialiased; } + +img { display: inline-block; vertical-align: middle; } + +textarea { height: auto; min-height: 50px; } + +select { width: 100%; } + +object, svg { display: inline-block; vertical-align: middle; } + +.center { margin-left: auto; margin-right: auto; } + +.spread { width: 100%; } + +p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; } + +.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; } + +/* Typography resets */ +div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; } + +/* Default Link Styles */ +a { color: #2ba6cb; text-decoration: none; line-height: inherit; } +a:hover, a:focus { color: #2795b6; } +a img { border: none; } + +/* Default paragraph styles */ +p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } +p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } + +/* Default header styles */ +h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; } +h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; } + +h1 { font-size: 2.125em; } + +h2 { font-size: 1.6875em; } + +h3, #toctitle, .sidebarblock > .content > 
.title { font-size: 1.375em; } + +h4 { font-size: 1.125em; } + +h5 { font-size: 1.125em; } + +h6 { font-size: 1em; } + +hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; } + +/* Helpful Typography Defaults */ +em, i { font-style: italic; line-height: inherit; } + +strong, b { font-weight: bold; line-height: inherit; } + +small { font-size: 60%; line-height: inherit; } + +code { font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; } + +/* Lists */ +ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } + +ul, ol { margin-left: 1.5em; } +ul.no-bullet, ol.no-bullet { margin-left: 1.5em; } + +/* Unordered Lists */ +ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ } +ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; } +ul.square { list-style-type: square; } +ul.circle { list-style-type: circle; } +ul.disc { list-style-type: disc; } +ul.no-bullet { list-style: none; } + +/* Ordered Lists */ +ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } + +/* Definition Lists */ +dl dt { margin-bottom: 0.3125em; font-weight: bold; } +dl dd { margin-bottom: 1.25em; } + +/* Abbreviations */ +abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; } + +abbr { text-transform: none; } + +/* Blockquotes */ +blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; } +blockquote cite { display: block; font-size: 0.8125em; color: #555555; } +blockquote cite:before { content: "\2014 \0020"; } +blockquote cite a, blockquote cite a:visited { color: #555555; } + +blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; } + +/* Microformats */ +.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; } +.vcard li { margin: 0; display: block; } +.vcard .fn { font-weight: bold; font-size: 0.9375em; } + +.vevent .summary { font-weight: bold; } +.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; } + +@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } + h1 { font-size: 2.75em; } + h2 { font-size: 2.3125em; } + h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } + h4 { font-size: 1.4375em; } } +/* Tables */ +table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; } +table thead, table tfoot { background: whitesmoke; font-weight: bold; } +table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; } +table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; } +table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; } +table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; } + +body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; } + +h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } + +.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; } +.clearfix:after, 
.float-group:after { clear: both; } + +*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; } +*:not(pre) > code.nobreak { word-wrap: normal; } +*:not(pre) > code.nowrap { white-space: nowrap; } + +pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; } + +em em { font-style: normal; } + +strong strong { font-weight: normal; } + +.keyseq { color: #555555; } + +kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } + +.keyseq kbd:first-child { margin-left: 0; } + +.keyseq kbd:last-child { margin-right: 0; } + +.menuseq, .menu { color: #090909; } + +b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; } + +b.button:before { content: "["; padding: 0 3px 0 2px; } + +b.button:after { content: "]"; padding: 0 2px 0 3px; } + +#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } +#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; } +#header:after, #content:after, #footnotes:after, #footer:after { clear: both; } + +#content { margin-top: 1.25em; } + +#content:before { content: none; } + +#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; } +#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; } +#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; } +#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; } +#header .details span:first-child { margin-left: -0.125em; } +#header .details span.email a { color: #6f6f6f; } +#header .details br { display: none; } +#header .details br + span:before { content: "\00a0\2013\00a0"; } +#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; } +#header .details br + span#revremark:before { content: "\00a0|\00a0"; } +#header #revnumber { text-transform: capitalize; } +#header #revnumber:after { content: "\00a0"; } + +#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } + +#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; } +#toc > ul { margin-left: 0.125em; } +#toc ul.sectlevel0 > li > a { font-style: italic; } +#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } +#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; } +#toc li { line-height: 1.3334; margin-top: 0.3334em; } +#toc a { text-decoration: 
none; } +#toc a:active { text-decoration: underline; } + +#toctitle { color: #6f6f6f; font-size: 1.2em; } + +@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } + body.toc2 { padding-left: 15em; padding-right: 0; } + #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } + #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } + #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } + #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } + #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } + body.toc2.toc-right { padding-left: 0; padding-right: 15em; } + body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } } +@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } + #toc.toc2 { width: 20em; } + #toc.toc2 #toctitle { font-size: 1.375em; } + #toc.toc2 > ul { font-size: 0.95em; } + #toc.toc2 ul ul { padding-left: 1.25em; } + body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } +#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } +#content #toc > :first-child { margin-top: 0; } +#content #toc > :last-child { margin-bottom: 0; } + +#footer { max-width: 100%; background-color: #222222; padding: 1.25em; } + +#footer-text { color: #dddddd; line-height: 1.44; } + +.sect1 { padding-bottom: 0.625em; } + +@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } } +.sect1 + .sect1 { border-top: 1px solid #dddddd; } + +#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; } +#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } +#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } +#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; } +#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; } + +.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } + 
+.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; } + +table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; } + +.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; } + +table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; } + +.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } +.admonitionblock > table td.icon { text-align: center; width: 80px; } +.admonitionblock > table td.icon img { max-width: initial; } +.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; } +.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; } +.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } + +.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; } +.exampleblock > .content > :first-child { margin-top: 0; } +.exampleblock > .content > :last-child { margin-bottom: 0; } + +.sidebarblock { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } +.sidebarblock > :first-child { margin-top: 0; } +.sidebarblock > :last-child { margin-bottom: 0; } +.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; } + +.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } + +.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; } +.sidebarblock .literalblock pre, .sidebarblock .listingblock pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; } + +.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; } +.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; } +@media only screen and 
(min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } } +@media only screen and (min-width: 1280px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } } + +.literalblock.output pre { color: #eeeeee; background-color: black; } + +.listingblock pre.highlightjs { padding: 0; } +.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; } + +.listingblock > .content { position: relative; } + +.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; } + +.listingblock:hover code[data-lang]:before { display: block; } + +.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; } + +.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; } + +table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } + +table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; } + +table.pyhltable td.code { padding-left: .75em; padding-right: 0; } + +pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; } + +pre.pygments .lineno { display: inline-block; margin-right: .25em; } + +table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; } + +.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } +.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; } +.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } +.quoteblock blockquote { margin: 0; padding: 0; border: 0; } +.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); } +.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; } +.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; } +.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; } +.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; } +.quoteblock .quoteblock blockquote:before { display: none; } + +.verseblock { margin: 0 1em 1.25em 1em; } +.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; } +.verseblock pre strong { font-weight: 400; } +.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } + +.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; } +.quoteblock .attribution br, .verseblock .attribution br { display: none; } +.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; } + +.quoteblock.abstract { margin: 0 0 1.25em 0; display: block; } +.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; } +.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; } + 
+table.tableblock { max-width: 100%; border-collapse: separate; } +table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; } + +table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; } + +table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; } + +table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; } + +table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; } + +table.grid-all * > tr > .tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; } + +table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; } + +table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; } + +table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; } + +table.frame-all { border-width: 1px; } + +table.frame-sides { border-width: 0 1px; } + +table.frame-topbot { border-width: 1px 0; } + +th.halign-left, td.halign-left { text-align: left; } + +th.halign-right, td.halign-right { text-align: right; } + +th.halign-center, td.halign-center { text-align: center; } + +th.valign-top, td.valign-top { vertical-align: top; } + +th.valign-bottom, td.valign-bottom { vertical-align: bottom; } + +th.valign-middle, td.valign-middle { vertical-align: middle; } + +table thead th, table tfoot th { font-weight: bold; } + +tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; } + +tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; } + +p.tableblock > code:only-child { background: none; padding: 0; } + +p.tableblock { font-size: 1em; } + +td > div.verse { white-space: pre; } + +ol { margin-left: 1.75em; } + +ul li ol { margin-left: 1.5em; } + +dl dd { margin-left: 1.125em; } + +dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } + +ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } + +ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; } + +ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; } + +ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; } + +ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; } + +ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; } +ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; } +ul.inline > li > * { display: block; } + +.unstyled dl dt { font-weight: normal; font-style: normal; } + +ol.arabic { list-style-type: decimal; } + +ol.decimal { list-style-type: decimal-leading-zero; } + +ol.loweralpha { list-style-type: lower-alpha; } + +ol.upperalpha { list-style-type: upper-alpha; } + +ol.lowerroman { list-style-type: lower-roman; } + +ol.upperroman { list-style-type: upper-roman; } + 
+ol.lowergreek { list-style-type: lower-greek; } + +.hdlist > table, .colist > table { border: 0; background: none; } +.hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; } + +td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } + +td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } + +.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } + +.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; } +.colist > table tr > td:first-of-type img { max-width: initial; } +.colist > table tr > td:last-of-type { padding: 0.25em 0; } + +.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; } + +.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; } +.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; } +.imageblock > .title { margin-bottom: 0; } +.imageblock.thumb, .imageblock.th { border-width: 6px; } +.imageblock.thumb > .title, .imageblock.th > .title { padding: 0 0.125em; } + +.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } +.image.left { margin-right: 0.625em; } +.image.right { margin-left: 0.625em; } + +a.image { text-decoration: none; display: inline-block; } +a.image object { pointer-events: none; } + +sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } +sup.footnote a, sup.footnoteref a { text-decoration: none; } +sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } + +#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } +#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; } +#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; } +#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; } +#footnotes .footnote:last-of-type { margin-bottom: 0; } +#content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } + +.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; } +.gist .file-data > table td.line-data { width: 99%; } + +div.unbreakable { page-break-inside: avoid; } + +.big { font-size: larger; } + +.small { font-size: smaller; } + +.underline { text-decoration: underline; } + +.overline { text-decoration: overline; } + +.line-through { text-decoration: line-through; } + +.aqua { color: #00bfbf; } + +.aqua-background { background-color: #00fafa; } + +.black { color: black; } + +.black-background { background-color: black; } + +.blue { color: #0000bf; } + +.blue-background { background-color: #0000fa; } + +.fuchsia { color: #bf00bf; } + +.fuchsia-background { background-color: #fa00fa; } + +.gray { color: #606060; } + +.gray-background { background-color: #7d7d7d; } + +.green { color: #006000; } + +.green-background { background-color: #007d00; } + +.lime { color: #00bf00; } + +.lime-background { background-color: #00fa00; } + +.maroon { color: #600000; } + +.maroon-background { background-color: #7d0000; } + +.navy { color: #000060; } + +.navy-background { background-color: #00007d; } + +.olive { color: #606000; } + +.olive-background { background-color: #7d7d00; } + +.purple { color: #600060; } + +.purple-background { background-color: #7d007d; } + +.red 
{ color: #bf0000; } + +.red-background { background-color: #fa0000; } + +.silver { color: #909090; } + +.silver-background { background-color: #bcbcbc; } + +.teal { color: #006060; } + +.teal-background { background-color: #007d7d; } + +.white { color: #bfbfbf; } + +.white-background { background-color: #fafafa; } + +.yellow { color: #bfbf00; } + +.yellow-background { background-color: #fafa00; } + +span.icon > .fa { cursor: default; } + +.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } +.admonitionblock td.icon .icon-note:before { content: "\f05a"; color: #207c98; } +.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } +.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; } +.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; } +.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; } + +.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } +.conum[data-value] * { color: #fff !important; } +.conum[data-value] + b { display: none; } +.conum[data-value]:after { content: attr(data-value); } +pre .conum[data-value] { position: relative; top: -0.125em; } + +b.conum * { color: inherit !important; } + +.conum:not([data-value]):empty { display: none; } + +.literalblock pre, .listingblock pre { background: #eeeeee; } diff --git a/http/src/docs/asciidoclet/overview.adoc b/http/src/docs/asciidoclet/overview.adoc new file mode 100644 index 0000000..7947331 --- /dev/null +++ b/http/src/docs/asciidoclet/overview.adoc @@ -0,0 +1,4 @@ += Elasticsearch Java client +Jörg Prante +Version 5.4.0.0 + diff --git a/http/src/main/java/org/elasticsearch/action/admin/cluster/node/info/HttpNodesInfoAction.java b/http/src/main/java/org/elasticsearch/action/admin/cluster/node/info/HttpNodesInfoAction.java new file mode 100644 index 0000000..e50358b --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/admin/cluster/node/info/HttpNodesInfoAction.java @@ -0,0 +1,167 @@ +package org.elasticsearch.action.admin.cluster.node.info; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.http.HttpInfo; +import org.elasticsearch.ingest.IngestInfo; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.monitor.os.OsInfo; +import org.elasticsearch.monitor.process.ProcessInfo; +import org.elasticsearch.threadpool.ThreadPoolInfo; +import org.elasticsearch.transport.TransportInfo; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elasticsearch.client.http.HttpActionContext; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.InetAddress; +import java.util.Collections; +import 
java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * + */ +public class HttpNodesInfoAction extends HttpAction { + + @Override + public NodesInfoAction getActionInstance() { + return NodesInfoAction.INSTANCE; + } + + /** + * Endpoint "/_nodes/{nodeId}/{metrics}" + * + * @param url url + * @param request request + * @return HTTP request + */ + @Override + protected RequestBuilder createHttpRequest(String url, NodesInfoRequest request) { + StringBuilder path = new StringBuilder("/_nodes"); + if (request.nodesIds() != null) { + String nodeIds = String.join(",", request.nodesIds()); + if (nodeIds.length() > 0) { + path.append("/").append(nodeIds); + } + } else { + path.append("/_all"); + } + List metrics = new LinkedList<>(); + if (request.http()) { + metrics.add("http"); + } + if (request.jvm()) { + metrics.add("jvm"); + } + if (request.os()) { + metrics.add("os"); + } + if (request.plugins()) { + metrics.add("plugins"); + } + if (request.process()) { + metrics.add("process"); + } + if (request.settings()) { + metrics.add("settings"); + } + if (request.threadPool()) { + metrics.add("thread_pool"); + } + if (request.transport()) { + metrics.add("transport"); + } + if (!metrics.isEmpty()) { + path.append("/").append(String.join(",", metrics)); + } + return newGetRequest(url, path.toString()); + } + + @Override + protected CheckedFunction entityParser() { + throw new UnsupportedOperationException(); + } + + @SuppressWarnings("unchecked") + protected NodesInfoResponse createResponse(HttpActionContext httpContext) { + Map map = null; + + String string = (String)map.get("cluster_name"); + ClusterName clusterName = new ClusterName(string); + List nodeInfoList = new LinkedList<>(); + map = (Map)map.get("nodes"); + for (Map.Entry entry : map.entrySet()) { + String nodeId = entry.getKey(); + String ephemeralId = null; + Map map2 = (Map) entry.getValue(); + String nodeName = (String)map2.get("name"); + String hostName = (String)map2.get("host"); + String hostAddress = (String)map2.get("ip"); + // [/][:] + String transportAddressString = (String)map2.get("transport_address"); + int pos = transportAddressString.indexOf(':'); + String host = pos > 0 ? transportAddressString.substring(0, pos) : transportAddressString; + int port = Integer.parseInt(pos > 0 ? transportAddressString.substring(pos + 1) : "0"); + pos = host.indexOf('/'); + host = pos > 0 ? host.substring(0, pos) : host; + try { + InetAddress[] inetAddresses = InetAddress.getAllByName(host); + TransportAddress transportAddress = new TransportAddress(inetAddresses[0], port); + Build build = new Build(Build.Flavor.OSS, Build.Type.TAR, + (String) map2.get("build"), + (String)map2.get("date"), + (Boolean)map2.get("snapshot")); + Map attributes = Collections.emptyMap(); + Set roles = new HashSet<>(); + Version version = Version.fromString((String) map2.get("version")); + DiscoveryNode discoveryNode = new DiscoveryNode(nodeName, nodeId, ephemeralId, hostName, hostAddress, + transportAddress, + attributes, roles, version); + /*Map settingsMap = map2.containsKey("settings") ? + XContentHelper. 
+ SettingsLoader.Helper.loadNestedFromMap((Map) map2.get("settings")) : + Collections.emptyMap(); + + Settings settings = Settings.builder() + + .put(settingsMap) + .build();*/ + OsInfo os = null; + ProcessInfo processInfo = null; + JvmInfo jvmInfo = null; + ThreadPoolInfo threadPoolInfo = null; + TransportInfo transportInfo = null; + HttpInfo httpInfo = null; + PluginsAndModules pluginsAndModules = null; + IngestInfo ingestInfo = null; + ByteSizeValue totalIndexingBuffer = null; + NodeInfo nodeInfo = new NodeInfo(version, + build, + discoveryNode, + //serviceAttributes, + //settings, + null, + os, processInfo, jvmInfo, threadPoolInfo, transportInfo, httpInfo, pluginsAndModules, + ingestInfo, + totalIndexingBuffer); + nodeInfoList.add(nodeInfo); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + List failures = null; + return new NodesInfoResponse(clusterName, nodeInfoList, failures); + } +} diff --git a/http/src/main/java/org/elasticsearch/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java b/http/src/main/java/org/elasticsearch/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java new file mode 100644 index 0000000..b66675c --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java @@ -0,0 +1,49 @@ +package org.elasticsearch.action.admin.cluster.settings; + +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; +import java.io.UncheckedIOException; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * + */ +public class HttpClusterUpdateSettingsAction extends HttpAction { + + @Override + public ClusterUpdateSettingsAction getActionInstance() { + return ClusterUpdateSettingsAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, ClusterUpdateSettingsRequest request) { + try { + XContentBuilder builder = jsonBuilder(); + builder.startObject().startObject("persistent"); + request.persistentSettings().toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.startObject("transient"); + request.transientSettings().toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject().endObject(); + return newPutRequest(url, "/_cluster/settings", BytesReference.bytes(builder)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Override + protected CheckedFunction entityParser() { + return parser -> { + // TODO(jprante) + return new ClusterUpdateSettingsResponse(); + }; + } +} diff --git a/http/src/main/java/org/elasticsearch/action/admin/indices/create/HttpCreateIndexAction.java b/http/src/main/java/org/elasticsearch/action/admin/indices/create/HttpCreateIndexAction.java new file mode 100644 index 0000000..da64f8b --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/admin/indices/create/HttpCreateIndexAction.java @@ -0,0 +1,35 @@ +package org.elasticsearch.action.admin.indices.create; + +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; 
+import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +public class HttpCreateIndexAction extends HttpAction { + + @Override + public CreateIndexAction getActionInstance() { + return CreateIndexAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, CreateIndexRequest createIndexRequest) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder = createIndexRequest.toXContent(builder, ToXContent.EMPTY_PARAMS); + return newPutRequest(url, "/" + createIndexRequest.index(), BytesReference.bytes(builder)); + } + + @Override + protected CheckedFunction entityParser() { + return parser -> { + // TODO(jprante) build real create index response + return new CreateIndexResponse(); + }; + } +} diff --git a/http/src/main/java/org/elasticsearch/action/admin/indices/refresh/HttpRefreshIndexAction.java b/http/src/main/java/org/elasticsearch/action/admin/indices/refresh/HttpRefreshIndexAction.java new file mode 100644 index 0000000..88f76ea --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/admin/indices/refresh/HttpRefreshIndexAction.java @@ -0,0 +1,30 @@ +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +/** + * + */ +public class HttpRefreshIndexAction extends HttpAction { + + @Override + public RefreshAction getActionInstance() { + return RefreshAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, RefreshRequest request) { + String index = request.indices() != null ? 
"/" + String.join(",", request.indices()) : ""; + return newPostRequest(url, index + "/_refresh"); + } + + @Override + protected CheckedFunction entityParser() { + return parser -> new RefreshResponse(); + } +} diff --git a/http/src/main/java/org/elasticsearch/action/admin/indices/settings/put/HttpUpdateSettingsAction.java b/http/src/main/java/org/elasticsearch/action/admin/indices/settings/put/HttpUpdateSettingsAction.java new file mode 100644 index 0000000..b8facce --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/admin/indices/settings/put/HttpUpdateSettingsAction.java @@ -0,0 +1,44 @@ +package org.elasticsearch.action.admin.indices.settings.put; + +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; +import java.io.UncheckedIOException; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * + */ +public class HttpUpdateSettingsAction extends HttpAction { + + @Override + public UpdateSettingsAction getActionInstance() { + return UpdateSettingsAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, UpdateSettingsRequest request) { + try { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + request.settings().toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + String index = request.indices() != null ? "/" + String.join(",", request.indices()) : ""; + return newPutRequest(url, index + "/_settings", BytesReference.bytes(builder)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Override + protected CheckedFunction entityParser() { + return parser -> new UpdateSettingsResponse(); + } +} diff --git a/http/src/main/java/org/elasticsearch/action/bulk/HttpBulkAction.java b/http/src/main/java/org/elasticsearch/action/bulk/HttpBulkAction.java new file mode 100644 index 0000000..050d608 --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/bulk/HttpBulkAction.java @@ -0,0 +1,69 @@ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +/** + * + */ +public class HttpBulkAction extends HttpAction { + + @Override + public BulkAction getActionInstance() { + return BulkAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, BulkRequest request) { + StringBuilder bulkContent = new StringBuilder(); + for (DocWriteRequest actionRequest : request.requests()) { + if (actionRequest instanceof IndexRequest) { + IndexRequest indexRequest = (IndexRequest) actionRequest; + bulkContent.append("{\"").append(indexRequest.opType()).append("\":{"); + bulkContent.append("\"_index\":\"").append(indexRequest.index()).append("\""); + bulkContent.append(",\"_type\":\"").append(indexRequest.type()).append("\""); + if (indexRequest.id() != null) { + 
bulkContent.append(",\"_id\":\"").append(indexRequest.id()).append("\""); + } + if (indexRequest.routing() != null) { + bulkContent.append(",\"_routing\":\"").append(indexRequest.routing()).append("\""); + } + if (indexRequest.parent() != null) { + bulkContent.append(",\"_parent\":\"").append(indexRequest.parent()).append("\""); + } + if (indexRequest.version() > 0) { + bulkContent.append(",\"_version\":\"").append(indexRequest.version()).append("\""); + if (indexRequest.versionType() != null) { + bulkContent.append(",\"_version_type\":\"").append(indexRequest.versionType().name()).append("\""); + } + } + bulkContent.append("}}\n"); + bulkContent.append(indexRequest.source().utf8ToString()); + bulkContent.append("\n"); + } else if (actionRequest instanceof DeleteRequest) { + DeleteRequest deleteRequest = (DeleteRequest) actionRequest; + bulkContent.append("{\"delete\":{"); + bulkContent.append("\"_index\":\"").append(deleteRequest.index()).append("\""); + bulkContent.append(",\"_type\":\"").append(deleteRequest.type()).append("\""); + bulkContent.append(",\"_id\":\"").append(deleteRequest.id()).append("\""); + if (deleteRequest.routing() != null) { + bulkContent.append(",\"_routing\":\"").append(deleteRequest.routing()).append("\""); // _routing + } + bulkContent.append("}}\n"); + } + } + return newPostRequest(url, "/_bulk", bulkContent.toString()); + } + + @Override + protected CheckedFunction entityParser() { + return BulkResponse::fromXContent; + } +} diff --git a/http/src/main/java/org/elasticsearch/action/get/HttpExistsAction.java b/http/src/main/java/org/elasticsearch/action/get/HttpExistsAction.java new file mode 100644 index 0000000..fd2443e --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/get/HttpExistsAction.java @@ -0,0 +1,29 @@ +package org.elasticsearch.action.get; + +import org.elasticsearch.action.GenericAction; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +/** + */ +public class HttpExistsAction extends HttpAction { + + @Override + public GenericAction getActionInstance() { + return GetAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, GetRequest request) { + return newHeadRequest(url, request.index() + "/" + request.type() + "/" + request.id()); + } + + @Override + protected CheckedFunction entityParser() { + return GetResponse::fromXContent; + } +} diff --git a/http/src/main/java/org/elasticsearch/action/get/HttpGetAction.java b/http/src/main/java/org/elasticsearch/action/get/HttpGetAction.java new file mode 100644 index 0000000..3a72116 --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/get/HttpGetAction.java @@ -0,0 +1,29 @@ +package org.elasticsearch.action.get; + +import org.elasticsearch.action.GenericAction; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +/** + */ +public class HttpGetAction extends HttpAction { + + @Override + public GenericAction getActionInstance() { + return GetAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, GetRequest request) { + return newGetRequest(url, request.index() + "/" + request.type() + "/" + request.id()); + } + + @Override + 
protected CheckedFunction entityParser() { + return GetResponse::fromXContent; + } +} diff --git a/http/src/main/java/org/elasticsearch/action/index/HttpIndexAction.java b/http/src/main/java/org/elasticsearch/action/index/HttpIndexAction.java new file mode 100644 index 0000000..5352682 --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/index/HttpIndexAction.java @@ -0,0 +1,30 @@ +package org.elasticsearch.action.index; + +import org.elasticsearch.action.GenericAction; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +/** + */ +public class HttpIndexAction extends HttpAction { + + @Override + public GenericAction getActionInstance() { + return IndexAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, IndexRequest request) { + return newPutRequest(url, request.index() + "/" + request.type() + "/" + request.id(), + request.source()); + } + + @Override + protected CheckedFunction entityParser() { + return IndexResponse::fromXContent; + } +} diff --git a/http/src/main/java/org/elasticsearch/action/main/HttpMainAction.java b/http/src/main/java/org/elasticsearch/action/main/HttpMainAction.java new file mode 100644 index 0000000..ee5dc8c --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/main/HttpMainAction.java @@ -0,0 +1,29 @@ +package org.elasticsearch.action.main; + +import org.elasticsearch.action.GenericAction; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +/** + */ +public class HttpMainAction extends HttpAction { + + @Override + public GenericAction getActionInstance() { + return MainAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, MainRequest request) { + return newGetRequest(url, "/"); + } + + @Override + protected CheckedFunction entityParser() { + return MainResponse::fromXContent; + } +} diff --git a/http/src/main/java/org/elasticsearch/action/search/HttpSearchAction.java b/http/src/main/java/org/elasticsearch/action/search/HttpSearchAction.java new file mode 100644 index 0000000..4c637b7 --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/search/HttpSearchAction.java @@ -0,0 +1,33 @@ +package org.elasticsearch.action.search; + +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +/** + * + */ +public class HttpSearchAction extends HttpAction { + + @Override + public SearchAction getActionInstance() { + return SearchAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, SearchRequest request) { + String index = request.indices() != null ? 
"/" + String.join(",", request.indices()) : ""; + return newPostRequest(url, index + "/_search", request.source().toString() ); + } + + @Override + protected CheckedFunction entityParser() { + return parser -> { + // TODO(jprante) build search response + return new SearchResponse(); + }; + } +} diff --git a/http/src/main/java/org/elasticsearch/action/update/HttpUpdateAction.java b/http/src/main/java/org/elasticsearch/action/update/HttpUpdateAction.java new file mode 100644 index 0000000..c703075 --- /dev/null +++ b/http/src/main/java/org/elasticsearch/action/update/HttpUpdateAction.java @@ -0,0 +1,61 @@ +package org.elasticsearch.action.update; + +import org.elasticsearch.action.GenericAction; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +/** + */ +public class HttpUpdateAction extends HttpAction { + + @Override + public GenericAction getActionInstance() { + return UpdateAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, UpdateRequest updateRequest) { + try { + // The Java API allows update requests with different content types + // set for the partial document and the upsert document. This client + // only accepts update requests that have the same content types set + // for both doc and upsert. + XContentType xContentType = null; + if (updateRequest.doc() != null) { + xContentType = updateRequest.doc().getContentType(); + } + if (updateRequest.upsertRequest() != null) { + XContentType upsertContentType = updateRequest.upsertRequest().getContentType(); + if ((xContentType != null) && (xContentType != upsertContentType)) { + throw new IllegalStateException("update request cannot have different content types for doc [" + xContentType + "]" + + " and upsert [" + upsertContentType + "] documents"); + } else { + xContentType = upsertContentType; + } + } + if (xContentType == null) { + xContentType = Requests.INDEX_CONTENT_TYPE; + } + BytesReference source = XContentHelper.toXContent(updateRequest, xContentType, false); + return newPostRequest(url, + updateRequest.index() + "/" + updateRequest.type() + "/" + updateRequest.id() + "/_update", + source); + } catch (IOException e) { + logger.error(e.getMessage(), e); + return null; + } + } + + @Override + protected CheckedFunction entityParser() { + return UpdateResponse::fromXContent; + } +} diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpAction.java b/http/src/main/java/org/xbib/elasticsearch/client/http/HttpAction.java new file mode 100644 index 0000000..674ee6d --- /dev/null +++ b/http/src/main/java/org/xbib/elasticsearch/client/http/HttpAction.java @@ -0,0 +1,160 @@ +package org.xbib.elasticsearch.client.http; + +import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import 
org.elasticsearch.action.GenericAction; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.xbib.netty.http.client.Request; +import org.xbib.netty.http.client.RequestBuilder; +import org.xbib.netty.http.client.transport.Transport; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +/** + * Base class for HTTP actions. + * + * @param the request type + * @param the response type + */ +public abstract class HttpAction { + + protected final Logger logger = LogManager.getLogger(getClass().getName()); + + protected static final String APPLICATION_JSON = "application/json"; + + protected Settings settings; + + protected void setSettings(Settings settings) { + this.settings = settings; + } + + public abstract GenericAction getActionInstance(); + + /*public final ActionFuture execute(HttpActionContext httpActionContext) { + PlainActionFuture future = PlainActionFuture.newFuture(); + //HttpActionFuture future = new HttpActionFuture<>(); + execute(httpActionContext, future); + return future; + }*/ + + public final void execute(HttpActionContext httpActionContext, ActionListener listener) throws IOException { + try { + ActionRequestValidationException validationException = httpActionContext.getRequest().validate(); + if (validationException != null) { + listener.onFailure(validationException); + return; + } + RequestBuilder httpRequestBuilder = + createHttpRequest(httpActionContext.getUrl(), httpActionContext.getRequest()); + //httpRequestBuilder.addHeader("content-type", "application/json"); + Request httpRequest = httpRequestBuilder.build(); +// logger.info("action = {} request = {}", this.getClass().getName(), httpRequest.toString()); + httpRequest.setResponseListener(fullHttpResponse -> { + logger.info("returned response " + fullHttpResponse.status().code() + + " headers = " + fullHttpResponse.headers().entries() + + " content = " + fullHttpResponse.content().toString(StandardCharsets.UTF_8)); + listener.onResponse(parseToResponse(httpActionContext.setHttpResponse(fullHttpResponse))); + }); + Transport transport = httpActionContext.getHttpClient().internalClient().execute(httpRequest); + logger.info("transport = " + transport); + httpActionContext.setHttpClientTransport(transport); + if (transport.isFailed()) { + listener.onFailure(new Exception(transport.getFailure())); + } + logger.info("done, listener is " + listener); + } catch (Throwable e) { + listener.onFailure(new RuntimeException(e)); + throw new IOException(e); + } + } + + protected RequestBuilder newGetRequest(String url, String path) { + return Request.builder(HttpMethod.GET).url(url).uri(path); + } + + protected RequestBuilder newGetRequest(String url, String path, BytesReference content) { + return newRequest(HttpMethod.GET, url, path, content); + } + + protected RequestBuilder newHeadRequest(String url, String path) { + return newRequest(HttpMethod.HEAD, url, path); + } + + protected RequestBuilder newPostRequest(String url, String path) { + return newRequest(HttpMethod.POST, url, path); + } + + protected RequestBuilder newPostRequest(String url, String path, BytesReference content) { + return newRequest(HttpMethod.POST, url, path, content); + } + + protected RequestBuilder newPostRequest(String url, String path, String 
content) { + return newRequest(HttpMethod.POST, url, path, content); + } + + protected RequestBuilder newPutRequest(String url, String path) { + return newRequest(HttpMethod.PUT, url, path); + } + + protected RequestBuilder newPutRequest(String url, String path, String content) { + return newRequest(HttpMethod.PUT, url, path, content); + } + + protected RequestBuilder newPutRequest(String url, String path, BytesReference content) { + return newRequest(HttpMethod.PUT, url, path, content); + } + + protected RequestBuilder newDeleteRequest(String url, String path, BytesReference content) { + return newRequest(HttpMethod.DELETE, url, path, content); + } + + protected RequestBuilder newRequest(HttpMethod method, String baseUrl, String path) { + return Request.builder(method).url(baseUrl).uri(path); + } + + protected RequestBuilder newRequest(HttpMethod method, String baseUrl, String path, BytesReference content) { + return Request.builder(method).url(baseUrl).uri(path).content(content.toBytesRef().bytes, APPLICATION_JSON); + } + + protected RequestBuilder newRequest(HttpMethod method, String baseUrl, String path, String content) { + return Request.builder(method).url(baseUrl).uri(path).content(content, APPLICATION_JSON); + } + + protected RequestBuilder newRequest(HttpMethod method, String baseUrl, String path, ByteBuf byteBuf) { + return Request.builder(method).url(baseUrl).uri(path).content(byteBuf, APPLICATION_JSON); + } + + protected T parseToResponse(HttpActionContext httpActionContext) { + String mediaType = httpActionContext.getHttpResponse().headers().get(HttpHeaderNames.CONTENT_TYPE); + XContentType xContentType = XContentType.fromMediaTypeOrFormat(mediaType); + if (xContentType == null) { + throw new IllegalStateException("unsupported content-type: " + mediaType); + } + try (XContentParser parser = xContentType.xContent().createParser(httpActionContext.getHttpClient().getRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + httpActionContext.getHttpResponse().content().array())) { + return entityParser().apply(parser); + } catch (IOException e) { + logger.error(e.getMessage(), e); + return null; + } + } + + protected abstract RequestBuilder createHttpRequest(String baseUrl, R request) throws IOException; + + protected abstract CheckedFunction entityParser(); + +} diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpActionContext.java b/http/src/main/java/org/xbib/elasticsearch/client/http/HttpActionContext.java new file mode 100644 index 0000000..3d403db --- /dev/null +++ b/http/src/main/java/org/xbib/elasticsearch/client/http/HttpActionContext.java @@ -0,0 +1,60 @@ +package org.xbib.elasticsearch.client.http; + +import io.netty.handler.codec.http.FullHttpResponse; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.xbib.netty.http.client.transport.Transport; + +/** + * HTTP action context. 
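+ * Holds the action request, the target URL, the client transport, and the raw HTTP response while a single action executes.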
+ * + * @param request type + * @param response type + */ +public class HttpActionContext { + + private final HttpClient httpClient; + + private final R request; + + private final String url; + + private Transport httpClientTransport; + + private FullHttpResponse httpResponse; + + HttpActionContext(HttpClient httpClient, R request, String url) { + this.httpClient = httpClient; + this.request = request; + this.url = url; + } + + public HttpClient getHttpClient() { + return httpClient; + } + + public R getRequest() { + return request; + } + + public String getUrl() { + return url; + } + + public void setHttpClientTransport(Transport httpClientTransport) { + this.httpClientTransport = httpClientTransport; + } + + public Transport getHttpClientTransport() { + return httpClientTransport; + } + + public HttpActionContext setHttpResponse(FullHttpResponse fullHttpResponse) { + this.httpResponse = fullHttpResponse; + return this; + } + + public FullHttpResponse getHttpResponse() { + return httpResponse; + } +} diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpActionFuture.java b/http/src/main/java/org/xbib/elasticsearch/client/http/HttpActionFuture.java new file mode 100644 index 0000000..33588c8 --- /dev/null +++ b/http/src/main/java/org/xbib/elasticsearch/client/http/HttpActionFuture.java @@ -0,0 +1,100 @@ +package org.xbib.elasticsearch.client.http; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.BaseFuture; +import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; +import org.xbib.netty.http.client.transport.Transport; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + */ +public class HttpActionFuture extends BaseFuture implements ActionFuture, ActionListener { + + private Transport httpClientTransport; + + HttpActionFuture setHttpClientTransport(Transport httpClientTransport) { + this.httpClientTransport = httpClientTransport; + return this; + } + + @Override + public T actionGet() { + try { + httpClientTransport.get(); + return get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException("future got interrupted", e); + } catch (ExecutionException e) { + throw rethrowExecutionException(e); + } + } + + @Override + public T actionGet(String timeout) { + return actionGet(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".actionGet.timeout")); + } + + @Override + public T actionGet(long timeoutMillis) { + return actionGet(timeoutMillis, TimeUnit.MILLISECONDS); + } + + @Override + public T actionGet(TimeValue timeout) { + return actionGet(timeout.millis(), TimeUnit.MILLISECONDS); + } + + @Override + public T actionGet(long timeout, TimeUnit unit) { + try { + return get(timeout, unit); + } catch (TimeoutException e) { + throw new ElasticsearchTimeoutException(e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException("Future got interrupted", e); + } catch (ExecutionException e) { + throw rethrowExecutionException(e); + } + } + + private static RuntimeException rethrowExecutionException(ExecutionException e) { + if (e.getCause() instanceof ElasticsearchException) { + 
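+ // unwrap the ElasticsearchException so the root cause can be rethrown directly where possible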
ElasticsearchException esEx = (ElasticsearchException) e.getCause(); + Throwable root = esEx.unwrapCause(); + if (root instanceof ElasticsearchException) { + return (ElasticsearchException) root; + } else if (root instanceof RuntimeException) { + return (RuntimeException) root; + } + return new UncategorizedExecutionException("Failed execution", root); + } else if (e.getCause() instanceof RuntimeException) { + return (RuntimeException) e.getCause(); + } else { + return new UncategorizedExecutionException("Failed execution", e); + } + } + + @Override + public void onResponse(L result) { + set(convert(result)); + } + + @Override + public void onFailure(Exception e) { + setException(e); + } + + @SuppressWarnings("unchecked") + private T convert(L listenerResponse) { + return (T) listenerResponse; + } +} diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpClient.java b/http/src/main/java/org/xbib/elasticsearch/client/http/HttpClient.java new file mode 100644 index 0000000..31e5d95 --- /dev/null +++ b/http/src/main/java/org/xbib/elasticsearch/client/http/HttpClient.java @@ -0,0 +1,209 @@ +package org.xbib.elasticsearch.client.http; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.GenericAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.node.Node; +import org.elasticsearch.threadpool.ThreadPool; +import org.xbib.elasticsearch.client.AbstractClient; +import org.xbib.elasticsearch.client.BulkControl; +import org.xbib.elasticsearch.client.BulkMetric; +import org.xbib.netty.http.client.Client; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.ServiceLoader; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Elasticsearch HTTP client. 
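+ * Actions are discovered via a ServiceLoader of HttpAction implementations and executed over the Netty-based HTTP client.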
+ */ +public class HttpClient extends AbstractClient implements ElasticsearchClient { + + private static final Logger logger = LogManager.getLogger(HttpClient.class); + + private Client client; + + private NamedXContentRegistry registry; + + @SuppressWarnings("rawtypes") + private Map actionMap; + + private List urls; + + //private ThreadPool threadPool; + + @Override + public HttpClient init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control) { + init(client, settings, metric, control, null, Collections.emptyList()); + return this; + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private void init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control, + ClassLoader classLoader, List namedXContentEntries) { + //super.init(client, settings, metric, control); + this.urls = settings.getAsList("urls"); + if (urls.isEmpty()) { + throw new IllegalArgumentException("no urls given"); + } + this.registry = new NamedXContentRegistry(Stream.of(getNamedXContents().stream(), + namedXContentEntries.stream() + ).flatMap(Function.identity()).collect(Collectors.toList())); + this.actionMap = new HashMap<>(); + ServiceLoader httpActionServiceLoader = ServiceLoader.load(HttpAction.class, + classLoader != null ? classLoader : Thread.currentThread().getContextClassLoader()); + for (HttpAction httpAction : httpActionServiceLoader) { + httpAction.setSettings(settings); + actionMap.put(httpAction.getActionInstance(), httpAction); + } + this.client = Client.builder().enableDebug().build(); + Settings threadPoolsettings = Settings.builder() + .put(settings) + .put(Node.NODE_NAME_SETTING.getKey(), "httpclient") + .build(); + //this.threadPool = threadPool != null ? threadPool : new ThreadPool(threadPoolsettings); + logger.info("HTTP client initialized with {} actions", actionMap.size()); + } + + private static List getNamedXContents() { + return new ArrayList<>(); + } + + public NamedXContentRegistry getRegistry() { + return registry; + } + + public static Builder builder() { + return new Builder(); + } + + public Client internalClient() { + return client; + } + + @Override + public ElasticsearchClient client() { + return this; + } + + @Override + protected ElasticsearchClient createClient(Settings settings) throws IOException { + return this; + } + + @Override + public void shutdown() throws IOException { + client.shutdownGracefully(); + //threadPool.close(); + } + + @Override + public > ActionFuture + execute(Action action, Request request) { + PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + logger.info("plain action future = " + actionFuture); + execute(action, request, actionFuture); + return actionFuture; + } + + @Override + public > void + execute(Action action, Request request, ActionListener listener) { + doExecute(action, request, listener); + } + + @Override + public > RequestBuilder + prepareExecute(Action action) { + return action.newRequestBuilder(this); + } + + @Override + public ThreadPool threadPool() { + logger.info("returning null for threadPool() request"); + return null; //threadPool; + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + public > + void doExecute(Action action, R request, ActionListener listener) { + HttpAction httpAction = actionMap.get(action); + if (httpAction == null) { + throw new IllegalStateException("failed to find http action [" + action + "] to execute"); + } + logger.info("http action = " + httpAction); + String url = urls.get(0); // TODO + try { + logger.info("submitting to URL 
{}", url); + HttpActionContext httpActionContext = new HttpActionContext(this, request, url); + httpAction.execute(httpActionContext, listener); + logger.info("submitted to URL {}", url); + } catch (Exception e) { + logger.error(e.getMessage(), e); + } + } + + /** + * The Builder for HTTP client. + */ + public static class Builder { + + private final Settings.Builder settingsBuilder = Settings.builder(); + + private ClassLoader classLoader; + + private List namedXContentEntries; + + private ThreadPool threadPool = null; + + public Builder settings(Settings settings) { + this.settingsBuilder.put(settings); + return this; + } + + public Builder classLoader(ClassLoader classLoader) { + this.classLoader = classLoader; + return this; + } + + public Builder namedXContentEntries(List namedXContentEntries) { + this.namedXContentEntries = namedXContentEntries; + return this; + } + + public Builder threadPool(ThreadPool threadPool) { + this.threadPool = threadPool; + return this; + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + public HttpClient build() { + Settings settings = settingsBuilder.build(); + HttpClient httpClient = new HttpClient(); + httpClient.init(null, settings, null, null, + classLoader, namedXContentEntries); + return httpClient; + } + } +} diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/package-info.java b/http/src/main/java/org/xbib/elasticsearch/client/http/package-info.java new file mode 100644 index 0000000..a9c3ded --- /dev/null +++ b/http/src/main/java/org/xbib/elasticsearch/client/http/package-info.java @@ -0,0 +1,4 @@ +/** + * Classes for Elasticsearch HTTP client. + */ +package org.xbib.elasticsearch.client.http; diff --git a/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods b/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods new file mode 100644 index 0000000..18f7ab4 --- /dev/null +++ b/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods @@ -0,0 +1 @@ +org.xbib.elasticsearch.client.http.HttpClient \ No newline at end of file diff --git a/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.http.HttpAction b/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.http.HttpAction new file mode 100644 index 0000000..cce80e6 --- /dev/null +++ b/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.http.HttpAction @@ -0,0 +1,11 @@ +org.elasticsearch.action.admin.cluster.node.info.HttpNodesInfoAction +org.elasticsearch.action.admin.cluster.settings.HttpClusterUpdateSettingsAction +org.elasticsearch.action.admin.indices.create.HttpCreateIndexAction +org.elasticsearch.action.admin.indices.refresh.HttpRefreshIndexAction +org.elasticsearch.action.admin.indices.settings.put.HttpUpdateSettingsAction +org.elasticsearch.action.bulk.HttpBulkAction +org.elasticsearch.action.index.HttpIndexAction +org.elasticsearch.action.search.HttpSearchAction +org.elasticsearch.action.main.HttpMainAction +org.elasticsearch.action.get.HttpExistsAction +org.elasticsearch.action.get.HttpGetAction diff --git a/http/src/main/resources/extra-security.policy b/http/src/main/resources/extra-security.policy new file mode 100644 index 0000000..a1e19dd --- /dev/null +++ b/http/src/main/resources/extra-security.policy @@ -0,0 +1,20 @@ + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", 
"read"; + // netty makes and accepts socket connections + permission java.net.SocketPermission "*", "accept,connect,resolve"; + // 4.1.24 io.netty.util.concurrent.GlobalEventExecutor$2.run(GlobalEventExecutor.java:228) + permission java.lang.RuntimePermission "setContextClassLoader"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! + permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; + +grant codeBase "${codebase.netty-http-client}" { + // org.xbib.netty.http.client.Client.(Client.java:67) + permission java.util.PropertyPermission "io.netty.noUnsafe", "write"; +;} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientAliasTests.java b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientAliasTests.java new file mode 100644 index 0000000..09bff1b --- /dev/null +++ b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientAliasTests.java @@ -0,0 +1,109 @@ +package org.xbib.elasticsearch.client.http; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.transport.Netty4Plugin; +import org.junit.Before; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.IndexAliasAdder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +public class HttpClientAliasTests extends ESSingleNodeTestCase { + + private static final Logger logger = LogManager.getLogger(HttpClientAliasTests.class.getName()); + + private TransportAddress httpAddress; + + @Before + public void fetchTransportAddress() { + NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); + httpAddress = nodeInfo.getHttp().getAddress().publishAddress(); + } + + @Override + protected Collection> getPlugins() { + return Collections.singletonList(Netty4Plugin.class); + } + + @Override + public Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) + .put(NetworkModule.HTTP_TYPE_DEFAULT_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + private String findHttpAddress() { + return "http://" + httpAddress.address().getHostName() + ":" + httpAddress.address().getPort(); + } + + public void testIndexAlias() throws Exception { + final HttpClient client = 
ClientBuilder.builder() + .put("urls", findHttpAddress()) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(HttpClient.class); + try { + client.newIndex("test1234"); + for (int i = 0; i < 1; i++) { + client.index("test1234", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + client.flushIngest(); + client.refreshIndex("test1234"); + + List simpleAliases = Arrays.asList("a", "b", "c"); + client.switchAliases("test", "test1234", simpleAliases); + + client.newIndex("test5678"); + for (int i = 0; i < 1; i++) { + client.index("test5678", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + client.flushIngest(); + client.refreshIndex("test5678"); + + simpleAliases = Arrays.asList("d", "e", "f"); + client.switchAliases("test", "test5678", simpleAliases, new IndexAliasAdder() { + @Override + public void addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias) { + builder.addAlias(index, alias, QueryBuilders.termQuery("my_key", alias)); + } + }); + Map aliases = client.getIndexFilters("test5678"); + logger.info("aliases of index test5678 = {}", aliases); + + aliases = client.getAliasFilters("test"); + logger.info("aliases of alias test = {}", aliases); + + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.waitForResponses(TimeValue.timeValueSeconds(30)); + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } +} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientDuplicateIDTests.java b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientDuplicateIDTests.java new file mode 100644 index 0000000..456926a --- /dev/null +++ b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientDuplicateIDTests.java @@ -0,0 +1,101 @@ +package org.xbib.elasticsearch.client.http; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.transport.Netty4Plugin; +import org.junit.Before; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; + +import java.util.Collection; +import java.util.Collections; + +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +public class HttpClientDuplicateIDTests extends ESSingleNodeTestCase { + + private static final Logger logger = LogManager.getLogger(HttpClientDuplicateIDTests.class.getName()); + + private static final long MAX_ACTIONS = 10L; + + private static final long NUM_ACTIONS = 12345L; + + private TransportAddress httpAddress; + + @Before + public void 
fetchTransportAddress() { + NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); + httpAddress = nodeInfo.getHttp().getAddress().publishAddress(); + } + + @Override + protected Collection> getPlugins() { + return Collections.singletonList(Netty4Plugin.class); + } + + @Override + public Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) + .put(NetworkModule.HTTP_TYPE_DEFAULT_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + private String findHttpAddress() { + return "http://" + httpAddress.address().getHostName() + ":" + httpAddress.address().getPort(); + } + + public void testDuplicateDocIDs() throws Exception { + final HttpClient client = ClientBuilder.builder() + //.put(ClientBuilder.MAX_CONCURRENT_REQUESTS, 2) // avoid EsRejectedExecutionException + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .put("urls", findHttpAddress()) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(HttpClient.class); + try { + client.newIndex("test"); + for (int i = 0; i < NUM_ACTIONS; i++) { + client.index("test", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setIndices("test") + .setTypes("test") + .setQuery(QueryBuilders.matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("hits = {}", hits); + assertTrue(hits < NUM_ACTIONS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + logger.info("numactions = {}, submitted = {}, succeeded= {}, failed = {}", NUM_ACTIONS, + client.getMetric().getSubmitted().getCount(), + client.getMetric().getSucceeded().getCount(), + client.getMetric().getFailed().getCount()); + assertEquals(NUM_ACTIONS, client.getMetric().getSubmitted().getCount()); + assertEquals(NUM_ACTIONS, client.getMetric().getSucceeded().getCount()); + } + } +} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientReplicaTests.java b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientReplicaTests.java new file mode 100644 index 0000000..fc036af --- /dev/null +++ b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientReplicaTests.java @@ -0,0 +1,142 @@ +package org.xbib.elasticsearch.client.http; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.IndexShardStats; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import 
org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.IndexingStats; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.Netty4Plugin; +import org.junit.Before; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; + +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) +public class HttpClientReplicaTests extends ESIntegTestCase { + + private static final Logger logger = LogManager.getLogger(HttpClientReplicaTests.class.getName()); + + private TransportAddress httpAddress; + + @Before + public void fetchTransportAddress() { + NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); + httpAddress = nodeInfo.getHttp().getAddress().publishAddress(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singletonList(Netty4Plugin.class); + } + + @Override + public Settings nodeSettings(int nodeNumber) { + return Settings.builder() + .put(super.nodeSettings(nodeNumber)) + .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) + .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) + .put(NetworkModule.HTTP_TYPE_DEFAULT_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + private String findHttpAddress() { + return "http://" + httpAddress.address().getHostName() + ":" + httpAddress.address().getPort(); + } + + public void testReplicaLevel() throws Exception { + + Settings settingsTest1 = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 2) + .build(); + + Settings settingsTest2 = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + .build(); + + final HttpClient client = ClientBuilder.builder() + .put("urls", findHttpAddress()) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(HttpClient.class); + + try { + client.newIndex("test1", settingsTest1, null) + .newIndex("test2", settingsTest2, null); + client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30)); + for (int i = 0; i < 1234; i++) { + client.index("test1", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + for (int i = 0; i < 1234; i++) { + client.index("test2", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(60)); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + logger.info("refreshing"); + client.refreshIndex("test1"); + client.refreshIndex("test2"); + SearchRequestBuilder searchRequestBuilder = new
SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setIndices("test1", "test2") + .setQuery(QueryBuilders.matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("query total hits={}", hits); + assertEquals(2468, hits); + IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(), + IndicesStatsAction.INSTANCE) + .all(); + IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet(); + for (Map.Entry<String, IndexStats> m : response.getIndices().entrySet()) { + IndexStats indexStats = m.getValue(); + CommonStats commonStats = indexStats.getTotal(); + IndexingStats indexingStats = commonStats.getIndexing(); + IndexingStats.Stats stats = indexingStats.getTotal(); + logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount()); + for (Map.Entry<Integer, IndexShardStats> me : indexStats.getIndexShards().entrySet()) { + IndexShardStats indexShardStats = me.getValue(); + CommonStats commonShardStats = indexShardStats.getTotal(); + logger.info("shard {} count = {}", me.getKey(), + commonShardStats.getIndexing().getTotal().getIndexCount()); + } + } + try { + client.deleteIndex("test1") + .deleteIndex("test2"); + } catch (Exception e) { + logger.error("delete index failed, ignored. Reason:", e); + } + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } + +} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientTests.java b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientTests.java new file mode 100644 index 0000000..f64999d --- /dev/null +++ b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientTests.java @@ -0,0 +1,204 @@ +package org.xbib.elasticsearch.client.http; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.transport.Netty4Plugin; +import org.junit.Before; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; + +import java.util.Collection; +import java.util.Collections; + +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +public class HttpClientTests extends ESSingleNodeTestCase { + + private static final Logger logger = LogManager.getLogger(HttpClientTests.class.getName()); + + private static final Long MAX_ACTIONS = 10L; + + private static final Long NUM_ACTIONS = 1234L; + + private TransportAddress httpAddress; + + @Before + public void fetchTransportAddress() { + NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); + httpAddress = nodeInfo.getHttp().getAddress().publishAddress(); + } + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return Collections.singletonList(Netty4Plugin.class); + } + + @Override + public Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(NetworkModule.TRANSPORT_TYPE_KEY,
Netty4Plugin.NETTY_TRANSPORT_NAME) + .put(NetworkModule.HTTP_TYPE_DEFAULT_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + private String findHttpAddress() { + return "http://" + httpAddress.address().getHostName() + ":" + httpAddress.address().getPort(); + } + + public void testNewIndex() throws Exception { + final HttpClient client = ClientBuilder.builder() + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) + .put("urls", findHttpAddress()) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(HttpClient.class); + client.newIndex("test"); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + + /*public void testMapping() throws Exception { + final HttpClient client = ClientBuilder.builder() + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) + .put("urls", findHttpAddress()) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(HttpClient.class); + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("test") + .startObject("properties") + .startObject("location") + .field("type", "geo_point") + .endObject() + .endObject() + .endObject() + .endObject(); + client.mapping("test", builder.string()); + client.newIndex("test"); + GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test"); + GetMappingsResponse getMappingsResponse = + client.client().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet(); + logger.info("mappings={}", getMappingsResponse.getMappings()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + + public void testSingleDoc() throws Exception { + final HttpClient client = ClientBuilder.builder() + .put("urls", findHttpAddress()) + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(30)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(HttpClient.class); + client.newIndex("test"); + client.index("test", "test", "1", false,"{ \"name\" : \"Hello World\"}"); // single doc ingest + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + assertEquals(1, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + + public void testRandomDocs() throws Exception { + long numactions = NUM_ACTIONS; + final HttpClient client = ClientBuilder.builder() + .put("urls", findHttpAddress()) + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(HttpClient.class); + try { + client.newIndex("test"); + for (int i = 0; i < NUM_ACTIONS; i++) { + client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + logger.info("assuring {} == {}", 
numactions, client.getMetric().getSucceeded().getCount()); + assertEquals(numactions, client.getMetric().getSucceeded().getCount()); + assertFalse(client.hasThrowable()); + client.shutdown(); + } + } + + public void testThreadedRandomDocs() throws Exception { + int maxthreads = Runtime.getRuntime().availableProcessors(); + Long maxactions = MAX_ACTIONS; + final Long maxloop = NUM_ACTIONS; + logger.info("max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop); + final HttpClient client = ClientBuilder.builder() + .put("urls", findHttpAddress()) + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))// disable auto flush for this test + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(HttpClient.class); + try { + client.newIndex("test").startBulk("test", 30 * 1000, 1000); + ExecutorService executorService = Executors.newFixedThreadPool(maxthreads); + final CountDownLatch latch = new CountDownLatch(maxthreads); + for (int i = 0; i < maxthreads; i++) { + executorService.execute(() -> { + for (int i1 = 0; i1 < maxloop; i1++) { + client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + latch.countDown(); + }); + } + logger.info("waiting for max 30 seconds..."); + latch.await(30, TimeUnit.SECONDS); + logger.info("flush..."); + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + logger.info("got all responses, executor service shutdown..."); + executorService.shutdown(); + logger.info("executor service is shut down"); + client.stopBulk("test"); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + logger.info("assuring {} == {}", maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); + assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setIndices("test") + .setQuery(QueryBuilders.matchAllQuery()) + .setSize(0); + assertEquals(maxthreads * maxloop, + searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); + client.shutdown(); + } + }*/ +} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientUpdateReplicaLevelTests.java b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientUpdateReplicaLevelTests.java new file mode 100644 index 0000000..db0f894 --- /dev/null +++ b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientUpdateReplicaLevelTests.java @@ -0,0 +1,97 @@ +package org.xbib.elasticsearch.client.http; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import 
org.elasticsearch.transport.Netty4Plugin; +import org.junit.Before; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; + +import java.util.Collection; +import java.util.Collections; + +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) +public class HttpClientUpdateReplicaLevelTests extends ESIntegTestCase { + + private static final Logger logger = LogManager.getLogger(HttpClientUpdateReplicaLevelTests.class.getName()); + + private TransportAddress httpAddress; + + @Before + public void fetchTransportAddress() { + NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); + httpAddress = nodeInfo.getHttp().getAddress().publishAddress(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singletonList(Netty4Plugin.class); + } + + @Override + public Settings nodeSettings(int nodeNumber) { + return Settings.builder() + .put(super.nodeSettings(nodeNumber)) + .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) + .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) + .put(NetworkModule.HTTP_TYPE_DEFAULT_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + private String findHttpAddress() { + return "http://" + httpAddress.address().getHostName() + ":" + httpAddress.address().getPort(); + } + + public void testUpdateReplicaLevel() throws Exception { + + int numberOfShards = 1; + int replicaLevel = 2; + + int shardsAfterReplica; + + Settings settings = Settings.builder() + .put("index.number_of_shards", numberOfShards) + .put("index.number_of_replicas", 0) + .build(); + + final HttpClient client = ClientBuilder.builder() + .put("urls", findHttpAddress()) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(client(), HttpClient.class); + + try { + client.newIndex("replicatest", settings, null); + client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30)); + for (int i = 0; i < 12345; i++) { + client.index("replicatest", "replicatest", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + shardsAfterReplica = client.updateReplicaLevel("replicatest", replicaLevel); + assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1)); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } + +} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/IndexCreationTest.java b/http/src/test/java/org/xbib/elasticsearch/client/http/IndexCreationTest.java new file mode 100644 index 0000000..db768be --- /dev/null +++ b/http/src/test/java/org/xbib/elasticsearch/client/http/IndexCreationTest.java @@ -0,0 +1,50 @@ +package org.xbib.elasticsearch.client.http; + +import org.junit.Test; +import org.xbib.elasticsearch.client.ClientBuilder; + +import java.util.logging.ConsoleHandler; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.LogManager; +import java.util.logging.Logger; +import java.util.logging.SimpleFormatter; + +public class IndexCreationTest { + +
private static final Logger logger = Logger.getLogger(IndexCreationTest.class.getName()); + static { + //System.setProperty("io.netty.leakDetection.level", "paranoid"); + System.setProperty("io.netty.noKeySetOptimization", Boolean.toString(true)); + System.setProperty("log4j2.disable.jmx", Boolean.toString(true)); + + System.setProperty("java.util.logging.SimpleFormatter.format", + "%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL %4$-7s [%3$s] %5$s %6$s%n"); + LogManager.getLogManager().reset(); + Logger rootLogger = LogManager.getLogManager().getLogger(""); + Handler handler = new ConsoleHandler(); + handler.setFormatter(new SimpleFormatter()); + rootLogger.addHandler(handler); + rootLogger.setLevel(Level.ALL); + for (Handler h : rootLogger.getHandlers()) { + handler.setFormatter(new SimpleFormatter()); + h.setLevel(Level.ALL); + } + } + + @Test + public void testNewIndex() throws Exception { + HttpClient client = ClientBuilder.builder() + //.put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) + .put("urls", "http://localhost:9200") + //.setMetric(new SimpleBulkMetric()) + //.setControl(new SimpleBulkControl()) + .getClient(HttpClient.class); + try { + client.newIndex("demo"); + Thread.sleep(3000L); + } finally { + client.shutdown(); + } + } +} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/TestRunnerThreadsFilter.java b/http/src/test/java/org/xbib/elasticsearch/client/http/TestRunnerThreadsFilter.java new file mode 100644 index 0000000..15e845e --- /dev/null +++ b/http/src/test/java/org/xbib/elasticsearch/client/http/TestRunnerThreadsFilter.java @@ -0,0 +1,11 @@ +package org.xbib.elasticsearch.client.http; + +import com.carrotsearch.randomizedtesting.ThreadFilter; + +public class TestRunnerThreadsFilter implements ThreadFilter { + + @Override + public boolean reject(Thread thread) { + return thread.getName().startsWith("ObjectCleanerThread"); + } +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/package-info.java b/http/src/test/java/org/xbib/elasticsearch/client/http/package-info.java similarity index 56% rename from src/integration-test/java/org/xbib/elasticsearch/extras/client/node/package-info.java rename to http/src/test/java/org/xbib/elasticsearch/client/http/package-info.java index 873ebae..aea79de 100644 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/package-info.java +++ b/http/src/test/java/org/xbib/elasticsearch/client/http/package-info.java @@ -1,4 +1,4 @@ /** * Classes for testing Elasticsearch node client extras. 
*/ -package org.xbib.elasticsearch.extras.client.node; +package org.xbib.elasticsearch.client.http; diff --git a/node/build.gradle b/node/build.gradle new file mode 100644 index 0000000..9f401d8 --- /dev/null +++ b/node/build.gradle @@ -0,0 +1,65 @@ +buildscript { + repositories { + jcenter() + maven { + url 'http://xbib.org/repository' + } + } + dependencies { + classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4" + } +} + +apply plugin: 'org.xbib.gradle.plugin.elasticsearch.build' + +configurations { + main + tests +} + +dependencies { + compile project(':common') + testCompile "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" + testRuntime "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" +} + +jar { + baseName "${rootProject.name}-node" +} + +/* +task testJar(type: Jar, dependsOn: testClasses) { + baseName = "${project.archivesBaseName}-tests" + from sourceSets.test.output +} +*/ + +artifacts { + main jar + tests testJar + archives sourcesJar, javadocJar +} + +test { + enabled = false + jvmArgs "-javaagent:" + configurations.alpnagent.asPath + systemProperty 'path.home', projectDir.absolutePath + testLogging { + showStandardStreams = true + exceptionFormat = 'full' + } +} + +randomizedTest { + enabled = false +} + + +esTest { + // test with the jars, not the classes, for security manager + // classpath = files(configurations.testRuntime) + configurations.main.artifacts.files + configurations.tests.artifacts.files + systemProperty 'tests.security.manager', 'true' + // maybe we like some extra security policy for our code + systemProperty 'tests.security.policy', '/extra-security.policy' +} +esTest.dependsOn jar, testJar diff --git a/node/config/checkstyle/checkstyle.xml b/node/config/checkstyle/checkstyle.xml new file mode 100644 index 0000000..52fe33c --- /dev/null +++ b/node/config/checkstyle/checkstyle.xml @@ -0,0 +1,323 @@ diff --git a/node/src/docs/asciidoc/css/foundation.css b/node/src/docs/asciidoc/css/foundation.css new file mode 100644 index 0000000..27be611 --- /dev/null +++ b/node/src/docs/asciidoc/css/foundation.css @@ -0,0 +1,684 @@ +/*! normalize.css v2.1.2 | MIT License | git.io/normalize */ +/* ========================================================================== HTML5 display definitions ========================================================================== */ +/** Correct `block` display not defined in IE 8/9. */ +article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; } + +/** Correct `inline-block` display not defined in IE 8/9. */ +audio, canvas, video { display: inline-block; } + +/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */ +audio:not([controls]) { display: none; height: 0; } + +/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22.
*/ +[hidden], template { display: none; } + +script { display: none !important; } + +/* ========================================================================== Base ========================================================================== */ +/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */ +html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ } + +/** Remove default margin. */ +body { margin: 0; } + +/* ========================================================================== Links ========================================================================== */ +/** Remove the gray background color from active links in IE 10. */ +a { background: transparent; } + +/** Address `outline` inconsistency between Chrome and other browsers. */ +a:focus { outline: thin dotted; } + +/** Improve readability when focused and also mouse hovered in all browsers. */ +a:active, a:hover { outline: 0; } + +/* ========================================================================== Typography ========================================================================== */ +/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */ +h1 { font-size: 2em; margin: 0.67em 0; } + +/** Address styling not present in IE 8/9, Safari 5, and Chrome. */ +abbr[title] { border-bottom: 1px dotted; } + +/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ +b, strong { font-weight: bold; } + +/** Address styling not present in Safari 5 and Chrome. */ +dfn { font-style: italic; } + +/** Address differences between Firefox and other browsers. */ +hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; } + +/** Address styling not present in IE 8/9. */ +mark { background: #ff0; color: #000; } + +/** Correct font family set oddly in Safari 5 and Chrome. */ +code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; } + +/** Improve readability of pre-formatted text in all browsers. */ +pre { white-space: pre-wrap; } + +/** Set consistent quote types. */ +q { quotes: "\201C" "\201D" "\2018" "\2019"; } + +/** Address inconsistent and variable font size in all browsers. */ +small { font-size: 80%; } + +/** Prevent `sub` and `sup` affecting `line-height` in all browsers. */ +sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } + +sup { top: -0.5em; } + +sub { bottom: -0.25em; } + +/* ========================================================================== Embedded content ========================================================================== */ +/** Remove border when inside `a` element in IE 8/9. */ +img { border: 0; } + +/** Correct overflow displayed oddly in IE 9. */ +svg:not(:root) { overflow: hidden; } + +/* ========================================================================== Figures ========================================================================== */ +/** Address margin not present in IE 8/9 and Safari 5. */ +figure { margin: 0; } + +/* ========================================================================== Forms ========================================================================== */ +/** Define consistent border, margin, and padding. */ +fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } + +/** 1. Correct `color` not being inherited in IE 8/9. 2. 
Remove padding so people aren't caught out if they zero out fieldsets. */ +legend { border: 0; /* 1 */ padding: 0; /* 2 */ } + +/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ +button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ } + +/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ +button, input { line-height: normal; } + +/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */ +button, select { text-transform: none; } + +/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */ +button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ } + +/** Re-set default cursor for disabled elements. */ +button[disabled], html input[disabled] { cursor: default; } + +/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ +input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } + +/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */ +input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; } + +/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ +input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } + +/** Remove inner padding and border in Firefox 4+. */ +button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } + +/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */ +textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ } + +/* ========================================================================== Tables ========================================================================== */ +/** Remove most spacing between table cells. 
*/ +table { border-collapse: collapse; border-spacing: 0; } + +meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; } + +meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; } + +meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; } + +*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } + +html, body { font-size: 100%; } + +body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; } + +a:hover { cursor: pointer; } + +img, object, embed { max-width: 100%; height: auto; } + +object, embed { height: 100%; } + +img { -ms-interpolation-mode: bicubic; } + +#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; } + +.left { float: left !important; } + +.right { float: right !important; } + +.text-left { text-align: left !important; } + +.text-right { text-align: right !important; } + +.text-center { text-align: center !important; } + +.text-justify { text-align: justify !important; } + +.hide { display: none; } + +.antialiased { -webkit-font-smoothing: antialiased; } + +img { display: inline-block; vertical-align: middle; } + +textarea { height: auto; min-height: 50px; } + +select { width: 100%; } + +object, svg { display: inline-block; vertical-align: middle; } + +.center { margin-left: auto; margin-right: auto; } + +.spread { width: 100%; } + +p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; } + +.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; } + +/* Typography resets */ +div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; } + +/* Default Link Styles */ +a { color: #2ba6cb; text-decoration: none; line-height: inherit; } +a:hover, a:focus { color: #2795b6; } +a img { border: none; } + +/* Default paragraph styles */ +p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } +p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } + +/* Default header styles */ +h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; } +h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; } + +h1 { font-size: 2.125em; } + +h2 { font-size: 1.6875em; } + +h3, #toctitle, .sidebarblock > .content > 
.title { font-size: 1.375em; } + +h4 { font-size: 1.125em; } + +h5 { font-size: 1.125em; } + +h6 { font-size: 1em; } + +hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; } + +/* Helpful Typography Defaults */ +em, i { font-style: italic; line-height: inherit; } + +strong, b { font-weight: bold; line-height: inherit; } + +small { font-size: 60%; line-height: inherit; } + +code { font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; } + +/* Lists */ +ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } + +ul, ol { margin-left: 1.5em; } +ul.no-bullet, ol.no-bullet { margin-left: 1.5em; } + +/* Unordered Lists */ +ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ } +ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; } +ul.square { list-style-type: square; } +ul.circle { list-style-type: circle; } +ul.disc { list-style-type: disc; } +ul.no-bullet { list-style: none; } + +/* Ordered Lists */ +ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } + +/* Definition Lists */ +dl dt { margin-bottom: 0.3125em; font-weight: bold; } +dl dd { margin-bottom: 1.25em; } + +/* Abbreviations */ +abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; } + +abbr { text-transform: none; } + +/* Blockquotes */ +blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; } +blockquote cite { display: block; font-size: 0.8125em; color: #555555; } +blockquote cite:before { content: "\2014 \0020"; } +blockquote cite a, blockquote cite a:visited { color: #555555; } + +blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; } + +/* Microformats */ +.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; } +.vcard li { margin: 0; display: block; } +.vcard .fn { font-weight: bold; font-size: 0.9375em; } + +.vevent .summary { font-weight: bold; } +.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; } + +@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } + h1 { font-size: 2.75em; } + h2 { font-size: 2.3125em; } + h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } + h4 { font-size: 1.4375em; } } +/* Tables */ +table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; } +table thead, table tfoot { background: whitesmoke; font-weight: bold; } +table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; } +table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; } +table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; } +table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; } + +body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; } + +h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } + +.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; } +.clearfix:after, 
.float-group:after { clear: both; } + +*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; } +*:not(pre) > code.nobreak { word-wrap: normal; } +*:not(pre) > code.nowrap { white-space: nowrap; } + +pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; } + +em em { font-style: normal; } + +strong strong { font-weight: normal; } + +.keyseq { color: #555555; } + +kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } + +.keyseq kbd:first-child { margin-left: 0; } + +.keyseq kbd:last-child { margin-right: 0; } + +.menuseq, .menu { color: #090909; } + +b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; } + +b.button:before { content: "["; padding: 0 3px 0 2px; } + +b.button:after { content: "]"; padding: 0 2px 0 3px; } + +#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } +#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; } +#header:after, #content:after, #footnotes:after, #footer:after { clear: both; } + +#content { margin-top: 1.25em; } + +#content:before { content: none; } + +#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; } +#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; } +#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; } +#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; } +#header .details span:first-child { margin-left: -0.125em; } +#header .details span.email a { color: #6f6f6f; } +#header .details br { display: none; } +#header .details br + span:before { content: "\00a0\2013\00a0"; } +#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; } +#header .details br + span#revremark:before { content: "\00a0|\00a0"; } +#header #revnumber { text-transform: capitalize; } +#header #revnumber:after { content: "\00a0"; } + +#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } + +#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; } +#toc > ul { margin-left: 0.125em; } +#toc ul.sectlevel0 > li > a { font-style: italic; } +#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } +#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; } +#toc li { line-height: 1.3334; margin-top: 0.3334em; } +#toc a { text-decoration: 
none; } +#toc a:active { text-decoration: underline; } + +#toctitle { color: #6f6f6f; font-size: 1.2em; } + +@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } + body.toc2 { padding-left: 15em; padding-right: 0; } + #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } + #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } + #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } + #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } + #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } + body.toc2.toc-right { padding-left: 0; padding-right: 15em; } + body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } } +@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } + #toc.toc2 { width: 20em; } + #toc.toc2 #toctitle { font-size: 1.375em; } + #toc.toc2 > ul { font-size: 0.95em; } + #toc.toc2 ul ul { padding-left: 1.25em; } + body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } +#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } +#content #toc > :first-child { margin-top: 0; } +#content #toc > :last-child { margin-bottom: 0; } + +#footer { max-width: 100%; background-color: #222222; padding: 1.25em; } + +#footer-text { color: #dddddd; line-height: 1.44; } + +.sect1 { padding-bottom: 0.625em; } + +@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } } +.sect1 + .sect1 { border-top: 1px solid #dddddd; } + +#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; } +#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } +#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } +#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; } +#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; } + +.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } + 
+.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; } + +table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; } + +.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; } + +table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; } + +.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } +.admonitionblock > table td.icon { text-align: center; width: 80px; } +.admonitionblock > table td.icon img { max-width: initial; } +.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; } +.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; } +.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } + +.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; } +.exampleblock > .content > :first-child { margin-top: 0; } +.exampleblock > .content > :last-child { margin-bottom: 0; } + +.sidebarblock { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } +.sidebarblock > :first-child { margin-top: 0; } +.sidebarblock > :last-child { margin-bottom: 0; } +.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; } + +.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } + +.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; } +.sidebarblock .literalblock pre, .sidebarblock .listingblock pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; } + +.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; } +.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; } +@media only screen and 
(min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } } +@media only screen and (min-width: 1280px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } } + +.literalblock.output pre { color: #eeeeee; background-color: black; } + +.listingblock pre.highlightjs { padding: 0; } +.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; } + +.listingblock > .content { position: relative; } + +.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; } + +.listingblock:hover code[data-lang]:before { display: block; } + +.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; } + +.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; } + +table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } + +table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; } + +table.pyhltable td.code { padding-left: .75em; padding-right: 0; } + +pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; } + +pre.pygments .lineno { display: inline-block; margin-right: .25em; } + +table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; } + +.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } +.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; } +.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } +.quoteblock blockquote { margin: 0; padding: 0; border: 0; } +.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); } +.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; } +.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; } +.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; } +.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; } +.quoteblock .quoteblock blockquote:before { display: none; } + +.verseblock { margin: 0 1em 1.25em 1em; } +.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; } +.verseblock pre strong { font-weight: 400; } +.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } + +.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; } +.quoteblock .attribution br, .verseblock .attribution br { display: none; } +.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; } + +.quoteblock.abstract { margin: 0 0 1.25em 0; display: block; } +.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; } +.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; } + 
+table.tableblock { max-width: 100%; border-collapse: separate; } +table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; } + +table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; } + +table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; } + +table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; } + +table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; } + +table.grid-all * > tr > .tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; } + +table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; } + +table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; } + +table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; } + +table.frame-all { border-width: 1px; } + +table.frame-sides { border-width: 0 1px; } + +table.frame-topbot { border-width: 1px 0; } + +th.halign-left, td.halign-left { text-align: left; } + +th.halign-right, td.halign-right { text-align: right; } + +th.halign-center, td.halign-center { text-align: center; } + +th.valign-top, td.valign-top { vertical-align: top; } + +th.valign-bottom, td.valign-bottom { vertical-align: bottom; } + +th.valign-middle, td.valign-middle { vertical-align: middle; } + +table thead th, table tfoot th { font-weight: bold; } + +tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; } + +tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; } + +p.tableblock > code:only-child { background: none; padding: 0; } + +p.tableblock { font-size: 1em; } + +td > div.verse { white-space: pre; } + +ol { margin-left: 1.75em; } + +ul li ol { margin-left: 1.5em; } + +dl dd { margin-left: 1.125em; } + +dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } + +ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } + +ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; } + +ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; } + +ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; } + +ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; } + +ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; } +ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; } +ul.inline > li > * { display: block; } + +.unstyled dl dt { font-weight: normal; font-style: normal; } + +ol.arabic { list-style-type: decimal; } + +ol.decimal { list-style-type: decimal-leading-zero; } + +ol.loweralpha { list-style-type: lower-alpha; } + +ol.upperalpha { list-style-type: upper-alpha; } + +ol.lowerroman { list-style-type: lower-roman; } + +ol.upperroman { list-style-type: upper-roman; } + 
+ol.lowergreek { list-style-type: lower-greek; } + +.hdlist > table, .colist > table { border: 0; background: none; } +.hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; } + +td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } + +td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } + +.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } + +.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; } +.colist > table tr > td:first-of-type img { max-width: initial; } +.colist > table tr > td:last-of-type { padding: 0.25em 0; } + +.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; } + +.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; } +.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; } +.imageblock > .title { margin-bottom: 0; } +.imageblock.thumb, .imageblock.th { border-width: 6px; } +.imageblock.thumb > .title, .imageblock.th > .title { padding: 0 0.125em; } + +.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } +.image.left { margin-right: 0.625em; } +.image.right { margin-left: 0.625em; } + +a.image { text-decoration: none; display: inline-block; } +a.image object { pointer-events: none; } + +sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } +sup.footnote a, sup.footnoteref a { text-decoration: none; } +sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } + +#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } +#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; } +#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; } +#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; } +#footnotes .footnote:last-of-type { margin-bottom: 0; } +#content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } + +.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; } +.gist .file-data > table td.line-data { width: 99%; } + +div.unbreakable { page-break-inside: avoid; } + +.big { font-size: larger; } + +.small { font-size: smaller; } + +.underline { text-decoration: underline; } + +.overline { text-decoration: overline; } + +.line-through { text-decoration: line-through; } + +.aqua { color: #00bfbf; } + +.aqua-background { background-color: #00fafa; } + +.black { color: black; } + +.black-background { background-color: black; } + +.blue { color: #0000bf; } + +.blue-background { background-color: #0000fa; } + +.fuchsia { color: #bf00bf; } + +.fuchsia-background { background-color: #fa00fa; } + +.gray { color: #606060; } + +.gray-background { background-color: #7d7d7d; } + +.green { color: #006000; } + +.green-background { background-color: #007d00; } + +.lime { color: #00bf00; } + +.lime-background { background-color: #00fa00; } + +.maroon { color: #600000; } + +.maroon-background { background-color: #7d0000; } + +.navy { color: #000060; } + +.navy-background { background-color: #00007d; } + +.olive { color: #606000; } + +.olive-background { background-color: #7d7d00; } + +.purple { color: #600060; } + +.purple-background { background-color: #7d007d; } + +.red 
{ color: #bf0000; } + +.red-background { background-color: #fa0000; } + +.silver { color: #909090; } + +.silver-background { background-color: #bcbcbc; } + +.teal { color: #006060; } + +.teal-background { background-color: #007d7d; } + +.white { color: #bfbfbf; } + +.white-background { background-color: #fafafa; } + +.yellow { color: #bfbf00; } + +.yellow-background { background-color: #fafa00; } + +span.icon > .fa { cursor: default; } + +.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } +.admonitionblock td.icon .icon-note:before { content: "\f05a"; color: #207c98; } +.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } +.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; } +.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; } +.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; } + +.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } +.conum[data-value] * { color: #fff !important; } +.conum[data-value] + b { display: none; } +.conum[data-value]:after { content: attr(data-value); } +pre .conum[data-value] { position: relative; top: -0.125em; } + +b.conum * { color: inherit !important; } + +.conum:not([data-value]):empty { display: none; } + +.literalblock pre, .listingblock pre { background: #eeeeee; } diff --git a/node/src/docs/asciidoclet/overview.adoc b/node/src/docs/asciidoclet/overview.adoc new file mode 100644 index 0000000..7947331 --- /dev/null +++ b/node/src/docs/asciidoclet/overview.adoc @@ -0,0 +1,4 @@ += Elasticsearch Java client +Jörg Prante +Version 5.4.0.0 + diff --git a/node/src/main/java/org/xbib/elasticsearch/client/node/NodeBulkClient.java b/node/src/main/java/org/xbib/elasticsearch/client/node/NodeBulkClient.java new file mode 100644 index 0000000..103c1be --- /dev/null +++ b/node/src/main/java/org/xbib/elasticsearch/client/node/NodeBulkClient.java @@ -0,0 +1,79 @@ +package org.xbib.elasticsearch.client.node; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.plugins.Plugin; +import org.xbib.elasticsearch.client.AbstractClient; +import org.xbib.elasticsearch.client.BulkControl; +import org.xbib.elasticsearch.client.BulkMetric; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; + +/** + * + */ +public class NodeBulkClient extends AbstractClient { + + private static final Logger logger = LogManager.getLogger(NodeBulkClient.class.getName()); + + private Node node; + + public NodeBulkClient init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control) { + super.init(client, settings, metric, control); + return this; + } + + @Override + protected ElasticsearchClient createClient(Settings settings) throws IOException { + if (settings != null) { + String version = 
System.getProperty("os.name") + + " " + System.getProperty("java.vm.name") + + " " + System.getProperty("java.vm.vendor") + + " " + System.getProperty("java.runtime.version") + + " " + System.getProperty("java.vm.version"); + Settings effectiveSettings = Settings.builder().put(settings) + .put("node.client", true) + .put("node.master", false) + .put("node.data", false) + .build(); + logger.info("creating node client on {} with effective settings {}", + version, effectiveSettings.toString()); + Collection<Class<? extends Plugin>> plugins = Collections.emptyList(); + this.node = new BulkNode(new Environment(effectiveSettings, null), plugins); + try { + node.start(); + } catch (NodeValidationException e) { + throw new IOException(e); + } + return node.client(); + } + return null; + } + + @Override + public synchronized void shutdown() throws IOException { + super.shutdown(); + try { + if (node != null) { + logger.debug("closing node..."); + node.close(); + } + } catch (Exception e) { + logger.error(e.getMessage(), e); + } + } + + private static class BulkNode extends Node { + + BulkNode(Environment env, Collection<Class<? extends Plugin>> classpathPlugins) { + super(env, classpathPlugins); + } + } +} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/node/package-info.java b/node/src/main/java/org/xbib/elasticsearch/client/node/package-info.java similarity index 52% rename from src/main/java/org/xbib/elasticsearch/extras/client/node/package-info.java rename to node/src/main/java/org/xbib/elasticsearch/client/node/package-info.java index c5c0895..08795e8 100644 --- a/src/main/java/org/xbib/elasticsearch/extras/client/node/package-info.java +++ b/node/src/main/java/org/xbib/elasticsearch/client/node/package-info.java @@ -1,4 +1,4 @@ /** * Classes for Elasticsearch node client extras. */ -package org.xbib.elasticsearch.extras.client.node; +package org.xbib.elasticsearch.client.node; diff --git a/node/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods b/node/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods new file mode 100644 index 0000000..631ddb7 --- /dev/null +++ b/node/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods @@ -0,0 +1 @@ +org.xbib.elasticsearch.client.node.NodeBulkClient \ No newline at end of file diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientDuplicateIDTests.java b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientDuplicateIDTests.java new file mode 100644 index 0000000..5681d63 --- /dev/null +++ b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientDuplicateIDTests.java @@ -0,0 +1,64 @@ +package org.xbib.elasticsearch.client.node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; + +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +public class NodeBulkClientDuplicateIDTests extends 
ESSingleNodeTestCase { + + private static final Logger logger = LogManager.getLogger(NodeBulkClientDuplicateIDTests.class.getName()); + + private static final long MAX_ACTIONS = 100L; + + private static final long NUM_ACTIONS = 12345L; + + public void testDuplicateDocIDs() throws Exception { + final NodeBulkClient client = ClientBuilder.builder() + //.put(ClientBuilder.MAX_CONCURRENT_REQUESTS, 2) // avoid EsRejectedExecutionException + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(client(), NodeBulkClient.class); + try { + client.newIndex("test"); + for (int i = 0; i < NUM_ACTIONS; i++) { + client.index("test", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setIndices("test") + .setTypes("test") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("hits = {}", hits); + assertTrue(hits < NUM_ACTIONS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + logger.info("numactions = {}, submitted = {}, succeeded= {}, failed = {}", NUM_ACTIONS, + client.getMetric().getSubmitted().getCount(), + client.getMetric().getSucceeded().getCount(), + client.getMetric().getFailed().getCount()); + assertEquals(NUM_ACTIONS, client.getMetric().getSubmitted().getCount()); + assertEquals(NUM_ACTIONS, client.getMetric().getSucceeded().getCount()); + } + } +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeIndexAliasTest.java b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientIndexAliasTests.java similarity index 62% rename from src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeIndexAliasTest.java rename to node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientIndexAliasTests.java index eb5256c..7f36794 100644 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeIndexAliasTest.java +++ b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientIndexAliasTests.java @@ -1,41 +1,36 @@ -package org.xbib.elasticsearch.extras.client.node; +package org.xbib.elasticsearch.client.node; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilders; -import org.junit.Test; -import org.xbib.elasticsearch.NodeTestUtils; -import org.xbib.elasticsearch.extras.client.Clients; -import org.xbib.elasticsearch.extras.client.IndexAliasAdder; -import org.xbib.elasticsearch.extras.client.SimpleBulkControl; -import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; +import org.elasticsearch.test.ESSingleNodeTestCase; 
+import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.IndexAliasAdder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; import java.util.Arrays; import java.util.List; import java.util.Map; -import static org.junit.Assert.assertFalse; +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +public class NodeBulkClientIndexAliasTests extends ESSingleNodeTestCase { -/** - * - */ -public class BulkNodeIndexAliasTest extends NodeTestUtils { + private static final Logger logger = LogManager.getLogger(NodeBulkClientIndexAliasTests.class.getName()); - private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeIndexAliasTest.class.getSimpleName()); - - @Test public void testIndexAlias() throws Exception { - final BulkNodeClient client = Clients.builder() + final NodeBulkClient client = ClientBuilder.builder() .setMetric(new SimpleBulkMetric()) .setControl(new SimpleBulkControl()) - .toBulkNodeClient(client("1")); + .getClient(client(), NodeBulkClient.class); try { client.newIndex("test1234"); for (int i = 0; i < 1; i++) { - client.index("test1234", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}"); + client.index("test1234", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); } client.flushIngest(); client.refreshIndex("test1234"); @@ -45,7 +40,7 @@ public class BulkNodeIndexAliasTest extends NodeTestUtils { client.newIndex("test5678"); for (int i = 0; i < 1; i++) { - client.index("test5678", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}"); + client.index("test5678", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); } client.flushIngest(); client.refreshIndex("test5678"); @@ -66,7 +61,7 @@ public class BulkNodeIndexAliasTest extends NodeTestUtils { } catch (NoNodeAvailableException e) { logger.warn("skipping, no node available"); } finally { - client.waitForResponses("30s"); + client.waitForResponses(TimeValue.timeValueSeconds(30)); client.shutdown(); if (client.hasThrowable()) { logger.error("error", client.getThrowable()); diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportReplicaTest.java b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientReplicaTests.java similarity index 65% rename from src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportReplicaTest.java rename to node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientReplicaTests.java index bc8f449..c8e6186 100644 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportReplicaTest.java +++ b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientReplicaTests.java @@ -1,5 +1,8 @@ -package org.xbib.elasticsearch.extras.client.transport; +package org.xbib.elasticsearch.client.node; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.IndexStats; @@ -9,64 +12,52 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.search.SearchAction; import 
org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.indexing.IndexingStats; -import org.junit.Test; -import org.xbib.elasticsearch.NodeTestUtils; -import org.xbib.elasticsearch.extras.client.Clients; -import org.xbib.elasticsearch.extras.client.SimpleBulkControl; -import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.IndexingStats; +import org.elasticsearch.test.ESIntegTestCase; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; import java.util.Map; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) +public class NodeBulkClientReplicaTests extends ESIntegTestCase { -/** - * - */ -public class BulkTransportReplicaTest extends NodeTestUtils { + private static final Logger logger = LogManager.getLogger(NodeBulkClientReplicaTests.class.getName()); - private static final ESLogger logger = ESLoggerFactory.getLogger(BulkTransportReplicaTest.class.getSimpleName()); - - @Test public void testReplicaLevel() throws Exception { - // we need nodes for replica levels - startNode("2"); - startNode("3"); - startNode("4"); - - Settings settingsTest1 = Settings.settingsBuilder() - .put("index.number_of_shards", 2) - .put("index.number_of_replicas", 3) + Settings settingsTest1 = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 2) .build(); - Settings settingsTest2 = Settings.settingsBuilder() - .put("index.number_of_shards", 2) + Settings settingsTest2 = Settings.builder() + .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1) .build(); - final BulkTransportClient client = Clients.builder() - .put(getSettings()) + final NodeBulkClient client = ClientBuilder.builder() .setMetric(new SimpleBulkMetric()) .setControl(new SimpleBulkControl()) - .toBulkTransportClient(); + .getClient(client(), NodeBulkClient.class); + try { client.newIndex("test1", settingsTest1, null) .newIndex("test2", settingsTest2, null); - client.waitForCluster("GREEN", "30s"); + client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30)); for (int i = 0; i < 1234; i++) { - client.index("test1", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + client.index("test1", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); } for (int i = 0; i < 1234; i++) { - client.index("test2", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); + client.index("test2", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); } client.flushIngest(); - client.waitForResponses("30s"); + client.waitForResponses(TimeValue.timeValueSeconds(60)); } catch (NoNodeAvailableException e) { logger.warn("skipping, no node available"); } finally { @@ -75,12 +66,13 @@ public class BulkTransportReplicaTest extends NodeTestUtils { client.refreshIndex("test2"); 
SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) .setIndices("test1", "test2") - .setQuery(matchAllQuery()); + .setQuery(QueryBuilders.matchAllQuery()); long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); logger.info("query total hits={}", hits); assertEquals(2468, hits); IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(), - IndicesStatsAction.INSTANCE).all(); + IndicesStatsAction.INSTANCE) + .all(); IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet(); for (Map.Entry m : response.getIndices().entrySet()) { IndexStats indexStats = m.getValue(); diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientTests.java b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientTests.java new file mode 100644 index 0000000..6b261df --- /dev/null +++ b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientTests.java @@ -0,0 +1,176 @@ +package org.xbib.elasticsearch.client.node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +public class NodeBulkClientTests extends ESSingleNodeTestCase { + + private static final Logger logger = LogManager.getLogger(NodeBulkClientTests.class.getName()); + + private static final Long MAX_ACTIONS = 10L; + + private static final Long NUM_ACTIONS = 1234L; + + public void testNewIndexNodeClient() throws Exception { + final NodeBulkClient client = ClientBuilder.builder() + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(client(), NodeBulkClient.class); + client.newIndex("test"); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + + public void testBulkNodeClientMapping() throws Exception { + final NodeBulkClient client = ClientBuilder.builder() + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(client(), NodeBulkClient.class); + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("test") + 
.startObject("properties") + .startObject("location") + .field("type", "geo_point") + .endObject() + .endObject() + .endObject() + .endObject(); + client.mapping("test", builder.string()); + client.newIndex("test"); + GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test"); + GetMappingsResponse getMappingsResponse = + client.client().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet(); + logger.info("mappings={}", getMappingsResponse.getMappings()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + + public void testBulkNodeClientSingleDoc() throws Exception { + final NodeBulkClient client = ClientBuilder.builder() + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(30)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(client(), NodeBulkClient.class); + client.newIndex("test"); + client.index("test", "test", "1", false, "{ \"name\" : \"Hello World\"}"); // single doc ingest + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + assertEquals(1, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.shutdown(); + } + + public void testBulkNodeClientRandomDocs() throws Exception { + long numactions = NUM_ACTIONS; + final NodeBulkClient client = ClientBuilder.builder() + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(client(), NodeBulkClient.class); + try { + client.newIndex("test"); + for (int i = 0; i < NUM_ACTIONS; i++) { + client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + logger.info("assuring {} == {}", numactions, client.getMetric().getSucceeded().getCount()); + assertEquals(numactions, client.getMetric().getSucceeded().getCount()); + assertFalse(client.hasThrowable()); + client.shutdown(); + } + } + + public void testBulkNodeClientThreadedRandomDocs() throws Exception { + int maxthreads = Runtime.getRuntime().availableProcessors(); + Long maxactions = MAX_ACTIONS; + final Long maxloop = NUM_ACTIONS; + logger.info("NodeClient max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop); + final NodeBulkClient client = ClientBuilder.builder() + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions) + .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))// disable auto flush for this test + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(client(), NodeBulkClient.class); + try { + client.newIndex("test").startBulk("test", 30 * 1000, 1000); + ExecutorService executorService = Executors.newFixedThreadPool(maxthreads); + final CountDownLatch latch = new CountDownLatch(maxthreads); + for (int i = 0; i < maxthreads; i++) { + executorService.execute(() -> { + for (int i1 = 0; i1 < maxloop; i1++) { + client.index("test", "test", null, false, "{ \"name\" : \"" + 
randomAlphaOfLength(32) + "\"}"); + } + latch.countDown(); + }); + } + logger.info("waiting for max 30 seconds..."); + latch.await(30, TimeUnit.SECONDS); + logger.info("flush..."); + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + logger.info("got all responses, executor service shutdown..."); + executorService.shutdown(); + logger.info("executor service is shut down"); + client.stopBulk("test"); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + logger.info("assuring {} == {}", maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); + assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setIndices("test") + .setQuery(QueryBuilders.matchAllQuery()) + .setSize(0); + assertEquals(maxthreads * maxloop, + searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); + client.shutdown(); + } + } +} diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientUpdateReplicaLevelTests.java b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientUpdateReplicaLevelTests.java new file mode 100644 index 0000000..97ab80d --- /dev/null +++ b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientUpdateReplicaLevelTests.java @@ -0,0 +1,57 @@ +package org.xbib.elasticsearch.client.node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESIntegTestCase; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; + +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) +public class NodeBulkClientUpdateReplicaLevelTests extends ESIntegTestCase { + + private static final Logger logger = LogManager.getLogger(NodeBulkClientUpdateReplicaLevelTests.class.getName()); + + public void testUpdateReplicaLevel() throws Exception { + + int numberOfShards = 1; + int replicaLevel = 2; + + int shardsAfterReplica; + + Settings settings = Settings.builder() + .put("index.number_of_shards", numberOfShards) + .put("index.number_of_replicas", 0) + .build(); + + final NodeBulkClient client = ClientBuilder.builder() + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(client(), NodeBulkClient.class); + + try { + client.newIndex("replicatest", settings, null); + client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30)); + for (int i = 0; i < 12345; i++) { + client.index("replicatest", "replicatest", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + shardsAfterReplica = client.updateReplicaLevel("replicatest", replicaLevel); + assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1)); + } catch 
(NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + } + } +} diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/TestRunnerThreadsFilter.java b/node/src/test/java/org/xbib/elasticsearch/client/node/TestRunnerThreadsFilter.java new file mode 100644 index 0000000..6d0252d --- /dev/null +++ b/node/src/test/java/org/xbib/elasticsearch/client/node/TestRunnerThreadsFilter.java @@ -0,0 +1,11 @@ +package org.xbib.elasticsearch.client.node; + +import com.carrotsearch.randomizedtesting.ThreadFilter; + +public class TestRunnerThreadsFilter implements ThreadFilter { + + @Override + public boolean reject(Thread thread) { + return thread.getName().startsWith("ObjectCleanerThread"); + } +} diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/package-info.java b/node/src/test/java/org/xbib/elasticsearch/client/node/package-info.java new file mode 100644 index 0000000..f0ef244 --- /dev/null +++ b/node/src/test/java/org/xbib/elasticsearch/client/node/package-info.java @@ -0,0 +1,4 @@ +/** + * Classes for testing Elasticsearch node client extras. + */ +package org.xbib.elasticsearch.client.node; diff --git a/node/src/test/resources/log4j2.xml b/node/src/test/resources/log4j2.xml new file mode 100644 index 0000000..b175dfc --- /dev/null +++ b/node/src/test/resources/log4j2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/integration-test/resources/org/xbib/elasticsearch/extras/client/settings.json b/node/src/test/resources/org/xbib/elasticsearch/client/node/settings.json similarity index 100% rename from src/integration-test/resources/org/xbib/elasticsearch/extras/client/settings.json rename to node/src/test/resources/org/xbib/elasticsearch/client/node/settings.json diff --git a/settings.gradle b/settings.gradle index ef50653..cc4c55d 100644 --- a/settings.gradle +++ b/settings.gradle @@ -1 +1,5 @@ -rootProject.name = 'elasticsearch-extras-client' +include 'api' +include 'common' +include 'node' +include 'transport' +include 'http' diff --git a/src/integration-test/java/org/elasticsearch/node/MockNode.java b/src/integration-test/java/org/elasticsearch/node/MockNode.java deleted file mode 100644 index b0c02eb..0000000 --- a/src/integration-test/java/org/elasticsearch/node/MockNode.java +++ /dev/null @@ -1,38 +0,0 @@ -package org.elasticsearch.node; - -import org.elasticsearch.Version; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.plugins.Plugin; - -import java.util.ArrayList; -import java.util.Collection; - -/** - * - */ -public class MockNode extends Node { - - public MockNode() { - super(Settings.EMPTY); - } - - public MockNode(Settings settings) { - super(settings); - } - - public MockNode(Settings settings, Collection> classpathPlugins) { - super(InternalSettingsPreparer.prepareEnvironment(settings, null), Version.CURRENT, classpathPlugins); - } - - public MockNode(Settings settings, Class classpathPlugin) { - this(settings, list(classpathPlugin)); - } - - private static Collection> list(Class classpathPlugin) { - Collection> list = new ArrayList<>(); - list.add(classpathPlugin); - return list; - } - -} diff --git a/src/integration-test/java/org/elasticsearch/node/package-info.java b/src/integration-test/java/org/elasticsearch/node/package-info.java deleted 
file mode 100644 index f299cbc..0000000 --- a/src/integration-test/java/org/elasticsearch/node/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** - * Classes to support Elasticsearch node creation. - */ -package org.elasticsearch.node; diff --git a/src/integration-test/java/org/xbib/elasticsearch/NodeTestUtils.java b/src/integration-test/java/org/xbib/elasticsearch/NodeTestUtils.java deleted file mode 100644 index d098332..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/NodeTestUtils.java +++ /dev/null @@ -1,213 +0,0 @@ -package org.xbib.elasticsearch; - -import static org.elasticsearch.common.settings.Settings.settingsBuilder; - -import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.client.support.AbstractClient; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.node.MockNode; -import org.elasticsearch.node.Node; -import org.junit.After; -import org.junit.Before; -import org.xbib.elasticsearch.extras.client.NetworkUtils; - -import java.io.IOException; -import java.nio.file.*; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.HashMap; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * - */ -public class NodeTestUtils { - - private static final ESLogger logger = ESLoggerFactory.getLogger("test"); - - private static Random random = new Random(); - - private static char[] numbersAndLetters = ("0123456789abcdefghijklmnopqrstuvwxyz").toCharArray(); - - private Map nodes = new HashMap<>(); - - private Map clients = new HashMap<>(); - - private AtomicInteger counter = new AtomicInteger(); - - private String cluster; - - private String host; - - private int port; - - private static void deleteFiles() throws IOException { - Path directory = Paths.get(System.getProperty("path.home") + "/data"); - Files.walkFileTree(directory, new SimpleFileVisitor() { - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - Files.delete(file); - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { - Files.delete(dir); - return FileVisitResult.CONTINUE; - } - - }); - - } - - @Before - public void startNodes() { - try { - logger.info("starting"); - setClusterName(); - startNode("1"); - findNodeAddress(); - try { - ClusterHealthResponse healthResponse = client("1").execute(ClusterHealthAction.INSTANCE, - new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN) - .timeout(TimeValue.timeValueSeconds(30))).actionGet(); - if (healthResponse != null && healthResponse.isTimedOut()) { - throw new IOException("cluster state is " + healthResponse.getStatus().name() - + ", from here on, everything will fail!"); - } - } catch (ElasticsearchTimeoutException e) { - throw new 
IOException("cluster does not respond to health request, cowardly refusing to continue"); - } - } catch (Throwable t) { - logger.error("startNodes failed", t); - } - } - - @After - public void stopNodes() { - try { - closeNodes(); - } catch (Exception e) { - logger.error("can not close nodes", e); - } finally { - try { - deleteFiles(); - logger.info("data files wiped"); - Thread.sleep(2000L); // let OS commit changes - } catch (IOException e) { - logger.error(e.getMessage(), e); - } catch (InterruptedException e) { - // ignore - } - } - } - - protected void setClusterName() { - this.cluster = "test-helper-cluster-" - + NetworkUtils.getLocalAddress().getHostName() - + "-" + System.getProperty("user.name") - + "-" + counter.incrementAndGet(); - } - - protected String getClusterName() { - return cluster; - } - - protected Settings getSettings() { - return settingsBuilder() - .put("host", host) - .put("port", port) - .put("cluster.name", cluster) - .put("path.home", getHome()) - .build(); - } - - protected Settings getNodeSettings() { - return settingsBuilder() - .put("cluster.name", cluster) - .put("cluster.routing.schedule", "50ms") - .put("cluster.routing.allocation.disk.threshold_enabled", false) - .put("discovery.zen.multicast.enabled", true) - .put("discovery.zen.multicast.ping_timeout", "5s") - .put("http.enabled", true) - .put("threadpool.bulk.size", Runtime.getRuntime().availableProcessors()) - .put("threadpool.bulk.queue_size", 16 * Runtime.getRuntime().availableProcessors()) // default is 50, too low - .put("index.number_of_replicas", 0) - .put("path.home", getHome()) - .build(); - } - - protected String getHome() { - return System.getProperty("path.home"); - } - - public void startNode(String id) throws IOException { - buildNode(id).start(); - } - - public AbstractClient client(String id) { - return clients.get(id); - } - - private void closeNodes() throws IOException { - logger.info("closing all clients"); - for (AbstractClient client : clients.values()) { - client.close(); - } - clients.clear(); - logger.info("closing all nodes"); - for (Node node : nodes.values()) { - if (node != null) { - node.close(); - } - } - nodes.clear(); - logger.info("all nodes closed"); - } - - protected void findNodeAddress() { - NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().transport(true); - NodesInfoResponse response = client("1").admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); - Object obj = response.iterator().next().getTransport().getAddress() - .publishAddress(); - if (obj instanceof InetSocketTransportAddress) { - InetSocketTransportAddress address = (InetSocketTransportAddress) obj; - host = address.address().getHostName(); - port = address.address().getPort(); - } - } - - private Node buildNode(String id) throws IOException { - Settings nodeSettings = settingsBuilder() - .put(getNodeSettings()) - .put("name", id) - .build(); - logger.info("settings={}", nodeSettings.getAsMap()); - Node node = new MockNode(nodeSettings); - AbstractClient client = (AbstractClient) node.client(); - nodes.put(id, node); - clients.put(id, client); - logger.info("clients={}", clients); - return node; - } - - protected String randomString(int len) { - final char[] buf = new char[len]; - final int n = numbersAndLetters.length - 1; - for (int i = 0; i < buf.length; i++) { - buf[i] = numbersAndLetters[random.nextInt(n)]; - } - return new String(buf); - } -} diff --git a/src/integration-test/java/org/xbib/elasticsearch/SimpleTest.java 
b/src/integration-test/java/org/xbib/elasticsearch/SimpleTest.java deleted file mode 100644 index 0af13df..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/SimpleTest.java +++ /dev/null @@ -1,59 +0,0 @@ -package org.xbib.elasticsearch; - -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.junit.Assert.assertEquals; - -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; -import org.elasticsearch.action.index.IndexAction; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.common.settings.Settings; -import org.junit.Test; - -/** - * - */ -public class SimpleTest extends NodeTestUtils { - - protected Settings getNodeSettings() { - return settingsBuilder() - .put("path.home", System.getProperty("path.home")) - .put("index.analysis.analyzer.default.filter.0", "lowercase") - .put("index.analysis.analyzer.default.filter.1", "trim") - .put("index.analysis.analyzer.default.tokenizer", "keyword") - .build(); - } - - @Test - public void test() throws Exception { - try { - DeleteIndexRequestBuilder deleteIndexRequestBuilder = - new DeleteIndexRequestBuilder(client("1"), DeleteIndexAction.INSTANCE, "test"); - deleteIndexRequestBuilder.execute().actionGet(); - } catch (Exception e) { - // ignore - } - IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(client("1"), IndexAction.INSTANCE); - indexRequestBuilder - .setIndex("test") - .setType("test") - .setId("1") - .setSource(jsonBuilder().startObject().field("field", - "1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8").endObject()) - .setRefresh(true) - .execute() - .actionGet(); - String doc = client("1").prepareSearch("test") - .setTypes("test") - .setQuery(matchQuery("field", - "1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8")) - .execute() - .actionGet() - .getHits().getAt(0).getSourceAsString(); - - assertEquals(doc, - "{\"field\":\"1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8\"}"); - } -} diff --git a/src/integration-test/java/org/xbib/elasticsearch/WildcardTest.java b/src/integration-test/java/org/xbib/elasticsearch/WildcardTest.java deleted file mode 100644 index 6e252d1..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/WildcardTest.java +++ /dev/null @@ -1,69 +0,0 @@ -package org.xbib.elasticsearch; - -import static org.elasticsearch.client.Requests.indexRequest; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; - -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.query.QueryBuilder; -import org.junit.Test; - -import java.io.IOException; - -/** - * - */ -public class WildcardTest extends NodeTestUtils { - - protected Settings getNodeSettings() { - return settingsBuilder() - .put("cluster.name", getClusterName()) - .put("cluster.routing.allocation.disk.threshold_enabled", false) - .put("discovery.zen.multicast.enabled", false) - .put("http.enabled", false) - .put("path.home", System.getProperty("path.home")) - 
.put("index.number_of_shards", 1) - .put("index.number_of_replicas", 0) - .build(); - } - - @Test - public void testWildcard() throws Exception { - index(client("1"), "1", "010"); - index(client("1"), "2", "0*0"); - // exact - validateCount(client("1"), queryStringQuery("010").defaultField("field"), 1); - validateCount(client("1"), queryStringQuery("0\\*0").defaultField("field"), 1); - // pattern - validateCount(client("1"), queryStringQuery("0*0").defaultField("field"), 1); // 2? - validateCount(client("1"), queryStringQuery("0?0").defaultField("field"), 1); // 2? - validateCount(client("1"), queryStringQuery("0**0").defaultField("field"), 1); // 2? - validateCount(client("1"), queryStringQuery("0??0").defaultField("field"), 0); - validateCount(client("1"), queryStringQuery("*10").defaultField("field"), 1); - validateCount(client("1"), queryStringQuery("*1*").defaultField("field"), 1); - validateCount(client("1"), queryStringQuery("*\\*0").defaultField("field"), 0); // 1? - validateCount(client("1"), queryStringQuery("*\\**").defaultField("field"), 0); // 1? - } - - private void index(Client client, String id, String fieldValue) throws IOException { - client.index(indexRequest() - .index("index").type("type").id(id) - .source(jsonBuilder().startObject().field("field", fieldValue).endObject()) - .refresh(true)).actionGet(); - } - - private long count(Client client, QueryBuilder queryBuilder) { - return client.prepareSearch("index").setTypes("type") - .setQuery(queryBuilder) - .execute().actionGet().getHits().getTotalHits(); - } - - private void validateCount(Client client, QueryBuilder queryBuilder, long expectedHits) { - final long actualHits = count(client, queryBuilder); - if (actualHits != expectedHits) { - throw new RuntimeException("actualHits=" + actualHits + ", expectedHits=" + expectedHits); - } - } -} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClientTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClientTest.java deleted file mode 100644 index 77b004f..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClientTest.java +++ /dev/null @@ -1,208 +0,0 @@ -package org.xbib.elasticsearch.extras.client.node; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.junit.Before; -import org.junit.Test; -import org.xbib.elasticsearch.NodeTestUtils; -import org.xbib.elasticsearch.extras.client.Clients; -import org.xbib.elasticsearch.extras.client.SimpleBulkControl; -import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; - -import java.util.concurrent.CountDownLatch; -import 
java.util.concurrent.ExecutionException; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -/** - * - */ -public class BulkNodeClientTest extends NodeTestUtils { - - private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeClientTest.class.getSimpleName()); - - private static final Long MAX_ACTIONS = 1000L; - - private static final Long NUM_ACTIONS = 1234L; - - @Before - public void startNodes() { - try { - super.startNodes(); - startNode("2"); - } catch (Throwable t) { - logger.error("startNodes failed", t); - } - } - - @Test - public void testNewIndexNodeClient() throws Exception { - final BulkNodeClient client = Clients.builder() - .put(Clients.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkNodeClient(client("1")); - client.newIndex("test"); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - - @Test - public void testMappingNodeClient() throws Exception { - final BulkNodeClient client = Clients.builder() - .put(Clients.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkNodeClient(client("1")); - XContentBuilder builder = jsonBuilder() - .startObject() - .startObject("test") - .startObject("properties") - .startObject("location") - .field("type", "geo_point") - .endObject() - .endObject() - .endObject() - .endObject(); - client.mapping("test", builder.string()); - client.newIndex("test"); - GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test"); - GetMappingsResponse getMappingsResponse = - client.client().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet(); - logger.info("mappings={}", getMappingsResponse.getMappings()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - - @Test - public void testSingleDocNodeClient() { - final BulkNodeClient client = Clients.builder() - .put(Clients.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put(Clients.FLUSH_INTERVAL, TimeValue.timeValueSeconds(30)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkNodeClient(client("1")); - try { - client.newIndex("test"); - client.index("test", "test", "1", "{ \"name\" : \"Hello World\"}"); // single doc ingest - client.flushIngest(); - client.waitForResponses("30s"); - } catch (InterruptedException e) { - // ignore - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } catch (ExecutionException e) { - logger.error(e.getMessage(), e); - } finally { - assertEquals(1, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - } - - @Test - public void testRandomDocsNodeClient() throws Exception { - long numactions = NUM_ACTIONS; - final BulkNodeClient client = Clients.builder() - .put(Clients.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put(Clients.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkNodeClient(client("1")); - try { - client.newIndex("test"); - for (int i = 0; i < NUM_ACTIONS; i++) { - client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); - } - 
client.flushIngest(); - client.waitForResponses("30s"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - assertEquals(numactions, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - } - - @Test - public void testThreadedRandomDocsNodeClient() throws Exception { - int maxthreads = Runtime.getRuntime().availableProcessors(); - Long maxactions = MAX_ACTIONS; - final Long maxloop = NUM_ACTIONS; - logger.info("NodeClient max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop); - final BulkNodeClient client = Clients.builder() - .put(Clients.MAX_ACTIONS_PER_REQUEST, maxactions) - .put(Clients.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))// disable auto flush for this test - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkNodeClient(client("1")); - try { - client.newIndex("test") - .startBulk("test", -1, 1000); - ThreadPoolExecutor pool = EsExecutors.newFixed("bulk-nodeclient-test", maxthreads, 30, - EsExecutors.daemonThreadFactory("bulk-nodeclient-test")); - final CountDownLatch latch = new CountDownLatch(maxthreads); - for (int i = 0; i < maxthreads; i++) { - pool.execute(new Runnable() { - public void run() { - for (int i = 0; i < maxloop; i++) { - client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); - } - latch.countDown(); - } - }); - } - logger.info("waiting for max 30 seconds..."); - latch.await(30, TimeUnit.SECONDS); - logger.info("flush..."); - client.flushIngest(); - client.waitForResponses("30s"); - logger.info("got all responses, thread pool shutdown..."); - pool.shutdown(); - logger.info("pool is shut down"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.stopBulk("test"); - assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.refreshIndex("test"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setQuery(QueryBuilders.matchAllQuery()).setSize(0); - assertEquals(maxthreads * maxloop, - searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); - client.shutdown(); - } - } - -} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClusterBlockTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClusterBlockTest.java deleted file mode 100644 index 09c628d..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClusterBlockTest.java +++ /dev/null @@ -1,53 +0,0 @@ -package org.xbib.elasticsearch.extras.client.node; - -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.junit.Before; -import org.junit.Test; -import org.xbib.elasticsearch.NodeTestUtils; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; - -/** - * - */ -public class 
BulkNodeClusterBlockTest extends NodeTestUtils { - - private static final ESLogger logger = ESLoggerFactory.getLogger("test"); - - @Before - public void startNodes() { - try { - setClusterName(); - startNode("1"); - findNodeAddress(); - // do not wait for green health state - logger.info("ready"); - } catch (Throwable t) { - logger.error("startNodes failed", t); - } - } - - protected Settings getNodeSettings() { - return Settings.settingsBuilder() - .put(super.getNodeSettings()) - .put("discovery.zen.minimum_master_nodes", 2) // block until we have two nodes - .build(); - } - - @Test(expected = ClusterBlockException.class) - public void testClusterBlock() throws Exception { - BulkRequestBuilder brb = client("1").prepareBulk(); - XContentBuilder builder = jsonBuilder().startObject().field("field1", "value1").endObject(); - String jsonString = builder.string(); - IndexRequestBuilder irb = client("1").prepareIndex("test", "test", "1").setSource(jsonString); - brb.add(irb); - brb.execute().actionGet(); - } - -} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeDuplicateIDTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeDuplicateIDTest.java deleted file mode 100644 index 7c11526..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeDuplicateIDTest.java +++ /dev/null @@ -1,62 +0,0 @@ -package org.xbib.elasticsearch.extras.client.node; - -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.junit.Test; -import org.xbib.elasticsearch.NodeTestUtils; -import org.xbib.elasticsearch.extras.client.Clients; -import org.xbib.elasticsearch.extras.client.SimpleBulkControl; -import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; - -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.junit.Assert.*; - -/** - * - */ -public class BulkNodeDuplicateIDTest extends NodeTestUtils { - - private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeDuplicateIDTest.class.getSimpleName()); - - private static final Long MAX_ACTIONS = 1000L; - - private static final Long NUM_ACTIONS = 12345L; - - @Test - public void testDuplicateDocIDs() throws Exception { - long numactions = NUM_ACTIONS; - final BulkNodeClient client = Clients.builder() - .put(Clients.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkNodeClient(client("1")); - try { - client.newIndex("test"); - for (int i = 0; i < NUM_ACTIONS; i++) { - client.index("test", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - client.refreshIndex("test"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test") - .setTypes("test") - .setQuery(matchAllQuery()); - long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); - logger.info("hits = {}", hits); - assertTrue(hits < NUM_ACTIONS); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.shutdown(); - assertEquals(numactions, client.getMetric().getSucceeded().getCount()); - if 
(client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } -} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeUpdateReplicaLevelTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeUpdateReplicaLevelTest.java deleted file mode 100644 index 5dc9202..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeUpdateReplicaLevelTest.java +++ /dev/null @@ -1,66 +0,0 @@ -package org.xbib.elasticsearch.extras.client.node; - -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; -import org.junit.Test; -import org.xbib.elasticsearch.NodeTestUtils; -import org.xbib.elasticsearch.extras.client.Clients; -import org.xbib.elasticsearch.extras.client.SimpleBulkControl; -import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -/** - * - */ -public class BulkNodeUpdateReplicaLevelTest extends NodeTestUtils { - - private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeUpdateReplicaLevelTest.class.getSimpleName()); - - @Test - public void testUpdateReplicaLevel() throws Exception { - - int numberOfShards = 2; - int replicaLevel = 3; - - // we need 3 nodes for replica level 3 - startNode("2"); - startNode("3"); - - int shardsAfterReplica; - - Settings settings = Settings.settingsBuilder() - .put("index.number_of_shards", numberOfShards) - .put("index.number_of_replicas", 0) - .build(); - - final BulkNodeClient client = Clients.builder() - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkNodeClient(client("1")); - - try { - client.newIndex("replicatest", settings, null); - client.waitForCluster("GREEN", "30s"); - for (int i = 0; i < 12345; i++) { - client.index("replicatest", "replicatest", null, "{ \"name\" : \"" + randomString(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - shardsAfterReplica = client.updateReplicaLevel("replicatest", replicaLevel); - assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1)); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } - -} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClientTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClientTest.java deleted file mode 100644 index c7c82e0..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClientTest.java +++ /dev/null @@ -1,201 +0,0 @@ -package org.xbib.elasticsearch.extras.client.transport; - -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import 
org.elasticsearch.index.query.QueryBuilders; -import org.junit.Before; -import org.junit.Test; -import org.xbib.elasticsearch.NodeTestUtils; -import org.xbib.elasticsearch.extras.client.Clients; -import org.xbib.elasticsearch.extras.client.SimpleBulkControl; -import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; - -import java.io.IOException; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -/** - * - */ -public class BulkTransportClientTest extends NodeTestUtils { - - private static final ESLogger logger = ESLoggerFactory.getLogger(BulkTransportClientTest.class.getSimpleName()); - - private static final Long MAX_ACTIONS = 1000L; - - private static final Long NUM_ACTIONS = 1234L; - - @Before - public void startNodes() { - try { - super.startNodes(); - startNode("2"); - } catch (Throwable t) { - logger.error("startNodes failed", t); - } - } - - @Test - public void testBulkClient() throws IOException { - final BulkTransportClient client = Clients.builder() - .put(getSettings()) - .put(Clients.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkTransportClient(); - client.newIndex("test"); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - try { - client.deleteIndex("test") - .newIndex("test") - .deleteIndex("test"); - } catch (NoNodeAvailableException e) { - logger.error("no node available"); - } finally { - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - } - - @Test - public void testSingleDocBulkClient() throws IOException { - final BulkTransportClient client = Clients.builder() - .put(getSettings()) - .put(Clients.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put(Clients.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkTransportClient(); - try { - client.newIndex("test"); - client.index("test", "test", "1", "{ \"name\" : \"Hello World\"}"); // single doc ingest - client.flushIngest(); - client.waitForResponses("30s"); - } catch (InterruptedException e) { - // ignore - } catch (ExecutionException e) { - logger.error(e.getMessage(), e); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - assertEquals(1, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - } - - @Test - public void testRandomDocsBulkClient() throws IOException { - long numactions = NUM_ACTIONS; - final BulkTransportClient client = Clients.builder() - .put(getSettings()) - .put(Clients.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put(Clients.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkTransportClient(); - try { - client.newIndex("test"); - for (int i = 0; i < NUM_ACTIONS; i++) { - client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - } catch (InterruptedException e) { - // ignore - } catch 
(ExecutionException e) { - logger.error(e.getMessage(), e); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - assertEquals(numactions, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - } - - @Test - public void testThreadedRandomDocsBulkClient() throws Exception { - int maxthreads = Runtime.getRuntime().availableProcessors(); - long maxactions = MAX_ACTIONS; - final long maxloop = NUM_ACTIONS; - - Settings settingsForIndex = Settings.settingsBuilder() - .put("index.number_of_shards", 2) - .put("index.number_of_replicas", 1) - .build(); - - final BulkTransportClient client = Clients.builder() - .put(getSettings()) - .put(Clients.MAX_ACTIONS_PER_REQUEST, maxactions) - .put(Clients.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) // = disable autoflush for this test - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkTransportClient(); - try { - client.newIndex("test", settingsForIndex, null) - .startBulk("test", -1, 1000); - ThreadPoolExecutor pool = - EsExecutors.newFixed("bulkclient-test", maxthreads, 30, EsExecutors.daemonThreadFactory("bulkclient-test")); - final CountDownLatch latch = new CountDownLatch(maxthreads); - for (int i = 0; i < maxthreads; i++) { - pool.execute(() -> { - for (int i1 = 0; i1 < maxloop; i1++) { - client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}"); - } - latch.countDown(); - }); - } - logger.info("waiting for max 30 seconds..."); - latch.await(30, TimeUnit.SECONDS); - logger.info("client flush ..."); - client.flushIngest(); - client.waitForResponses("30s"); - logger.info("thread pool to be shut down ..."); - pool.shutdown(); - logger.info("poot shut down"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.stopBulk("test"); - assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.refreshIndex("test"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - // to avoid NPE at org.elasticsearch.action.search.SearchRequest.writeTo(SearchRequest.java:580) - .setIndices("_all") - .setQuery(QueryBuilders.matchAllQuery()) - .setSize(0); - assertEquals(maxthreads * maxloop, - searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); - client.shutdown(); - } - } - -} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportDuplicateIDTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportDuplicateIDTest.java deleted file mode 100644 index c087601..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportDuplicateIDTest.java +++ /dev/null @@ -1,60 +0,0 @@ -package org.xbib.elasticsearch.extras.client.transport; - -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.junit.Test; -import org.xbib.elasticsearch.NodeTestUtils; -import 
org.xbib.elasticsearch.extras.client.Clients; -import org.xbib.elasticsearch.extras.client.SimpleBulkControl; -import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; - -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.junit.Assert.*; - -public class BulkTransportDuplicateIDTest extends NodeTestUtils { - - private final static ESLogger logger = ESLoggerFactory.getLogger(BulkTransportDuplicateIDTest.class.getSimpleName()); - - private final static Long MAX_ACTIONS = 1000L; - - private final static Long NUM_ACTIONS = 12345L; - - @Test - public void testDuplicateDocIDs() throws Exception { - long numactions = NUM_ACTIONS; - final BulkTransportClient client = Clients.builder() - .put(getSettings()) - .put(Clients.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkTransportClient(); - try { - client.newIndex("test"); - for (int i = 0; i < NUM_ACTIONS; i++) { - client.index("test", "test", randomString(1), "{ \"name\" : \"" + randomString(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - client.refreshIndex("test"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test") - .setTypes("test") - .setQuery(matchAllQuery()); - long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); - logger.info("hits = {}", hits); - assertTrue(hits < NUM_ACTIONS); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.shutdown(); - assertEquals(numactions, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } -} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportUpdateReplicaLevelTest.java b/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportUpdateReplicaLevelTest.java deleted file mode 100644 index 1f56df8..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportUpdateReplicaLevelTest.java +++ /dev/null @@ -1,68 +0,0 @@ -package org.xbib.elasticsearch.extras.client.transport; - -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; -import org.junit.Test; -import org.xbib.elasticsearch.NodeTestUtils; -import org.xbib.elasticsearch.extras.client.Clients; -import org.xbib.elasticsearch.extras.client.SimpleBulkControl; -import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -/** - * - */ -public class BulkTransportUpdateReplicaLevelTest extends NodeTestUtils { - - private static final ESLogger logger = - ESLoggerFactory.getLogger(BulkTransportUpdateReplicaLevelTest.class.getSimpleName()); - - @Test - public void testUpdateReplicaLevel() throws Exception { - - int numberOfShards = 2; - int replicaLevel = 3; - - // we need 3 nodes for replica level 3 - startNode("2"); - startNode("3"); - - int shardsAfterReplica; - - Settings settings = Settings.settingsBuilder() - .put("index.number_of_shards", numberOfShards) - .put("index.number_of_replicas", 0) - .build(); - - final BulkTransportClient client = 
Clients.builder() - .put(getSettings()) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .toBulkTransportClient(); - - try { - client.newIndex("replicatest", settings, null); - client.waitForCluster("GREEN", "30s"); - for (int i = 0; i < 12345; i++) { - client.index("replicatest", "replicatest", null, "{ \"name\" : \"" + randomString(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - shardsAfterReplica = client.updateReplicaLevel("replicatest", replicaLevel); - assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1)); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } - -} diff --git a/src/integration-test/java/org/xbib/elasticsearch/package-info.java b/src/integration-test/java/org/xbib/elasticsearch/package-info.java deleted file mode 100644 index 2958ce1..0000000 --- a/src/integration-test/java/org/xbib/elasticsearch/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** - * Test classes for testing Elasticsearch. - */ -package org.xbib.elasticsearch; \ No newline at end of file diff --git a/src/integration-test/java/suites/BulkNodeTestSuite.java b/src/integration-test/java/suites/BulkNodeTestSuite.java deleted file mode 100644 index caac820..0000000 --- a/src/integration-test/java/suites/BulkNodeTestSuite.java +++ /dev/null @@ -1,23 +0,0 @@ -package suites; - -import org.junit.runner.RunWith; -import org.junit.runners.Suite; -import org.xbib.elasticsearch.extras.client.node.BulkNodeClientTest; -import org.xbib.elasticsearch.extras.client.node.BulkNodeDuplicateIDTest; -import org.xbib.elasticsearch.extras.client.node.BulkNodeIndexAliasTest; -import org.xbib.elasticsearch.extras.client.node.BulkNodeReplicaTest; -import org.xbib.elasticsearch.extras.client.node.BulkNodeUpdateReplicaLevelTest; - -/** - * - */ -@RunWith(ListenerSuite.class) -@Suite.SuiteClasses({ - BulkNodeClientTest.class, - BulkNodeDuplicateIDTest.class, - BulkNodeReplicaTest.class, - BulkNodeUpdateReplicaLevelTest.class, - BulkNodeIndexAliasTest.class -}) -public class BulkNodeTestSuite { -} diff --git a/src/integration-test/java/suites/BulkTransportTestSuite.java b/src/integration-test/java/suites/BulkTransportTestSuite.java deleted file mode 100644 index f429dfc..0000000 --- a/src/integration-test/java/suites/BulkTransportTestSuite.java +++ /dev/null @@ -1,22 +0,0 @@ -package suites; - -import org.junit.runner.RunWith; -import org.junit.runners.Suite; -import org.xbib.elasticsearch.extras.client.transport.BulkTransportClientTest; -import org.xbib.elasticsearch.extras.client.transport.BulkTransportDuplicateIDTest; -import org.xbib.elasticsearch.extras.client.transport.BulkTransportReplicaTest; -import org.xbib.elasticsearch.extras.client.transport.BulkTransportUpdateReplicaLevelTest; - -/** - * - */ -@RunWith(ListenerSuite.class) -@Suite.SuiteClasses({ - BulkTransportClientTest.class, - BulkTransportDuplicateIDTest.class, - BulkTransportReplicaTest.class, - BulkTransportUpdateReplicaLevelTest.class -}) -public class BulkTransportTestSuite { - -} diff --git a/src/integration-test/java/suites/ListenerSuite.java b/src/integration-test/java/suites/ListenerSuite.java deleted file mode 100644 index c02d371..0000000 --- a/src/integration-test/java/suites/ListenerSuite.java +++ /dev/null @@ -1,23 +0,0 @@ -package suites; - -import org.junit.runner.Runner; -import 
org.junit.runner.notification.RunNotifier; -import org.junit.runners.Suite; -import org.junit.runners.model.InitializationError; -import org.junit.runners.model.RunnerBuilder; - -public class ListenerSuite extends Suite { - - private final TestListener listener = new TestListener(); - - public ListenerSuite(Class klass, RunnerBuilder builder) throws InitializationError { - super(klass, builder); - } - - @Override - protected void runChild(Runner runner, RunNotifier notifier) { - notifier.addListener(listener); - runner.run(notifier); - notifier.removeListener(listener); - } -} diff --git a/src/integration-test/java/suites/MiscTestSuite.java b/src/integration-test/java/suites/MiscTestSuite.java deleted file mode 100644 index ea23630..0000000 --- a/src/integration-test/java/suites/MiscTestSuite.java +++ /dev/null @@ -1,21 +0,0 @@ -package suites; - -import org.junit.runner.RunWith; -import org.junit.runners.Suite; -import org.xbib.elasticsearch.AliasTest; -import org.xbib.elasticsearch.SearchTest; -import org.xbib.elasticsearch.SimpleTest; -import org.xbib.elasticsearch.WildcardTest; - -/** - * - */ -@RunWith(ListenerSuite.class) -@Suite.SuiteClasses({ - SimpleTest.class, - AliasTest.class, - SearchTest.class, - WildcardTest.class -}) -public class MiscTestSuite { -} diff --git a/src/integration-test/java/suites/TestListener.java b/src/integration-test/java/suites/TestListener.java deleted file mode 100644 index 7e24527..0000000 --- a/src/integration-test/java/suites/TestListener.java +++ /dev/null @@ -1,44 +0,0 @@ -package suites; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.junit.runner.Description; -import org.junit.runner.Result; -import org.junit.runner.notification.Failure; -import org.junit.runner.notification.RunListener; - -/** - * - */ -public class TestListener extends RunListener { - - private static final Logger logger = LogManager.getLogger("test.listener"); - - public void testRunStarted(Description description) throws java.lang.Exception { - logger.info("number of tests to execute: {}", description.testCount()); - } - - public void testRunFinished(Result result) throws java.lang.Exception { - logger.info("number of tests executed: {}", result.getRunCount()); - } - - public void testStarted(Description description) throws java.lang.Exception { - logger.info("starting execution of {} {}", - description.getClassName(), description.getMethodName()); - } - - public void testFinished(Description description) throws java.lang.Exception { - logger.info("finished execution of {} {}", - description.getClassName(), description.getMethodName()); - } - - public void testFailure(Failure failure) throws java.lang.Exception { - logger.info("failed execution of tests: {}", - failure.getMessage()); - } - - public void testIgnored(Description description) throws java.lang.Exception { - logger.info("execution of test ignored: {}", - description.getClassName(), description.getMethodName()); - } -} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/Clients.java b/src/main/java/org/xbib/elasticsearch/extras/client/Clients.java deleted file mode 100644 index daa4981..0000000 --- a/src/main/java/org/xbib/elasticsearch/extras/client/Clients.java +++ /dev/null @@ -1,105 +0,0 @@ -package org.xbib.elasticsearch.extras.client; - -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import 
org.xbib.elasticsearch.extras.client.node.BulkNodeClient; -import org.xbib.elasticsearch.extras.client.transport.BulkTransportClient; -import org.xbib.elasticsearch.extras.client.transport.MockTransportClient; - -/** - * - */ -public final class Clients implements Parameters { - - private final Settings.Builder settingsBuilder; - - private BulkMetric metric; - - private BulkControl control; - - public Clients() { - settingsBuilder = Settings.builder(); - } - - public static Clients builder() { - return new Clients(); - } - - public Clients put(String key, String value) { - settingsBuilder.put(key, value); - return this; - } - - public Clients put(String key, Integer value) { - settingsBuilder.put(key, value); - return this; - } - - public Clients put(String key, Long value) { - settingsBuilder.put(key, value); - return this; - } - - public Clients put(String key, Double value) { - settingsBuilder.put(key, value); - return this; - } - - public Clients put(String key, ByteSizeValue value) { - settingsBuilder.put(key, value); - return this; - } - - public Clients put(String key, TimeValue value) { - settingsBuilder.put(key, value); - return this; - } - - public Clients put(Settings settings) { - settingsBuilder.put(settings); - return this; - } - - public Clients setMetric(BulkMetric metric) { - this.metric = metric; - return this; - } - - public Clients setControl(BulkControl control) { - this.control = control; - return this; - } - - public BulkNodeClient toBulkNodeClient(Client client) { - Settings settings = settingsBuilder.build(); - return new BulkNodeClient() - .maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST)) - .maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS)) - .maxVolumePerRequest(settings.get(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST)) - .flushIngestInterval(settings.get(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL)) - .init(client, metric, control); - } - - public BulkTransportClient toBulkTransportClient() { - Settings settings = settingsBuilder.build(); - return new BulkTransportClient() - .maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST)) - .maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS)) - .maxVolumePerRequest(settings.get(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST)) - .flushIngestInterval(settings.get(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL)) - .init(settings, metric, control); - } - - public MockTransportClient toMockTransportClient() { - Settings settings = settingsBuilder.build(); - return new MockTransportClient() - .maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST)) - .maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS)) - .maxVolumePerRequest(settings.get(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST)) - .flushIngestInterval(settings.get(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL)) - .init(settings, metric, control); - } - -} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClient.java b/src/main/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClient.java deleted file mode 100644 index 0f387b6..0000000 --- a/src/main/java/org/xbib/elasticsearch/extras/client/node/BulkNodeClient.java +++ /dev/null @@ -1,513 +0,0 @@ -package org.xbib.elasticsearch.extras.client.node; - -import com.google.common.collect.ImmutableSet; -import 
org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.create.CreateIndexAction; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.bulk.BulkProcessor; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.Node; -import org.elasticsearch.plugins.Plugin; -import org.xbib.elasticsearch.extras.client.AbstractClient; -import org.xbib.elasticsearch.extras.client.BulkControl; -import org.xbib.elasticsearch.extras.client.BulkMetric; -import org.xbib.elasticsearch.extras.client.ClientMethods; - -import java.io.IOException; -import java.io.InputStream; -import java.util.Collection; -import java.util.Collections; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - -/** - * - */ -public class BulkNodeClient extends AbstractClient implements ClientMethods { - - private static final ESLogger logger = ESLoggerFactory.getLogger(BulkNodeClient.class.getName()); - - private int maxActionsPerRequest = DEFAULT_MAX_ACTIONS_PER_REQUEST; - - private int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS; - - private ByteSizeValue maxVolume; - - private TimeValue flushInterval; - - private Node node; - - private ElasticsearchClient client; - - private BulkProcessor bulkProcessor; - - private BulkMetric metric; - - private BulkControl control; - - private Throwable throwable; - - private boolean closed; - - @Override - public BulkNodeClient maxActionsPerRequest(int maxActionsPerRequest) { - this.maxActionsPerRequest = maxActionsPerRequest; - return this; - } - - @Override - public BulkNodeClient maxConcurrentRequests(int maxConcurrentRequests) { - this.maxConcurrentRequests = maxConcurrentRequests; - return this; - } - - @Override - public BulkNodeClient maxVolumePerRequest(String maxVolume) { - this.maxVolume = ByteSizeValue.parseBytesSizeValue(maxVolume, "maxVolumePerRequest"); - return this; - } - - @Override - public BulkNodeClient flushIngestInterval(String flushInterval) { - this.flushInterval = TimeValue.parseTimeValue(flushInterval, TimeValue.timeValueSeconds(5), "flushIngestInterval"); - return this; - } - - @Override - public BulkNodeClient init(ElasticsearchClient client, - final BulkMetric metric, final BulkControl control) { - this.client = client; - this.metric = metric; - this.control = control; - if (metric != null) { - metric.start(); - } - BulkProcessor.Listener listener = new BulkProcessor.Listener() { - 
@Override - public void beforeBulk(long executionId, BulkRequest request) { - long l = -1; - if (metric != null) { - metric.getCurrentIngest().inc(); - l = metric.getCurrentIngest().getCount(); - int n = request.numberOfActions(); - metric.getSubmitted().inc(n); - metric.getCurrentIngestNumDocs().inc(n); - metric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes()); - } - logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]", - executionId, - request.numberOfActions(), - request.estimatedSizeInBytes(), - l); - } - - @Override - public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { - long l = -1; - if (metric != null) { - metric.getCurrentIngest().dec(); - l = metric.getCurrentIngest().getCount(); - metric.getSucceeded().inc(response.getItems().length); - } - int n = 0; - for (BulkItemResponse itemResponse : response.getItems()) { - if (metric != null) { - metric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId()); - } - if (itemResponse.isFailed()) { - n++; - if (metric != null) { - metric.getSucceeded().dec(1); - metric.getFailed().inc(1); - } - } - } - if (metric != null) { - logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests", - executionId, - metric.getSucceeded().getCount(), - metric.getFailed().getCount(), - response.getTook().millis(), - l); - } - if (n > 0) { - logger.error("bulk [{}] failed with {} failed items, failure message = {}", - executionId, n, response.buildFailureMessage()); - } else { - if (metric != null) { - metric.getCurrentIngestNumDocs().dec(response.getItems().length); - } - } - } - - @Override - public void afterBulk(long executionId, BulkRequest request, Throwable failure) { - if (metric != null) { - metric.getCurrentIngest().dec(); - } - throwable = failure; - closed = true; - logger.error("after bulk [" + executionId + "] error", failure); - } - }; - BulkProcessor.Builder builder = BulkProcessor.builder((Client) client, listener) - .setBulkActions(maxActionsPerRequest) - .setConcurrentRequests(maxConcurrentRequests) - .setFlushInterval(flushInterval); - if (maxVolume != null) { - builder.setBulkSize(maxVolume); - } - this.bulkProcessor = builder.build(); - this.closed = false; - return this; - } - - @Override - public BulkNodeClient init(Settings settings, BulkMetric metric, BulkControl control) throws IOException { - createClient(settings); - this.metric = metric; - this.control = control; - return this; - } - - @Override - public ElasticsearchClient client() { - return client; - } - - @Override - protected synchronized void createClient(Settings settings) throws IOException { - if (client != null) { - logger.warn("client is open, closing..."); - client.threadPool().shutdown(); - client = null; - node.close(); - } - if (settings != null) { - String version = System.getProperty("os.name") - + " " + System.getProperty("java.vm.name") - + " " + System.getProperty("java.vm.vendor") - + " " + System.getProperty("java.runtime.version") - + " " + System.getProperty("java.vm.version"); - Settings effectiveSettings = Settings.builder().put(settings) - .put("node.client", true) - .put("node.master", false) - .put("node.data", false).build(); - logger.info("creating node client on {} with effective settings {}", - version, effectiveSettings.getAsMap()); - Collection> plugins = Collections.emptyList(); - this.node = new BulkNode(new Environment(effectiveSettings), plugins); - node.start(); - this.client = node.client(); - } - 
} - - @Override - public BulkMetric getMetric() { - return metric; - } - - @Override - public BulkNodeClient index(String index, String type, String id, String source) { - if (closed) { - throwClose(); - } - try { - if (metric != null) { - metric.getCurrentIngest().inc(index, type, id); - } - bulkProcessor.add(new IndexRequest(index).type(type).id(id).create(false).source(source)); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of index request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkNodeClient bulkIndex(IndexRequest indexRequest) { - if (closed) { - throwClose(); - } - try { - if (metric != null) { - metric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id()); - } - bulkProcessor.add(indexRequest); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of index request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkNodeClient delete(String index, String type, String id) { - if (closed) { - throwClose(); - } - try { - if (metric != null) { - metric.getCurrentIngest().inc(index, type, id); - } - bulkProcessor.add(new DeleteRequest(index).type(type).id(id)); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of delete failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkNodeClient bulkDelete(DeleteRequest deleteRequest) { - if (closed) { - throwClose(); - } - try { - if (metric != null) { - metric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); - } - bulkProcessor.add(deleteRequest); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of delete failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkNodeClient update(String index, String type, String id, String source) { - if (closed) { - throwClose(); - } - try { - if (metric != null) { - metric.getCurrentIngest().inc(index, type, id); - } - bulkProcessor.add(new UpdateRequest().index(index).type(type).id(id).upsert(source)); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of update request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkNodeClient bulkUpdate(UpdateRequest updateRequest) { - if (closed) { - throwClose(); - } - try { - if (metric != null) { - metric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id()); - } - bulkProcessor.add(updateRequest); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of update request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkNodeClient flushIngest() { - if (closed) { - throwClose(); - } - logger.debug("flushing bulk processor"); - bulkProcessor.flush(); - return this; - } - - @Override - public BulkNodeClient waitForResponses(String maxWaitTime) throws InterruptedException, ExecutionException { - if (closed) { - throwClose(); - } - while (!bulkProcessor.awaitClose(TimeValue.parseTimeValue(maxWaitTime, TimeValue.timeValueSeconds(30), - "maxWaitTime").getMillis(), TimeUnit.MILLISECONDS)) { - logger.warn("still waiting for responses"); - } - return this; - } - - @Override - public BulkNodeClient startBulk(String index, long startRefreshIntervalMillis, long stopRefreshItervalMillis) - throws IOException { - if (control == null) { - return this; - } - if (!control.isBulk(index)) { - 
control.startBulk(index, startRefreshIntervalMillis, stopRefreshItervalMillis); - updateIndexSetting(index, "refresh_interval", startRefreshIntervalMillis + "ms"); - } - return this; - } - - @Override - public BulkNodeClient stopBulk(String index) throws IOException { - if (control == null) { - return this; - } - if (control.isBulk(index)) { - updateIndexSetting(index, "refresh_interval", control.getStopBulkRefreshIntervals().get(index) + "ms"); - control.finishBulk(index); - } - return this; - } - - @Override - public synchronized void shutdown() { - try { - if (bulkProcessor != null) { - logger.debug("closing bulk processor..."); - bulkProcessor.close(); - } - if (control != null && control.indices() != null && !control.indices().isEmpty()) { - logger.debug("stopping bulk mode for indices {}...", control.indices()); - for (String index : ImmutableSet.copyOf(control.indices())) { - stopBulk(index); - } - metric.stop(); - } - if (node != null) { - logger.debug("closing node..."); - node.close(); - } - } catch (Exception e) { - logger.error(e.getMessage(), e); - } - } - - @Override - public BulkNodeClient newIndex(String index) { - return newIndex(index, null, null); - } - - @Override - public BulkNodeClient newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException { - resetSettings(); - setting(settings); - mapping(type, mappings); - return newIndex(index, settings(), mappings()); - } - - @Override - public BulkNodeClient newIndex(String index, Settings settings, Map mappings) { - if (closed) { - throwClose(); - } - if (client == null) { - logger.warn("no client for create index"); - return this; - } - if (index == null) { - logger.warn("no index name given to create index"); - return this; - } - CreateIndexRequestBuilder createIndexRequestBuilder = - new CreateIndexRequestBuilder(client(), CreateIndexAction.INSTANCE).setIndex(index); - if (settings != null) { - logger.info("settings = {}", settings.getAsStructuredMap()); - createIndexRequestBuilder.setSettings(settings); - } - if (mappings != null) { - for (Map.Entry entry : mappings.entrySet()) { - String type = entry.getKey(); - String mapping = entry.getValue(); - logger.info("found mapping for {}", type); - createIndexRequestBuilder.addMapping(type, mapping); - } - } - createIndexRequestBuilder.execute().actionGet(); - logger.info("index {} created", index); - return this; - } - - @Override - public BulkNodeClient newMapping(String index, String type, Map mapping) { - PutMappingRequestBuilder putMappingRequestBuilder = - new PutMappingRequestBuilder(client(), PutMappingAction.INSTANCE) - .setIndices(index) - .setType(type) - .setSource(mapping); - putMappingRequestBuilder.execute().actionGet(); - logger.info("mapping created for index {} and type {}", index, type); - return this; - } - - @Override - public BulkNodeClient deleteIndex(String index) { - if (closed) { - throwClose(); - } - if (client == null) { - logger.warn("no client"); - return this; - } - if (index == null) { - logger.warn("no index name given to delete index"); - return this; - } - DeleteIndexRequestBuilder deleteIndexRequestBuilder = - new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, index); - deleteIndexRequestBuilder.execute().actionGet(); - return this; - } - - @Override - public boolean hasThrowable() { - return throwable != null; - } - - @Override - public Throwable getThrowable() { - return throwable; - } - - public Settings getSettings() { - return settings(); - } - - @Override - public 
Settings.Builder getSettingsBuilder() { - return settingsBuilder(); - } - - private static void throwClose() { - throw new ElasticsearchException("client is closed"); - } - - private class BulkNode extends Node { - - BulkNode(Environment env, Collection> classpathPlugins) { - super(env, Version.CURRENT, classpathPlugins); - } - } - -} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/package-info.java b/src/main/java/org/xbib/elasticsearch/extras/client/package-info.java deleted file mode 100644 index c231c60..0000000 --- a/src/main/java/org/xbib/elasticsearch/extras/client/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** - * Classes for Elasticsearch client extras. - */ -package org.xbib.elasticsearch.extras.client; diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClient.java b/src/main/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClient.java deleted file mode 100644 index ac37781..0000000 --- a/src/main/java/org/xbib/elasticsearch/extras/client/transport/BulkTransportClient.java +++ /dev/null @@ -1,564 +0,0 @@ -package org.xbib.elasticsearch.extras.client.transport; - -import com.google.common.collect.ImmutableSet; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.create.CreateIndexAction; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.xbib.elasticsearch.extras.client.AbstractClient; -import org.xbib.elasticsearch.extras.client.BulkControl; -import org.xbib.elasticsearch.extras.client.BulkMetric; -import org.xbib.elasticsearch.extras.client.BulkProcessor; -import org.xbib.elasticsearch.extras.client.ClientMethods; -import org.xbib.elasticsearch.extras.client.NetworkUtils; - -import java.io.IOException; -import java.io.InputStream; -import java.net.InetAddress; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - -/** - * Transport client with addtitional methods using the 
BulkProcessor. - */ -public class BulkTransportClient extends AbstractClient implements ClientMethods { - - private static final ESLogger logger = ESLoggerFactory.getLogger(BulkTransportClient.class.getName()); - - private int maxActionsPerRequest = DEFAULT_MAX_ACTIONS_PER_REQUEST; - - private int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS; - - private ByteSizeValue maxVolumePerRequest; - - private TimeValue flushInterval; - - private BulkProcessor bulkProcessor; - - private Throwable throwable; - - private boolean closed; - - private TransportClient client; - - private BulkMetric metric; - - private BulkControl control; - - private boolean ignoreBulkErrors; - - private boolean isShutdown; - - @Override - public BulkTransportClient init(ElasticsearchClient client, BulkMetric metric, BulkControl control) throws IOException { - return init(findSettings(), metric, control); - } - - @Override - public BulkTransportClient init(Settings settings, final BulkMetric metric, final BulkControl control) { - createClient(settings); - this.metric = metric; - this.control = control; - if (metric != null) { - metric.start(); - } - resetSettings(); - BulkProcessor.Listener listener = new BulkProcessor.Listener() { - @Override - public void beforeBulk(long executionId, BulkRequest request) { - long l = -1L; - if (metric != null) { - metric.getCurrentIngest().inc(); - l = metric.getCurrentIngest().getCount(); - int n = request.numberOfActions(); - metric.getSubmitted().inc(n); - metric.getCurrentIngestNumDocs().inc(n); - metric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes()); - } - logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]", - executionId, - request.numberOfActions(), - request.estimatedSizeInBytes(), - l); - } - - @Override - public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { - long l = -1L; - if (metric != null) { - metric.getCurrentIngest().dec(); - l = metric.getCurrentIngest().getCount(); - metric.getSucceeded().inc(response.getItems().length); - } - int n = 0; - for (BulkItemResponse itemResponse : response.getItems()) { - if (metric != null) { - metric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId()); - if (itemResponse.isFailed()) { - n++; - metric.getSucceeded().dec(1); - metric.getFailed().inc(1); - } - } - } - if (metric != null) { - logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] [concurrent requests={}]", - executionId, - metric.getSucceeded().getCount(), - metric.getFailed().getCount(), - response.getTook().millis(), - l); - } - if (n > 0) { - logger.error("bulk [{}] failed with {} failed items, failure message = {}", - executionId, n, response.buildFailureMessage()); - } else { - if (metric != null) { - metric.getCurrentIngestNumDocs().dec(response.getItems().length); - } - } - } - - @Override - public void afterBulk(long executionId, BulkRequest requst, Throwable failure) { - if (metric != null) { - metric.getCurrentIngest().dec(); - } - throwable = failure; - if (!ignoreBulkErrors) { - closed = true; - } - logger.error("bulk [" + executionId + "] error", failure); - } - }; - BulkProcessor.Builder builder = BulkProcessor.builder(client, listener) - .setBulkActions(maxActionsPerRequest) - .setConcurrentRequests(maxConcurrentRequests) - .setFlushInterval(flushInterval); - if (maxVolumePerRequest != null) { - builder.setBulkSize(maxVolumePerRequest); - } - this.bulkProcessor = builder.build(); - try { - Collection addrs = 
findAddresses(settings); - if (!connect(addrs, settings.getAsBoolean("autodiscover", false))) { - throw new NoNodeAvailableException("no cluster nodes available, check settings " - + settings.getAsMap()); - } - } catch (IOException e) { - logger.error(e.getMessage(), e); - } - this.closed = false; - return this; - } - - @Override - public ClientMethods newMapping(String index, String type, Map mapping) { - new PutMappingRequestBuilder(client(), PutMappingAction.INSTANCE) - .setIndices(index) - .setType(type) - .setSource(mapping) - .execute().actionGet(); - logger.info("mapping created for index {} and type {}", index, type); - return this; - } - - @Override - protected void createClient(Settings settings) { - if (client != null) { - logger.warn("client is open, closing..."); - client.close(); - client.threadPool().shutdown(); - client = null; - } - if (settings != null) { - String version = System.getProperty("os.name") - + " " + System.getProperty("java.vm.name") - + " " + System.getProperty("java.vm.vendor") - + " " + System.getProperty("java.runtime.version") - + " " + System.getProperty("java.vm.version"); - logger.info("creating transport client on {} with effective settings {}", - version, settings.getAsMap()); - this.client = TransportClient.builder() - .settings(settings) - .build(); - this.ignoreBulkErrors = settings.getAsBoolean("ignoreBulkErrors", true); - } - } - - public boolean isShutdown() { - return isShutdown; - } - - @Override - public BulkTransportClient maxActionsPerRequest(int maxActionsPerRequest) { - this.maxActionsPerRequest = maxActionsPerRequest; - return this; - } - - @Override - public BulkTransportClient maxConcurrentRequests(int maxConcurrentRequests) { - this.maxConcurrentRequests = maxConcurrentRequests; - return this; - } - - @Override - public BulkTransportClient maxVolumePerRequest(String maxVolumePerRequest) { - this.maxVolumePerRequest = ByteSizeValue.parseBytesSizeValue(maxVolumePerRequest, "maxVolumePerRequest"); - return this; - } - - @Override - public BulkTransportClient flushIngestInterval(String flushInterval) { - this.flushInterval = TimeValue.parseTimeValue(flushInterval, TimeValue.timeValueSeconds(5), "flushIngestInterval"); - return this; - } - - @Override - public ElasticsearchClient client() { - return client; - } - - @Override - public BulkMetric getMetric() { - return metric; - } - - @Override - public ClientMethods newIndex(String index) { - if (closed) { - throwClose(); - } - return newIndex(index, null, null); - } - - @Override - public ClientMethods newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException { - resetSettings(); - setting(settings); - mapping(type, mappings); - return newIndex(index, settings(), mappings()); - } - - @Override - public ClientMethods newIndex(String index, Settings settings, Map mappings) { - if (closed) { - throwClose(); - } - if (index == null) { - logger.warn("no index name given to create index"); - return this; - } - CreateIndexRequestBuilder createIndexRequestBuilder = - new CreateIndexRequestBuilder(client(), CreateIndexAction.INSTANCE).setIndex(index); - if (settings != null) { - logger.info("settings = {}", settings.getAsStructuredMap()); - createIndexRequestBuilder.setSettings(settings); - } - if (mappings != null) { - for (Map.Entry entry : mappings.entrySet()) { - String type = entry.getKey(); - String mapping = entry.getValue(); - logger.info("found mapping for {}", type); - createIndexRequestBuilder.addMapping(type, mapping); - } - } - 
createIndexRequestBuilder.execute().actionGet(); - logger.info("index {} created", index); - return this; - } - - @Override - public ClientMethods deleteIndex(String index) { - if (closed) { - throwClose(); - } - if (index == null) { - logger.warn("no index name given to delete index"); - return this; - } - new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, index).execute().actionGet(); - return this; - } - - @Override - public ClientMethods startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds) - throws IOException { - if (control == null) { - return this; - } - if (!control.isBulk(index)) { - control.startBulk(index, startRefreshIntervalSeconds, stopRefreshIntervalSeconds); - updateIndexSetting(index, "refresh_interval", startRefreshIntervalSeconds + "s"); - } - return this; - } - - @Override - public ClientMethods stopBulk(String index) throws IOException { - if (control == null) { - return this; - } - if (control.isBulk(index)) { - updateIndexSetting(index, "refresh_interval", control.getStopBulkRefreshIntervals().get(index) + "s"); - control.finishBulk(index); - } - return this; - } - - @Override - public BulkTransportClient index(String index, String type, String id, String source) { - if (closed) { - throwClose(); - } - try { - metric.getCurrentIngest().inc(index, type, id); - bulkProcessor.add(new IndexRequest().index(index).type(type).id(id).create(false).source(source)); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of index request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkTransportClient bulkIndex(IndexRequest indexRequest) { - if (closed) { - throwClose(); - } - try { - metric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id()); - bulkProcessor.add(indexRequest); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of index request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkTransportClient delete(String index, String type, String id) { - if (closed) { - throwClose(); - } - try { - metric.getCurrentIngest().inc(index, type, id); - bulkProcessor.add(new DeleteRequest().index(index).type(type).id(id)); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of delete request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkTransportClient bulkDelete(DeleteRequest deleteRequest) { - if (closed) { - throwClose(); - } - try { - metric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); - bulkProcessor.add(deleteRequest); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of delete request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkTransportClient update(String index, String type, String id, String source) { - if (closed) { - throwClose(); - } - try { - metric.getCurrentIngest().inc(index, type, id); - bulkProcessor.add(new UpdateRequest().index(index).type(type).id(id).upsert(source)); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of update request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public BulkTransportClient bulkUpdate(UpdateRequest updateRequest) { - if (closed) { - throwClose(); - } - try { - metric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id()); - 
bulkProcessor.add(updateRequest); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of update request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public synchronized BulkTransportClient flushIngest() { - if (closed) { - throwClose(); - } - logger.debug("flushing bulk processor"); - bulkProcessor.flush(); - return this; - } - - @Override - public synchronized BulkTransportClient waitForResponses(String maxWaitTime) - throws InterruptedException, ExecutionException { - if (closed) { - throwClose(); - } - bulkProcessor.awaitClose(TimeValue.parseTimeValue(maxWaitTime, - TimeValue.timeValueSeconds(30), "maxWaitTime").getMillis(), TimeUnit.MILLISECONDS); - return this; - } - - @Override - public synchronized void shutdown() { - if (closed) { - shutdownClient(); - throwClose(); - } - try { - if (bulkProcessor != null) { - logger.debug("closing bulk processor..."); - bulkProcessor.close(); - } - if (control != null && control.indices() != null && !control.indices().isEmpty()) { - logger.debug("stopping bulk mode for indices {}...", control.indices()); - for (String index : ImmutableSet.copyOf(control.indices())) { - stopBulk(index); - } - metric.stop(); - } - logger.debug("shutting down..."); - shutdownClient(); - logger.debug("shutting down completed"); - } catch (Exception e) { - logger.error(e.getMessage(), e); - } - } - - @Override - public boolean hasThrowable() { - return throwable != null; - } - - @Override - public Throwable getThrowable() { - return throwable; - } - - private Settings findSettings() { - Settings.Builder settingsBuilder = Settings.settingsBuilder(); - settingsBuilder.put("host", "localhost"); - try { - String hostname = NetworkUtils.getLocalAddress().getHostName(); - logger.debug("the hostname is {}", hostname); - settingsBuilder.put("host", hostname) - .put("port", 9300); - } catch (Exception e) { - logger.warn(e.getMessage(), e); - } - return settingsBuilder.build(); - } - - private Collection findAddresses(Settings settings) throws IOException { - String[] hostnames = settings.getAsArray("host", new String[]{"localhost"}); - int port = settings.getAsInt("port", 9300); - Collection addresses = new ArrayList<>(); - for (String hostname : hostnames) { - String[] splitHost = hostname.split(":", 2); - if (splitHost.length == 2) { - String host = splitHost[0]; - InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null); - try { - port = Integer.parseInt(splitHost[1]); - } catch (Exception e) { - logger.warn(e.getMessage(), e); - } - addresses.add(new InetSocketTransportAddress(inetAddress, port)); - } - if (splitHost.length == 1) { - String host = splitHost[0]; - InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null); - addresses.add(new InetSocketTransportAddress(inetAddress, port)); - } - } - return addresses; - } - - private static void throwClose() { - throw new ElasticsearchException("client is closed"); - } - - private void shutdownClient() { - if (client != null) { - logger.debug("shutdown started"); - client.close(); - client.threadPool().shutdown(); - client = null; - logger.debug("shutdown complete"); - } - isShutdown = true; - } - - private boolean connect(Collection addresses, boolean autodiscover) { - logger.info("trying to connect to {}", addresses); - client.addTransportAddresses(addresses); - if (client.connectedNodes() != null) { - List nodes = client.connectedNodes(); - if (!nodes.isEmpty()) { - logger.info("connected to {}", nodes); - if (autodiscover) { - 
logger.info("trying to auto-discover all cluster nodes..."); - ClusterStateRequestBuilder clusterStateRequestBuilder = - new ClusterStateRequestBuilder(client, ClusterStateAction.INSTANCE); - ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet(); - DiscoveryNodes discoveryNodes = clusterStateResponse.getState().getNodes(); - client.addDiscoveryNodes(discoveryNodes); - logger.info("after auto-discovery connected to {}", client.connectedNodes()); - } - return true; - } - return false; - } - return false; - } -} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/transport/MockTransportClient.java b/src/main/java/org/xbib/elasticsearch/extras/client/transport/MockTransportClient.java deleted file mode 100644 index ed0fcc7..0000000 --- a/src/main/java/org/xbib/elasticsearch/extras/client/transport/MockTransportClient.java +++ /dev/null @@ -1,155 +0,0 @@ -package org.xbib.elasticsearch.extras.client.transport; - -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.settings.Settings; -import org.xbib.elasticsearch.extras.client.BulkControl; -import org.xbib.elasticsearch.extras.client.BulkMetric; - -import java.io.IOException; -import java.util.Map; - -/** - * Mock client, it does not perform actions on a cluster. - * Useful for testing or dry runs. - */ -public class MockTransportClient extends BulkTransportClient { - - @Override - public ElasticsearchClient client() { - return null; - } - - @Override - public MockTransportClient init(ElasticsearchClient client, BulkMetric metric, BulkControl control) { - return this; - } - - @Override - public MockTransportClient init(Settings settings, BulkMetric metric, BulkControl control) { - return this; - } - - @Override - public MockTransportClient maxActionsPerRequest(int maxActions) { - return this; - } - - @Override - public MockTransportClient maxConcurrentRequests(int maxConcurrentRequests) { - return this; - } - - @Override - public MockTransportClient maxVolumePerRequest(String maxVolumePerRequest) { - return this; - } - - @Override - public MockTransportClient flushIngestInterval(String interval) { - return this; - } - - @Override - public MockTransportClient index(String index, String type, String id, String source) { - return this; - } - - @Override - public MockTransportClient delete(String index, String type, String id) { - return this; - } - - @Override - public MockTransportClient update(String index, String type, String id, String source) { - return this; - } - - @Override - public MockTransportClient bulkIndex(IndexRequest indexRequest) { - return this; - } - - @Override - public MockTransportClient bulkDelete(DeleteRequest deleteRequest) { - return this; - } - - @Override - public MockTransportClient bulkUpdate(UpdateRequest updateRequest) { - return this; - } - - @Override - public MockTransportClient flushIngest() { - return this; - } - - @Override - public MockTransportClient waitForResponses(String timeValue) throws InterruptedException { - return this; - } - - @Override - public MockTransportClient startBulk(String index, long startRefreshInterval, long stopRefreshIterval) { - return this; - } - - @Override - public MockTransportClient stopBulk(String index) { - return this; - } - - @Override - public MockTransportClient deleteIndex(String index) { - return this; - } - - @Override - 
public MockTransportClient newIndex(String index) { - return this; - } - - @Override - public MockTransportClient newMapping(String index, String type, Map mapping) { - return this; - } - - @Override - public void putMapping(String index) { - // mockup method - } - - @Override - public void refreshIndex(String index) { - // mockup method - } - - @Override - public void flushIndex(String index) { - // mockup method - } - - @Override - public void waitForCluster(String healthColor, String timeValue) throws IOException { - // mockup method - } - - @Override - public int waitForRecovery(String index) throws IOException { - return -1; - } - - @Override - public int updateReplicaLevel(String index, int level) throws IOException { - return -1; - } - - @Override - public void shutdown() { - // mockup method - } - -} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/transport/TransportClient.java b/src/main/java/org/xbib/elasticsearch/extras/client/transport/TransportClient.java deleted file mode 100644 index 3912ce7..0000000 --- a/src/main/java/org/xbib/elasticsearch/extras/client/transport/TransportClient.java +++ /dev/null @@ -1,445 +0,0 @@ -package org.xbib.elasticsearch.extras.client.transport; - -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; - -import com.google.common.collect.ImmutableMap; -import org.elasticsearch.Version; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionModule; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.GenericAction; -import org.elasticsearch.action.TransportActionNodeProxy; -import org.elasticsearch.action.admin.cluster.node.liveness.LivenessRequest; -import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.client.support.AbstractClient; -import org.elasticsearch.client.support.Headers; -import org.elasticsearch.client.transport.ClientTransportModule; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterNameModule; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.inject.ModulesBuilder; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.indices.breaker.CircuitBreakerModule; -import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsModule; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.search.SearchModule; -import 
org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPoolModule; -import org.elasticsearch.transport.FutureTransportResponseHandler; -import org.elasticsearch.transport.TransportModule; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportService; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Stripped-down transport client without node sampling and without retrying. - * - * Merged together: original TransportClient, TransportClientNodesServce, TransportClientProxy - - * Configurable connect ping interval setting added. - */ -public class TransportClient extends AbstractClient { - - private static final String CLIENT_TYPE = "transport"; - - private final Injector injector; - - private final ProxyActionMap proxyActionMap; - - private final long pingTimeout; - - private final ClusterName clusterName; - - private final TransportService transportService; - - private final Version minCompatibilityVersion; - - private final Headers headers; - - private final AtomicInteger tempNodeId = new AtomicInteger(); - - private final AtomicInteger nodeCounter = new AtomicInteger(); - - private final Object mutex = new Object(); - - private volatile List listedNodes = Collections.emptyList(); - - private volatile List nodes = Collections.emptyList(); - - private volatile List filteredNodes = Collections.emptyList(); - - private volatile boolean closed; - - private TransportClient(Injector injector) { - super(injector.getInstance(Settings.class), injector.getInstance(ThreadPool.class), - injector.getInstance(Headers.class)); - this.injector = injector; - this.clusterName = injector.getInstance(ClusterName.class); - this.transportService = injector.getInstance(TransportService.class); - this.minCompatibilityVersion = injector.getInstance(Version.class).minimumCompatibilityVersion(); - this.headers = injector.getInstance(Headers.class); - this.pingTimeout = this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis(); - this.proxyActionMap = injector.getInstance(ProxyActionMap.class); - } - - public static Builder builder() { - return new Builder(); - } - - /** - * Returns the current registered transport addresses to use. - * - * @return list of transport addresess - */ - public List transportAddresses() { - List lstBuilder = new ArrayList<>(); - for (DiscoveryNode listedNode : listedNodes) { - lstBuilder.add(listedNode.address()); - } - return Collections.unmodifiableList(lstBuilder); - } - - /** - * Returns the current connected transport nodes that this client will use. - * The nodes include all the nodes that are currently alive based on the transport - * addresses provided. - * - * @return list of nodes - */ - public List connectedNodes() { - return this.nodes; - } - - /** - * The list of filtered nodes that were not connected to, for example, due to - * mismatch in cluster name. - * - * @return list of nodes - */ - public List filteredNodes() { - return this.filteredNodes; - } - - /** - * Returns the listed nodes in the transport client (ones added to it). - * - * @return list of nodes - */ - public List listedNodes() { - return this.listedNodes; - } - - /** - * Adds a list of transport addresses that will be used to connect to. 
- * The Node this transport address represents will be used if its possible to connect to it. - * If it is unavailable, it will be automatically connected to once it is up. - * In order to get the list of all the current connected nodes, please see {@link #connectedNodes()}. - * - * @param discoveryNodes nodes - * @return this transport client - */ - public TransportClient addDiscoveryNodes(DiscoveryNodes discoveryNodes) { - Collection addresses = new ArrayList<>(); - for (DiscoveryNode discoveryNode : discoveryNodes) { - addresses.add((InetSocketTransportAddress) discoveryNode.address()); - } - addTransportAddresses(addresses); - return this; - } - - public TransportClient addTransportAddresses(Collection transportAddresses) { - synchronized (mutex) { - if (closed) { - throw new IllegalStateException("transport client is closed, can't add addresses"); - } - List filtered = new ArrayList<>(transportAddresses.size()); - for (TransportAddress transportAddress : transportAddresses) { - boolean found = false; - for (DiscoveryNode otherNode : listedNodes) { - if (otherNode.address().equals(transportAddress)) { - found = true; - logger.debug("address [{}] already exists with [{}], ignoring...", transportAddress, otherNode); - break; - } - } - if (!found) { - filtered.add(transportAddress); - } - } - if (filtered.isEmpty()) { - return this; - } - List discoveryNodeList = new ArrayList<>(); - discoveryNodeList.addAll(listedNodes()); - for (TransportAddress transportAddress : filtered) { - DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeId.incrementAndGet(), transportAddress, - minCompatibilityVersion); - logger.debug("adding address [{}]", node); - discoveryNodeList.add(node); - } - listedNodes = Collections.unmodifiableList(discoveryNodeList); - connect(); - } - return this; - } - - /** - * Removes a transport address from the list of transport addresses that are used to connect to. 
- * - * @param transportAddress transport address to remove - * @return this transport client - */ - public TransportClient removeTransportAddress(TransportAddress transportAddress) { - synchronized (mutex) { - if (closed) { - throw new IllegalStateException("transport client is closed, can't remove an address"); - } - List builder = new ArrayList<>(); - for (DiscoveryNode otherNode : listedNodes) { - if (!otherNode.address().equals(transportAddress)) { - builder.add(otherNode); - } else { - logger.debug("removing address [{}]", otherNode); - } - } - listedNodes = Collections.unmodifiableList(builder); - } - return this; - } - - @Override - @SuppressWarnings("rawtypes") - public void close() { - synchronized (mutex) { - if (closed) { - return; - } - closed = true; - for (DiscoveryNode node : nodes) { - transportService.disconnectFromNode(node); - } - for (DiscoveryNode listedNode : listedNodes) { - transportService.disconnectFromNode(listedNode); - } - nodes = Collections.emptyList(); - } - injector.getInstance(TransportService.class).close(); - for (Class plugin : injector.getInstance(PluginsService.class).nodeServices()) { - injector.getInstance(plugin).close(); - } - try { - ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS); - } catch (Exception e) { - logger.debug(e.getMessage(), e); - } - injector.getInstance(PageCacheRecycler.class).close(); - } - - private void connect() { - Set newNodes = new HashSet<>(); - Set newFilteredNodes = new HashSet<>(); - for (DiscoveryNode listedNode : listedNodes) { - if (!transportService.nodeConnected(listedNode)) { - try { - logger.trace("connecting to listed node (light) [{}]", listedNode); - transportService.connectToNodeLight(listedNode); - } catch (Exception e) { - logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode); - continue; - } - } - try { - LivenessResponse livenessResponse = transportService.submitRequest(listedNode, - TransportLivenessAction.NAME, headers.applyTo(new LivenessRequest()), - TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE) - .withTimeout(pingTimeout).build(), - new FutureTransportResponseHandler() { - @Override - public LivenessResponse newInstance() { - return new LivenessResponse(); - } - }).txGet(); - if (!clusterName.equals(livenessResponse.getClusterName())) { - logger.warn("node {} not part of the cluster {}, ignoring...", listedNode, clusterName); - newFilteredNodes.add(listedNode); - } else if (livenessResponse.getDiscoveryNode() != null) { - DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode(); - newNodes.add(new DiscoveryNode(nodeWithInfo.name(), nodeWithInfo.id(), nodeWithInfo.getHostName(), - nodeWithInfo.getHostAddress(), listedNode.address(), nodeWithInfo.attributes(), - nodeWithInfo.version())); - } else { - logger.debug("node {} didn't return any discovery info, temporarily using transport discovery node", - listedNode); - newNodes.add(listedNode); - } - } catch (Exception e) { - logger.info("failed to get node info for {}, disconnecting...", e, listedNode); - transportService.disconnectFromNode(listedNode); - } - } - for (Iterator it = newNodes.iterator(); it.hasNext(); ) { - DiscoveryNode node = it.next(); - if (!transportService.nodeConnected(node)) { - try { - logger.trace("connecting to node [{}]", node); - transportService.connectToNode(node); - } catch (Exception e) { - it.remove(); - logger.debug("failed to connect to discovered node [" + node + "]", e); - } - } - } - this.nodes = 
Collections.unmodifiableList(new ArrayList<>(newNodes)); - this.filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes)); - } - - @Override - @SuppressWarnings({"unchecked", "rawtypes"}) - protected > - void doExecute(Action action, final R request, final ActionListener listener) { - final TransportActionNodeProxy proxyAction = proxyActionMap.getProxies().get(action); - if (proxyAction == null) { - throw new IllegalStateException("undefined action " + action); - } - List nodeList = this.nodes; - if (nodeList.isEmpty()) { - throw new NoNodeAvailableException("none of the configured nodes are available: " + this.listedNodes); - } - int index = nodeCounter.incrementAndGet(); - if (index < 0) { - index = 0; - nodeCounter.set(0); - } - // try once and never more - try { - proxyAction.execute(nodeList.get(index % nodeList.size()), request, listener); - } catch (Exception e) { - listener.onFailure(e); - } - } - - /** - * - */ - public static class Builder { - - private Settings settings = Settings.EMPTY; - private List> pluginClasses = new ArrayList<>(); - - public Builder settings(Settings.Builder settings) { - return settings(settings.build()); - } - - public Builder settings(Settings settings) { - this.settings = settings; - return this; - } - - public Builder addPlugin(Class pluginClass) { - pluginClasses.add(pluginClass); - return this; - } - - public TransportClient build() { - Settings transportClientSettings = settingsBuilder() - .put("transport.ping.schedule", this.settings.get("ping.interval", "30s")) - .put(InternalSettingsPreparer.prepareSettings(this.settings)) - .put("network.server", false) - .put("node.client", true) - .put(CLIENT_TYPE_SETTING, CLIENT_TYPE) - .build(); - PluginsService pluginsService = new PluginsService(transportClientSettings, null, null, pluginClasses); - this.settings = pluginsService.updatedSettings(); - Version version = Version.CURRENT; - final ThreadPool threadPool = new ThreadPool(transportClientSettings); - boolean success = false; - try { - ModulesBuilder modules = new ModulesBuilder(); - modules.add(new Version.Module(version)); - // plugin modules must be added here, before others or we can get crazy injection errors... - for (Module pluginModule : pluginsService.nodeModules()) { - modules.add(pluginModule); - } - modules.add(new PluginsModule(pluginsService)); - modules.add(new SettingsModule(this.settings)); - modules.add(new NetworkModule()); - modules.add(new ClusterNameModule(this.settings)); - modules.add(new ThreadPoolModule(threadPool)); - modules.add(new TransportModule(this.settings)); - modules.add(new SearchModule() { - @Override - protected void configure() { - // noop - } - }); - modules.add(new ActionModule(true)); - modules.add(new ClientTransportModule()); - modules.add(new CircuitBreakerModule(this.settings)); - pluginsService.processModules(modules); - Injector injector = modules.createInjector(); - injector.getInstance(TransportService.class).start(); - TransportClient transportClient = new TransportClient(injector); - success = true; - return transportClient; - } finally { - if (!success) { - ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); - } - } - } - } - - /** - * The {@link ProxyActionMap} must be declared public. 
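[Editor's note] For readers tracking what this removal takes away, here is a minimal usage sketch of the stripped-down `TransportClient` deleted in the hunk above. It relies only on what this file itself shows: the `builder()`, the mapping of `ping.interval` to `transport.ping.schedule` in `Builder#build()`, `addTransportAddresses`, and the single-attempt round-robin dispatch in `doExecute`. The cluster name, host, and port are placeholders, not values from the project.

```java
import java.net.InetAddress;
import java.util.Collections;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.xbib.elasticsearch.extras.client.transport.TransportClient;

public final class TransportClientUsageSketch {

    public static void main(String[] args) throws Exception {
        // Placeholder settings; "ping.interval" is remapped to
        // "transport.ping.schedule" by the Builder removed above.
        Settings settings = Settings.settingsBuilder()
                .put("cluster.name", "my-cluster")   // placeholder cluster name
                .put("ping.interval", "10s")
                .build();
        TransportClient client = TransportClient.builder()
                .settings(settings)
                .build();
        // Register one transport address; unreachable nodes stay on the
        // listed-nodes list and are retried on the next connect() pass.
        client.addTransportAddresses(Collections.singletonList(
                new InetSocketTransportAddress(InetAddress.getByName("127.0.0.1"), 9300)));
        // Requests are then dispatched round-robin over connectedNodes(),
        // with a single attempt per request ("try once and never more").
        client.close();
    }
}
```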
- */ - @SuppressWarnings({"unchecked", "rawtypes"}) - public static class ProxyActionMap { - - private final ImmutableMap proxies; - - @Inject - public ProxyActionMap(Settings settings, TransportService transportService, Map actions) { - MapBuilder actionsBuilder = new MapBuilder<>(); - for (GenericAction action : actions.values()) { - if (action instanceof Action) { - actionsBuilder.put((Action) action, new TransportActionNodeProxy(settings, action, transportService)); - } - } - this.proxies = actionsBuilder.immutableMap(); - } - - public ImmutableMap getProxies() { - return proxies; - } - } - -} diff --git a/src/main/java/org/xbib/elasticsearch/extras/client/transport/package-info.java b/src/main/java/org/xbib/elasticsearch/extras/client/transport/package-info.java deleted file mode 100644 index ac6a50d..0000000 --- a/src/main/java/org/xbib/elasticsearch/extras/client/transport/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** - * Classes for Elasticsearch transport client extras. - */ -package org.xbib.elasticsearch.extras.client.transport; diff --git a/transport/build.gradle b/transport/build.gradle new file mode 100644 index 0000000..582d3ec --- /dev/null +++ b/transport/build.gradle @@ -0,0 +1,65 @@ +buildscript { + repositories { + jcenter() + maven { + url 'http://xbib.org/repository' + } + } + dependencies { + classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4" + } +} + +apply plugin: 'org.xbib.gradle.plugin.elasticsearch.build' + +configurations { + main + tests +} + +dependencies { + compile project(':common') + testCompile "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" + testRuntime "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" +} + +jar { + baseName "${rootProject.name}-transport" +} + +/* +task testJar(type: Jar, dependsOn: testClasses) { + baseName = "${project.archivesBaseName}-tests" + from sourceSets.test.output +} +*/ + +artifacts { + main jar + tests testJar + archives sourcesJar, javadocJar +} + +esTest { + enabled = true + // test with the jars, not the classes, for security manager + //classpath = files(configurations.testRuntime) + configurations.main.artifacts.files + configurations.tests.artifacts.files + systemProperty 'tests.security.manager', 'true' + // maybe we like some extra security policy for our code + systemProperty 'tests.security.policy', '/extra-security.policy' +} +esTest.dependsOn jar, testJar + +randomizedTest { + enabled = false +} + +test { + enabled = false + jvmArgs "-javaagent:" + configurations.alpnagent.asPath + systemProperty 'path.home', projectDir.absolutePath + testLogging { + showStandardStreams = true + exceptionFormat = 'full' + } +} diff --git a/transport/config/checkstyle/checkstyle.xml b/transport/config/checkstyle/checkstyle.xml new file mode 100644 index 0000000..52fe33c --- /dev/null +++ b/transport/config/checkstyle/checkstyle.xml @@ -0,0 +1,323 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/transport/src/docs/asciidoc/css/foundation.css 
b/transport/src/docs/asciidoc/css/foundation.css new file mode 100644 index 0000000..27be611 --- /dev/null +++ b/transport/src/docs/asciidoc/css/foundation.css @@ -0,0 +1,684 @@ +/*! normalize.css v2.1.2 | MIT License | git.io/normalize */ +/* ========================================================================== HTML5 display definitions ========================================================================== */ +/** Correct `block` display not defined in IE 8/9. */ +article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; } + +/** Correct `inline-block` display not defined in IE 8/9. */ +audio, canvas, video { display: inline-block; } + +/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */ +audio:not([controls]) { display: none; height: 0; } + +/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */ +[hidden], template { display: none; } + +script { display: none !important; } + +/* ========================================================================== Base ========================================================================== */ +/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */ +html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ } + +/** Remove default margin. */ +body { margin: 0; } + +/* ========================================================================== Links ========================================================================== */ +/** Remove the gray background color from active links in IE 10. */ +a { background: transparent; } + +/** Address `outline` inconsistency between Chrome and other browsers. */ +a:focus { outline: thin dotted; } + +/** Improve readability when focused and also mouse hovered in all browsers. */ +a:active, a:hover { outline: 0; } + +/* ========================================================================== Typography ========================================================================== */ +/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */ +h1 { font-size: 2em; margin: 0.67em 0; } + +/** Address styling not present in IE 8/9, Safari 5, and Chrome. */ +abbr[title] { border-bottom: 1px dotted; } + +/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ +b, strong { font-weight: bold; } + +/** Address styling not present in Safari 5 and Chrome. */ +dfn { font-style: italic; } + +/** Address differences between Firefox and other browsers. */ +hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; } + +/** Address styling not present in IE 8/9. */ +mark { background: #ff0; color: #000; } + +/** Correct font family set oddly in Safari 5 and Chrome. */ +code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; } + +/** Improve readability of pre-formatted text in all browsers. */ +pre { white-space: pre-wrap; } + +/** Set consistent quote types. */ +q { quotes: "\201C" "\201D" "\2018" "\2019"; } + +/** Address inconsistent and variable font size in all browsers. */ +small { font-size: 80%; } + +/** Prevent `sub` and `sup` affecting `line-height` in all browsers. 
*/ +sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } + +sup { top: -0.5em; } + +sub { bottom: -0.25em; } + +/* ========================================================================== Embedded content ========================================================================== */ +/** Remove border when inside `a` element in IE 8/9. */ +img { border: 0; } + +/** Correct overflow displayed oddly in IE 9. */ +svg:not(:root) { overflow: hidden; } + +/* ========================================================================== Figures ========================================================================== */ +/** Address margin not present in IE 8/9 and Safari 5. */ +figure { margin: 0; } + +/* ========================================================================== Forms ========================================================================== */ +/** Define consistent border, margin, and padding. */ +fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } + +/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */ +legend { border: 0; /* 1 */ padding: 0; /* 2 */ } + +/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ +button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ } + +/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ +button, input { line-height: normal; } + +/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */ +button, select { text-transform: none; } + +/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */ +button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ } + +/** Re-set default cursor for disabled elements. */ +button[disabled], html input[disabled] { cursor: default; } + +/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ +input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } + +/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */ +input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; } + +/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ +input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } + +/** Remove inner padding and border in Firefox 4+. */ +button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } + +/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. 
*/ +textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ } + +/* ========================================================================== Tables ========================================================================== */ +/** Remove most spacing between table cells. */ +table { border-collapse: collapse; border-spacing: 0; } + +meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; } + +meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; } + +meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; } + +*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } + +html, body { font-size: 100%; } + +body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; } + +a:hover { cursor: pointer; } + +img, object, embed { max-width: 100%; height: auto; } + +object, embed { height: 100%; } + +img { -ms-interpolation-mode: bicubic; } + +#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; } + +.left { float: left !important; } + +.right { float: right !important; } + +.text-left { text-align: left !important; } + +.text-right { text-align: right !important; } + +.text-center { text-align: center !important; } + +.text-justify { text-align: justify !important; } + +.hide { display: none; } + +.antialiased { -webkit-font-smoothing: antialiased; } + +img { display: inline-block; vertical-align: middle; } + +textarea { height: auto; min-height: 50px; } + +select { width: 100%; } + +object, svg { display: inline-block; vertical-align: middle; } + +.center { margin-left: auto; margin-right: auto; } + +.spread { width: 100%; } + +p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; } + +.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; } + +/* Typography resets */ +div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; } + +/* Default Link Styles */ +a { color: #2ba6cb; text-decoration: none; line-height: inherit; } +a:hover, a:focus { color: #2795b6; } +a img { border: none; } + +/* Default paragraph styles */ +p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } +p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } + +/* Default header styles */ +h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; 
} +h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; } + +h1 { font-size: 2.125em; } + +h2 { font-size: 1.6875em; } + +h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.375em; } + +h4 { font-size: 1.125em; } + +h5 { font-size: 1.125em; } + +h6 { font-size: 1em; } + +hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; } + +/* Helpful Typography Defaults */ +em, i { font-style: italic; line-height: inherit; } + +strong, b { font-weight: bold; line-height: inherit; } + +small { font-size: 60%; line-height: inherit; } + +code { font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; } + +/* Lists */ +ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } + +ul, ol { margin-left: 1.5em; } +ul.no-bullet, ol.no-bullet { margin-left: 1.5em; } + +/* Unordered Lists */ +ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ } +ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; } +ul.square { list-style-type: square; } +ul.circle { list-style-type: circle; } +ul.disc { list-style-type: disc; } +ul.no-bullet { list-style: none; } + +/* Ordered Lists */ +ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } + +/* Definition Lists */ +dl dt { margin-bottom: 0.3125em; font-weight: bold; } +dl dd { margin-bottom: 1.25em; } + +/* Abbreviations */ +abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; } + +abbr { text-transform: none; } + +/* Blockquotes */ +blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; } +blockquote cite { display: block; font-size: 0.8125em; color: #555555; } +blockquote cite:before { content: "\2014 \0020"; } +blockquote cite a, blockquote cite a:visited { color: #555555; } + +blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; } + +/* Microformats */ +.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; } +.vcard li { margin: 0; display: block; } +.vcard .fn { font-weight: bold; font-size: 0.9375em; } + +.vevent .summary { font-weight: bold; } +.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; } + +@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } + h1 { font-size: 2.75em; } + h2 { font-size: 2.3125em; } + h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } + h4 { font-size: 1.4375em; } } +/* Tables */ +table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; } +table thead, table tfoot { background: whitesmoke; font-weight: bold; } +table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; } +table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; } +table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; } +table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; } + +body { -moz-osx-font-smoothing: grayscale; 
-webkit-font-smoothing: antialiased; tab-size: 4; } + +h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } + +.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; } +.clearfix:after, .float-group:after { clear: both; } + +*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; } +*:not(pre) > code.nobreak { word-wrap: normal; } +*:not(pre) > code.nowrap { white-space: nowrap; } + +pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; } + +em em { font-style: normal; } + +strong strong { font-weight: normal; } + +.keyseq { color: #555555; } + +kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } + +.keyseq kbd:first-child { margin-left: 0; } + +.keyseq kbd:last-child { margin-right: 0; } + +.menuseq, .menu { color: #090909; } + +b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; } + +b.button:before { content: "["; padding: 0 3px 0 2px; } + +b.button:after { content: "]"; padding: 0 2px 0 3px; } + +#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } +#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; } +#header:after, #content:after, #footnotes:after, #footer:after { clear: both; } + +#content { margin-top: 1.25em; } + +#content:before { content: none; } + +#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; } +#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; } +#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; } +#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; } +#header .details span:first-child { margin-left: -0.125em; } +#header .details span.email a { color: #6f6f6f; } +#header .details br { display: none; } +#header .details br + span:before { content: "\00a0\2013\00a0"; } +#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; } +#header .details br + span#revremark:before { content: "\00a0|\00a0"; } +#header #revnumber { text-transform: capitalize; } +#header #revnumber:after { content: "\00a0"; } + +#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } + +#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; } +#toc > ul { margin-left: 0.125em; } +#toc ul.sectlevel0 > li > a 
{ font-style: italic; } +#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } +#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; } +#toc li { line-height: 1.3334; margin-top: 0.3334em; } +#toc a { text-decoration: none; } +#toc a:active { text-decoration: underline; } + +#toctitle { color: #6f6f6f; font-size: 1.2em; } + +@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } + body.toc2 { padding-left: 15em; padding-right: 0; } + #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } + #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } + #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } + #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } + #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } + body.toc2.toc-right { padding-left: 0; padding-right: 15em; } + body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } } +@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } + #toc.toc2 { width: 20em; } + #toc.toc2 #toctitle { font-size: 1.375em; } + #toc.toc2 > ul { font-size: 0.95em; } + #toc.toc2 ul ul { padding-left: 1.25em; } + body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } +#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } +#content #toc > :first-child { margin-top: 0; } +#content #toc > :last-child { margin-bottom: 0; } + +#footer { max-width: 100%; background-color: #222222; padding: 1.25em; } + +#footer-text { color: #dddddd; line-height: 1.44; } + +.sect1 { padding-bottom: 0.625em; } + +@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } } +.sect1 + .sect1 { border-top: 1px solid #dddddd; } + +#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; } +#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } +#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } +#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; } +#content h1 > a.link:hover, h2 > a.link:hover, h3 > 
a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; } + +.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } + +.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; } + +table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; } + +.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; } + +table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; } + +.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } +.admonitionblock > table td.icon { text-align: center; width: 80px; } +.admonitionblock > table td.icon img { max-width: initial; } +.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; } +.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; } +.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } + +.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; } +.exampleblock > .content > :first-child { margin-top: 0; } +.exampleblock > .content > :last-child { margin-bottom: 0; } + +.sidebarblock { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } +.sidebarblock > :first-child { margin-top: 0; } +.sidebarblock > :last-child { margin-bottom: 0; } +.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; } + +.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } + +.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; } +.sidebarblock .literalblock pre, .sidebarblock .listingblock pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; } + +.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: 
break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; } +.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; } +@media only screen and (min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } } +@media only screen and (min-width: 1280px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } } + +.literalblock.output pre { color: #eeeeee; background-color: black; } + +.listingblock pre.highlightjs { padding: 0; } +.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; } + +.listingblock > .content { position: relative; } + +.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; } + +.listingblock:hover code[data-lang]:before { display: block; } + +.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; } + +.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; } + +table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } + +table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; } + +table.pyhltable td.code { padding-left: .75em; padding-right: 0; } + +pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; } + +pre.pygments .lineno { display: inline-block; margin-right: .25em; } + +table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; } + +.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } +.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; } +.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } +.quoteblock blockquote { margin: 0; padding: 0; border: 0; } +.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); } +.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; } +.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; } +.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; } +.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; } +.quoteblock .quoteblock blockquote:before { display: none; } + +.verseblock { margin: 0 1em 1.25em 1em; } +.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; } +.verseblock pre strong { font-weight: 400; } +.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } + +.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; } +.quoteblock .attribution br, .verseblock .attribution br { display: none; } +.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; } + 
+.quoteblock.abstract { margin: 0 0 1.25em 0; display: block; } +.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; } +.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; } + +table.tableblock { max-width: 100%; border-collapse: separate; } +table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; } + +table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; } + +table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; } + +table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; } + +table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; } + +table.grid-all * > tr > .tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; } + +table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; } + +table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; } + +table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; } + +table.frame-all { border-width: 1px; } + +table.frame-sides { border-width: 0 1px; } + +table.frame-topbot { border-width: 1px 0; } + +th.halign-left, td.halign-left { text-align: left; } + +th.halign-right, td.halign-right { text-align: right; } + +th.halign-center, td.halign-center { text-align: center; } + +th.valign-top, td.valign-top { vertical-align: top; } + +th.valign-bottom, td.valign-bottom { vertical-align: bottom; } + +th.valign-middle, td.valign-middle { vertical-align: middle; } + +table thead th, table tfoot th { font-weight: bold; } + +tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; } + +tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; } + +p.tableblock > code:only-child { background: none; padding: 0; } + +p.tableblock { font-size: 1em; } + +td > div.verse { white-space: pre; } + +ol { margin-left: 1.75em; } + +ul li ol { margin-left: 1.5em; } + +dl dd { margin-left: 1.125em; } + +dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } + +ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } + +ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; } + +ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; } + +ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; } + +ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; } + +ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; } +ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; } +ul.inline > li > * { display: block; } + +.unstyled dl dt { font-weight: normal; font-style: normal; } + +ol.arabic { 
list-style-type: decimal; } + +ol.decimal { list-style-type: decimal-leading-zero; } + +ol.loweralpha { list-style-type: lower-alpha; } + +ol.upperalpha { list-style-type: upper-alpha; } + +ol.lowerroman { list-style-type: lower-roman; } + +ol.upperroman { list-style-type: upper-roman; } + +ol.lowergreek { list-style-type: lower-greek; } + +.hdlist > table, .colist > table { border: 0; background: none; } +.hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; } + +td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } + +td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } + +.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } + +.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; } +.colist > table tr > td:first-of-type img { max-width: initial; } +.colist > table tr > td:last-of-type { padding: 0.25em 0; } + +.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; } + +.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; } +.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; } +.imageblock > .title { margin-bottom: 0; } +.imageblock.thumb, .imageblock.th { border-width: 6px; } +.imageblock.thumb > .title, .imageblock.th > .title { padding: 0 0.125em; } + +.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } +.image.left { margin-right: 0.625em; } +.image.right { margin-left: 0.625em; } + +a.image { text-decoration: none; display: inline-block; } +a.image object { pointer-events: none; } + +sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } +sup.footnote a, sup.footnoteref a { text-decoration: none; } +sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } + +#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } +#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; } +#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; } +#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; } +#footnotes .footnote:last-of-type { margin-bottom: 0; } +#content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } + +.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; } +.gist .file-data > table td.line-data { width: 99%; } + +div.unbreakable { page-break-inside: avoid; } + +.big { font-size: larger; } + +.small { font-size: smaller; } + +.underline { text-decoration: underline; } + +.overline { text-decoration: overline; } + +.line-through { text-decoration: line-through; } + +.aqua { color: #00bfbf; } + +.aqua-background { background-color: #00fafa; } + +.black { color: black; } + +.black-background { background-color: black; } + +.blue { color: #0000bf; } + +.blue-background { background-color: #0000fa; } + +.fuchsia { color: #bf00bf; } + +.fuchsia-background { background-color: #fa00fa; } + +.gray { color: #606060; } + +.gray-background { background-color: #7d7d7d; } + +.green { color: #006000; } + +.green-background { background-color: #007d00; } + +.lime { color: #00bf00; } + +.lime-background { background-color: #00fa00; } + +.maroon { color: #600000; } + 
+.maroon-background { background-color: #7d0000; } + +.navy { color: #000060; } + +.navy-background { background-color: #00007d; } + +.olive { color: #606000; } + +.olive-background { background-color: #7d7d00; } + +.purple { color: #600060; } + +.purple-background { background-color: #7d007d; } + +.red { color: #bf0000; } + +.red-background { background-color: #fa0000; } + +.silver { color: #909090; } + +.silver-background { background-color: #bcbcbc; } + +.teal { color: #006060; } + +.teal-background { background-color: #007d7d; } + +.white { color: #bfbfbf; } + +.white-background { background-color: #fafafa; } + +.yellow { color: #bfbf00; } + +.yellow-background { background-color: #fafa00; } + +span.icon > .fa { cursor: default; } + +.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } +.admonitionblock td.icon .icon-note:before { content: "\f05a"; color: #207c98; } +.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } +.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; } +.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; } +.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; } + +.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } +.conum[data-value] * { color: #fff !important; } +.conum[data-value] + b { display: none; } +.conum[data-value]:after { content: attr(data-value); } +pre .conum[data-value] { position: relative; top: -0.125em; } + +b.conum * { color: inherit !important; } + +.conum:not([data-value]):empty { display: none; } + +.literalblock pre, .listingblock pre { background: #eeeeee; } diff --git a/transport/src/docs/asciidoclet/overview.adoc b/transport/src/docs/asciidoclet/overview.adoc new file mode 100644 index 0000000..7947331 --- /dev/null +++ b/transport/src/docs/asciidoclet/overview.adoc @@ -0,0 +1,4 @@ += Elasticsearch Java client +Jörg Prante +Version 5.4.0.0 + diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufBytesReference.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufBytesReference.java new file mode 100644 index 0000000..e0b96d6 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufBytesReference.java @@ -0,0 +1,74 @@ +package org.xbib.elasticsearch.client.transport; + +import io.netty.buffer.ByteBuf; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; + +final class ByteBufBytesReference extends BytesReference { + + private final ByteBuf buffer; + private final int length; + private final int offset; + + ByteBufBytesReference(ByteBuf buffer, int length) { + this.buffer = buffer; + this.length = length; + this.offset = buffer.readerIndex(); + assert length <= buffer.readableBytes() : "length[" + length +"] > " + buffer.readableBytes(); + } + + @Override + public byte get(int index) { + return buffer.getByte(offset + index); + } + + @Override + 
public int length() { + return length; + } + + @Override + public BytesReference slice(int from, int length) { + return new ByteBufBytesReference(buffer.slice(offset + from, length), length); + } + + @Override + public StreamInput streamInput() { + return new ByteBufStreamInput(buffer.duplicate(), length); + } + + @Override + public void writeTo(OutputStream os) throws IOException { + buffer.getBytes(offset, os, length); + } + + ByteBuf toByteBuf() { + return buffer.duplicate(); + } + + @Override + public String utf8ToString() { + return buffer.toString(offset, length, StandardCharsets.UTF_8); + } + + @Override + public BytesRef toBytesRef() { + if (buffer.hasArray()) { + return new BytesRef(buffer.array(), buffer.arrayOffset() + offset, length); + } + final byte[] copy = new byte[length]; + buffer.getBytes(offset, copy); + return new BytesRef(copy); + } + + @Override + public long ramBytesUsed() { + return buffer.capacity(); + } + +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufStreamInput.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufStreamInput.java new file mode 100644 index 0000000..1dadaea --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufStreamInput.java @@ -0,0 +1,131 @@ +package org.xbib.elasticsearch.client.transport; + +import io.netty.buffer.ByteBuf; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.EOFException; +import java.io.IOException; + +/** + * A Netty {@link io.netty.buffer.ByteBuf} based {@link org.elasticsearch.common.io.stream.StreamInput}. + */ +class ByteBufStreamInput extends StreamInput { + + private final ByteBuf buffer; + private final int endIndex; + + ByteBufStreamInput(ByteBuf buffer, int length) { + if (length > buffer.readableBytes()) { + throw new IndexOutOfBoundsException(); + } + this.buffer = buffer; + int startIndex = buffer.readerIndex(); + endIndex = startIndex + length; + buffer.markReaderIndex(); + } + + @Override + public BytesReference readBytesReference(int length) throws IOException { + // NOTE: It is unsafe to share a reference of the internal structure, so we + // use the default implementation which will copy the bytes. It is unsafe because + // a netty ByteBuf might be pooled which requires a manual release to prevent + // memory leaks. + return super.readBytesReference(length); + } + + @Override + public BytesRef readBytesRef(int length) throws IOException { + // NOTE: It is unsafe to share a reference of the internal structure, so we + // use the default implementation which will copy the bytes. It is unsafe because + // a netty ByteBuf might be pooled which requires a manual release to prevent + // memory leaks. 
+ return super.readBytesRef(length); + } + + @Override + public int available() throws IOException { + return endIndex - buffer.readerIndex(); + } + + @Override + protected void ensureCanReadBytes(int length) throws EOFException { + int bytesAvailable = endIndex - buffer.readerIndex(); + if (bytesAvailable < length) { + throw new EOFException("tried to read: " + length + " bytes but only " + bytesAvailable + " remaining"); + } + } + + @Override + public void mark(int readlimit) { + buffer.markReaderIndex(); + } + + @Override + public boolean markSupported() { + return true; + } + + @Override + public int read() throws IOException { + if (available() == 0) { + return -1; + } + return buffer.readByte() & 0xff; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + if (len == 0) { + return 0; + } + int available = available(); + if (available == 0) { + return -1; + } + + len = Math.min(available, len); + buffer.readBytes(b, off, len); + return len; + } + + @Override + public void reset() throws IOException { + buffer.resetReaderIndex(); + } + + @Override + public long skip(long n) throws IOException { + if (n > Integer.MAX_VALUE) { + return skipBytes(Integer.MAX_VALUE); + } else { + return skipBytes((int) n); + } + } + + public int skipBytes(int n) throws IOException { + int nBytes = Math.min(available(), n); + buffer.skipBytes(nBytes); + return nBytes; + } + + + @Override + public byte readByte() throws IOException { + return buffer.readByte(); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + int read = read(b, offset, len); + if (read < len) { + throw new IndexOutOfBoundsException(); + } + } + + @Override + public void close() throws IOException { + // nothing to do here + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/CompressibleBytesOutputStream.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/CompressibleBytesOutputStream.java new file mode 100644 index 0000000..3318068 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/CompressibleBytesOutputStream.java @@ -0,0 +1,87 @@ +package org.xbib.elasticsearch.client.transport; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.BytesStream; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.zip.DeflaterOutputStream; + +/** + * This class exists to provide a stream with optional compression. This is useful as using compression + * requires that the underlying {@link DeflaterOutputStream} be closed to write EOS bytes. However, the + * {@link BytesStream} should not be closed yet, as we have not used the bytes. This class handles these + * intricacies. + * + * {@link CompressibleBytesOutputStream#materializeBytes()} should be called when all the bytes have been + * written to this stream. If compression is enabled, the proper EOS bytes will be written at that point. + * The underlying {@link BytesReference} will be returned. + * + * {@link CompressibleBytesOutputStream#close()} should be called when the bytes are no longer needed and + * can be safely released. 
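A minimal usage sketch of the lifecycle just described, assuming a plain BytesStreamOutput as the backing BytesStream and same-package access (the constructor is package-private); variable names and the payload are illustrative only, error handling omitted:

[source,java]
----
// Sketch: write, materialize (which finishes compression), then close.
BytesStreamOutput backing = new BytesStreamOutput();
CompressibleBytesOutputStream out = new CompressibleBytesOutputStream(backing, true); // compress = true
out.writeString("payload");                     // StreamOutput writes go through the deflater
BytesReference bytes = out.materializeBytes();  // closes the deflater stream so the EOS bytes are written
// ... hand `bytes` to the channel or store them ...
out.close();                                    // release the backing stream once the bytes are no longer needed
----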
+ */ +final class CompressibleBytesOutputStream extends StreamOutput { + + private final StreamOutput stream; + private final BytesStream bytesStreamOutput; + private final boolean shouldCompress; + + CompressibleBytesOutputStream(BytesStream bytesStreamOutput, boolean shouldCompress) throws IOException { + this.bytesStreamOutput = bytesStreamOutput; + this.shouldCompress = shouldCompress; + if (shouldCompress) { + this.stream = CompressorFactory.COMPRESSOR.streamOutput(Streams.flushOnCloseStream(bytesStreamOutput)); + } else { + this.stream = bytesStreamOutput; + } + } + + /** + * This method ensures that compression is complete and returns the underlying bytes. + * + * @return bytes underlying the stream + * @throws IOException if an exception occurs when writing or flushing + */ + BytesReference materializeBytes() throws IOException { + // If we are using compression the stream needs to be closed to ensure that EOS marker bytes are written. + // The actual ReleasableBytesStreamOutput will not be closed yet as it is wrapped in flushOnCloseStream when + // passed to the deflater stream. + if (shouldCompress) { + stream.close(); + } + + return bytesStreamOutput.bytes(); + } + + @Override + public void writeByte(byte b) throws IOException { + stream.write(b); + } + + @Override + public void writeBytes(byte[] b, int offset, int length) throws IOException { + stream.writeBytes(b, offset, length); + } + + @Override + public void flush() throws IOException { + stream.flush(); + } + + @Override + public void close() throws IOException { + if (stream == bytesStreamOutput) { + IOUtils.close(stream); + } else { + IOUtils.close(stream, bytesStreamOutput); + } + } + + @Override + public void reset() { + throw new UnsupportedOperationException(); + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ConnectionProfile.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ConnectionProfile.java new file mode 100644 index 0000000..ed36792 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ConnectionProfile.java @@ -0,0 +1,209 @@ +package org.xbib.elasticsearch.client.transport; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.transport.TransportRequestOptions; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * A connection profile describes how many connection are established to specific node for each of the available request types. + * ({@link org.elasticsearch.transport.TransportRequestOptions.Type}). This allows to tailor a connection towards a specific usage. + */ +public final class ConnectionProfile { + + /** + * Builds a connection profile that is dedicated to a single channel type. 
Use this
+     * when opening single use connections
+     */
+    public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType,
+                                                              @Nullable TimeValue connectTimeout,
+                                                              @Nullable TimeValue handshakeTimeout) {
+        Builder builder = new Builder();
+        builder.addConnections(1, channelType);
+        final EnumSet<TransportRequestOptions.Type> otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class);
+        otherTypes.remove(channelType);
+        builder.addConnections(0, otherTypes.stream().toArray(TransportRequestOptions.Type[]::new));
+        if (connectTimeout != null) {
+            builder.setConnectTimeout(connectTimeout);
+        }
+        if (handshakeTimeout != null) {
+            builder.setHandshakeTimeout(handshakeTimeout);
+        }
+        return builder.build();
+    }
+
+    private final List<ConnectionTypeHandle> handles;
+    private final int numConnections;
+    private final TimeValue connectTimeout;
+    private final TimeValue handshakeTimeout;
+
+    private ConnectionProfile(List<ConnectionTypeHandle> handles, int numConnections,
+                              TimeValue connectTimeout, TimeValue handshakeTimeout) {
+        this.handles = handles;
+        this.numConnections = numConnections;
+        this.connectTimeout = connectTimeout;
+        this.handshakeTimeout = handshakeTimeout;
+    }
+
+    /**
+     * A builder to build a new {@link ConnectionProfile}.
+     */
+    public static class Builder {
+        private final List<ConnectionTypeHandle> handles = new ArrayList<>();
+        private final Set<TransportRequestOptions.Type> addedTypes = EnumSet.noneOf(TransportRequestOptions.Type.class);
+        private int offset = 0;
+        private TimeValue connectTimeout;
+        private TimeValue handshakeTimeout;
+
+        /** create an empty builder */
+        public Builder() {
+        }
+
+        /** copy constructor, using another profile as a base */
+        public Builder(ConnectionProfile source) {
+            handles.addAll(source.getHandles());
+            offset = source.getNumConnections();
+            handles.forEach(th -> addedTypes.addAll(th.types));
+            connectTimeout = source.getConnectTimeout();
+            handshakeTimeout = source.getHandshakeTimeout();
+        }
+
+        /**
+         * Sets a connect timeout for this connection profile.
+         */
+        public void setConnectTimeout(TimeValue connectTimeout) {
+            if (connectTimeout.millis() < 0) {
+                throw new IllegalArgumentException("connectTimeout must be non-negative but was: " + connectTimeout);
+            }
+            this.connectTimeout = connectTimeout;
+        }
+
+        /**
+         * Sets a handshake timeout for this connection profile.
+         */
+        public void setHandshakeTimeout(TimeValue handshakeTimeout) {
+            if (handshakeTimeout.millis() < 0) {
+                throw new IllegalArgumentException("handshakeTimeout must be non-negative but was: " + handshakeTimeout);
+            }
+            this.handshakeTimeout = handshakeTimeout;
+        }
+
+        /**
+         * Adds a number of connections for one or more types. Each type can only be added once.
+         * @param numConnections the number of connections to use in the pool for the given connection types
+         * @param types a set of types that should share the given number of connections
+         */
+        public void addConnections(int numConnections, TransportRequestOptions.Type... types) {
+            if (types == null || types.length == 0) {
+                throw new IllegalArgumentException("types must not be null");
+            }
+            for (TransportRequestOptions.Type type : types) {
+                if (addedTypes.contains(type)) {
+                    throw new IllegalArgumentException("type [" + type + "] is already registered");
+                }
+            }
+            addedTypes.addAll(Arrays.asList(types));
+            handles.add(new ConnectionTypeHandle(offset, numConnections, EnumSet.copyOf(Arrays.asList(types))));
+            offset += numConnections;
+        }
+
+        /**
+         * Creates a new {@link ConnectionProfile} based on the added connections.
+         * @throws IllegalStateException if any of the {@link org.elasticsearch.transport.TransportRequestOptions.Type} values is missing
+         */
+        public ConnectionProfile build() {
+            EnumSet<TransportRequestOptions.Type> types = EnumSet.allOf(TransportRequestOptions.Type.class);
+            types.removeAll(addedTypes);
+            if (types.isEmpty() == false) {
+                throw new IllegalStateException("not all types are added for this connection profile - missing types: " + types);
+            }
+            return new ConnectionProfile(Collections.unmodifiableList(handles), offset, connectTimeout, handshakeTimeout);
+        }
+
+    }
+
+    /**
+     * Returns the connect timeout or null if no explicit timeout is set on this profile.
+     */
+    public TimeValue getConnectTimeout() {
+        return connectTimeout;
+    }
+
+    /**
+     * Returns the handshake timeout or null if no explicit timeout is set on this profile.
+     */
+    public TimeValue getHandshakeTimeout() {
+        return handshakeTimeout;
+    }
+
+    /**
+     * Returns the total number of connections for this profile.
+     */
+    public int getNumConnections() {
+        return numConnections;
+    }
+
+    /**
+     * Returns the number of connections per type for this profile. This might return a count that is shared with other types such
+     * that the sum of all connections per type might be higher than {@link #getNumConnections()}. For instance if
+     * {@link org.elasticsearch.transport.TransportRequestOptions.Type#BULK} shares connections with
+     * {@link org.elasticsearch.transport.TransportRequestOptions.Type#REG}, both will return the same number of connections from
+     * this method but the connections are not distinct.
+     */
+    public int getNumConnectionsPerType(TransportRequestOptions.Type type) {
+        for (ConnectionTypeHandle handle : handles) {
+            if (handle.getTypes().contains(type)) {
+                return handle.length;
+            }
+        }
+        throw new AssertionError("no handle found for type: " + type);
+    }
+
+    /**
+     * Returns the type handles for this connection profile.
+     */
+    List<ConnectionTypeHandle> getHandles() {
+        return Collections.unmodifiableList(handles);
+    }
+
+    /**
+     * Connection type handle encapsulates which slice of the channel list is used for a given set of request types.
+     */
+    static final class ConnectionTypeHandle {
+        public final int length;
+        public final int offset;
+        private final Set<TransportRequestOptions.Type> types;
+        private final AtomicInteger counter = new AtomicInteger();
+
+        private ConnectionTypeHandle(int offset, int length, Set<TransportRequestOptions.Type> types) {
+            this.length = length;
+            this.offset = offset;
+            this.types = types;
+        }
+
+        /**
+         * Returns one of the channels configured for this handle. The channel is selected in a round-robin
+         * fashion.
+         */
+        <T> T getChannel(List<T> channels) {
+            if (length == 0) {
+                throw new IllegalStateException("can't select channel size is 0 for types: " + types);
+            }
+            assert channels.size() >= offset + length : "illegal size: " + channels.size() + " expected >= " + (offset + length);
+            return channels.get(offset + Math.floorMod(counter.incrementAndGet(), length));
+        }
+
+        /**
+         * Returns all types for this handle.
+         */
+        Set<TransportRequestOptions.Type> getTypes() {
+            return types;
+        }
+    }
+}
diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ESLoggingHandler.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ESLoggingHandler.java
new file mode 100644
index 0000000..5ed6ef6
--- /dev/null
+++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ESLoggingHandler.java
@@ -0,0 +1,108 @@
+package org.xbib.elasticsearch.client.transport;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.handler.logging.LogLevel;
+import io.netty.handler.logging.LoggingHandler;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.compress.Compressor;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.transport.TcpHeader;
+import org.elasticsearch.transport.TransportStatus;
+
+import java.io.IOException;
+
+final class ESLoggingHandler extends LoggingHandler {
+
+    ESLoggingHandler() {
+        super(LogLevel.TRACE);
+    }
+
+    @Override
+    protected String format(final ChannelHandlerContext ctx, final String eventName, final Object arg) {
+        if (arg instanceof ByteBuf) {
+            try {
+                return format(ctx, eventName, (ByteBuf) arg);
+            } catch (final Exception e) {
+                // we really do not want to allow a bug in the formatting handling to escape
+                logger.trace("an exception occurred formatting a trace message", e);
+                // we are going to let this be formatted via the default formatting
+                return super.format(ctx, eventName, arg);
+            }
+        } else {
+            return super.format(ctx, eventName, arg);
+        }
+    }
+
+    private static final int MESSAGE_LENGTH_OFFSET = TcpHeader.MARKER_BYTES_SIZE;
+    private static final int REQUEST_ID_OFFSET = MESSAGE_LENGTH_OFFSET + TcpHeader.MESSAGE_LENGTH_SIZE;
+    private static final int STATUS_OFFSET = REQUEST_ID_OFFSET + TcpHeader.REQUEST_ID_SIZE;
+    private static final int VERSION_ID_OFFSET = STATUS_OFFSET + TcpHeader.STATUS_SIZE;
+    private static final int ACTION_OFFSET = VERSION_ID_OFFSET + TcpHeader.VERSION_ID_SIZE;
+
+    private String format(final ChannelHandlerContext ctx, final String eventName, final ByteBuf arg) throws IOException {
+        final int readableBytes = arg.readableBytes();
+        if (readableBytes == 0) {
+            return super.format(ctx, eventName, arg);
+        } else if (readableBytes >= 2) {
+            final StringBuilder sb = new StringBuilder();
+            sb.append(ctx.channel().toString());
+            final int offset = arg.readerIndex();
+            // this might be an ES message, check the header
+            if (arg.getByte(offset) == (byte) 'E' && arg.getByte(offset + 1) == (byte) 'S') {
+                if (readableBytes == TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE) {
+                    final int length = arg.getInt(offset + MESSAGE_LENGTH_OFFSET);
+                    if (length == TcpTransport.PING_DATA_SIZE) {
+                        sb.append(" [ping]").append(' ').append(eventName).append(": ").append(readableBytes).append('B');
+                        return sb.toString();
+                    }
+                }
+                else if (readableBytes >= TcpHeader.HEADER_SIZE) {
+                    // we are
going to try to decode this as an ES message + final int length = arg.getInt(offset + MESSAGE_LENGTH_OFFSET); + final long requestId = arg.getLong(offset + REQUEST_ID_OFFSET); + final byte status = arg.getByte(offset + STATUS_OFFSET); + final boolean isRequest = TransportStatus.isRequest(status); + final String type = isRequest ? "request" : "response"; + final String version = Version.fromId(arg.getInt(offset + VERSION_ID_OFFSET)).toString(); + sb.append(" [length: ").append(length); + sb.append(", request id: ").append(requestId); + sb.append(", type: ").append(type); + sb.append(", version: ").append(version); + if (isRequest) { + // it looks like an ES request, try to decode the action + final int remaining = readableBytes - ACTION_OFFSET; + final ByteBuf slice = arg.slice(offset + ACTION_OFFSET, remaining); + // the stream might be compressed + try (StreamInput in = in(status, slice, remaining)) { + // the first bytes in the message is the context headers + try (ThreadContext context = new ThreadContext(Settings.EMPTY)) { + context.readHeaders(in); + } + // now we can decode the action name + sb.append(", action: ").append(in.readString()); + } + } + sb.append(']'); + sb.append(' ').append(eventName).append(": ").append(readableBytes).append('B'); + return sb.toString(); + } + } + } + // we could not decode this as an ES message, use the default formatting + return super.format(ctx, eventName, arg); + } + + private StreamInput in(final Byte status, final ByteBuf slice, final int remaining) throws IOException { + final ByteBufStreamInput in = new ByteBufStreamInput(slice, remaining); + if (TransportStatus.isCompress(status)) { + final Compressor compressor = CompressorFactory.compressor(Netty4Utils.toBytesReference(slice)); + return compressor.streamInput(in); + } else { + return in; + } + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/MockTransportBulkClient.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/MockTransportBulkClient.java new file mode 100644 index 0000000..04321a8 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/MockTransportBulkClient.java @@ -0,0 +1,151 @@ +package org.xbib.elasticsearch.client.transport; + +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.xbib.elasticsearch.client.BulkControl; +import org.xbib.elasticsearch.client.BulkMetric; + +import java.io.IOException; +import java.util.Map; + +/** + * Mock client, it does not perform actions on a cluster. + * Useful for testing or dry runs. 
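The mock client described above turns every bulk operation into a no-op that returns the client itself, which makes it suitable for dry runs of existing bulk code. A minimal sketch of such a dry run, assuming Settings.EMPTY and already constructed BulkMetric/BulkControl instances (the payload and names here are placeholders, not taken from this commit):

[source,java]
----
// Dry run: exercises the fluent bulk API without contacting a cluster.
MockTransportBulkClient mock = new MockTransportBulkClient();
mock.init(null, Settings.EMPTY, metric, control);   // metric/control: any BulkMetric/BulkControl instances
mock.newIndex("test")
    .index("test", "doc", "1", false, "{\"name\":\"value\"}")
    .flushIngest();
mock.shutdown();
----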
+ */ +public class MockTransportBulkClient extends TransportBulkClient { + + @Override + public ElasticsearchClient client() { + return null; + } + + @Override + public MockTransportBulkClient init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control) { + return this; + } + + @Override + public MockTransportBulkClient maxActionsPerRequest(int maxActions) { + return this; + } + + @Override + public MockTransportBulkClient maxConcurrentRequests(int maxConcurrentRequests) { + return this; + } + + @Override + public MockTransportBulkClient maxVolumePerRequest(ByteSizeValue maxVolumePerRequest) { + return this; + } + + @Override + public MockTransportBulkClient flushIngestInterval(TimeValue interval) { + return this; + } + + @Override + public MockTransportBulkClient index(String index, String type, String id, boolean create, String source) { + return this; + } + + @Override + public MockTransportBulkClient delete(String index, String type, String id) { + return this; + } + + @Override + public MockTransportBulkClient update(String index, String type, String id, String source) { + return this; + } + + @Override + public MockTransportBulkClient indexRequest(IndexRequest indexRequest) { + return this; + } + + @Override + public MockTransportBulkClient deleteRequest(DeleteRequest deleteRequest) { + return this; + } + + @Override + public MockTransportBulkClient updateRequest(UpdateRequest updateRequest) { + return this; + } + + @Override + public MockTransportBulkClient flushIngest() { + return this; + } + + @Override + public MockTransportBulkClient waitForResponses(TimeValue timeValue) throws InterruptedException { + return this; + } + + @Override + public MockTransportBulkClient startBulk(String index, long startRefreshInterval, long stopRefreshIterval) { + return this; + } + + @Override + public MockTransportBulkClient stopBulk(String index) { + return this; + } + + @Override + public MockTransportBulkClient deleteIndex(String index) { + return this; + } + + @Override + public MockTransportBulkClient newIndex(String index) { + return this; + } + + @Override + public MockTransportBulkClient newMapping(String index, String type, Map mapping) { + return this; + } + + @Override + public void putMapping(String index) { + // mockup method + } + + @Override + public void refreshIndex(String index) { + // mockup method + } + + @Override + public void flushIndex(String index) { + // mockup method + } + + @Override + public void waitForCluster(String healthColor, TimeValue timeValue) throws IOException { + // mockup method + } + + @Override + public int waitForRecovery(String index) throws IOException { + return -1; + } + + @Override + public int updateReplicaLevel(String index, int level) throws IOException { + return -1; + } + + @Override + public void shutdown() { + // mockup method + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4InternalESLogger.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4InternalESLogger.java new file mode 100644 index 0000000..33429bf --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4InternalESLogger.java @@ -0,0 +1,168 @@ +package org.xbib.elasticsearch.client.transport; + +import io.netty.util.internal.logging.AbstractInternalLogger; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.SuppressLoggerChecks; +import org.elasticsearch.common.logging.Loggers; + +@SuppressLoggerChecks(reason = "safely delegates to 
logger") +class Netty4InternalESLogger extends AbstractInternalLogger { + + private final Logger logger; + + Netty4InternalESLogger(final String name) { + super(name); + this.logger = Loggers.getLogger(name); + } + + @Override + public boolean isTraceEnabled() { + return logger.isTraceEnabled(); + } + + @Override + public void trace(String msg) { + logger.trace(msg); + } + + @Override + public void trace(String format, Object arg) { + logger.trace(format, arg); + } + + @Override + public void trace(String format, Object argA, Object argB) { + logger.trace(format, argA, argB); + } + + @Override + public void trace(String format, Object... arguments) { + logger.trace(format, arguments); + } + + @Override + public void trace(String msg, Throwable t) { + logger.trace(msg, t); + } + + @Override + public boolean isDebugEnabled() { + return logger.isDebugEnabled(); + } + + @Override + public void debug(String msg) { + logger.debug(msg); + } + + @Override + public void debug(String format, Object arg) { + logger.debug(format, arg); + } + + @Override + public void debug(String format, Object argA, Object argB) { + logger.debug(format, argA, argB); + } + + @Override + public void debug(String format, Object... arguments) { + logger.debug(format, arguments); + } + + @Override + public void debug(String msg, Throwable t) { + logger.debug(msg, t); + } + + @Override + public boolean isInfoEnabled() { + return logger.isInfoEnabled(); + } + + @Override + public void info(String msg) { + logger.info(msg); + } + + @Override + public void info(String format, Object arg) { + logger.info(format, arg); + } + + @Override + public void info(String format, Object argA, Object argB) { + logger.info(format, argA, argB); + } + + @Override + public void info(String format, Object... arguments) { + logger.info(format, arguments); + } + + @Override + public void info(String msg, Throwable t) { + logger.info(msg, t); + } + + @Override + public boolean isWarnEnabled() { + return logger.isWarnEnabled(); + } + + @Override + public void warn(String msg) { + logger.warn(msg); + } + + @Override + public void warn(String format, Object arg) { + logger.warn(format, arg); + } + + @Override + public void warn(String format, Object... arguments) { + logger.warn(format, arguments); + } + + @Override + public void warn(String format, Object argA, Object argB) { + logger.warn(format, argA, argB); + } + + @Override + public void warn(String msg, Throwable t) { + logger.warn(msg, t); + } + + @Override + public boolean isErrorEnabled() { + return logger.isErrorEnabled(); + } + + @Override + public void error(String msg) { + logger.error(msg); + } + + @Override + public void error(String format, Object arg) { + logger.error(format, arg); + } + + @Override + public void error(String format, Object argA, Object argB) { + logger.error(format, argA, argB); + } + + @Override + public void error(String format, Object... 
arguments) { + logger.error(format, arguments); + } + + @Override + public void error(String msg, Throwable t) { + logger.error(msg, t); + } + +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4MessageChannelHandler.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4MessageChannelHandler.java new file mode 100644 index 0000000..4126944 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4MessageChannelHandler.java @@ -0,0 +1,58 @@ +package org.xbib.elasticsearch.client.transport; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.util.Attribute; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.transport.TcpHeader; +import org.elasticsearch.transport.Transports; + +import java.net.InetSocketAddress; + +/** + * A handler (must be the last one!) that does size based frame decoding and forwards the actual message + * to the relevant action. + */ +final class Netty4MessageChannelHandler extends ChannelDuplexHandler { + + private final Netty4Transport transport; + private final String profileName; + + Netty4MessageChannelHandler(Netty4Transport transport, String profileName) { + this.transport = transport; + this.profileName = profileName; + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + Transports.assertTransportThread(); + if (!(msg instanceof ByteBuf)) { + ctx.fireChannelRead(msg); + return; + } + final ByteBuf buffer = (ByteBuf) msg; + final int remainingMessageSize = buffer.getInt(buffer.readerIndex() - TcpHeader.MESSAGE_LENGTH_SIZE); + final int expectedReaderIndex = buffer.readerIndex() + remainingMessageSize; + try { + Channel channel = ctx.channel(); + InetSocketAddress remoteAddress = (InetSocketAddress) channel.remoteAddress(); + // netty always copies a buffer, either in NioWorker in its read handler, where it copies to a fresh + // buffer, or in the cumulative buffer, which is cleaned each time so it could be bigger than the actual size + BytesReference reference = Netty4Utils.toBytesReference(buffer, remainingMessageSize); + Attribute channelAttribute = channel.attr(Netty4Transport.CHANNEL_KEY); + transport.messageReceived(reference, channelAttribute.get(), profileName, remoteAddress, remainingMessageSize); + } finally { + // Set the expected position of the buffer, no matter what happened + buffer.readerIndex(expectedReaderIndex); + } + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + Netty4Utils.maybeDie(cause); + transport.exceptionCaught(ctx, cause); + } + +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Plugin.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Plugin.java new file mode 100644 index 0000000..cb0b450 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Plugin.java @@ -0,0 +1,77 @@ +package org.xbib.elasticsearch.client.transport; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import 
org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.netty4.Netty4HttpServerTransport; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +public class Netty4Plugin extends Plugin implements NetworkPlugin { + + static { + Netty4Utils.setup(); + } + + public static final String NETTY_TRANSPORT_NAME = "netty4"; + public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4"; + + @Override + public List> getSettings() { + return Arrays.asList( + Netty4HttpServerTransport.SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, + Netty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT, + Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, + Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN, + Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX, + Netty4Transport.WORKER_COUNT, + Netty4Transport.NETTY_RECEIVE_PREDICTOR_SIZE, + Netty4Transport.NETTY_RECEIVE_PREDICTOR_MIN, + Netty4Transport.NETTY_RECEIVE_PREDICTOR_MAX, + Netty4Transport.NETTY_BOSS_COUNT + ); + } + + @Override + public Settings additionalSettings() { + return Settings.builder() + // here we set the netty4 transport and http transport as the default. This is a set once setting + // ie. if another plugin does that as well the server will fail - only one default network can exist! + .put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), NETTY_HTTP_TRANSPORT_NAME) + .put(NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.getKey(), NETTY_TRANSPORT_NAME) + .build(); + } + + @Override + public Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService) { + return Collections.singletonMap(NETTY_TRANSPORT_NAME, () -> new Netty4Transport(settings, threadPool, networkService, bigArrays, + namedWriteableRegistry, circuitBreakerService)); + } + + @Override + public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher) { + return Collections.singletonMap(NETTY_HTTP_TRANSPORT_NAME, + () -> new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher)); + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4SizeHeaderFrameDecoder.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4SizeHeaderFrameDecoder.java new file mode 100644 index 0000000..bf18d0f --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4SizeHeaderFrameDecoder.java @@ -0,0 +1,30 @@ +package org.xbib.elasticsearch.client.transport; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.ByteToMessageDecoder; +import io.netty.handler.codec.TooLongFrameException; +import org.elasticsearch.transport.TcpHeader; + +import java.util.List; + +final class Netty4SizeHeaderFrameDecoder extends 
ByteToMessageDecoder { + + @Override + protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { + try { + boolean continueProcessing = TcpTransport.validateMessageHeader(Netty4Utils.toBytesReference(in)); + final ByteBuf message = in.skipBytes(TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE); + if (!continueProcessing) return; + out.add(message); + } catch (IllegalArgumentException ex) { + throw new TooLongFrameException(ex); + } catch (IllegalStateException ex) { + /* decode will be called until the ByteBuf is fully consumed; when it is fully + * consumed, transport#validateMessageHeader will throw an IllegalStateException which + * is okay, it means we have finished consuming the ByteBuf and we can get out + */ + } + } + +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Transport.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Transport.java new file mode 100644 index 0000000..6e7df8b --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Transport.java @@ -0,0 +1,339 @@ +package org.xbib.elasticsearch.client.transport; + +import io.netty.bootstrap.Bootstrap; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.AdaptiveRecvByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.FixedRecvByteBufAllocator; +import io.netty.channel.RecvByteBufAllocator; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.util.AttributeKey; +import io.netty.util.concurrent.Future; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequestOptions; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.common.settings.Setting.byteSizeSetting; +import static org.elasticsearch.common.settings.Setting.intSetting; +import static 
org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; +import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; + +/** + * There are 4 types of connections per node, low/med/high/ping. Low if for batch oriented APIs (like recovery or + * batch) with high payload that will cause regular request. (like search or single index) to take + * longer. Med is for the typical search / single doc index. And High for things like cluster state. Ping is reserved for + * sending out ping requests to other nodes. + */ +public class Netty4Transport extends TcpTransport { + + static { + Netty4Utils.setup(); + } + + public static final Setting WORKER_COUNT = + new Setting<>("transport.netty.worker_count", + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), + (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope); + + public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( + "transport.netty.receive_predictor_size", new ByteSizeValue(64, ByteSizeUnit.KB), Property.NodeScope); + public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = + byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); + public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = + byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); + public static final Setting NETTY_BOSS_COUNT = + intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope); + + + protected final RecvByteBufAllocator recvByteBufAllocator; + protected final int workerCount; + protected final ByteSizeValue receivePredictorMin; + protected final ByteSizeValue receivePredictorMax; + protected volatile Bootstrap bootstrap; + protected final Map serverBootstraps = newConcurrentMap(); + + public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, + NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { + super("netty", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); + Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings)); + this.workerCount = WORKER_COUNT.get(settings); + + // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one + this.receivePredictorMin = NETTY_RECEIVE_PREDICTOR_MIN.get(settings); + this.receivePredictorMax = NETTY_RECEIVE_PREDICTOR_MAX.get(settings); + if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) { + recvByteBufAllocator = new FixedRecvByteBufAllocator((int) receivePredictorMax.getBytes()); + } else { + recvByteBufAllocator = new AdaptiveRecvByteBufAllocator((int) receivePredictorMin.getBytes(), + (int) receivePredictorMin.getBytes(), (int) receivePredictorMax.getBytes()); + } + } + + @Override + protected void doStart() { + boolean success = false; + try { + bootstrap = createBootstrap(); + if (NetworkService.NETWORK_SERVER.get(settings)) { + for (ProfileSettings profileSettings : profileSettings) { + createServerBootstrap(profileSettings); + bindServer(profileSettings); + } + } + super.doStart(); + success = true; + } finally { + if (success == false) { + doStop(); + } + } + } + + private Bootstrap createBootstrap() { + final Bootstrap bootstrap = new Bootstrap(); + bootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, 
TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX))); + bootstrap.channel(NioSocketChannel.class); + + bootstrap.handler(getClientChannelInitializer()); + + bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.toIntExact(defaultConnectionProfile.getConnectTimeout().millis())); + bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings)); + bootstrap.option(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings)); + + final ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.get(settings); + if (tcpSendBufferSize.getBytes() > 0) { + bootstrap.option(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes())); + } + + final ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings); + if (tcpReceiveBufferSize.getBytes() > 0) { + bootstrap.option(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes())); + } + + bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); + + final boolean reuseAddress = TCP_REUSE_ADDRESS.get(settings); + bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress); + + bootstrap.validate(); + + return bootstrap; + } + + private void createServerBootstrap(ProfileSettings profileSettings) { + String name = profileSettings.profileName; + if (logger.isDebugEnabled()) { + logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], " + + "connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]", + name, workerCount, profileSettings.portOrRange, profileSettings.bindHosts, profileSettings.publishHosts, compress, + defaultConnectionProfile.getConnectTimeout(), + defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY), + defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK), + defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.REG), + defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE), + defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.PING), + receivePredictorMin, receivePredictorMax); + } + + + final ThreadFactory workerFactory = daemonThreadFactory(this.settings, TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX, name); + + final ServerBootstrap serverBootstrap = new ServerBootstrap(); + + serverBootstrap.group(new NioEventLoopGroup(workerCount, workerFactory)); + serverBootstrap.channel(NioServerSocketChannel.class); + + serverBootstrap.childHandler(getServerChannelInitializer(name)); + + serverBootstrap.childOption(ChannelOption.TCP_NODELAY, profileSettings.tcpNoDelay); + serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, profileSettings.tcpKeepAlive); + + if (profileSettings.sendBufferSize.getBytes() != -1) { + serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(profileSettings.sendBufferSize.getBytes())); + } + + if (profileSettings.receiveBufferSize.getBytes() != -1) { + serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(profileSettings.receiveBufferSize.bytesAsInt())); + } + + serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); + serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); + + serverBootstrap.option(ChannelOption.SO_REUSEADDR, profileSettings.reuseAddress); + serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, profileSettings.reuseAddress); + serverBootstrap.validate(); + + serverBootstraps.put(name, serverBootstrap); + } + + protected ChannelHandler 
getServerChannelInitializer(String name) { + return new ServerChannelInitializer(name); + } + + protected ChannelHandler getClientChannelInitializer() { + return new ClientChannelInitializer(); + } + + static final AttributeKey CHANNEL_KEY = AttributeKey.newInstance("es-channel-client"); + + protected final void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class); + final Throwable t = unwrapped != null ? unwrapped : cause; + Channel channel = ctx.channel(); + onException(channel.attr(CHANNEL_KEY).get(), t instanceof Exception ? (Exception) t : new ElasticsearchException(t)); + } + + @Override + protected NettyTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener listener) + throws IOException { + ChannelFuture channelFuture = bootstrap.connect(node.getAddress().address()); + Channel channel = channelFuture.channel(); + if (channel == null) { + Netty4Utils.maybeDie(channelFuture.cause()); + throw new IOException(channelFuture.cause()); + } + addClosedExceptionLogger(channel); + + NettyTcpChannel nettyChannel = new NettyTcpChannel(channel); + channel.attr(CHANNEL_KEY).set(nettyChannel); + + channelFuture.addListener(f -> { + if (f.isSuccess()) { + listener.onResponse(null); + } else { + Throwable cause = f.cause(); + if (cause instanceof Error) { + Netty4Utils.maybeDie(cause); + listener.onFailure(new Exception(cause)); + } else { + listener.onFailure((Exception) cause); + } + } + }); + + return nettyChannel; + } + + @Override + protected NettyTcpChannel bind(String name, InetSocketAddress address) { + Channel channel = serverBootstraps.get(name).bind(address).syncUninterruptibly().channel(); + NettyTcpChannel esChannel = new NettyTcpChannel(channel); + channel.attr(CHANNEL_KEY).set(esChannel); + return esChannel; + } + + ScheduledPing getPing() { + return scheduledPing; + } + + @Override + @SuppressForbidden(reason = "debug") + protected void stopInternal() { + Releasables.close(() -> { + final List>> serverBootstrapCloseFutures = new ArrayList<>(serverBootstraps.size()); + for (final Map.Entry entry : serverBootstraps.entrySet()) { + serverBootstrapCloseFutures.add( + Tuple.tuple(entry.getKey(), entry.getValue().config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS))); + } + for (final Tuple> future : serverBootstrapCloseFutures) { + future.v2().awaitUninterruptibly(); + if (!future.v2().isSuccess()) { + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "Error closing server bootstrap for profile [{}]", future.v1()), future.v2().cause()); + } + } + serverBootstraps.clear(); + + if (bootstrap != null) { + bootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly(); + bootstrap = null; + } + }); + } + + protected class ClientChannelInitializer extends ChannelInitializer { + + @Override + protected void initChannel(Channel ch) throws Exception { + ch.pipeline().addLast("logging", new ESLoggingHandler()); + ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); + // using a dot as a prefix means this cannot come from any settings parsed + ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, ".client")); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + Netty4Utils.maybeDie(cause); + super.exceptionCaught(ctx, cause); + } + } + + protected class ServerChannelInitializer extends 
ChannelInitializer { + + protected final String name; + + protected ServerChannelInitializer(String name) { + this.name = name; + } + + @Override + protected void initChannel(Channel ch) throws Exception { + addClosedExceptionLogger(ch); + NettyTcpChannel nettyTcpChannel = new NettyTcpChannel(ch); + ch.attr(CHANNEL_KEY).set(nettyTcpChannel); + serverAcceptedChannel(nettyTcpChannel); + ch.pipeline().addLast("logging", new ESLoggingHandler()); + ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); + ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, name)); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + Netty4Utils.maybeDie(cause); + super.exceptionCaught(ctx, cause); + } + } + + private void addClosedExceptionLogger(Channel channel) { + channel.closeFuture().addListener(f -> { + if (f.isSuccess() == false) { + logger.debug(() -> new ParameterizedMessage("exception while closing channel: {}", channel), f.cause()); + } + }); + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Utils.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Utils.java new file mode 100644 index 0000000..355f4c9 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Utils.java @@ -0,0 +1,164 @@ +package org.xbib.elasticsearch.client.transport; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.util.NettyRuntime; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.ESLoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +public class Netty4Utils { + + static { + InternalLoggerFactory.setDefaultFactory(new InternalLoggerFactory() { + + @Override + public InternalLogger newInstance(final String name) { + return new Netty4InternalESLogger(name); + } + + }); + } + + public static void setup() { + + } + + private static AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean(); + + /** + * Set the number of available processors that Netty uses for sizing various resources (e.g., thread pools). 
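Embedders and tests that build several transports in one JVM usually opt out of this override rather than fight over the processor count. A minimal sketch, assuming the system property named in the method body below:

[source,java]
----
// Sketch: disable the Netty processor override, e.g. in tests that create
// more than one Netty4Transport in a single JVM.
System.setProperty("es.set.netty.runtime.available.processors", "false");
Netty4Utils.setAvailableProcessors(4);   // now a no-op instead of pinning Netty's processor count
----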
+ * + * @param availableProcessors the number of available processors + * @throws IllegalStateException if available processors was set previously and the specified value does not match the already-set value + */ + public static void setAvailableProcessors(final int availableProcessors) { + // we set this to false in tests to avoid tests that randomly set processors from stepping on each other + final boolean set = Booleans.parseBoolean(System.getProperty("es.set.netty.runtime.available.processors", "true")); + if (!set) { + return; + } + + try { + NettyRuntime.setAvailableProcessors(availableProcessors); + } catch (IllegalStateException e) { + // + } + } + + /** + * Turns the given BytesReference into a ByteBuf. Note: the returned ByteBuf will reference the internal + * pages of the BytesReference. Don't free the bytes of reference before the ByteBuf goes out of scope. + */ + public static ByteBuf toByteBuf(final BytesReference reference) { + if (reference.length() == 0) { + return Unpooled.EMPTY_BUFFER; + } + if (reference instanceof ByteBufBytesReference) { + return ((ByteBufBytesReference) reference).toByteBuf(); + } else { + final BytesRefIterator iterator = reference.iterator(); + // usually we have one, two, or three components from the header, the message, and a buffer + final List buffers = new ArrayList<>(3); + try { + BytesRef slice; + while ((slice = iterator.next()) != null) { + buffers.add(Unpooled.wrappedBuffer(slice.bytes, slice.offset, slice.length)); + } + final CompositeByteBuf composite = Unpooled.compositeBuffer(buffers.size()); + composite.addComponents(true, buffers); + return composite; + } catch (IOException ex) { + throw new AssertionError("no IO happens here", ex); + } + } + } + + /** + * Wraps the given ChannelBuffer with a BytesReference + */ + public static BytesReference toBytesReference(final ByteBuf buffer) { + return toBytesReference(buffer, buffer.readableBytes()); + } + + /** + * Wraps the given ChannelBuffer with a BytesReference of a given size + */ + static BytesReference toBytesReference(final ByteBuf buffer, final int size) { + return new ByteBufBytesReference(buffer, size); + } + + public static void closeChannels(final Collection channels) throws IOException { + IOException closingExceptions = null; + final List futures = new ArrayList<>(); + for (final Channel channel : channels) { + try { + if (channel != null && channel.isOpen()) { + futures.add(channel.close()); + } + } catch (Exception e) { + if (closingExceptions == null) { + closingExceptions = new IOException("failed to close channels"); + } + closingExceptions.addSuppressed(e); + } + } + for (final ChannelFuture future : futures) { + future.awaitUninterruptibly(); + } + + if (closingExceptions != null) { + throw closingExceptions; + } + } + + /** + * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be + * caught and bubbles up to the uncaught exception handler. + * + * @param cause the throwable to test + */ + public static void maybeDie(final Throwable cause) { + final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class); + final Optional maybeError = ExceptionsHelper.maybeError(cause, logger); + if (maybeError.isPresent()) { + /* + * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many + * invocations of user-code in try/catch blocks that swallow all throwables. 
This means that a rethrow here will not bubble up + * to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap + * the exception so as to not lose the original cause during exit. + */ + try { + // try to log the current stack trace + final StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); + final String formatted = Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n")); + logger.error("fatal error on the network layer\n{}", formatted); + } finally { + new Thread( + () -> { + throw maybeError.get(); + }) + .start(); + } + } + } + +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/NettyTcpChannel.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/NettyTcpChannel.java new file mode 100644 index 0000000..b0e5f73 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/NettyTcpChannel.java @@ -0,0 +1,92 @@ +package org.xbib.elasticsearch.client.transport; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPromise; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.transport.TcpChannel; +import org.elasticsearch.transport.TransportException; + +import java.net.InetSocketAddress; +import java.util.concurrent.CompletableFuture; + +public class NettyTcpChannel implements TcpChannel { + + private final Channel channel; + private final CompletableFuture closeContext = new CompletableFuture<>(); + + NettyTcpChannel(Channel channel) { + this.channel = channel; + this.channel.closeFuture().addListener(f -> { + if (f.isSuccess()) { + closeContext.complete(null); + } else { + Throwable cause = f.cause(); + if (cause instanceof Error) { + Netty4Utils.maybeDie(cause); + closeContext.completeExceptionally(cause); + } else { + closeContext.completeExceptionally(cause); + } + } + }); + } + + @Override + public void close() { + channel.close(); + } + + @Override + public void addCloseListener(ActionListener listener) { + closeContext.whenComplete(ActionListener.toBiConsumer(listener)); + } + + @Override + public void setSoLinger(int value) { + channel.config().setOption(ChannelOption.SO_LINGER, value); + } + + @Override + public boolean isOpen() { + return channel.isOpen(); + } + + @Override + public InetSocketAddress getLocalAddress() { + return (InetSocketAddress) channel.localAddress(); + } + + @Override + public void sendMessage(BytesReference reference, ActionListener listener) { + ChannelPromise writePromise = channel.newPromise(); + writePromise.addListener(f -> { + if (f.isSuccess()) { + listener.onResponse(null); + } else { + final Throwable cause = f.cause(); + Netty4Utils.maybeDie(cause); + assert cause instanceof Exception; + listener.onFailure((Exception) cause); + } + }); + channel.writeAndFlush(Netty4Utils.toByteBuf(reference), writePromise); + + if (channel.eventLoop().isShutdown()) { + listener.onFailure(new TransportException("Cannot send message, event loop is shutting down.")); + } + } + + public Channel getLowLevelChannel() { + return channel; + } + + @Override + public String toString() { + return "NettyTcpChannel{" + + "localAddress=" + getLocalAddress() + + ", remoteAddress=" + channel.remoteAddress() + + '}'; + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkModule.java 
b/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkModule.java
new file mode 100644
index 0000000..820353e
--- /dev/null
+++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkModule.java
@@ -0,0 +1,237 @@
+package org.xbib.elasticsearch.client.transport;
+
+import org.elasticsearch.action.support.replication.ReplicationTask;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.tasks.RawTaskStatus;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Supplier;
+
+/**
+ * A module to handle registering and binding all network related classes.
+ */
+public final class NetworkModule {
+
+    public static final String TRANSPORT_TYPE_KEY = "transport.type";
+    public static final String HTTP_TYPE_KEY = "http.type";
+    public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default";
+    public static final String TRANSPORT_TYPE_DEFAULT_KEY = "transport.type.default";
+
+    public static final Setting<String> TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_DEFAULT_KEY,
+            Property.NodeScope);
+    public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope);
+    public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope);
+    public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope);
+    public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope);
+
+    private final Settings settings;
+    private final boolean transportClient;
+
+    private static final List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>();
+    private static final List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>();
+
+    static {
+        registerAllocationCommand(CancelAllocationCommand::new, CancelAllocationCommand::fromXContent,
+            CancelAllocationCommand.COMMAND_NAME_FIELD);
+        registerAllocationCommand(MoveAllocationCommand::new, MoveAllocationCommand::fromXContent,
+            MoveAllocationCommand.COMMAND_NAME_FIELD);
+        registerAllocationCommand(AllocateReplicaAllocationCommand::new, AllocateReplicaAllocationCommand::fromXContent,
+            AllocateReplicaAllocationCommand.COMMAND_NAME_FIELD);
+        registerAllocationCommand(AllocateEmptyPrimaryAllocationCommand::new, AllocateEmptyPrimaryAllocationCommand::fromXContent,
+            AllocateEmptyPrimaryAllocationCommand.COMMAND_NAME_FIELD);
+        registerAllocationCommand(AllocateStalePrimaryAllocationCommand::new, AllocateStalePrimaryAllocationCommand::fromXContent,
+            AllocateStalePrimaryAllocationCommand.COMMAND_NAME_FIELD);
+        namedWriteables.add(
+            new NamedWriteableRegistry.Entry(Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new));
+        namedWriteables.add(
+            new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new));
+    }
+
+    private final Map<String, Supplier<Transport>> transportFactories = new HashMap<>();
+    private final Map<String, Supplier<HttpServerTransport>> transportHttpFactories = new HashMap<>();
+    private final List<TransportInterceptor> transportIntercetors = new ArrayList<>();
+
+    /**
+     * Creates a network module that custom networking classes can be plugged into.
+     * @param settings The settings for the node
+     * @param transportClient True if only transport classes should be allowed to be registered, false otherwise.
+     */
+    public NetworkModule(Settings settings, boolean transportClient, List<NetworkPlugin> plugins, ThreadPool threadPool,
+                         BigArrays bigArrays,
+                         PageCacheRecycler pageCacheRecycler,
+                         CircuitBreakerService circuitBreakerService,
+                         NamedWriteableRegistry namedWriteableRegistry,
+                         NamedXContentRegistry xContentRegistry,
+                         NetworkService networkService, HttpServerTransport.Dispatcher dispatcher) {
+        this.settings = settings;
+        this.transportClient = transportClient;
+        for (NetworkPlugin plugin : plugins) {
+            if (transportClient == false && HTTP_ENABLED.get(settings)) {
+                Map<String, Supplier<HttpServerTransport>> httpTransportFactory = plugin.getHttpTransports(settings, threadPool,
+                    bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, dispatcher);
+                for (Map.Entry<String, Supplier<HttpServerTransport>> entry : httpTransportFactory.entrySet()) {
+                    registerHttpTransport(entry.getKey(), entry.getValue());
+                }
+            }
+            Map<String, Supplier<Transport>> transportFactory = plugin.getTransports(settings, threadPool, bigArrays, pageCacheRecycler,
+                circuitBreakerService, namedWriteableRegistry, networkService);
+            for (Map.Entry<String, Supplier<Transport>> entry : transportFactory.entrySet()) {
+                registerTransport(entry.getKey(), entry.getValue());
+            }
+            List<TransportInterceptor> transportInterceptors = plugin.getTransportInterceptors(namedWriteableRegistry,
+                threadPool.getThreadContext());
+            for (TransportInterceptor interceptor : transportInterceptors) {
+                registerTransportInterceptor(interceptor);
+            }
+        }
+    }
+
+    public boolean isTransportClient() {
+        return transportClient;
+    }
+
+    /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */
+    private void registerTransport(String key, Supplier<Transport> factory) {
+        if (transportFactories.putIfAbsent(key, factory) != null) {
+            throw new IllegalArgumentException("transport for name: " + key + " is already registered");
+        }
+    }
+
+    /** Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}. */
+    // TODO: we need another name than "http transport"....so confusing with transportClient...
+    private void registerHttpTransport(String key, Supplier<HttpServerTransport> factory) {
+        if (transportClient) {
+            throw new IllegalArgumentException("Cannot register http transport " + key + " for transport client");
+        }
+        if (transportHttpFactories.putIfAbsent(key, factory) != null) {
+            throw new IllegalArgumentException("transport for name: " + key + " is already registered");
+        }
+    }
+
+    /**
+     * Register an allocation command.
+     *
+ * This lives here instead of the more aptly named ClusterModule because the Transport client needs these to be registered. + *
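 * For example, the cancel command registered in the static block above ends up with both a
 * NamedXContentRegistry entry (so it can be parsed out of a cluster reroute request body) and a
 * NamedWriteableRegistry entry under the same preferred name (so it can travel over the transport protocol).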
+ * @param reader the reader to read it from a stream + * @param parser the parser to read it from XContent + * @param commandName the names under which the command should be parsed. The {@link ParseField#getPreferredName()} is special because + * it is the name under which the command's reader is registered. + */ + private static void registerAllocationCommand(Writeable.Reader reader, + CheckedFunction parser, ParseField commandName) { + namedXContents.add(new NamedXContentRegistry.Entry(AllocationCommand.class, commandName, parser)); + namedWriteables.add(new NamedWriteableRegistry.Entry(AllocationCommand.class, commandName.getPreferredName(), reader)); + } + + public static List getNamedWriteables() { + return Collections.unmodifiableList(namedWriteables); + } + + public static List getNamedXContents() { + return Collections.unmodifiableList(namedXContents); + } + + public Supplier getHttpServerTransportSupplier() { + final String name; + if (HTTP_TYPE_SETTING.exists(settings)) { + name = HTTP_TYPE_SETTING.get(settings); + } else { + name = HTTP_DEFAULT_TYPE_SETTING.get(settings); + } + final Supplier factory = transportHttpFactories.get(name); + if (factory == null) { + throw new IllegalStateException("Unsupported http.type [" + name + "]"); + } + return factory; + } + + public boolean isHttpEnabled() { + return transportClient == false && HTTP_ENABLED.get(settings); + } + + public Supplier getTransportSupplier() { + final String name; + if (TRANSPORT_TYPE_SETTING.exists(settings)) { + name = TRANSPORT_TYPE_SETTING.get(settings); + } else { + name = TRANSPORT_DEFAULT_TYPE_SETTING.get(settings); + } + final Supplier factory = transportFactories.get(name); + if (factory == null) { + throw new IllegalStateException("Unsupported transport.type [" + name + "] factories = " + transportFactories); + } + return factory; + } + + /** + * Registers a new {@link TransportInterceptor} + */ + private void registerTransportInterceptor(TransportInterceptor interceptor) { + this.transportIntercetors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); + } + + /** + * Returns a composite {@link TransportInterceptor} containing all registered interceptors + * @see #registerTransportInterceptor(TransportInterceptor) + */ + public TransportInterceptor getTransportInterceptor() { + return new CompositeTransportInterceptor(this.transportIntercetors); + } + + static final class CompositeTransportInterceptor implements TransportInterceptor { + final List transportInterceptors; + + private CompositeTransportInterceptor(List transportInterceptors) { + this.transportInterceptors = new ArrayList<>(transportInterceptors); + } + + @Override + public TransportRequestHandler interceptHandler(String action, String executor, + boolean forceExecution, + TransportRequestHandler actualHandler) { + for (TransportInterceptor interceptor : this.transportInterceptors) { + actualHandler = interceptor.interceptHandler(action, executor, forceExecution, actualHandler); + } + return actualHandler; + } + + @Override + public AsyncSender interceptSender(AsyncSender sender) { + for (TransportInterceptor interceptor : this.transportInterceptors) { + sender = interceptor.interceptSender(sender); + } + return sender; + } + } + +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkPlugin.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkPlugin.java new file mode 100644 index 0000000..f566813 --- /dev/null +++ 
b/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkPlugin.java @@ -0,0 +1,61 @@ +package org.xbib.elasticsearch.client.transport; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.threadpool.ThreadPool; + +/** + * Plugin for extending network and transport related classes + */ +public interface NetworkPlugin { + + /** + * Returns a list of {@link TransportInterceptor} instances that are used to intercept incoming and outgoing + * transport (inter-node) requests. This must not return null + * + * @param namedWriteableRegistry registry of all named writeables registered + * @param threadContext a {@link ThreadContext} of the current nodes or clients {@link ThreadPool} that can be used to set additional + * headers in the interceptors + */ + default List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, + ThreadContext threadContext) { + return Collections.emptyList(); + } + + /** + * Returns a map of {@link Transport} suppliers. + * See {@link org.elasticsearch.common.network.NetworkModule#TRANSPORT_TYPE_KEY} to configure a specific implementation. + */ + default Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService) { + return Collections.emptyMap(); + } + + /** + * Returns a map of {@link HttpServerTransport} suppliers. + * See {@link org.elasticsearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation. 
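 *
 * A minimal, hypothetical implementation could look like the sketch below; the "my-http" key and the
 * MyHttpServerTransport class are illustrative placeholders, not types from this patch:
 *
 *   public class MyNetworkPlugin implements NetworkPlugin {
 *       public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool,
 *               BigArrays bigArrays, CircuitBreakerService circuitBreakerService,
 *               NamedWriteableRegistry namedWriteableRegistry, NamedXContentRegistry xContentRegistry,
 *               NetworkService networkService, HttpServerTransport.Dispatcher dispatcher) {
 *           return Collections.singletonMap("my-http",
 *               () -> new MyHttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher));
 *       }
 *   }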
+ */ + default Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher) { + return Collections.emptyMap(); + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterConnection.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterConnection.java new file mode 100644 index 0000000..2251158 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterConnection.java @@ -0,0 +1,728 @@ +package org.xbib.elasticsearch.client.transport; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.CancellableThreads; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.TransportActionProxy; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponseHandler; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * Represents a connection to a 
single remote cluster. In contrast to a local cluster a remote cluster is not joined such that the + * current node is part of the cluster and it won't receive cluster state updates from the remote cluster. Remote clusters are also not + * fully connected with the current node. From a connection perspective a local cluster forms a bi-directional star network while in the + * remote case we only connect to a subset of the nodes in the cluster in an uni-directional fashion. + * + * This class also handles the discovery of nodes from the remote cluster. The initial list of seed nodes is only used to discover all nodes + * in the remote cluster and connects to all eligible nodes, for details see {@link RemoteClusterService#REMOTE_NODE_ATTRIBUTE}. + * + * In the case of a disconnection, this class will issue a re-connect task to establish at most + * {@link RemoteClusterService#REMOTE_CONNECTIONS_PER_CLUSTER} until either all eligible nodes are exhausted or the maximum number of + * connections per cluster has been reached. + */ +final class RemoteClusterConnection extends AbstractComponent implements TransportConnectionListener, Closeable { + + private final TransportService transportService; + private final ConnectionProfile remoteProfile; + private final ConnectedNodes connectedNodes; + private final String clusterAlias; + private final int maxNumRemoteConnections; + private final Predicate nodePredicate; + private volatile List seedNodes; + private volatile boolean skipUnavailable; + private final ConnectHandler connectHandler; + private SetOnce remoteClusterName = new SetOnce<>(); + + /** + * Creates a new {@link RemoteClusterConnection} + * @param settings the nodes settings object + * @param clusterAlias the configured alias of the cluster to connect to + * @param seedNodes a list of seed nodes to discover eligible nodes from + * @param transportService the local nodes transport service + * @param maxNumRemoteConnections the maximum number of connections to the remote cluster + * @param nodePredicate a predicate to filter eligible remote nodes to connect to + */ + RemoteClusterConnection(Settings settings, String clusterAlias, List seedNodes, + TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate) { + super(settings); + this.transportService = transportService; + this.maxNumRemoteConnections = maxNumRemoteConnections; + this.nodePredicate = nodePredicate; + this.clusterAlias = clusterAlias; + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + builder.setConnectTimeout(TcpTransport.TCP_CONNECT_TIMEOUT.get(settings)); + builder.setHandshakeTimeout(TcpTransport.TCP_CONNECT_TIMEOUT.get(settings)); + builder.addConnections(6, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING); // TODO make this configurable? 
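        // The six channels above are reserved for regular requests and pings; the call below deliberately
        // allocates zero bulk/state/recovery channels, so this connection can never carry anything heavier
        // than search coordination traffic to the remote cluster.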
+ builder.addConnections(0, // we don't want this to be used for anything else but search + TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.STATE, + TransportRequestOptions.Type.RECOVERY); + remoteProfile = builder.build(); + connectedNodes = new ConnectedNodes(clusterAlias); + this.seedNodes = Collections.unmodifiableList(seedNodes); + this.skipUnavailable = RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE + .getConcreteSettingForNamespace(clusterAlias).get(settings); + this.connectHandler = new ConnectHandler(); + transportService.addConnectionListener(this); + } + + /** + * Updates the list of seed nodes for this cluster connection + */ + synchronized void updateSeedNodes(List seedNodes, ActionListener connectListener) { + this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes)); + connectHandler.connect(connectListener); + } + + /** + * Updates the skipUnavailable flag that can be dynamically set for each remote cluster + */ + void updateSkipUnavailable(boolean skipUnavailable) { + this.skipUnavailable = skipUnavailable; + } + + @Override + public void onNodeDisconnected(DiscoveryNode node) { + boolean remove = connectedNodes.remove(node); + if (remove && connectedNodes.size() < maxNumRemoteConnections) { + // try to reconnect and fill up the slot of the disconnected node + connectHandler.forceConnect(); + } + } + + /** + * Fetches all shards for the search request from this remote connection. This is used to later run the search on the remote end. + */ + public void fetchSearchShards(ClusterSearchShardsRequest searchRequest, + ActionListener listener) { + + final ActionListener searchShardsListener; + final Consumer onConnectFailure; + if (skipUnavailable) { + onConnectFailure = (exception) -> listener.onResponse(ClusterSearchShardsResponse.EMPTY); + searchShardsListener = ActionListener.wrap(listener::onResponse, (e) -> listener.onResponse(ClusterSearchShardsResponse.EMPTY)); + } else { + onConnectFailure = listener::onFailure; + searchShardsListener = listener; + } + // in case we have no connected nodes we try to connect and if we fail we either notify the listener or not depending on + // the skip_unavailable setting + ensureConnected(ActionListener.wrap((x) -> fetchShardsInternal(searchRequest, searchShardsListener), onConnectFailure)); + } + + /** + * Ensures that this cluster is connected. If the cluster is connected this operation + * will invoke the listener immediately. 
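     * A typical caller wraps its follow-up action, as {@link #fetchSearchShards} does above:
     *   ensureConnected(ActionListener.wrap(x -> fetchShardsInternal(searchRequest, searchShardsListener), onConnectFailure));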
+ */ + public void ensureConnected(ActionListener voidActionListener) { + if (connectedNodes.size() == 0) { + connectHandler.connect(voidActionListener); + } else { + voidActionListener.onResponse(null); + } + } + + private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest, + final ActionListener listener) { + final DiscoveryNode node = connectedNodes.get(); + transportService.sendRequest(node, ClusterSearchShardsAction.NAME, searchShardsRequest, + new TransportResponseHandler() { + + @Override + public ClusterSearchShardsResponse newInstance() { + return new ClusterSearchShardsResponse(); + } + + @Override + public void handleResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { + listener.onResponse(clusterSearchShardsResponse); + } + + @Override + public void handleException(TransportException e) { + listener.onFailure(e); + } + + @Override + public String executor() { + return ThreadPool.Names.SEARCH; + } + }); + } + + /** + * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link DiscoveryNode} lookup function + * that returns null if the node ID is not found. + */ + void collectNodes(ActionListener> listener) { + Runnable runnable = () -> { + final ClusterStateRequest request = new ClusterStateRequest(); + request.clear(); + request.nodes(true); + request.local(true); // run this on the node that gets the request it's as good as any other + final DiscoveryNode node = connectedNodes.get(); + transportService.sendRequest(node, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, + new TransportResponseHandler() { + @Override + public ClusterStateResponse newInstance() { + return new ClusterStateResponse(); + } + + @Override + public void handleResponse(ClusterStateResponse response) { + DiscoveryNodes nodes = response.getState().nodes(); + listener.onResponse(nodes::get); + } + + @Override + public void handleException(TransportException exp) { + listener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }); + }; + try { + // just in case if we are not connected for some reason we try to connect and if we fail we have to notify the listener + // this will cause some back pressure on the search end and eventually will cause rejections but that's fine + // we can't proceed with a search on a cluster level. + // in the future we might want to just skip the remote nodes in such a case but that can already be implemented on the + // caller end since they provide the listener. + ensureConnected(ActionListener.wrap((x) -> runnable.run(), listener::onFailure)); + } catch (Exception ex) { + listener.onFailure(ex); + } + } + + /** + * Returns a connection to the remote cluster. This connection might be a proxy connection that redirects internally to the + * given node. 
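     * Requests sent through the returned connection are rewritten on the fly, roughly
     *   connection.sendRequest(requestId, TransportActionProxy.getProxyAction(action),
     *       TransportActionProxy.wrapRequest(remoteClusterNode, request), options);
     * so the directly connected gateway node forwards them to the desired node inside the remote cluster.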
+ */ + Transport.Connection getConnection(DiscoveryNode remoteClusterNode) { + DiscoveryNode discoveryNode = connectedNodes.get(); + Transport.Connection connection = transportService.getConnection(discoveryNode); + return new Transport.Connection() { + @Override + public DiscoveryNode getNode() { + return remoteClusterNode; + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + connection.sendRequest(requestId, TransportActionProxy.getProxyAction(action), + TransportActionProxy.wrapRequest(remoteClusterNode, request), options); + } + + @Override + public void close() throws IOException { + assert false: "proxy connections must not be closed"; + } + + @Override + public Version getVersion() { + return connection.getVersion(); + } + }; + } + + Transport.Connection getConnection() { + DiscoveryNode discoveryNode = connectedNodes.get(); + return transportService.getConnection(discoveryNode); + } + + @Override + public void close() throws IOException { + connectHandler.close(); + } + + public boolean isClosed() { + return connectHandler.isClosed(); + } + + /** + * The connect handler manages node discovery and the actual connect to the remote cluster. + * There is at most one connect job running at any time. If such a connect job is triggered + * while another job is running the provided listeners are queued and batched up until the current running job returns. + * + * The handler has a built-in queue that can hold up to 100 connect attempts and will reject requests once the queue is full. + * In a scenario when a remote cluster becomes unavailable we will queue requests up but if we can't connect quick enough + * we will just reject the connect trigger which will lead to failing searches. + */ + private class ConnectHandler implements Closeable { + private final Semaphore running = new Semaphore(1); + private final AtomicBoolean closed = new AtomicBoolean(false); + private final BlockingQueue> queue = new ArrayBlockingQueue<>(100); + private final CancellableThreads cancellableThreads = new CancellableThreads(); + + /** + * Triggers a connect round iff there are pending requests queued up and if there is no + * connect round currently running. + */ + void maybeConnect() { + connect(null); + } + + /** + * Triggers a connect round unless there is one running already. If there is a connect round running, the listener will either + * be queued or rejected and failed. + */ + void connect(ActionListener connectListener) { + connect(connectListener, false); + } + + /** + * Triggers a connect round unless there is one already running. In contrast to {@link #maybeConnect()} will this method also + * trigger a connect round if there is no listener queued up. 
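     *
     * All three entry points funnel into the same batching logic below; a simplified sketch of that logic
     * (error and shutdown handling omitted):
     *
     *   if (listener != null && queue.offer(listener) == false) {     // at most 100 pending listeners
     *       listener.onFailure(new RejectedExecutionException("connect queue is full"));
     *   } else if (running.tryAcquire()) {                            // only one connect round at a time
     *       List<ActionListener<Void>> batch = new ArrayList<>();
     *       queue.drainTo(batch);                                     // everyone queued so far shares this round
     *       forkConnect(batch);
     *   }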
+ */ + void forceConnect() { + connect(null, true); + } + + private void connect(ActionListener connectListener, boolean forceRun) { + final boolean runConnect; + final Collection> toNotify; + synchronized (queue) { + if (connectListener != null && queue.offer(connectListener) == false) { + connectListener.onFailure(new RejectedExecutionException("connect queue is full")); + return; + } + if (forceRun == false && queue.isEmpty()) { + return; + } + runConnect = running.tryAcquire(); + if (runConnect) { + toNotify = new ArrayList<>(); + queue.drainTo(toNotify); + if (closed.get()) { + running.release(); + ActionListener.onFailure(toNotify, new AlreadyClosedException("connect handler is already closed")); + return; + } + } else { + toNotify = Collections.emptyList(); + } + } + if (runConnect) { + forkConnect(toNotify); + } + } + + private void forkConnect(final Collection> toNotify) { + ThreadPool threadPool = transportService.getThreadPool(); + ExecutorService executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); + executor.submit(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + synchronized (queue) { + running.release(); + } + try { + ActionListener.onFailure(toNotify, e); + } finally { + maybeConnect(); + } + } + + @Override + protected void doRun() throws Exception { + ActionListener listener = ActionListener.wrap((x) -> { + synchronized (queue) { + running.release(); + } + try { + ActionListener.onResponse(toNotify, x); + } finally { + maybeConnect(); + } + + }, (e) -> { + synchronized (queue) { + running.release(); + } + try { + ActionListener.onFailure(toNotify, e); + } finally { + maybeConnect(); + } + }); + collectRemoteNodes(seedNodes.iterator(), transportService, listener); + } + }); + + } + + void collectRemoteNodes(Iterator seedNodes, + final TransportService transportService, ActionListener listener) { + if (Thread.currentThread().isInterrupted()) { + listener.onFailure(new InterruptedException("remote connect thread got interrupted")); + } + try { + if (seedNodes.hasNext()) { + cancellableThreads.executeIO(() -> { + final DiscoveryNode seedNode = seedNodes.next(); + final DiscoveryNode handshakeNode; + Transport.Connection connection = transportService.openConnection(seedNode, + ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null)); + boolean success = false; + try { + try { + handshakeNode = transportService.handshake(connection, remoteProfile.getHandshakeTimeout().millis(), + (c) -> remoteClusterName.get() == null ? true : c.equals(remoteClusterName.get())); + } catch (IllegalStateException ex) { + logger.warn((Supplier) () -> new ParameterizedMessage("seed node {} cluster name mismatch expected " + + "cluster name {}", connection.getNode(), remoteClusterName.get()), ex); + throw ex; + } + if (nodePredicate.test(handshakeNode) && connectedNodes.size() < maxNumRemoteConnections) { + transportService.connectToNode(handshakeNode, remoteProfile); + connectedNodes.add(handshakeNode); + } + ClusterStateRequest request = new ClusterStateRequest(); + request.clear(); + request.nodes(true); + // here we pass on the connection since we can only close it once the sendRequest returns otherwise + // due to the async nature (it will return before it's actually sent) this can cause the request to fail + // due to an already closed connection. 
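                        // The response handler below restores the calling thread's context once the response
                        // arrives, while the request itself is sent under a fresh system context so that no
                        // headers from the triggering caller leak into this internal cluster state request.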
+ ThreadPool threadPool = transportService.getThreadPool(); + ThreadContext threadContext = threadPool.getThreadContext(); + TransportService.ContextRestoreResponseHandler responseHandler = new TransportService + .ContextRestoreResponseHandler<>(threadContext.newRestorableContext(false), + new SniffClusterStateResponseHandler(transportService, connection, listener, seedNodes, + cancellableThreads)); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we stash any context here since this is an internal execution and should not leak any + // existing context information. + threadContext.markAsSystemContext(); + transportService.sendRequest(connection, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, + responseHandler); + } + success = true; + } finally { + if (success == false) { + connection.close(); + } + } + }); + } else { + listener.onFailure(new IllegalStateException("no seed node left")); + } + } catch (CancellableThreads.ExecutionCancelledException ex) { + listener.onFailure(ex); // we got canceled - fail the listener and step out + } catch (ConnectTransportException | IOException | IllegalStateException ex) { + // ISE if we fail the handshake with an version incompatible node + if (seedNodes.hasNext()) { + logger.debug((Supplier) () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", + clusterAlias), ex); + collectRemoteNodes(seedNodes, transportService, listener); + } else { + listener.onFailure(ex); + } + } + } + + @Override + public void close() throws IOException { + try { + if (closed.compareAndSet(false, true)) { + cancellableThreads.cancel("connect handler is closed"); + running.acquire(); // acquire the semaphore to ensure all connections are closed and all thread joined + running.release(); + maybeConnect(); // now go and notify pending listeners + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + final boolean isClosed() { + return closed.get(); + } + + /* This class handles the _state response from the remote cluster when sniffing nodes to connect to */ + private class SniffClusterStateResponseHandler implements TransportResponseHandler { + + private final TransportService transportService; + private final Transport.Connection connection; + private final ActionListener listener; + private final Iterator seedNodes; + private final CancellableThreads cancellableThreads; + + SniffClusterStateResponseHandler(TransportService transportService, Transport.Connection connection, + ActionListener listener, Iterator seedNodes, + CancellableThreads cancellableThreads) { + this.transportService = transportService; + this.connection = connection; + this.listener = listener; + this.seedNodes = seedNodes; + this.cancellableThreads = cancellableThreads; + } + + @Override + public ClusterStateResponse newInstance() { + return new ClusterStateResponse(); + } + + @Override + public void handleResponse(ClusterStateResponse response) { + assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; + try { + if (remoteClusterName.get() == null) { + assert response.getClusterName().value() != null; + remoteClusterName.set(response.getClusterName()); + } + try (Closeable theConnection = connection) { // the connection is unused - see comment in #collectRemoteNodes + // we have to close this connection before we notify listeners - this is mainly needed for test correctness + // since if we do it afterwards we might fail assertions 
that check if all high level connections are closed. + // from a code correctness perspective we could also close it afterwards. This try/with block will + // maintain the possibly exceptions thrown from within the try block and suppress the ones that are possible thrown + // by closing the connection + cancellableThreads.executeIO(() -> { + DiscoveryNodes nodes = response.getState().nodes(); + Iterable nodesIter = nodes.getNodes()::valuesIt; + for (DiscoveryNode node : nodesIter) { + if (nodePredicate.test(node) && connectedNodes.size() < maxNumRemoteConnections) { + try { + transportService.connectToNode(node, remoteProfile); // noop if node is connected + connectedNodes.add(node); + } catch (ConnectTransportException | IllegalStateException ex) { + // ISE if we fail the handshake with an version incompatible node + // fair enough we can't connect just move on + logger.debug((Supplier) + () -> new ParameterizedMessage("failed to connect to node {}", node), ex); + } + } + } + }); + } + listener.onResponse(null); + } catch (CancellableThreads.ExecutionCancelledException ex) { + listener.onFailure(ex); // we got canceled - fail the listener and step out + } catch (Exception ex) { + logger.warn((Supplier) + () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", + clusterAlias), ex); + collectRemoteNodes(seedNodes, transportService, listener); + } + } + + @Override + public void handleException(TransportException exp) { + assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; + logger.warn((Supplier) + () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), + exp); + try { + IOUtils.closeWhileHandlingException(connection); + } finally { + // once the connection is closed lets try the next node + collectRemoteNodes(seedNodes, transportService, listener); + } + } + + @Override + public String executor() { + return ThreadPool.Names.MANAGEMENT; + } + } + } + + boolean assertNoRunningConnections() { // for testing only + assert connectHandler.running.availablePermits() == 1; + return true; + } + + boolean isNodeConnected(final DiscoveryNode node) { + return connectedNodes.contains(node); + } + + DiscoveryNode getConnectedNode() { + return connectedNodes.get(); + } + + void addConnectedNode(DiscoveryNode node) { + connectedNodes.add(node); + } + + /** + * Fetches connection info for this connection + */ + public void getConnectionInfo(ActionListener listener) { + final Optional anyNode = connectedNodes.getAny(); + if (anyNode.isPresent() == false) { + // not connected we return immediately + RemoteConnectionInfo remoteConnectionStats = new RemoteConnectionInfo(clusterAlias, + Collections.emptyList(), Collections.emptyList(), maxNumRemoteConnections, 0, + RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable); + listener.onResponse(remoteConnectionStats); + } else { + NodesInfoRequest request = new NodesInfoRequest(); + request.clear(); + request.http(true); + + transportService.sendRequest(anyNode.get(), NodesInfoAction.NAME, request, new TransportResponseHandler() { + @Override + public NodesInfoResponse newInstance() { + return new NodesInfoResponse(); + } + + @Override + public void handleResponse(NodesInfoResponse response) { + Collection httpAddresses = new HashSet<>(); + for (NodeInfo info : response.getNodes()) { + if (connectedNodes.contains(info.getNode()) && info.getHttp() != null) { + 
httpAddresses.add(info.getHttp().getAddress().publishAddress()); + } + } + + if (httpAddresses.size() < maxNumRemoteConnections) { + // just in case non of the connected nodes have http enabled we get other http enabled nodes instead. + for (NodeInfo info : response.getNodes()) { + if (nodePredicate.test(info.getNode()) && info.getHttp() != null) { + httpAddresses.add(info.getHttp().getAddress().publishAddress()); + } + if (httpAddresses.size() == maxNumRemoteConnections) { + break; // once we have enough return... + } + } + } + RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(clusterAlias, + seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList()), new ArrayList<>(httpAddresses), + maxNumRemoteConnections, connectedNodes.size(), + RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable); + listener.onResponse(remoteConnectionInfo); + } + + @Override + public void handleException(TransportException exp) { + listener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }); + } + + } + + int getNumNodesConnected() { + return connectedNodes.size(); + } + + private static class ConnectedNodes implements Supplier { + + private final Set nodeSet = new HashSet<>(); + private final String clusterAlias; + + private Iterator currentIterator = null; + + private ConnectedNodes(String clusterAlias) { + this.clusterAlias = clusterAlias; + } + + @Override + public synchronized DiscoveryNode get() { + ensureIteratorAvailable(); + if (currentIterator.hasNext()) { + return currentIterator.next(); + } else { + throw new IllegalStateException("No node available for cluster: " + clusterAlias); + } + } + + synchronized boolean remove(DiscoveryNode node) { + final boolean setRemoval = nodeSet.remove(node); + if (setRemoval) { + currentIterator = null; + } + return setRemoval; + } + + synchronized boolean add(DiscoveryNode node) { + final boolean added = nodeSet.add(node); + if (added) { + currentIterator = null; + } + return added; + } + + synchronized int size() { + return nodeSet.size(); + } + + synchronized boolean contains(DiscoveryNode node) { + return nodeSet.contains(node); + } + + synchronized Optional getAny() { + ensureIteratorAvailable(); + if (currentIterator.hasNext()) { + return Optional.of(currentIterator.next()); + } else { + return Optional.empty(); + } + } + + private synchronized void ensureIteratorAvailable() { + if (currentIterator == null) { + currentIterator = nodeSet.iterator(); + } else if (currentIterator.hasNext() == false && nodeSet.isEmpty() == false) { + // iterator rollover + currentIterator = nodeSet.iterator(); + } + } + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterService.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterService.java new file mode 100644 index 0000000..aaa0c41 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterService.java @@ -0,0 +1,385 @@ +package org.xbib.elasticsearch.client.transport; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.support.GroupedActionListener; +import 
org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.TransportException; + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.settings.Setting.boolSetting; + +/** + * Basic service for accessing remote clusters via gateway nodes + */ +public final class RemoteClusterService extends RemoteClusterAware implements Closeable { + + /** + * The maximum number of connections that will be established to a remote cluster. For instance if there is only a single + * seed node, other nodes will be discovered up to the given number of nodes in this setting. The default is 3. + */ + public static final Setting REMOTE_CONNECTIONS_PER_CLUSTER = Setting.intSetting("search.remote.connections_per_cluster", + 3, 1, Setting.Property.NodeScope); + + /** + * The initial connect timeout for remote cluster connections + */ + public static final Setting REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = + Setting.positiveTimeSetting("search.remote.initial_connect_timeout", TimeValue.timeValueSeconds(30), Setting.Property.NodeScope); + + /** + * The name of a node attribute to select nodes that should be connected to in the remote cluster. + * For instance a node can be configured with node.attr.gateway: true in order to be eligible as a gateway node between + * clusters. In that case search.remote.node.attr: gateway can be used to filter out other nodes in the remote cluster. + * The value of the setting is expected to be a boolean, true for nodes that can become gateways, false otherwise. + */ + public static final Setting REMOTE_NODE_ATTRIBUTE = Setting.simpleString("search.remote.node.attr", + Setting.Property.NodeScope); + + /** + * If true connecting to remote clusters is supported on this node. If false this node will not establish + * connections to any remote clusters configured. Search requests executed against this node (where this node is the coordinating node) + * will fail if remote cluster syntax is used as an index pattern. 
The default is true + */ + public static final Setting ENABLE_REMOTE_CLUSTERS = Setting.boolSetting("search.remote.connect", true, + Setting.Property.NodeScope); + + public static final Setting.AffixSetting REMOTE_CLUSTER_SKIP_UNAVAILABLE = + Setting.affixKeySetting("search.remote.", "skip_unavailable", + key -> boolSetting(key, false, Setting.Property.NodeScope, Setting.Property.Dynamic), REMOTE_CLUSTERS_SEEDS); + + private final TransportService transportService; + private final int numRemoteConnections; + private volatile Map remoteClusters = Collections.emptyMap(); + + RemoteClusterService(Settings settings, TransportService transportService) { + super(settings); + this.transportService = transportService; + numRemoteConnections = REMOTE_CONNECTIONS_PER_CLUSTER.get(settings); + } + + /** + * This method updates the list of remote clusters. It's intended to be used as an update consumer on the settings infrastructure + * @param seeds a cluster alias to discovery node mapping representing the remote clusters seeds nodes + * @param connectionListener a listener invoked once every configured cluster has been connected to + */ + private synchronized void updateRemoteClusters(Map> seeds, ActionListener connectionListener) { + if (seeds.containsKey(LOCAL_CLUSTER_GROUP_KEY)) { + throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); + } + Map remoteClusters = new HashMap<>(); + if (seeds.isEmpty()) { + connectionListener.onResponse(null); + } else { + CountDown countDown = new CountDown(seeds.size()); + Predicate nodePredicate = (node) -> Version.CURRENT.isCompatible(node.getVersion()); + if (REMOTE_NODE_ATTRIBUTE.exists(settings)) { + // nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for + // cross cluster search + String attribute = REMOTE_NODE_ATTRIBUTE.get(settings); + nodePredicate = nodePredicate.and((node) -> Booleans.parseBoolean(node.getAttributes().getOrDefault(attribute, "false"))); + } + remoteClusters.putAll(this.remoteClusters); + for (Map.Entry> entry : seeds.entrySet()) { + RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey()); + if (entry.getValue().isEmpty()) { // with no seed nodes we just remove the connection + try { + IOUtils.close(remote); + } catch (IOException e) { + logger.warn("failed to close remote cluster connections for cluster: " + entry.getKey(), e); + } + remoteClusters.remove(entry.getKey()); + continue; + } + + if (remote == null) { // this is a new cluster we have to add a new representation + remote = new RemoteClusterConnection(settings, entry.getKey(), entry.getValue(), transportService, numRemoteConnections, + nodePredicate); + remoteClusters.put(entry.getKey(), remote); + } + + // now update the seed nodes no matter if it's new or already existing + RemoteClusterConnection finalRemote = remote; + remote.updateSeedNodes(entry.getValue(), ActionListener.wrap( + response -> { + if (countDown.countDown()) { + connectionListener.onResponse(response); + } + }, + exception -> { + if (countDown.fastForward()) { + connectionListener.onFailure(exception); + } + if (finalRemote.isClosed() == false) { + logger.warn("failed to update seed list for cluster: " + entry.getKey(), exception); + } + })); + } + } + this.remoteClusters = Collections.unmodifiableMap(remoteClusters); + } + + /** + * Returns true if at least one remote cluster is configured + */ + public boolean isCrossClusterSearchEnabled() { + return remoteClusters.isEmpty() == false; + } + + boolean 
isRemoteNodeConnected(final String remoteCluster, final DiscoveryNode node) { + return remoteClusters.get(remoteCluster).isNodeConnected(node); + } + + public Map groupIndices(IndicesOptions indicesOptions, String[] indices, Predicate indexExists) { + Map originalIndicesMap = new HashMap<>(); + if (isCrossClusterSearchEnabled()) { + final Map> groupedIndices = groupClusterIndices(indices, indexExists); + for (Map.Entry> entry : groupedIndices.entrySet()) { + String clusterAlias = entry.getKey(); + List originalIndices = entry.getValue(); + originalIndicesMap.put(clusterAlias, + new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), indicesOptions)); + } + if (originalIndicesMap.containsKey(LOCAL_CLUSTER_GROUP_KEY) == false) { + originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions)); + } + } else { + originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(indices, indicesOptions)); + } + return originalIndicesMap; + } + + /** + * Returns true iff the given cluster is configured as a remote cluster. Otherwise false + */ + boolean isRemoteClusterRegistered(String clusterName) { + return remoteClusters.containsKey(clusterName); + } + + public void collectSearchShards(IndicesOptions indicesOptions, String preference, String routing, + Map remoteIndicesByCluster, + ActionListener> listener) { + final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); + final Map searchShardsResponses = new ConcurrentHashMap<>(); + final AtomicReference transportException = new AtomicReference<>(); + for (Map.Entry entry : remoteIndicesByCluster.entrySet()) { + final String clusterName = entry.getKey(); + RemoteClusterConnection remoteClusterConnection = remoteClusters.get(clusterName); + if (remoteClusterConnection == null) { + throw new IllegalArgumentException("no such remote cluster: " + clusterName); + } + final String[] indices = entry.getValue().indices(); + ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices) + .indicesOptions(indicesOptions).local(true).preference(preference) + .routing(routing); + remoteClusterConnection.fetchSearchShards(searchShardsRequest, + new ActionListener() { + @Override + public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { + searchShardsResponses.put(clusterName, clusterSearchShardsResponse); + if (responsesCountDown.countDown()) { + TransportException exception = transportException.get(); + if (exception == null) { + listener.onResponse(searchShardsResponses); + } else { + listener.onFailure(transportException.get()); + } + } + } + + @Override + public void onFailure(Exception e) { + TransportException exception = new TransportException("unable to communicate with remote cluster [" + + clusterName + "]", e); + if (transportException.compareAndSet(null, exception) == false) { + exception = transportException.accumulateAndGet(exception, (previous, current) -> { + current.addSuppressed(previous); + return current; + }); + } + if (responsesCountDown.countDown()) { + listener.onFailure(exception); + } + } + }); + } + } + + /** + * Returns a connection to the given node on the given remote cluster + * @throws IllegalArgumentException if the remote cluster is unknown + */ + public Transport.Connection getConnection(DiscoveryNode node, String cluster) { + RemoteClusterConnection connection = remoteClusters.get(cluster); + if (connection == null) { + throw new IllegalArgumentException("no such remote 
cluster: " + cluster); + } + return connection.getConnection(node); + } + + /** + * Ensures that the given cluster alias is connected. If the cluster is connected this operation + * will invoke the listener immediately. + */ + public void ensureConnected(String clusterAlias, ActionListener listener) { + RemoteClusterConnection remoteClusterConnection = remoteClusters.get(clusterAlias); + if (remoteClusterConnection == null) { + throw new IllegalArgumentException("no such remote cluster: " + clusterAlias); + } + remoteClusterConnection.ensureConnected(listener); + } + + public Transport.Connection getConnection(String cluster) { + RemoteClusterConnection connection = remoteClusters.get(cluster); + if (connection == null) { + throw new IllegalArgumentException("no such remote cluster: " + cluster); + } + return connection.getConnection(); + } + + @Override + protected Set getRemoteClusterNames() { + return this.remoteClusters.keySet(); + } + + @Override + public void listenForUpdates(ClusterSettings clusterSettings) { + super.listenForUpdates(clusterSettings); + clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, + (clusterAlias, value) -> {}); + } + + synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) { + RemoteClusterConnection remote = this.remoteClusters.get(clusterAlias); + if (remote != null) { + remote.updateSkipUnavailable(skipUnavailable); + } + } + + protected void updateRemoteCluster(String clusterAlias, List addresses) { + updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {})); + } + + void updateRemoteCluster( + final String clusterAlias, + final List addresses, + final ActionListener connectionListener) { + final List nodes = addresses.stream().map(address -> { + final TransportAddress transportAddress = new TransportAddress(address); + final String id = clusterAlias + "#" + transportAddress.toString(); + final Version version = Version.CURRENT.minimumCompatibilityVersion(); + return new DiscoveryNode(id, transportAddress, version); + }).collect(Collectors.toList()); + updateRemoteClusters(Collections.singletonMap(clusterAlias, nodes), connectionListener); + } + + /** + * Connects to all remote clusters in a blocking fashion. This should be called on node startup to establish an initial connection + * to all configured seed nodes. 
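     *
     * The seed list is read from the node settings via RemoteClusterAware.buildRemoteClustersSeeds, e.g.
     * (cluster alias and addresses are illustrative values):
     *
     *   search.remote.my_cluster.seeds: ["10.0.0.1:9300", "10.0.0.2:9300"]
     *   search.remote.my_cluster.skip_unavailable: true
     *   search.remote.connections_per_cluster: 3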
+ */ + void initializeRemoteClusters() { + final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); + final PlainActionFuture future = new PlainActionFuture<>(); + Map> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings); + updateRemoteClusters(seeds, future); + try { + future.get(timeValue.millis(), TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (TimeoutException ex) { + logger.warn("failed to connect to remote clusters within {}", timeValue.toString()); + } catch (Exception e) { + throw new IllegalStateException("failed to connect to remote clusters", e); + } + } + + @Override + public void close() throws IOException { + IOUtils.close(remoteClusters.values()); + } + + public void getRemoteConnectionInfos(ActionListener> listener) { + final Map remoteClusters = this.remoteClusters; + if (remoteClusters.isEmpty()) { + listener.onResponse(Collections.emptyList()); + } else { + final GroupedActionListener actionListener = new GroupedActionListener<>(listener, + remoteClusters.size(), Collections.emptyList()); + for (RemoteClusterConnection connection : remoteClusters.values()) { + connection.getConnectionInfo(actionListener); + } + } + } + + /** + * Collects all nodes of the given clusters and returns / passes a (clusterAlias, nodeId) to {@link DiscoveryNode} + * function on success. + */ + public void collectNodes(Set clusters, ActionListener> listener) { + Map remoteClusters = this.remoteClusters; + for (String cluster : clusters) { + if (remoteClusters.containsKey(cluster) == false) { + listener.onFailure(new IllegalArgumentException("no such remote cluster: [" + cluster + "]")); + return; + } + } + + final Map> clusterMap = new HashMap<>(); + CountDown countDown = new CountDown(clusters.size()); + Function nullFunction = s -> null; + for (final String cluster : clusters) { + RemoteClusterConnection connection = remoteClusters.get(cluster); + connection.collectNodes(new ActionListener>() { + @Override + public void onResponse(Function nodeLookup) { + synchronized (clusterMap) { + clusterMap.put(cluster, nodeLookup); + } + if (countDown.countDown()) { + listener.onResponse((clusterAlias, nodeId) + -> clusterMap.getOrDefault(clusterAlias, nullFunction).apply(nodeId)); + } + } + + @Override + public void onFailure(Exception e) { + if (countDown.fastForward()) { // we need to check if it's true since we could have multiple failures + listener.onFailure(e); + } + } + }); + } + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteConnectionInfo.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteConnectionInfo.java new file mode 100644 index 0000000..35c9759 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteConnectionInfo.java @@ -0,0 +1,112 @@ +package org.xbib.elasticsearch.client.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * This class encapsulates all remote cluster information to be rendered on + * 
_remote/info requests. + */ +public final class RemoteConnectionInfo implements ToXContentFragment, Writeable { + final List seedNodes; + final List httpAddresses; + final int connectionsPerCluster; + final TimeValue initialConnectionTimeout; + final int numNodesConnected; + final String clusterAlias; + final boolean skipUnavailable; + + RemoteConnectionInfo(String clusterAlias, List seedNodes, + List httpAddresses, + int connectionsPerCluster, int numNodesConnected, + TimeValue initialConnectionTimeout, boolean skipUnavailable) { + this.clusterAlias = clusterAlias; + this.seedNodes = seedNodes; + this.httpAddresses = httpAddresses; + this.connectionsPerCluster = connectionsPerCluster; + this.numNodesConnected = numNodesConnected; + this.initialConnectionTimeout = initialConnectionTimeout; + this.skipUnavailable = skipUnavailable; + } + + public RemoteConnectionInfo(StreamInput input) throws IOException { + seedNodes = input.readList(TransportAddress::new); + httpAddresses = input.readList(TransportAddress::new); + connectionsPerCluster = input.readVInt(); + initialConnectionTimeout = input.readTimeValue(); + numNodesConnected = input.readVInt(); + clusterAlias = input.readString(); + if (input.getVersion().onOrAfter(Version.V_6_1_0)) { + skipUnavailable = input.readBoolean(); + } else { + skipUnavailable = false; + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(clusterAlias); + { + builder.startArray("seeds"); + for (TransportAddress addr : seedNodes) { + builder.value(addr.toString()); + } + builder.endArray(); + builder.startArray("http_addresses"); + for (TransportAddress addr : httpAddresses) { + builder.value(addr.toString()); + } + builder.endArray(); + builder.field("connected", numNodesConnected > 0); + builder.field("num_nodes_connected", numNodesConnected); + builder.field("max_connections_per_cluster", connectionsPerCluster); + builder.field("initial_connect_timeout", initialConnectionTimeout); + builder.field("skip_unavailable", skipUnavailable); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(seedNodes); + out.writeList(httpAddresses); + out.writeVInt(connectionsPerCluster); + out.writeTimeValue(initialConnectionTimeout); + out.writeVInt(numNodesConnected); + out.writeString(clusterAlias); + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeBoolean(skipUnavailable); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RemoteConnectionInfo that = (RemoteConnectionInfo) o; + return connectionsPerCluster == that.connectionsPerCluster && + numNodesConnected == that.numNodesConnected && + Objects.equals(seedNodes, that.seedNodes) && + Objects.equals(httpAddresses, that.httpAddresses) && + Objects.equals(initialConnectionTimeout, that.initialConnectionTimeout) && + Objects.equals(clusterAlias, that.clusterAlias) && + skipUnavailable == that.skipUnavailable; + } + + @Override + public int hashCode() { + return Objects.hash(seedNodes, httpAddresses, connectionsPerCluster, initialConnectionTimeout, + numNodesConnected, clusterAlias, skipUnavailable); + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransport.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransport.java new file mode 100644 index 0000000..0a237e7 --- /dev/null +++ 
b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransport.java @@ -0,0 +1,1808 @@ + +package org.xbib.elasticsearch.client.transport; + +import com.carrotsearch.hppc.IntHashSet; +import com.carrotsearch.hppc.IntSet; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.NotifyOnceListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.compress.Compressor; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.compress.NotCompressedException; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.network.NetworkUtils; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.PortsRange; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.KeyedLock; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ActionNotFoundTransportException; +import org.elasticsearch.transport.BindTransportException; +import org.elasticsearch.transport.BytesTransportRequest; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.NodeNotConnectedException; +import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.RequestHandlerRegistry; +import 
org.elasticsearch.transport.ResponseHandlerFailureTransportException; +import org.elasticsearch.transport.TcpChannel; +import org.elasticsearch.transport.TcpHeader; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; +import org.elasticsearch.transport.TransportResponseOptions; +import org.elasticsearch.transport.TransportSerializationException; +import org.elasticsearch.transport.TransportStats; +import org.elasticsearch.transport.Transports; + +import java.io.Closeable; +import java.io.IOException; +import java.io.StreamCorruptedException; +import java.io.UncheckedIOException; +import java.net.BindException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.nio.channels.CancelledKeyException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumMap; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.common.settings.Setting.affixKeySetting; +import static org.elasticsearch.common.settings.Setting.boolSetting; +import static org.elasticsearch.common.settings.Setting.intSetting; +import static org.elasticsearch.common.settings.Setting.listSetting; +import static org.elasticsearch.common.settings.Setting.timeSetting; +import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException; +import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException; +import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; + +public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport { + + public static final String TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX = "transport_server_worker"; + public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss"; + + public static final Setting> HOST = + listSetting("transport.host", emptyList(), Function.identity(), Setting.Property.NodeScope); + public static final Setting> BIND_HOST = + listSetting("transport.bind_host", HOST, Function.identity(), Setting.Property.NodeScope); + public static final Setting> PUBLISH_HOST = + listSetting("transport.publish_host", HOST, Function.identity(), Setting.Property.NodeScope); + public static final Setting PORT = + new 
Setting<>("transport.tcp.port", "9300-9400", Function.identity(), Setting.Property.NodeScope); + public static final Setting PUBLISH_PORT = + intSetting("transport.publish_port", -1, -1, Setting.Property.NodeScope); + public static final String DEFAULT_PROFILE = "default"; + // the scheduled internal ping interval setting, defaults to disabled (-1) + public static final Setting PING_SCHEDULE = + timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_RECOVERY = + intSetting("transport.connections_per_node.recovery", 2, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_BULK = + intSetting("transport.connections_per_node.bulk", 3, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_REG = + intSetting("transport.connections_per_node.reg", 6, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_STATE = + intSetting("transport.connections_per_node.state", 1, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_PING = + intSetting("transport.connections_per_node.ping", 1, 1, Setting.Property.NodeScope); + public static final Setting TCP_CONNECT_TIMEOUT = + timeSetting("transport.tcp.connect_timeout", NetworkService.TCP_CONNECT_TIMEOUT, Setting.Property.NodeScope); + public static final Setting TCP_NO_DELAY = + boolSetting("transport.tcp_no_delay", NetworkService.TCP_NO_DELAY, Setting.Property.NodeScope); + public static final Setting TCP_KEEP_ALIVE = + boolSetting("transport.tcp.keep_alive", NetworkService.TCP_KEEP_ALIVE, Setting.Property.NodeScope); + public static final Setting TCP_REUSE_ADDRESS = + boolSetting("transport.tcp.reuse_address", NetworkService.TCP_REUSE_ADDRESS, Setting.Property.NodeScope); + public static final Setting TCP_SEND_BUFFER_SIZE = + Setting.byteSizeSetting("transport.tcp.send_buffer_size", NetworkService.TCP_SEND_BUFFER_SIZE, + Setting.Property.NodeScope); + public static final Setting TCP_RECEIVE_BUFFER_SIZE = + Setting.byteSizeSetting("transport.tcp.receive_buffer_size", NetworkService.TCP_RECEIVE_BUFFER_SIZE, + Setting.Property.NodeScope); + + + public static final Setting.AffixSetting TCP_NO_DELAY_PROFILE = affixKeySetting("transport.profiles.", "tcp_no_delay", + key -> boolSetting(key, TcpTransport.TCP_NO_DELAY, Setting.Property.NodeScope)); + public static final Setting.AffixSetting TCP_KEEP_ALIVE_PROFILE = affixKeySetting("transport.profiles.", "tcp_keep_alive", + key -> boolSetting(key, TcpTransport.TCP_KEEP_ALIVE, Setting.Property.NodeScope)); + public static final Setting.AffixSetting TCP_REUSE_ADDRESS_PROFILE = affixKeySetting("transport.profiles.", "reuse_address", + key -> boolSetting(key, TcpTransport.TCP_REUSE_ADDRESS, Setting.Property.NodeScope)); + public static final Setting.AffixSetting TCP_SEND_BUFFER_SIZE_PROFILE = affixKeySetting("transport.profiles.", + "send_buffer_size", key -> Setting.byteSizeSetting(key, TcpTransport.TCP_SEND_BUFFER_SIZE, Setting.Property.NodeScope)); + public static final Setting.AffixSetting TCP_RECEIVE_BUFFER_SIZE_PROFILE = affixKeySetting("transport.profiles.", + "receive_buffer_size", key -> Setting.byteSizeSetting(key, TcpTransport.TCP_RECEIVE_BUFFER_SIZE, Setting.Property.NodeScope)); + + public static final Setting.AffixSetting> BIND_HOST_PROFILE = affixKeySetting("transport.profiles.", "bind_host", + key -> listSetting(key, BIND_HOST, Function.identity(), Setting.Property.NodeScope)); + public static 
final Setting.AffixSetting> PUBLISH_HOST_PROFILE = affixKeySetting("transport.profiles.", "publish_host", + key -> listSetting(key, PUBLISH_HOST, Function.identity(), Setting.Property.NodeScope)); + public static final Setting.AffixSetting PORT_PROFILE = affixKeySetting("transport.profiles.", "port", + key -> new Setting<>(key, PORT, Function.identity(), Setting.Property.NodeScope)); + public static final Setting.AffixSetting PUBLISH_PORT_PROFILE = affixKeySetting("transport.profiles.", "publish_port", + key -> intSetting(key, -1, -1, Setting.Property.NodeScope)); + + private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); + public static final int PING_DATA_SIZE = -1; + private final CircuitBreakerService circuitBreakerService; + // package visibility for tests + protected final ScheduledPing scheduledPing; + private final TimeValue pingSchedule; + protected final ThreadPool threadPool; + private final BigArrays bigArrays; + protected final NetworkService networkService; + protected final Set profileSettings; + + private volatile TransportService transportService; + + private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); + // node id to actual channel + private final ConcurrentMap connectedNodes = newConcurrentMap(); + private final Map> serverChannels = newConcurrentMap(); + private final Set acceptedChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); + + private final KeyedLock connectionLock = new KeyedLock<>(); + private final NamedWriteableRegistry namedWriteableRegistry; + + // this lock is here to make sure we close this transport and disconnect all the client nodes + // connections while no connect operations is going on... (this might help with 100% CPU when stopping the transport?) 
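+    // Hedged sketch of the lock discipline the methods below appear to follow (summary added for
+    // readability, not upstream commentary):
+    //
+    //     closeLock.readLock().lock();           // connectToNode / openConnection / disconnectFromNode
+    //     try {
+    //         ensureOpen();
+    //         // per-node changes to connectedNodes happen under connectionLock.acquire(node.getId())
+    //     } finally { closeLock.readLock().unlock(); }
+    //
+    //     closeLock.writeLock().lock();          // doStop(): exclusively drain channels and connections
+    //     try { /* close server channels, accepted channels, connectedNodes, then stopInternal() */ }
+    //     finally { closeLock.writeLock().unlock(); }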
+ private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); + protected final boolean compress; + private volatile BoundTransportAddress boundAddress; + private final String transportName; + protected final ConnectionProfile defaultConnectionProfile; + + private final ConcurrentMap pendingHandshakes = new ConcurrentHashMap<>(); + private final AtomicLong requestIdGenerator = new AtomicLong(); + private final CounterMetric numHandshakes = new CounterMetric(); + private static final String HANDSHAKE_ACTION_NAME = "internal:tcp/handshake"; + + private final MeanMetric readBytesMetric = new MeanMetric(); + private final MeanMetric transmittedBytesMetric = new MeanMetric(); + + public TcpTransport(String transportName, Settings settings, ThreadPool threadPool, BigArrays bigArrays, + CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService) { + super(settings); + this.profileSettings = getProfileSettings(settings); + this.threadPool = threadPool; + this.bigArrays = bigArrays; + this.circuitBreakerService = circuitBreakerService; + this.scheduledPing = new ScheduledPing(); + this.pingSchedule = PING_SCHEDULE.get(settings); + this.namedWriteableRegistry = namedWriteableRegistry; + this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); + this.networkService = networkService; + this.transportName = transportName; + defaultConnectionProfile = buildDefaultConnectionProfile(settings); + } + + static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { + int connectionsPerNodeRecovery = CONNECTIONS_PER_NODE_RECOVERY.get(settings); + int connectionsPerNodeBulk = CONNECTIONS_PER_NODE_BULK.get(settings); + int connectionsPerNodeReg = CONNECTIONS_PER_NODE_REG.get(settings); + int connectionsPerNodeState = CONNECTIONS_PER_NODE_STATE.get(settings); + int connectionsPerNodePing = CONNECTIONS_PER_NODE_PING.get(settings); + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + builder.setConnectTimeout(TCP_CONNECT_TIMEOUT.get(settings)); + builder.setHandshakeTimeout(TCP_CONNECT_TIMEOUT.get(settings)); + builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); + builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); + // if we are not master eligible we don't need a dedicated channel to publish the state + builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE); + // if we are not a data-node we don't need any dedicated channels for recovery + builder.addConnections(DiscoveryNode.isDataNode(settings) ? connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); + builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG); + return builder.build(); + } + + @Override + protected void doStart() { + if (pingSchedule.millis() > 0) { + threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, scheduledPing); + } + } + + @Override + public CircuitBreaker getInFlightRequestBreaker() { + // We always obtain a fresh breaker to reflect changes to the breaker configuration. 
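+        // Hedged note: handleRequest() further down charges messageLengthBytes against this breaker,
+        // roughly as follows (the string label appears stripped in this patch, so it is omitted here):
+        //
+        //     getInFlightRequestBreaker().addEstimateBytesAndMaybeBreak(messageLengthBytes, label); // may trip
+        //     getInFlightRequestBreaker().addWithoutBreaking(messageLengthBytes);                   // never trips
+        //
+        // Looking the breaker up on every call is what lets runtime breaker-settings changes take effect.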
+ return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); + } + + @Override + public void setTransportService(TransportService service) { + if (service.getRequestHandler(HANDSHAKE_ACTION_NAME) != null) { + throw new IllegalStateException(HANDSHAKE_ACTION_NAME + " is a reserved request handler and must not be registered"); + } + this.transportService = service; + } + + private static class HandshakeResponseHandler implements TransportResponseHandler { + final AtomicReference versionRef = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference exceptionRef = new AtomicReference<>(); + final TcpChannel channel; + + HandshakeResponseHandler(TcpChannel channel) { + this.channel = channel; + } + + @Override + public VersionHandshakeResponse newInstance() { + return new VersionHandshakeResponse(); + } + + @Override + public void handleResponse(VersionHandshakeResponse response) { + final boolean success = versionRef.compareAndSet(null, response.version); + latch.countDown(); + assert success; + } + + @Override + public void handleException(TransportException exp) { + final boolean success = exceptionRef.compareAndSet(null, exp); + latch.countDown(); + assert success; + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + } + + public class ScheduledPing extends AbstractLifecycleRunnable { + + /** + * The magic number (must be lower than 0) for a ping message. This is handled + * specifically in {@link TcpTransport#validateMessageHeader}. + */ + private final BytesReference pingHeader; + final CounterMetric successfulPings = new CounterMetric(); + final CounterMetric failedPings = new CounterMetric(); + + public ScheduledPing() { + super(lifecycle, logger); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeByte((byte) 'E'); + out.writeByte((byte) 'S'); + out.writeInt(PING_DATA_SIZE); + pingHeader = out.bytes(); + } catch (IOException e) { + throw new IllegalStateException(e.getMessage(), e); // won't happen + } + } + + @Override + protected void doRunInLifecycle() throws Exception { + for (Map.Entry entry : connectedNodes.entrySet()) { + DiscoveryNode node = entry.getKey(); + NodeChannels channels = entry.getValue(); + for (TcpChannel channel : channels.getChannels()) { + internalSendMessage(channel, pingHeader, new SendMetricListener(pingHeader.length()) { + @Override + protected void innerInnerOnResponse(Void v) { + successfulPings.inc(); + } + + @Override + protected void innerOnFailure(Exception e) { + if (channel.isOpen()) { + logger.debug( + (Supplier) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); + failedPings.inc(); + } else { + logger.trace((Supplier) () -> + new ParameterizedMessage("[{}] failed to send ping transport message (channel closed)", node), e); + } + + } + }); + } + } + } + + public long getSuccessfulPings() { + return successfulPings.count(); + } + + public long getFailedPings() { + return failedPings.count(); + } + + @Override + protected void onAfterInLifecycle() { + try { + threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, this); + } catch (EsRejectedExecutionException ex) { + if (ex.isExecutorShutdown()) { + logger.debug("couldn't schedule new ping execution, executor is shutting down", ex); + } else { + throw ex; + } + } + } + + @Override + public void onFailure(Exception e) { + if (lifecycle.stoppedOrClosed()) { + logger.trace("failed to send ping transport message", e); + } else { + logger.warn("failed 
to send ping transport message", e); + } + } + } + + public final class NodeChannels implements Connection { + private final Map typeMapping; + private final List channels; + private final DiscoveryNode node; + private final AtomicBoolean closed = new AtomicBoolean(false); + private final Version version; + + NodeChannels(DiscoveryNode node, List channels, ConnectionProfile connectionProfile, Version handshakeVersion) { + this.node = node; + this.channels = Collections.unmodifiableList(channels); + assert channels.size() == connectionProfile.getNumConnections() : "expected channels size to be == " + + connectionProfile.getNumConnections() + " but was: [" + channels.size() + "]"; + typeMapping = new EnumMap<>(TransportRequestOptions.Type.class); + for (ConnectionProfile.ConnectionTypeHandle handle : connectionProfile.getHandles()) { + for (TransportRequestOptions.Type type : handle.getTypes()) + typeMapping.put(type, handle); + } + version = handshakeVersion; + } + + @Override + public Version getVersion() { + return version; + } + + public List getChannels() { + return channels; + } + + public TcpChannel channel(TransportRequestOptions.Type type) { + ConnectionProfile.ConnectionTypeHandle connectionTypeHandle = typeMapping.get(type); + if (connectionTypeHandle == null) { + throw new IllegalArgumentException("no type channel for [" + type + "]"); + } + return connectionTypeHandle.getChannel(channels); + } + + public boolean allChannelsOpen() { + return channels.stream().allMatch(TcpChannel::isOpen); + } + + @Override + public void close() { + if (closed.compareAndSet(false, true)) { + try { + if (lifecycle.stopped()) { + /* We set SO_LINGER timeout to 0 to ensure that when we shutdown the node we don't + * have a gazillion connections sitting in TIME_WAIT to free up resources quickly. + * This is really the only part where we close the connection from the server side + * otherwise the client (node) initiates the TCP closing sequence which doesn't cause + * these issues. 
Setting this by default from the beginning can have unexpected + * side-effects an should be avoided, our protocol is designed in a way that clients + * close connection which is how it should be*/ + + channels.forEach(c -> { + try { + c.setSoLinger(0); + } catch (IOException e) { + logger.warn(new ParameterizedMessage("unexpected exception when setting SO_LINGER on channel {}", c), e); + } + }); + } + + boolean block = lifecycle.stopped() && Transports.isTransportThread(Thread.currentThread()) == false; + TcpChannel.closeChannels(channels, block); + } finally { + transportService.onConnectionClosed(this); + } + } + } + + @Override + public DiscoveryNode getNode() { + return this.node; + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + if (closed.get()) { + throw new NodeNotConnectedException(node, "connection already closed"); + } + TcpChannel channel = channel(options.type()); + sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), (byte) 0); + } + + boolean isClosed() { + return closed.get(); + } + } + + @Override + public boolean nodeConnected(DiscoveryNode node) { + return connectedNodes.containsKey(node); + } + + @Override + public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, + CheckedBiConsumer connectionValidator) + throws ConnectTransportException { + connectionProfile = resolveConnectionProfile(connectionProfile); + if (node == null) { + throw new ConnectTransportException(null, "can't connect to a null node"); + } + closeLock.readLock().lock(); // ensure we don't open connections while we are closing + try { + ensureOpen(); + try (Releasable ignored = connectionLock.acquire(node.getId())) { + NodeChannels nodeChannels = connectedNodes.get(node); + if (nodeChannels != null) { + return; + } + boolean success = false; + try { + nodeChannels = openConnection(node, connectionProfile); + connectionValidator.accept(nodeChannels, connectionProfile); + // we acquire a connection lock, so no way there is an existing connection + connectedNodes.put(node, nodeChannels); + if (logger.isDebugEnabled()) { + logger.debug("connected to node [{}]", node); + } + try { + transportService.onNodeConnected(node); + } finally { + if (nodeChannels.isClosed()) { + // we got closed concurrently due to a disconnect or some other event on the channel. + // the close callback will close the NodeChannel instance first and then try to remove + // the connection from the connected nodes. It will NOT acquire the connectionLock for + // the node to prevent any blocking calls on network threads. Yet, we still establish a happens + // before relationship to the connectedNodes.put since we check if we can remove the + // (DiscoveryNode, NodeChannels) tuple from the map after we closed. 
Here we check if it's closed an if so we + // try to remove it first either way one of the two wins even if the callback has run before we even added the + // tuple to the map since in that case we remove it here again + if (connectedNodes.remove(node, nodeChannels)) { + transportService.onNodeDisconnected(node); + } + throw new NodeNotConnectedException(node, "connection concurrently closed"); + } + } + success = true; + } catch (ConnectTransportException e) { + throw e; + } catch (Exception e) { + throw new ConnectTransportException(node, "general node connection failure", e); + } finally { + if (success == false) { // close the connection if there is a failure + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "failed to connect to [{}], cleaning dangling connections", node)); + IOUtils.closeWhileHandlingException(nodeChannels); + } + } + } + } finally { + closeLock.readLock().unlock(); + } + } + + /** + * takes a {@link ConnectionProfile} that have been passed as a parameter to the public methods + * and resolves it to a fully specified (i.e., no nulls) profile + */ + static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionProfile connectionProfile, + ConnectionProfile defaultConnectionProfile) { + Objects.requireNonNull(defaultConnectionProfile); + if (connectionProfile == null) { + return defaultConnectionProfile; + } else if (connectionProfile.getConnectTimeout() != null && connectionProfile.getHandshakeTimeout() != null) { + return connectionProfile; + } else { + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(connectionProfile); + if (connectionProfile.getConnectTimeout() == null) { + builder.setConnectTimeout(defaultConnectionProfile.getConnectTimeout()); + } + if (connectionProfile.getHandshakeTimeout() == null) { + builder.setHandshakeTimeout(defaultConnectionProfile.getHandshakeTimeout()); + } + return builder.build(); + } + } + + protected ConnectionProfile resolveConnectionProfile(ConnectionProfile connectionProfile) { + return resolveConnectionProfile(connectionProfile, defaultConnectionProfile); + } + + @Override + public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { + if (node == null) { + throw new ConnectTransportException(null, "can't open connection to a null node"); + } + boolean success = false; + NodeChannels nodeChannels = null; + connectionProfile = resolveConnectionProfile(connectionProfile); + closeLock.readLock().lock(); // ensure we don't open connections while we are closing + try { + ensureOpen(); + try { + int numConnections = connectionProfile.getNumConnections(); + assert numConnections > 0 : "A connection profile must be configured with at least one connection"; + List channels = new ArrayList<>(numConnections); + List> connectionFutures = new ArrayList<>(numConnections); + for (int i = 0; i < numConnections; ++i) { + try { + PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + connectionFutures.add(connectFuture); + TcpChannel channel = initiateChannel(node, connectionProfile.getConnectTimeout(), connectFuture); + logger.trace(() -> new ParameterizedMessage("Tcp transport client channel opened: {}", channel)); + channels.add(channel); + } catch (Exception e) { + // If there was an exception when attempting to instantiate the raw channels, we close all of the channels + TcpChannel.closeChannels(channels, false); + throw e; + } + } + + // If we make it past the block above, we successfully instantiated all of the channels + try { + 
TcpChannel.awaitConnected(node, connectionFutures, connectionProfile.getConnectTimeout()); + } catch (Exception ex) { + TcpChannel.closeChannels(channels, false); + throw ex; + } + + // If we make it past the block above, we have successfully established connections for all of the channels + final TcpChannel handshakeChannel = channels.get(0); // one channel is guaranteed by the connection profile + handshakeChannel.addCloseListener(ActionListener.wrap(() -> cancelHandshakeForChannel(handshakeChannel))); + Version version; + try { + version = executeHandshake(node, handshakeChannel, connectionProfile.getHandshakeTimeout()); + } catch (Exception ex) { + TcpChannel.closeChannels(channels, false); + throw ex; + } + + // If we make it past the block above, we have successfully completed the handshake and the connection is now open. + // At this point we should construct the connection, notify the transport service, and attach close listeners to the + // underlying channels. + nodeChannels = new NodeChannels(node, channels, connectionProfile, version); + transportService.onConnectionOpened(nodeChannels); + final NodeChannels finalNodeChannels = nodeChannels; + final AtomicBoolean runOnce = new AtomicBoolean(false); + Consumer onClose = c -> { + assert c.isOpen() == false : "channel is still open when onClose is called"; + // we only need to disconnect from the nodes once since all other channels + // will also try to run this we protect it from running multiple times. + if (runOnce.compareAndSet(false, true)) { + disconnectFromNodeCloseAndNotify(node, finalNodeChannels); + } + }; + + nodeChannels.channels.forEach(ch -> ch.addCloseListener(ActionListener.wrap(() -> onClose.accept(ch)))); + + if (nodeChannels.allChannelsOpen() == false) { + throw new ConnectTransportException(node, "a channel closed while connecting"); + } + success = true; + return nodeChannels; + } catch (ConnectTransportException e) { + throw e; + } catch (Exception e) { + // ConnectTransportExceptions are handled specifically on the caller end - we wrap the actual exception to ensure + // only relevant exceptions are logged on the caller end.. 
this is the same as in connectToNode + throw new ConnectTransportException(node, "general node connection failure", e); + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(nodeChannels); + } + } + } finally { + closeLock.readLock().unlock(); + } + } + + private void disconnectFromNodeCloseAndNotify(DiscoveryNode node, NodeChannels nodeChannels) { + assert nodeChannels != null : "nodeChannels must not be null"; + try { + IOUtils.closeWhileHandlingException(nodeChannels); + } finally { + if (closeLock.readLock().tryLock()) { + try { + if (connectedNodes.remove(node, nodeChannels)) { + transportService.onNodeDisconnected(node); + } + } finally { + closeLock.readLock().unlock(); + } + } + } + } + + @Override + public NodeChannels getConnection(DiscoveryNode node) { + NodeChannels nodeChannels = connectedNodes.get(node); + if (nodeChannels == null) { + throw new NodeNotConnectedException(node, "Node not connected"); + } + return nodeChannels; + } + + @Override + public void disconnectFromNode(DiscoveryNode node) { + closeLock.readLock().lock(); + NodeChannels nodeChannels = null; + try (Releasable ignored = connectionLock.acquire(node.getId())) { + nodeChannels = connectedNodes.remove(node); + } finally { + closeLock.readLock().unlock(); + if (nodeChannels != null) { // if we found it and removed it we close and notify + IOUtils.closeWhileHandlingException(nodeChannels, () -> transportService.onNodeDisconnected(node)); + } + } + } + + protected Version getCurrentVersion() { + // this is just for tests to mock stuff like the nodes version - tests can override this internally + return Version.CURRENT; + } + + @Override + public BoundTransportAddress boundAddress() { + return this.boundAddress; + } + + @Override + public Map profileBoundAddresses() { + return unmodifiableMap(new HashMap<>(profileBoundAddresses)); + } + + @Override + public List getLocalAddresses() { + List local = new ArrayList<>(); + local.add("127.0.0.1"); + // check if v6 is supported, if so, v4 will also work via mapped addresses. + if (NetworkUtils.SUPPORTS_V6) { + local.add("[::1]"); // may get ports appended! + } + return local; + } + + protected void bindServer(ProfileSettings profileSettings) { + // Bind and start to accept incoming connections. 
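+        // Illustrative only, with assumed values: a profile such as
+        //
+        //     transport.profiles.default.port: 9300-9400
+        //     transport.profiles.default.bind_host: 0.0.0.0
+        //     transport.profiles.default.publish_host: 192.168.1.10
+        //
+        // is resolved here into one bound socket per bind address/port, then folded into a single
+        // BoundTransportAddress by createBoundTransportAddress() and resolvePublishPort() below.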
+ InetAddress hostAddresses[]; + List profileBindHosts = profileSettings.bindHosts; + try { + hostAddresses = networkService.resolveBindHostAddresses(profileBindHosts.toArray(Strings.EMPTY_ARRAY)); + } catch (IOException e) { + throw new BindTransportException("Failed to resolve host " + profileBindHosts, e); + } + if (logger.isDebugEnabled()) { + String[] addresses = new String[hostAddresses.length]; + for (int i = 0; i < hostAddresses.length; i++) { + addresses[i] = NetworkAddress.format(hostAddresses[i]); + } + logger.debug("binding server bootstrap to: {}", (Object) addresses); + } + + assert hostAddresses.length > 0; + + List boundAddresses = new ArrayList<>(); + for (InetAddress hostAddress : hostAddresses) { + boundAddresses.add(bindToPort(profileSettings.profileName, hostAddress, profileSettings.portOrRange)); + } + + final BoundTransportAddress boundTransportAddress = createBoundTransportAddress(profileSettings, boundAddresses); + + if (profileSettings.isDefaultProfile) { + this.boundAddress = boundTransportAddress; + } else { + profileBoundAddresses.put(profileSettings.profileName, boundTransportAddress); + } + } + + protected InetSocketAddress bindToPort(final String name, final InetAddress hostAddress, String port) { + PortsRange portsRange = new PortsRange(port); + final AtomicReference lastException = new AtomicReference<>(); + final AtomicReference boundSocket = new AtomicReference<>(); + boolean success = portsRange.iterate(portNumber -> { + try { + TcpChannel channel = bind(name, new InetSocketAddress(hostAddress, portNumber)); + synchronized (serverChannels) { + List list = serverChannels.get(name); + if (list == null) { + list = new ArrayList<>(); + serverChannels.put(name, list); + } + list.add(channel); + boundSocket.set(channel.getLocalAddress()); + } + } catch (Exception e) { + lastException.set(e); + return false; + } + return true; + }); + if (!success) { + throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get()); + } + + if (logger.isDebugEnabled()) { + logger.debug("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get())); + } + + return boundSocket.get(); + } + + private BoundTransportAddress createBoundTransportAddress(ProfileSettings profileSettings, + List boundAddresses) { + String[] boundAddressesHostStrings = new String[boundAddresses.size()]; + TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()]; + for (int i = 0; i < boundAddresses.size(); i++) { + InetSocketAddress boundAddress = boundAddresses.get(i); + boundAddressesHostStrings[i] = boundAddress.getHostString(); + transportBoundAddresses[i] = new TransportAddress(boundAddress); + } + + List publishHosts = profileSettings.publishHosts; + if (profileSettings.isDefaultProfile == false && publishHosts.isEmpty()) { + publishHosts = Arrays.asList(boundAddressesHostStrings); + } + if (publishHosts.isEmpty()) { + publishHosts = NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings); + } + + final InetAddress publishInetAddress; + try { + publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts.toArray(Strings.EMPTY_ARRAY)); + } catch (Exception e) { + throw new BindTransportException("Failed to resolve publish address", e); + } + + final int publishPort = resolvePublishPort(profileSettings, boundAddresses, publishInetAddress); + final TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); + return new 
BoundTransportAddress(transportBoundAddresses, publishAddress); + } + + // package private for tests + public static int resolvePublishPort(ProfileSettings profileSettings, List boundAddresses, + InetAddress publishInetAddress) { + int publishPort = profileSettings.publishPort; + + // if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress + if (publishPort < 0) { + for (InetSocketAddress boundAddress : boundAddresses) { + InetAddress boundInetAddress = boundAddress.getAddress(); + if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { + publishPort = boundAddress.getPort(); + break; + } + } + } + + // if no matching boundAddress found, check if there is a unique port for all bound addresses + if (publishPort < 0) { + final IntSet ports = new IntHashSet(); + for (InetSocketAddress boundAddress : boundAddresses) { + ports.add(boundAddress.getPort()); + } + if (ports.size() == 1) { + publishPort = ports.iterator().next().value; + } + } + + if (publishPort < 0) { + String profileExplanation = profileSettings.isDefaultProfile ? "" : " for profile " + profileSettings.profileName; + throw new BindTransportException("Failed to auto-resolve publish port" + profileExplanation + ", multiple bound addresses " + + boundAddresses + " with distinct ports and none of them matched the publish address (" + publishInetAddress + "). " + + "Please specify a unique port by setting " + PORT.getKey() + " or " + + PUBLISH_PORT.getKey()); + } + return publishPort; + } + + @Override + public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + return parse(address, settings.get("transport.profiles.default.port", PORT.get(settings)), perAddressLimit); + } + + // this code is a take on guava's HostAndPort, like a HostAndPortRange + + // pattern for validating ipv6 bracket addresses. + // not perfect, but PortsRange should take care of any port range validation, not a regex + private static final Pattern BRACKET_PATTERN = Pattern.compile("^\\[(.*:.*)\\](?::([\\d\\-]*))?$"); + + /** parse a hostname+port range spec into its equivalent addresses */ + static TransportAddress[] parse(String hostPortString, String defaultPortRange, int perAddressLimit) throws UnknownHostException { + Objects.requireNonNull(hostPortString); + String host; + String portString = null; + + if (hostPortString.startsWith("[")) { + // Parse a bracketed host, typically an IPv6 literal. + Matcher matcher = BRACKET_PATTERN.matcher(hostPortString); + if (!matcher.matches()) { + throw new IllegalArgumentException("Invalid bracketed host/port range: " + hostPortString); + } + host = matcher.group(1); + portString = matcher.group(2); // could be null + } else { + int colonPos = hostPortString.indexOf(':'); + if (colonPos >= 0 && hostPortString.indexOf(':', colonPos + 1) == -1) { + // Exactly 1 colon. Split into host:port. + host = hostPortString.substring(0, colonPos); + portString = hostPortString.substring(colonPos + 1); + } else { + // 0 or 2+ colons. Bare hostname or IPv6 literal. 
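+                // Illustrative inputs for this branch (example values assumed):
+                //     "es1.example.com"      -> bare hostname, the default port range is applied below
+                //     "::1"                  -> two or more colons without brackets, rejected just below
+                //     "[::1]:9300-9301"      -> never reaches here, handled by the bracket pattern above
+                //     "es1.example.com:9300" -> never reaches here, split at the single colon above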
+ host = hostPortString; + // 2+ colons and not bracketed: exception + if (colonPos >= 0) { + throw new IllegalArgumentException("IPv6 addresses must be bracketed: " + hostPortString); + } + } + } + + // if port isn't specified, fill with the default + if (portString == null || portString.isEmpty()) { + portString = defaultPortRange; + } + + // generate address for each port in the range + Set addresses = new HashSet<>(Arrays.asList(InetAddress.getAllByName(host))); + List transportAddresses = new ArrayList<>(); + int[] ports = new PortsRange(portString).ports(); + int limit = Math.min(ports.length, perAddressLimit); + for (int i = 0; i < limit; i++) { + for (InetAddress address : addresses) { + transportAddresses.add(new TransportAddress(address, ports[i])); + } + } + return transportAddresses.toArray(new TransportAddress[transportAddresses.size()]); + } + + @Override + protected final void doClose() { + } + + @Override + protected final void doStop() { + final CountDownLatch latch = new CountDownLatch(1); + // make sure we run it on another thread than a possible IO handler thread + threadPool.generic().execute(() -> { + closeLock.writeLock().lock(); + try { + // first stop to accept any incoming connections so nobody can connect to this transport + for (Map.Entry> entry : serverChannels.entrySet()) { + String profile = entry.getKey(); + List channels = entry.getValue(); + ActionListener closeFailLogger = ActionListener.wrap(c -> {}, + e -> logger.warn(() -> new ParameterizedMessage("Error closing serverChannel for profile [{}]", profile), e)); + channels.forEach(c -> c.addCloseListener(closeFailLogger)); + TcpChannel.closeChannels(channels, true); + } + serverChannels.clear(); + + // close all of the incoming channels. The closeChannels method takes a list so we must convert the set. 
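+                // Summary of the shutdown order in this block (added for readability): (1) server channels
+                // are closed so no new connections are accepted, (2) already-accepted inbound channels are
+                // closed, (3) outbound node connections are closed and the transport service is notified per
+                // node, (4) stopInternal() performs implementation-specific cleanup, all under the write lock.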
+ TcpChannel.closeChannels(new ArrayList<>(acceptedChannels), true); + acceptedChannels.clear(); + + + // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close + // all instances and then clear them maps + Iterator> iterator = connectedNodes.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry next = iterator.next(); + try { + IOUtils.closeWhileHandlingException(next.getValue()); + transportService.onNodeDisconnected(next.getKey()); + } finally { + iterator.remove(); + } + } + stopInternal(); + } finally { + closeLock.writeLock().unlock(); + latch.countDown(); + } + }); + + try { + latch.await(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // ignore + } + } + + protected void onException(TcpChannel channel, Exception e) { + if (!lifecycle.started()) { + // just close and ignore - we are already stopped and just need to make sure we release all resources + TcpChannel.closeChannel(channel, false); + return; + } + + if (isCloseConnectionException(e)) { + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "close connection exception caught on transport layer [{}], disconnecting from relevant node", + channel), + e); + // close the channel, which will cause a node to be disconnected if relevant + TcpChannel.closeChannel(channel, false); + } else if (isConnectException(e)) { + logger.trace((Supplier) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + TcpChannel.closeChannel(channel, false); + } else if (e instanceof BindException) { + logger.trace((Supplier) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + TcpChannel.closeChannel(channel, false); + } else if (e instanceof CancelledKeyException) { + logger.trace( + (Supplier) () -> new ParameterizedMessage( + "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", + channel), + e); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + TcpChannel.closeChannel(channel, false); + } else if (e instanceof TcpTransport.HttpOnTransportException) { + // in case we are able to return data, serialize the exception content and sent it back to the client + if (channel.isOpen()) { + BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)); + final SendMetricListener closeChannel = new SendMetricListener(message.length()) { + @Override + protected void innerInnerOnResponse(Void v) { + TcpChannel.closeChannel(channel, false); + } + + @Override + protected void innerOnFailure(Exception e) { + logger.debug("failed to send message to httpOnTransport channel", e); + TcpChannel.closeChannel(channel, false); + } + }; + internalSendMessage(channel, message, closeChannel); + } + } else { + logger.warn( + (Supplier) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); + // close the channel, which will cause a node to be disconnected if relevant + TcpChannel.closeChannel(channel, false); + } + } + + protected void serverAcceptedChannel(TcpChannel channel) { + boolean addedOnThisCall = acceptedChannels.add(channel); + assert addedOnThisCall : "Channel should only be added to accept channel set 
once"; + channel.addCloseListener(ActionListener.wrap(() -> acceptedChannels.remove(channel))); + logger.trace(() -> new ParameterizedMessage("Tcp transport channel accepted: {}", channel)); + } + + /** + * Binds to the given {@link InetSocketAddress} + * + * @param name the profile name + * @param address the address to bind to + */ + protected abstract TcpChannel bind(String name, InetSocketAddress address) throws IOException; + + /** + * Initiate a single tcp socket channel to a node. Implementations do not have to observe the connectTimeout. + * It is provided for synchronous connection implementations. + * + * @param node the node + * @param connectTimeout the connection timeout + * @param connectListener listener to be called when connection complete + * @return the pending connection + * @throws IOException if an I/O exception occurs while opening the channel + */ + protected abstract TcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) + throws IOException; + + /** + * Called to tear down internal resources + */ + protected void stopInternal() { + } + + public boolean canCompress(TransportRequest request) { + return compress && (!(request instanceof BytesTransportRequest)); + } + + private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action, + final TransportRequest request, TransportRequestOptions options, Version channelVersion, + byte status) throws IOException, + TransportException { + if (compress) { + options = TransportRequestOptions.builder(options).withCompress(true).build(); + } + + // only compress if asked and the request is not bytes. Otherwise only + // the header part is compressed, and the "body" can't be extracted as compressed + final boolean compressMessage = options.compress() && canCompress(request); + + status = TransportStatus.setRequest(status); + ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); + final CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bStream, compressMessage); + boolean addedReleaseListener = false; + try { + if (compressMessage) { + status = TransportStatus.setCompress(status); + } + + // we pick the smallest of the 2, to support both backward and forward compatibility + // note, this is the only place we need to do this, since from here on, we use the serialized version + // as the version to use also when the node receiving this request will send the response with + Version version = Version.min(getCurrentVersion(), channelVersion); + + stream.setVersion(version); + threadPool.getThreadContext().writeTo(stream); + stream.writeString(action); + BytesReference message = buildMessage(requestId, status, node.getVersion(), request, stream); + final TransportRequestOptions finalOptions = options; + // this might be called in a different thread + SendListener onRequestSent = new SendListener(channel, stream, + () -> transportService.onRequestSent(node, requestId, action, request, finalOptions), message.length()); + internalSendMessage(channel, message, onRequestSent); + addedReleaseListener = true; + } finally { + if (!addedReleaseListener) { + IOUtils.close(stream); + } + } + } + + /** + * sends a message to the given channel, using the given callbacks. 
+ */ + private void internalSendMessage(TcpChannel channel, BytesReference message, SendMetricListener listener) { + try { + channel.sendMessage(message, listener); + } catch (Exception ex) { + // call listener to ensure that any resources are released + listener.onFailure(ex); + onException(channel, ex); + } + } + + /** + * Sends back an error response to the caller via the given channel + * + * @param nodeVersion the caller node version + * @param channel the channel to send the response to + * @param error the error to return + * @param requestId the request ID this response replies to + * @param action the action this response replies to + */ + public void sendErrorResponse(Version nodeVersion, TcpChannel channel, final Exception error, final long requestId, + final String action) throws IOException { + try (BytesStreamOutput stream = new BytesStreamOutput()) { + stream.setVersion(nodeVersion); + RemoteTransportException tx = new RemoteTransportException( + nodeName(), new TransportAddress(channel.getLocalAddress()), action, error); + threadPool.getThreadContext().writeTo(stream); + stream.writeException(tx); + byte status = 0; + status = TransportStatus.setResponse(status); + status = TransportStatus.setError(status); + final BytesReference bytes = stream.bytes(); + final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); + CompositeBytesReference message = new CompositeBytesReference(header, bytes); + SendListener onResponseSent = new SendListener(channel, null, + () -> transportService.onResponseSent(requestId, action, error), message.length()); + internalSendMessage(channel, message, onResponseSent); + } + } + + /** + * Sends the response to the given channel. This method should be used to send {@link TransportResponse} objects back to the caller. 
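+     * <p>
+     * A hedged usage sketch; {@code tcpChannel}, {@code myResponse}, {@code requestId} and
+     * {@code actionName} are assumed placeholders, only the parameter order follows the signature below:
+     * <pre>{@code
+     * transport.sendResponse(Version.CURRENT, tcpChannel, myResponse, requestId,
+     *         actionName, TransportResponseOptions.EMPTY);
+     * }</pre>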
+ * + * @see #sendErrorResponse(Version, TcpChannel, Exception, long, String) for sending back errors to the caller + */ + public void sendResponse(Version nodeVersion, TcpChannel channel, final TransportResponse response, final long requestId, + final String action, TransportResponseOptions options) throws IOException { + sendResponse(nodeVersion, channel, response, requestId, action, options, (byte) 0); + } + + private void sendResponse(Version nodeVersion, TcpChannel channel, final TransportResponse response, final long requestId, + final String action, TransportResponseOptions options, byte status) throws IOException { + if (compress) { + options = TransportResponseOptions.builder(options).withCompress(true).build(); + } + status = TransportStatus.setResponse(status); // TODO share some code with sendRequest + ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); + CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bStream, options.compress()); + boolean addedReleaseListener = false; + try { + if (options.compress()) { + status = TransportStatus.setCompress(status); + } + threadPool.getThreadContext().writeTo(stream); + stream.setVersion(nodeVersion); + BytesReference message = buildMessage(requestId, status, nodeVersion, response, stream); + + final TransportResponseOptions finalOptions = options; + // this might be called in a different thread + SendListener listener = new SendListener(channel, stream, + () -> transportService.onResponseSent(requestId, action, response, finalOptions), message.length()); + internalSendMessage(channel, message, listener); + addedReleaseListener = true; + } finally { + if (!addedReleaseListener) { + IOUtils.close(stream); + } + } + } + + /** + * Writes the Tcp message header into a bytes reference. + * + * @param requestId the request ID + * @param status the request status + * @param protocolVersion the protocol version used to serialize the data in the message + * @param length the payload length in bytes + * @see TcpHeader + */ + final BytesReference buildHeader(long requestId, byte status, Version protocolVersion, int length) throws IOException { + try (BytesStreamOutput headerOutput = new BytesStreamOutput(TcpHeader.HEADER_SIZE)) { + headerOutput.setVersion(protocolVersion); + TcpHeader.writeHeader(headerOutput, requestId, status, protocolVersion, length); + final BytesReference bytes = headerOutput.bytes(); + assert bytes.length() == TcpHeader.HEADER_SIZE : "header size mismatch expected: " + TcpHeader.HEADER_SIZE + " but was: " + + bytes.length(); + return bytes; + } + } + + /** + * Serializes the given message into a bytes representation + */ + private BytesReference buildMessage(long requestId, byte status, Version nodeVersion, TransportMessage message, + CompressibleBytesOutputStream stream) throws IOException { + final BytesReference zeroCopyBuffer; + if (message instanceof BytesTransportRequest) { // what a shitty optimization - we should use a direct send method instead + BytesTransportRequest bRequest = (BytesTransportRequest) message; + assert nodeVersion.equals(bRequest.version()); + bRequest.writeThin(stream); + zeroCopyBuffer = bRequest.bytes(); + } else { + message.writeTo(stream); + zeroCopyBuffer = BytesArray.EMPTY; + } + // we have to call materializeBytes() here before accessing the bytes. A CompressibleBytesOutputStream + // might be implementing compression. And materializeBytes() ensures that some marker bytes (EOS marker) + // are written. 
Otherwise we barf on the decompressing end when we read past EOF on purpose in the + // #validateRequest method. this might be a problem in deflate after all but it's important to write + // the marker bytes. + final BytesReference messageBody = stream.materializeBytes(); + final BytesReference header = buildHeader(requestId, status, stream.getVersion(), messageBody.length() + zeroCopyBuffer.length()); + return new CompositeBytesReference(header, messageBody, zeroCopyBuffer); + } + + /** + * Validates the first N bytes of the message header and returns false if the message is + * a ping message and has no payload ie. isn't a real user level message. + * + * @throws IllegalStateException if the message is too short, less than the header or less that the header plus the message size + * @throws HttpOnTransportException if the message has no valid header and appears to be a HTTP message + * @throws IllegalArgumentException if the message is greater that the maximum allowed frame size. This is dependent on the available + * memory. + */ + public static boolean validateMessageHeader(BytesReference buffer) throws IOException { + final int sizeHeaderLength = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; + if (buffer.length() < sizeHeaderLength) { + throw new IllegalStateException("message size must be >= to the header size"); + } + int offset = 0; + if (buffer.get(offset) != 'E' || buffer.get(offset + 1) != 'S') { + // special handling for what is probably HTTP + if (bufferStartsWith(buffer, offset, "GET ") || + bufferStartsWith(buffer, offset, "POST ") || + bufferStartsWith(buffer, offset, "PUT ") || + bufferStartsWith(buffer, offset, "HEAD ") || + bufferStartsWith(buffer, offset, "DELETE ") || + bufferStartsWith(buffer, offset, "OPTIONS ") || + bufferStartsWith(buffer, offset, "PATCH ") || + bufferStartsWith(buffer, offset, "TRACE ")) { + + throw new HttpOnTransportException("This is not a HTTP port"); + } + + // we have 6 readable bytes, show 4 (should be enough) + throw new StreamCorruptedException("invalid internal transport message format, got (" + + Integer.toHexString(buffer.get(offset) & 0xFF) + "," + + Integer.toHexString(buffer.get(offset + 1) & 0xFF) + "," + + Integer.toHexString(buffer.get(offset + 2) & 0xFF) + "," + + Integer.toHexString(buffer.get(offset + 3) & 0xFF) + ")"); + } + + final int dataLen; + try (StreamInput input = buffer.streamInput()) { + input.skip(TcpHeader.MARKER_BYTES_SIZE); + dataLen = input.readInt(); + if (dataLen == PING_DATA_SIZE) { + // discard the messages we read and continue, this is achieved by skipping the bytes + // and returning null + return false; + } + } + + if (dataLen <= 0) { + throw new StreamCorruptedException("invalid data length: " + dataLen); + } + // safety against too large frames being sent + if (dataLen > NINETY_PER_HEAP_SIZE) { + throw new IllegalArgumentException("transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" + + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); + } + + if (buffer.length() < dataLen + sizeHeaderLength) { + throw new IllegalStateException("buffer must be >= to the message size but wasn't"); + } + return true; + } + + private static boolean bufferStartsWith(BytesReference buffer, int offset, String method) { + char[] chars = method.toCharArray(); + for (int i = 0; i < chars.length; i++) { + if (buffer.get(offset + i) != chars[i]) { + return false; + } + } + + return true; + } + + /** + * A helper exception to mark an incoming connection as potentially being HTTP + * so 
an appropriate error code can be returned + */ + public static class HttpOnTransportException extends ElasticsearchException { + + public HttpOnTransportException(String msg) { + super(msg); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + public HttpOnTransportException(StreamInput in) throws IOException { + super(in); + } + } + + /** + * This method handles the message receive part for both request and responses + */ + public final void messageReceived(BytesReference reference, TcpChannel channel, String profileName, + InetSocketAddress remoteAddress, int messageLengthBytes) throws IOException { + final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; + readBytesMetric.inc(totalMessageSize); + // we have additional bytes to read, outside of the header + boolean hasMessageBytesToRead = (totalMessageSize - TcpHeader.HEADER_SIZE) > 0; + StreamInput streamIn = reference.streamInput(); + boolean success = false; + try (ThreadContext.StoredContext tCtx = threadPool.getThreadContext().stashContext()) { + long requestId = streamIn.readLong(); + byte status = streamIn.readByte(); + Version version = Version.fromId(streamIn.readInt()); + if (TransportStatus.isCompress(status) && hasMessageBytesToRead && streamIn.available() > 0) { + Compressor compressor; + try { + final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; + compressor = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)); + } catch (NotCompressedException ex) { + int maxToRead = Math.min(reference.length(), 10); + StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead) + .append("] content bytes out of [").append(reference.length()) + .append("] readable bytes with message size [").append(messageLengthBytes).append("] ").append("] are ["); + for (int i = 0; i < maxToRead; i++) { + sb.append(reference.get(i)).append(","); + } + sb.append("]"); + throw new IllegalStateException(sb.toString()); + } + streamIn = compressor.streamInput(streamIn); + } + final boolean isHandshake = TransportStatus.isHandshake(status); + ensureVersionCompatibility(version, getCurrentVersion(), isHandshake); + streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry); + streamIn.setVersion(version); + threadPool.getThreadContext().readHeaders(streamIn); + if (TransportStatus.isRequest(status)) { + handleRequest(channel, profileName, streamIn, requestId, messageLengthBytes, version, remoteAddress, status); + } else { + final TransportResponseHandler handler; + if (isHandshake) { + handler = pendingHandshakes.remove(requestId); + } else { + TransportResponseHandler theHandler = transportService.onResponseReceived(requestId); + if (theHandler == null && TransportStatus.isError(status)) { + handler = pendingHandshakes.remove(requestId); + } else { + handler = theHandler; + } + } + // ignore if its null, the service logs it + if (handler != null) { + if (TransportStatus.isError(status)) { + handlerResponseError(streamIn, handler); + } else { + handleResponse(remoteAddress, streamIn, handler); + } + // Check the entire message has been read + final int nextByte = streamIn.read(); + // calling read() is useful to make sure the message is fully read, even if there is an EOS marker + if (nextByte != -1) { + throw new IllegalStateException("Message not fully read (response) for requestId [" + 
requestId + "], handler [" + + handler + "], error [" + TransportStatus.isError(status) + "]; resetting"); + } + } + } + success = true; + } finally { + if (success) { + IOUtils.close(streamIn); + } else { + IOUtils.closeWhileHandlingException(streamIn); + } + } + } + + static void ensureVersionCompatibility(Version version, Version currentVersion, boolean isHandshake) { + // for handshakes we are compatible with N-2 since otherwise we can't figure out our initial version + // since we are compatible with N-1 and N+1 so we always send our minCompatVersion as the initial version in the + // handshake. This looks odd but it's required to establish the connection correctly we check for real compatibility + // once the connection is established + final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion; + if (version.isCompatible(compatibilityVersion) == false) { + final Version minCompatibilityVersion = isHandshake ? compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion(); + String msg = "Received " + (isHandshake ? "handshake " : "") + "message from unsupported version: ["; + throw new IllegalStateException(msg + version + "] minimal compatible version is: [" + minCompatibilityVersion + "]"); + } + } + + private void handleResponse(InetSocketAddress remoteAddress, final StreamInput stream, final TransportResponseHandler handler) { + final TransportResponse response; + try { + response = handler.read(stream); + response.remoteAddress(new TransportAddress(remoteAddress)); + } catch (Exception e) { + handleException(handler, new TransportSerializationException( + "Failed to deserialize response from handler [" + handler.getClass().getName() + "]", e)); + return; + } + threadPool.executor(handler.executor()).execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + handleException(handler, new ResponseHandlerFailureTransportException(e)); + } + + @Override + protected void doRun() throws Exception { + handler.handleResponse(response); + } + }); + + } + + /** + * Executed for a received response error + */ + private void handlerResponseError(StreamInput stream, final TransportResponseHandler handler) { + Exception error; + try { + error = stream.readException(); + } catch (Exception e) { + error = new TransportSerializationException("Failed to deserialize exception response from stream", e); + } + handleException(handler, error); + } + + private void handleException(final TransportResponseHandler handler, Throwable error) { + if (!(error instanceof RemoteTransportException)) { + error = new RemoteTransportException(error.getMessage(), error); + } + final RemoteTransportException rtx = (RemoteTransportException) error; + threadPool.executor(handler.executor()).execute(() -> { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); + } + }); + } + + protected String handleRequest(TcpChannel channel, String profileName, final StreamInput stream, long requestId, + int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) + throws IOException { + final String action = stream.readString(); + transportService.onRequestReceived(requestId, action); + TransportChannel transportChannel = null; + try { + if (TransportStatus.isHandshake(status)) { + final VersionHandshakeResponse response = new VersionHandshakeResponse(getCurrentVersion()); + 
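+                // descriptive note: handshake requests are answered inline with this node's version; they never
+                // reach a registered request handler, the response goes straight back on the handshake action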
sendResponse(version, channel, response, requestId, HANDSHAKE_ACTION_NAME, TransportResponseOptions.EMPTY, + TransportStatus.setHandshake((byte) 0)); + } else { + final RequestHandlerRegistry reg = transportService.getRequestHandler(action); + if (reg == null) { + throw new ActionNotFoundTransportException(action); + } + if (reg.canTripCircuitBreaker()) { + getInFlightRequestBreaker().addEstimateBytesAndMaybeBreak(messageLengthBytes, ""); + } else { + getInFlightRequestBreaker().addWithoutBreaking(messageLengthBytes); + } + transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, profileName, + messageLengthBytes); + final TransportRequest request = reg.newRequest(stream); + request.remoteAddress(new TransportAddress(remoteAddress)); + // in case we throw an exception, i.e. when the limit is hit, we don't want to verify + validateRequest(stream, requestId, action); + threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); + } + } catch (Exception e) { + // the circuit breaker tripped + if (transportChannel == null) { + transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, profileName, 0); + } + try { + transportChannel.sendResponse(e); + } catch (IOException inner) { + inner.addSuppressed(e); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "Failed to send error message back to client for action [{}]", action), inner); + } + } + return action; + } + + // This template method is needed to inject custom error checking logic in tests. + protected void validateRequest(StreamInput stream, long requestId, String action) throws IOException { + final int nextByte = stream.read(); + // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker + if (nextByte != -1) { + throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" + action + + "], available [" + stream.available() + "]; resetting"); + } + } + + class RequestHandler extends AbstractRunnable { + private final RequestHandlerRegistry reg; + private final TransportRequest request; + private final TransportChannel transportChannel; + + RequestHandler(RequestHandlerRegistry reg, TransportRequest request, TransportChannel transportChannel) { + this.reg = reg; + this.request = request; + this.transportChannel = transportChannel; + } + + @SuppressWarnings({"unchecked"}) + @Override + protected void doRun() throws Exception { + reg.processMessageReceived(request, transportChannel); + } + + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } + + @Override + public void onFailure(Exception e) { + if (lifecycleState() == Lifecycle.State.STARTED) { + // we can only send a response transport is started.... 
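+                // (once the transport is stopped the channel may already be closed, so no error response is attempted)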
+ try { + transportChannel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "Failed to send error message back to client for action [{}]", reg.getAction()), inner); + } + } + } + } + + private static final class VersionHandshakeResponse extends TransportResponse { + private Version version; + + private VersionHandshakeResponse(Version version) { + this.version = version; + } + + private VersionHandshakeResponse() { + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + version = Version.readVersion(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + assert version != null; + Version.writeVersion(version, out); + } + } + + protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) + throws IOException, InterruptedException { + numHandshakes.inc(); + final long requestId = newRequestId(); + final HandshakeResponseHandler handler = new HandshakeResponseHandler(channel); + AtomicReference versionRef = handler.versionRef; + AtomicReference exceptionRef = handler.exceptionRef; + pendingHandshakes.put(requestId, handler); + boolean success = false; + try { + if (channel.isOpen() == false) { + // we have to protect us here since sendRequestToChannel won't barf if the channel is closed. + // it's weird but to change it will cause a lot of impact on the exception handling code all over the codebase. + // yet, if we don't check the state here we might have registered a pending handshake handler but the close + // listener calling #onChannelClosed might have already run and we are waiting on the latch below unitl we time out. + throw new IllegalStateException("handshake failed, channel already closed"); + } + // for the request we use the minCompatVersion since we don't know what's the version of the node we talk to + // we also have no payload on the request but the response will contain the actual version of the node we talk + // to as the payload. + final Version minCompatVersion = getCurrentVersion().minimumCompatibilityVersion(); + sendRequestToChannel(node, channel, requestId, HANDSHAKE_ACTION_NAME, TransportRequest.Empty.INSTANCE, + TransportRequestOptions.EMPTY, minCompatVersion, TransportStatus.setHandshake((byte) 0)); + if (handler.latch.await(timeout.millis(), TimeUnit.MILLISECONDS) == false) { + throw new ConnectTransportException(node, "handshake_timeout[" + timeout + "]"); + } + success = true; + if (exceptionRef.get() != null) { + throw new IllegalStateException("handshake failed", exceptionRef.get()); + } else { + Version version = versionRef.get(); + if (getCurrentVersion().isCompatible(version) == false) { + throw new IllegalStateException("Received message from unsupported version: [" + version + + "] minimal compatible version is: [" + getCurrentVersion().minimumCompatibilityVersion() + "]"); + } + return version; + } + } finally { + final TransportResponseHandler removedHandler = pendingHandshakes.remove(requestId); + // in the case of a timeout or an exception on the send part the handshake has not been removed yet. + // but the timeout is tricky since it's basically a race condition so we only assert on the success case. 
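+            // on success the response (or error) path must already have removed the pending handler;
+            // on failure the timeout race may legitimately leave it behind, so nothing is asserted in that case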
+ assert success && removedHandler == null || success == false : "handler for requestId [" + requestId + "] is not been removed"; + } + } + + final int getNumPendingHandshakes() { // for testing + return pendingHandshakes.size(); + } + + final long getNumHandshakes() { + return numHandshakes.count(); // for testing + } + + @Override + public long newRequestId() { + return requestIdGenerator.incrementAndGet(); + } + + /** + * Called once the channel is closed for instance due to a disconnect or a closed socket etc. + */ + private void cancelHandshakeForChannel(TcpChannel channel) { + final Optional first = pendingHandshakes.entrySet().stream() + .filter((entry) -> entry.getValue().channel == channel).map(Map.Entry::getKey).findFirst(); + if (first.isPresent()) { + final Long requestId = first.get(); + final HandshakeResponseHandler handler = pendingHandshakes.remove(requestId); + if (handler != null) { + // there might be a race removing this or this method might be called twice concurrently depending on how + // the channel is closed ie. due to connection reset or broken pipes + handler.handleException(new TransportException("connection reset")); + } + } + } + + /** + * Ensures this transport is still started / open + * + * @throws IllegalStateException if the transport is not started / open + */ + protected final void ensureOpen() { + if (lifecycle.started() == false) { + throw new IllegalStateException("transport has been stopped"); + } + } + + /** + * This listener increments the transmitted bytes metric on success. + */ + private abstract class SendMetricListener extends NotifyOnceListener { + private final long messageSize; + + private SendMetricListener(long messageSize) { + this.messageSize = messageSize; + } + + @Override + protected final void innerOnResponse(Void object) { + transmittedBytesMetric.inc(messageSize); + innerInnerOnResponse(object); + } + + protected abstract void innerInnerOnResponse(Void object); + } + + private final class SendListener extends SendMetricListener { + private final TcpChannel channel; + private final Closeable optionalCloseable; + private final Runnable transportAdaptorCallback; + + private SendListener(TcpChannel channel, Closeable optionalCloseable, Runnable transportAdaptorCallback, long messageLength) { + super(messageLength); + this.channel = channel; + this.optionalCloseable = optionalCloseable; + this.transportAdaptorCallback = transportAdaptorCallback; + } + + @Override + protected void innerInnerOnResponse(Void v) { + closeAndCallback(null); + } + + @Override + protected void innerOnFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("send message failed [channel: {}]", channel), e); + closeAndCallback(e); + } + + private void closeAndCallback(final Exception e) { + try { + IOUtils.close(optionalCloseable, transportAdaptorCallback::run); + } catch (final IOException inner) { + if (e != null) { + inner.addSuppressed(e); + } + throw new UncheckedIOException(inner); + } + } + } + + @Override + public final TransportStats getStats() { + return new TransportStats(acceptedChannels.size(), readBytesMetric.count(), readBytesMetric.sum(), transmittedBytesMetric.count(), + transmittedBytesMetric.sum()); + } + + /** + * Returns all profile settings for the given settings object + */ + public static Set getProfileSettings(Settings settings) { + HashSet profiles = new HashSet<>(); + boolean isDefaultSet = false; + for (String profile : settings.getGroups("transport.profiles.", true).keySet()) { + profiles.add(new 
ProfileSettings(settings, profile));
+            if (DEFAULT_PROFILE.equals(profile)) {
+                isDefaultSet = true;
+            }
+        }
+        if (isDefaultSet == false) {
+            profiles.add(new ProfileSettings(settings, DEFAULT_PROFILE));
+        }
+        return Collections.unmodifiableSet(profiles);
+    }
+
+    /**
+     * Representation of the settings for a transport profile, configured under transport.profiles.$profilename.*
+     */
+    public static final class ProfileSettings {
+        public final String profileName;
+        public final boolean tcpNoDelay;
+        public final boolean tcpKeepAlive;
+        public final boolean reuseAddress;
+        public final ByteSizeValue sendBufferSize;
+        public final ByteSizeValue receiveBufferSize;
+        public final List<String> bindHosts;
+        public final List<String> publishHosts;
+        public final String portOrRange;
+        public final int publishPort;
+        public final boolean isDefaultProfile;
+
+        public ProfileSettings(Settings settings, String profileName) {
+            this.profileName = profileName;
+            isDefaultProfile = DEFAULT_PROFILE.equals(profileName);
+            tcpKeepAlive = TCP_KEEP_ALIVE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings);
+            tcpNoDelay = TCP_NO_DELAY_PROFILE.getConcreteSettingForNamespace(profileName).get(settings);
+            reuseAddress = TCP_REUSE_ADDRESS_PROFILE.getConcreteSettingForNamespace(profileName).get(settings);
+            sendBufferSize = TCP_SEND_BUFFER_SIZE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings);
+            receiveBufferSize = TCP_RECEIVE_BUFFER_SIZE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings);
+            List<String> profileBindHosts = BIND_HOST_PROFILE.getConcreteSettingForNamespace(profileName).get(settings);
+            bindHosts = (profileBindHosts.isEmpty() ? NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(settings)
+                : profileBindHosts);
+            publishHosts = PUBLISH_HOST_PROFILE.getConcreteSettingForNamespace(profileName).get(settings);
+            Setting<String> concretePort = PORT_PROFILE.getConcreteSettingForNamespace(profileName);
+            if (concretePort.exists(settings) == false && isDefaultProfile == false) {
+                throw new IllegalStateException("profile [" + profileName + "] has no port configured");
+            }
+            portOrRange = PORT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings);
+            publishPort = isDefaultProfile ?
PUBLISH_PORT.get(settings) : + PUBLISH_PORT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + } + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransportChannel.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransportChannel.java new file mode 100644 index 0000000..b0e4695 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransportChannel.java @@ -0,0 +1,90 @@ +package org.xbib.elasticsearch.client.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.transport.TcpChannel; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseOptions; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +public final class TcpTransportChannel implements TransportChannel { + private final TcpTransport transport; + private final Version version; + private final String action; + private final long requestId; + private final String profileName; + private final long reservedBytes; + private final AtomicBoolean released = new AtomicBoolean(); + private final String channelType; + private final TcpChannel channel; + + TcpTransportChannel(TcpTransport transport, TcpChannel channel, String channelType, String action, + long requestId, Version version, String profileName, long reservedBytes) { + this.version = version; + this.channel = channel; + this.transport = transport; + this.action = action; + this.requestId = requestId; + this.profileName = profileName; + this.reservedBytes = reservedBytes; + this.channelType = channelType; + } + + @Override + public String getProfileName() { + return profileName; + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + sendResponse(response, TransportResponseOptions.EMPTY); + } + + @Override + public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException { + try { + transport.sendResponse(version, channel, response, requestId, action, options); + } finally { + release(false); + } + } + + @Override + public void sendResponse(Exception exception) throws IOException { + try { + transport.sendErrorResponse(version, channel, exception, requestId, action); + } finally { + release(true); + } + } + + private Exception releaseBy; + + private void release(boolean isExceptionResponse) { + if (released.compareAndSet(false, true)) { + assert (releaseBy = new Exception()) != null; // easier to debug if it's already closed + transport.getInFlightRequestBreaker().addWithoutBreaking(-reservedBytes); + } else if (isExceptionResponse == false) { + // only fail if we are not sending an error - we might send the error triggered by the previous + // sendResponse call + throw new IllegalStateException("reserved bytes are already released", releaseBy); + } + } + + @Override + public String getChannelType() { + return channelType; + } + + @Override + public Version getVersion() { + return version; + } + + public TcpChannel getChannel() { + return channel; + } +} + diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Transport.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Transport.java new file mode 100644 index 0000000..95ca1e2 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Transport.java @@ -0,0 +1,116 @@ +package org.xbib.elasticsearch.client.transport; + 
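+// This interface closely mirrors the upstream org.elasticsearch.transport.Transport contract for use by this client.
+// A rough usage sketch (the action name "internal:example/action" is purely illustrative):
+//
+//   Transport.Connection connection = transport.openConnection(node, connectionProfile);
+//   connection.sendRequest(transport.newRequestId(), "internal:example/action", request, TransportRequestOptions.EMPTY);
+//   connection.close();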
+import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportStats; + +import java.io.Closeable; +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.List; +import java.util.Map; + +public interface Transport extends LifecycleComponent { + + Setting TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, Property.NodeScope); + + void setTransportService(TransportService service); + + /** + * The address the transport is bound on. + */ + BoundTransportAddress boundAddress(); + + /** + * Further profile bound addresses + * @return null iff profiles are unsupported, otherwise a map with name of profile and its bound transport address + */ + Map profileBoundAddresses(); + + /** + * Returns an address from its string representation. + */ + TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException; + + /** + * Returns true if the node is connected. + */ + boolean nodeConnected(DiscoveryNode node); + + /** + * Connects to a node with the given connection profile. If the node is already connected this method has no effect. + * Once a successful is established, it can be validated before being exposed. + */ + void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, + CheckedBiConsumer connectionValidator) throws ConnectTransportException; + + /** + * Disconnected from the given node, if not connected, will do nothing. + */ + void disconnectFromNode(DiscoveryNode node); + + List getLocalAddresses(); + + default CircuitBreaker getInFlightRequestBreaker() { + return new NoopCircuitBreaker("in-flight-noop"); + } + + /** + * Returns a new request ID to use when sending a message via {@link Connection#sendRequest(long, String, + * TransportRequest, TransportRequestOptions)} + */ + long newRequestId(); + + Connection getConnection(DiscoveryNode node); + + /** + * Opens a new connection to the given node and returns it. In contrast to + * {@link #connectToNode(DiscoveryNode, ConnectionProfile, CheckedBiConsumer)} the returned connection is not managed by + * the transport implementation. This connection must be closed once it's not needed anymore. + * This connection type can be used to execute a handshake between two nodes before the node will be published via + * {@link #connectToNode(DiscoveryNode, ConnectionProfile, CheckedBiConsumer)}. 
+ */ + Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException; + + TransportStats getStats(); + + /** + * A unidirectional connection to a {@link DiscoveryNode} + */ + interface Connection extends Closeable { + /** + * The node this connection is associated with + */ + DiscoveryNode getNode(); + + void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws + IOException, TransportException; + + /** + * Returns the version of the node this connection was established with. + */ + default Version getVersion() { + return getNode().getVersion(); + } + + /** + * Returns a key that this connection can be cached on. Delegating subclasses must delegate method call to + * the original connection. + */ + default Object getCacheKey() { + return this; + } + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportBulkClient.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportBulkClient.java new file mode 100644 index 0000000..a516429 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportBulkClient.java @@ -0,0 +1,129 @@ +package org.xbib.elasticsearch.client.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.xbib.elasticsearch.client.AbstractClient; +import org.xbib.elasticsearch.client.BulkControl; +import org.xbib.elasticsearch.client.BulkMetric; +import org.xbib.elasticsearch.client.NetworkUtils; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +/** + * Transport client with additional methods for bulk processing. 
+ */ +public class TransportBulkClient extends AbstractClient { + + private static final Logger logger = LogManager.getLogger(TransportBulkClient.class.getName()); + + public TransportBulkClient init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control) { + super.init(client, settings, metric, control); + // auto-connect here + try { + Collection addrs = findAddresses(settings); + if (!connect(addrs, settings.getAsBoolean("autodiscover", false))) { + throw new NoNodeAvailableException("no cluster nodes available, check settings " + + settings.toString()); + } + } catch (IOException e) { + logger.error(e.getMessage(), e); + } + return this; + } + + protected ElasticsearchClient createClient(Settings settings) { + if (settings != null) { + String version = System.getProperty("os.name") + + " " + System.getProperty("java.vm.name") + + " " + System.getProperty("java.vm.vendor") + + " " + System.getProperty("java.runtime.version") + + " " + System.getProperty("java.vm.version"); + logger.info("creating transport client on {} with effective settings {}", + version, settings.toString()); + return new TransportClient(Settings.builder() + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), settings.get(ClusterName.CLUSTER_NAME_SETTING.getKey())) + .put(EsExecutors.PROCESSORS_SETTING.getKey(), settings.get(EsExecutors.PROCESSORS_SETTING.getKey())) + .put("client.transport.ignore_cluster_name", true) + .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) + .build(), Collections.singletonList(Netty4Plugin.class)); + } + return null; + } + + @Override + public synchronized void shutdown() throws IOException { + super.shutdown(); + logger.info("shutting down..."); + if (client() != null) { + TransportClient client = (TransportClient) client(); + client.close(); + client.threadPool().shutdown(); + } + logger.info("shutting down completed"); + } + + private Collection findAddresses(Settings settings) throws IOException { + List hostnames = settings.getAsList("host", Collections.singletonList("localhost")); + int port = settings.getAsInt("port", 9300); + Collection addresses = new ArrayList<>(); + for (String hostname : hostnames) { + String[] splitHost = hostname.split(":", 2); + if (splitHost.length == 2) { + String host = splitHost[0]; + InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null); + try { + port = Integer.parseInt(splitHost[1]); + } catch (Exception e) { + logger.warn(e.getMessage(), e); + } + addresses.add(new TransportAddress(inetAddress, port)); + } + if (splitHost.length == 1) { + String host = splitHost[0]; + InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null); + addresses.add(new TransportAddress(inetAddress, port)); + } + } + return addresses; + } + + private boolean connect(Collection addresses, boolean autodiscover) { + logger.info("trying to connect to {}", addresses); + if (client() == null) { + throw new IllegalStateException("no client?"); + } + TransportClient transportClient = (TransportClient) client(); + transportClient.addTransportAddresses(addresses); + List nodes = transportClient.connectedNodes(); + logger.info("nodes = {}", nodes); + if (nodes != null && !nodes.isEmpty()) { + if (autodiscover) { + logger.info("trying to auto-discover all cluster nodes..."); + ClusterStateRequestBuilder clusterStateRequestBuilder = + new ClusterStateRequestBuilder(client(), ClusterStateAction.INSTANCE); + ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet(); + 
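+                // the cluster state response lists every node in the cluster; their addresses are added
+                // below so the client connects to nodes beyond the ones configured explicitly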
DiscoveryNodes discoveryNodes = clusterStateResponse.getState().getNodes(); + transportClient.addDiscoveryNodes(discoveryNodes); + logger.info("after auto-discovery connected to {}", transportClient.connectedNodes()); + } + return true; + } + return false; + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportClient.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportClient.java new file mode 100644 index 0000000..f7b79cd --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportClient.java @@ -0,0 +1,507 @@ +package org.xbib.elasticsearch.client.transport; + +import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; +import static java.util.stream.Collectors.toList; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionModule; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.cluster.node.liveness.LivenessRequest; +import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; +import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; +import org.elasticsearch.client.support.AbstractClient; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.inject.Injector; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.inject.ModulesBuilder; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.node.InternalSettingsPreparer; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.FutureTransportResponseHandler; +import org.elasticsearch.transport.TransportRequestOptions; + +import java.io.Closeable; +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import 
java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Simplified transport client, without the node sampling and retrying mode like in the mainline version. + * + * Configurable connect ping interval setting added. + */ +public class TransportClient extends AbstractClient { + + private static final Logger logger = LogManager.getLogger(TransportClient.class); + + private static final String CLIENT_TYPE = "transport"; + + private final Injector injector; + + private final long pingTimeout; + + private final ClusterName clusterName; + + private final TransportService transportService; + + private final AtomicInteger tempNodeId = new AtomicInteger(); + + private final AtomicInteger nodeCounter = new AtomicInteger(); + + private final Object mutex = new Object(); + + private volatile List nodes = Collections.emptyList(); + + private volatile List listedNodes = Collections.emptyList(); + + private volatile List filteredNodes = Collections.emptyList(); + + private volatile boolean closed; + + /** + * Creates a new TransportClient with the given settings and plugins. + * @param settings settings + */ + public TransportClient(Settings settings) { + this(buildParams(settings, Settings.EMPTY, Collections.emptyList())); + } + + /** + * Creates a new TransportClient with the given settings and plugins. + * @param settings settings + * @param plugins plugins + */ + public TransportClient(Settings settings, Collection> plugins) { + this(buildParams(settings, Settings.EMPTY, plugins)); + } + + /** + * Creates a new TransportClient with the given settings, defaults and plugins. + * @param settings the client settings + * @param defaultSettings default settings that are merged after the plugins have added it's additional settings. + * @param plugins the client plugins + */ + protected TransportClient(Settings settings, Settings defaultSettings, Collection> plugins) { + this(buildParams(settings, defaultSettings, plugins)); + } + + private TransportClient(final Injector injector) { + super(getSettings(injector), getThreadPool(injector)); + this.injector = injector; + this.clusterName = new ClusterName(getSettings(injector).get("cluster.name", "elasticsearch")); + this.transportService = injector.getInstance(TransportService.class); + this.pingTimeout = this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis(); + } + + private static Settings getSettings(Injector injector) { + return injector.getInstance(Settings.class); + } + + private static ThreadPool getThreadPool(Injector injector) { + return injector.getInstance(ThreadPool.class); + } + + /** + * Returns the current registered transport addresses to use. + * + * @return list of transport addresess + */ + public List transportAddresses() { + List list = new ArrayList<>(); + for (DiscoveryNode listedNode : listedNodes) { + list.add(listedNode.getAddress()); + } + return Collections.unmodifiableList(list); + } + + /** + * Returns the current connected transport nodes that this client will use. + * The nodes include all the nodes that are currently alive based on the transport + * addresses provided. + * + * @return list of nodes + */ + public List connectedNodes() { + return this.nodes; + } + + /** + * The list of filtered nodes that were not connected to, for example, due to mismatch in cluster name. 
+ * + * @return list of nodes + */ + public List filteredNodes() { + return this.filteredNodes; + } + + /** + * Returns the listed nodes in the transport client, once added to it. + * + * @return list of nodes + */ + public List listedNodes() { + return this.listedNodes; + } + + /** + * Adds a list of transport addresses that will be used to connect to. + * The Node this transport address represents will be used if its possible to connect to it. + * If it is unavailable, it will be automatically connected to once it is up. + * In order to get the list of all the current connected nodes, please see {@link #connectedNodes()}. + * + * @param discoveryNodes nodes + * @return this transport client + */ + public TransportClient addDiscoveryNodes(DiscoveryNodes discoveryNodes) { + Collection addresses = new ArrayList<>(); + for (DiscoveryNode discoveryNode : discoveryNodes) { + addresses.add(discoveryNode.getAddress()); + } + addTransportAddresses(addresses); + return this; + } + + /** + * Adds a list of transport addresses that will be used to connect to. + * The Node this transport address represents will be used if its possible to connect to it. + * If it is unavailable, it will be automatically connected to once it is up. + * In order to get the list of all the current connected nodes, please see {@link #connectedNodes()}. + * + * @param transportAddresses transport addresses + */ + public TransportClient addTransportAddresses(Collection transportAddresses) { + synchronized (mutex) { + if (closed) { + throw new IllegalStateException("transport client is closed, can't add addresses"); + } + Set discoveryNodeList = new HashSet<>(listedNodes); + logger.info("before adding: nodes={} listednodes={} transportAddresses={}", + nodes, listedNodes, transportAddresses); + for (TransportAddress newTransportAddress : transportAddresses) { + boolean found = false; + for (DiscoveryNode discoveryNode : discoveryNodeList) { + logger.debug("checking existing address [{}] against new [{}]", + discoveryNode.getAddress(), newTransportAddress); + if (discoveryNode.getAddress().equals(newTransportAddress)) { // sameHost + found = true; + logger.debug("address [{}] already connected, ignoring", newTransportAddress, discoveryNode); + break; + } + } + if (!found) { + DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeId.incrementAndGet(), + newTransportAddress, Version.CURRENT.minimumCompatibilityVersion()); + logger.info("adding new address [{}]", node); + discoveryNodeList.add(node); + } + } + listedNodes = Collections.unmodifiableList(new ArrayList<>(discoveryNodeList)); + connect(); + } + return this; + } + + /** + * Removes a transport address from the list of transport addresses that are used to connect to. 
+ * + * @param transportAddress transport address to remove + * @return this transport client + */ + public TransportClient removeTransportAddress(TransportAddress transportAddress) { + synchronized (mutex) { + if (closed) { + throw new IllegalStateException("transport client is closed, can't remove an address"); + } + List builder = new ArrayList<>(); + for (DiscoveryNode otherNode : listedNodes) { + if (!otherNode.getAddress().equals(transportAddress)) { + builder.add(otherNode); + } else { + logger.debug("removing address [{}]", otherNode); + } + } + listedNodes = Collections.unmodifiableList(builder); + } + return this; + } + + @Override + @SuppressWarnings("rawtypes") + public void close() { + synchronized (mutex) { + if (closed) { + return; + } + closed = true; + logger.info("disconnecting from nodes {}", nodes); + for (DiscoveryNode node : nodes) { + transportService.disconnectFromNode(node); + } + nodes = Collections.emptyList(); + logger.info("disconnecting from listed nodes {}", listedNodes); + for (DiscoveryNode listedNode : listedNodes) { + transportService.disconnectFromNode(listedNode); + } + listedNodes = Collections.emptyList(); + } + transportService.close(); + PluginsService pluginsService = injector.getInstance(PluginsService.class); + for (Class guiceService : pluginsService.getGuiceServiceClasses()) { + logger.info("closing plugin service {}", guiceService); + injector.getInstance(guiceService).close(); + } + // closing all plugins + pluginsService.filterPlugins(Plugin.class).forEach(plugin -> { + try { + logger.info("closing plugin {}", plugin); + plugin.close(); + } catch (IOException e) { + logger.warn(e.getMessage(), e); + } + }); + try { + ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS); + } catch (Exception e) { + logger.warn(e.getMessage(), e); + } + } + + private void connect() { + Set newNodes = new HashSet<>(); + Set newFilteredNodes = new HashSet<>(); + for (DiscoveryNode listedNode : listedNodes) { + if (!transportService.nodeConnected(listedNode)) { + try { + logger.info("connecting to listed node " + listedNode); + transportService.connectToNode(listedNode); + } catch (Exception e) { + logger.warn("failed to connect to node " + listedNode, e); + continue; + } + } + try { + LivenessResponse livenessResponse = transportService.submitRequest(listedNode, + TransportLivenessAction.NAME, new LivenessRequest(), + TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE) + .withTimeout(pingTimeout).build(), + new FutureTransportResponseHandler() { + @SuppressWarnings("deprecation") + @Override + public LivenessResponse newInstance() { + return new LivenessResponse(); + } + }).txGet(); + if (!clusterName.equals(livenessResponse.getClusterName())) { + logger.warn("node {} not part of the cluster {}, ignoring", listedNode, clusterName); + newFilteredNodes.add(listedNode); + } else if (livenessResponse.getDiscoveryNode() != null) { + DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode(); + newNodes.add(new DiscoveryNode(nodeWithInfo.getName(), nodeWithInfo.getId(), + nodeWithInfo.getEphemeralId(), nodeWithInfo.getHostName(), + nodeWithInfo.getHostAddress(), listedNode.getAddress(), nodeWithInfo.getAttributes(), + nodeWithInfo.getRoles(), nodeWithInfo.getVersion())); + } else { + logger.debug("node {} didn't return any discovery info, temporarily using transport discovery node", + listedNode); + newNodes.add(listedNode); + } + } catch (Exception e) { + logger.warn("failed to get node info for {}, 
disconnecting", e, listedNode); + transportService.disconnectFromNode(listedNode); + } + } + for (Iterator it = newNodes.iterator(); it.hasNext(); ) { + DiscoveryNode node = it.next(); + if (!transportService.nodeConnected(node)) { + try { + logger.debug("connecting to new node [{}]", node); + transportService.connectToNode(node); + } catch (Exception e) { + it.remove(); + logger.warn("failed to connect to new node [" + node + "], removed", e); + } + } + } + this.nodes = Collections.unmodifiableList(new ArrayList<>(newNodes)); + logger.info("connected to nodes: {}", nodes); + this.filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes)); + } + + @Override + @SuppressWarnings({"unchecked", "rawtypes"}) + protected > + void doExecute(Action action, final R request, final ActionListener listener) { + List nodeList = this.nodes; + if (nodeList.isEmpty()) { + throw new NoNodeAvailableException("none of the configured nodes are available: " + this.listedNodes); + } + int index = nodeCounter.incrementAndGet(); + if (index < 0) { + index = 0; + nodeCounter.set(0); + } + DiscoveryNode discoveryNode = nodeList.get(index % nodeList.size()); + // try once and never more + try { + ActionRequestValidationException validationException = request.validate(); + if (validationException != null) { + listener.onFailure(validationException); + return; + } + TransportRequestOptions transportOptions = action.transportOptions(settings); + transportService.sendRequest(discoveryNode, action.name(), request, transportOptions, + new ActionListenerResponseHandler<>(listener, action::newResponse)); + } catch (Exception e) { + listener.onFailure(e); + } + } + + private static Injector buildParams(Settings givenSettings, Settings defaultSettings, + Collection> plugins) { + Settings providedSettings = givenSettings; + if (!Node.NODE_NAME_SETTING.exists(providedSettings)) { + providedSettings = Settings.builder().put(providedSettings) + .put(Node.NODE_NAME_SETTING.getKey(), "_client_") + .build(); + } + final PluginsService pluginsService = newPluginService(providedSettings, plugins); + final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).build(); + final List resourcesToClose = new ArrayList<>(); + final ThreadPool threadPool = new ThreadPool(settings); + resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); + final NetworkService networkService = new NetworkService(Collections.emptyList()); + try { + final List> additionalSettings = new ArrayList<>(pluginsService.getPluginSettings()); + final List additionalSettingsFilter = new ArrayList<>(pluginsService.getPluginSettingsFilter()); + for (final ExecutorBuilder builder : threadPool.builders()) { + additionalSettings.addAll(builder.getRegisteredSettings()); + } + SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter); + SearchModule searchModule = new SearchModule(settings, true, + pluginsService.filterPlugins(SearchPlugin.class)); + List entries = new ArrayList<>(); + entries.addAll(NetworkModule.getNamedWriteables()); + entries.addAll(searchModule.getNamedWriteables()); + entries.addAll(pluginsService.filterPlugins(Plugin.class).stream() + .flatMap(p -> p.getNamedWriteables().stream()) + .collect(toList())); + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries); + NamedXContentRegistry xContentRegistry = new NamedXContentRegistry(Stream.of( + searchModule.getNamedXContents().stream(), + 
pluginsService.filterPlugins(Plugin.class).stream() + .flatMap(p -> p.getNamedXContent().stream()) + ).flatMap(Function.identity()).collect(toList())); + ModulesBuilder modules = new ModulesBuilder(); + // plugin modules must be added here, before others or we can get crazy injection errors + for (Module pluginModule : pluginsService.createGuiceModules()) { + modules.add(pluginModule); + } + modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool)); + ActionModule actionModule = new ActionModule(true, settings, null, + settingsModule.getIndexScopedSettings(), + settingsModule.getClusterSettings(), + settingsModule.getSettingsFilter(), + threadPool, + pluginsService.filterPlugins(ActionPlugin.class), null, null, null); + modules.add(actionModule); + CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(), + settingsModule.getClusterSettings()); + PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings); + BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService); + resourcesToClose.add(circuitBreakerService); + resourcesToClose.add(bigArrays); + modules.add(settingsModule); + NetworkModule networkModule = new NetworkModule(settings, true, + pluginsService.filterPlugins(NetworkPlugin.class), threadPool, + bigArrays, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, + xContentRegistry, networkService, null); + final Transport transport = networkModule.getTransportSupplier().get(); + final TransportService transportService = new TransportService(settings, transport, threadPool, + networkModule.getTransportInterceptor(), + boundTransportAddress -> DiscoveryNode.createLocal(settings, new TransportAddress(TransportAddress.META_ADDRESS, 0), + UUIDs.randomBase64UUID()), null, Collections.emptySet()); + modules.add((b -> { + b.bind(BigArrays.class).toInstance(bigArrays); + b.bind(PluginsService.class).toInstance(pluginsService); + b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService); + b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); + b.bind(Transport.class).toInstance(transport); + b.bind(TransportService.class).toInstance(transportService); + b.bind(NetworkService.class).toInstance(networkService); + })); + Injector injector = modules.createInjector(); + List pluginLifecycleComponents = pluginsService.getGuiceServiceClasses() + .stream().map(injector::getInstance).collect(Collectors.toList()); + resourcesToClose.addAll(pluginLifecycleComponents); + transportService.start(); + transportService.acceptIncomingRequests(); + resourcesToClose.clear(); + return injector; + } finally { + IOUtils.closeWhileHandlingException(resourcesToClose); + } + } + + private static TransportAddress dummyAddress(NetworkModule networkModule) { + final TransportAddress address; + try { + address = networkModule.getTransportSupplier().get().addressesFromString("0.0.0.0:0", 1)[0]; + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + return address; + } + + private static PluginsService newPluginService(final Settings settings, Collection> plugins) { + final Settings.Builder settingsBuilder = Settings.builder() + .put(TcpTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval + .put(NetworkService.NETWORK_SERVER.getKey(), false) + .put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE); + if (!settings.isEmpty()) { + logger.info(settings.toString()); + 
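+            // the caller-supplied settings are normalized by InternalSettingsPreparer and then layered on top of
+            // the client defaults set above (ping schedule, network server disabled, client type)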
settingsBuilder.put(InternalSettingsPreparer.prepareSettings(settings)); + } + return new PluginsService(settingsBuilder.build(), null, null, null, plugins); + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportConnectionListener.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportConnectionListener.java new file mode 100644 index 0000000..db349a5 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportConnectionListener.java @@ -0,0 +1,27 @@ +package org.xbib.elasticsearch.client.transport; + +import org.elasticsearch.cluster.node.DiscoveryNode; + +public interface TransportConnectionListener { + + /** + * Called once a node connection is opened and registered. + */ + default void onNodeConnected(DiscoveryNode node) {} + + /** + * Called once a node connection is closed and unregistered. + */ + default void onNodeDisconnected(DiscoveryNode node) {} + + /** + * Called once a node connection is closed. The connection might not have been registered in the + * transport as a shared connection to a specific node + */ + default void onConnectionClosed(Transport.Connection connection) {} + + /** + * Called once a node connection is opened. + */ + default void onConnectionOpened(Transport.Connection connection) {} +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportInterceptor.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportInterceptor.java new file mode 100644 index 0000000..6235693 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportInterceptor.java @@ -0,0 +1,31 @@ +package org.xbib.elasticsearch.client.transport; + +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; + +/** + * This interface allows plugins to intercept requests on both the sender and the receiver side. 
+ */ +public interface TransportInterceptor { + + default TransportRequestHandler interceptHandler(String action, String executor, + boolean forceExecution, + TransportRequestHandler actualHandler) { + return actualHandler; + } + + + default AsyncSender interceptSender(AsyncSender sender) { + return sender; + } + + + interface AsyncSender { + void sendRequest(Transport.Connection connection, String action, + TransportRequest request, TransportRequestOptions options, + TransportResponseHandler handler); + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportService.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportService.java new file mode 100644 index 0000000..d035f85 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportService.java @@ -0,0 +1,1224 @@ +package org.xbib.elasticsearch.client.transport; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ActionNotFoundTransportException; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.FutureTransportResponseHandler; +import org.elasticsearch.transport.NodeDisconnectedException; +import org.elasticsearch.transport.NodeNotConnectedException; +import org.elasticsearch.transport.PlainTransportFuture; +import org.elasticsearch.transport.ReceiveTimeoutTransportException; +import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.RequestHandlerRegistry; +import org.elasticsearch.transport.ResponseHandlerFailureTransportException; +import org.elasticsearch.transport.SendRequestTransportException; +import org.elasticsearch.transport.TransportChannel; +import 
org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportFuture; +import org.elasticsearch.transport.TransportInfo; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; +import org.elasticsearch.transport.TransportResponseOptions; +import org.elasticsearch.transport.TransportStats; + +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.common.settings.Setting.listSetting; + +public class TransportService extends AbstractLifecycleComponent { + + public static final String DIRECT_RESPONSE_PROFILE = ".direct"; + public static final String HANDSHAKE_ACTION_NAME = "internal:transport/handshake"; + + private final CountDownLatch blockIncomingRequestsLatch = new CountDownLatch(1); + protected final Transport transport; + protected final ThreadPool threadPool; + protected final ClusterName clusterName; + protected final TaskManager taskManager; + private final TransportInterceptor.AsyncSender asyncSender; + private final Function localNodeFactory; + private final boolean connectToRemoteCluster; + + volatile Map requestHandlers = Collections.emptyMap(); + final Object requestHandlerMutex = new Object(); + + final ConcurrentMapLong clientHandlers = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + + final CopyOnWriteArrayList connectionListeners = new CopyOnWriteArrayList<>(); + + private final TransportInterceptor interceptor; + + // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they + // do show up, we can print more descriptive information about them + final Map timeoutInfoHandlers = + Collections.synchronizedMap(new LinkedHashMap(100, .75F, true) { + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + return size() > 100; + } + }); + + public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() {}; + + // tracer log + + public static final Setting> TRACE_LOG_INCLUDE_SETTING = + listSetting("transport.tracer.include", emptyList(), Function.identity(), Property.Dynamic, Property.NodeScope); + public static final Setting> TRACE_LOG_EXCLUDE_SETTING = + listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), + Function.identity(), Property.Dynamic, Property.NodeScope); + + private final Logger tracerLog; + + volatile String[] tracerLogInclude; + volatile String[] tracerLogExclude; + + private final RemoteClusterService remoteClusterService; + + /** if set will call requests sent to this id to shortcut and executed locally */ + volatile DiscoveryNode localNode = null; + private final Transport.Connection localNodeConnection = new 
Transport.Connection() { + @Override + public DiscoveryNode getNode() { + return localNode; + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + sendLocalRequest(requestId, action, request, options); + } + + @Override + public void close() throws IOException { + } + }; + + /** + * Build the service. + * + * @param clusterSettings if non null, the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings + * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. + */ + public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor, + Function localNodeFactory, @Nullable ClusterSettings clusterSettings, + Set taskHeaders) { + super(settings); + this.transport = transport; + this.threadPool = threadPool; + this.localNodeFactory = localNodeFactory; + this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); + setTracerLogInclude(TRACE_LOG_INCLUDE_SETTING.get(settings)); + setTracerLogExclude(TRACE_LOG_EXCLUDE_SETTING.get(settings)); + tracerLog = Loggers.getLogger(logger, ".tracer"); + taskManager = createTaskManager(settings, threadPool, taskHeaders); + this.interceptor = transportInterceptor; + this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); + this.connectToRemoteCluster = RemoteClusterService.ENABLE_REMOTE_CLUSTERS.get(settings); + remoteClusterService = new RemoteClusterService(settings, this); + if (clusterSettings != null) { + clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude); + clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude); + if (connectToRemoteCluster) { + remoteClusterService.listenForUpdates(clusterSettings); + } + } + } + + public RemoteClusterService getRemoteClusterService() { + return remoteClusterService; + } + + public DiscoveryNode getLocalNode() { + return localNode; + } + + public TaskManager getTaskManager() { + return taskManager; + } + + protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { + return new TaskManager(settings, threadPool, taskHeaders); + } + + /** + * The executor service for this transport service. 
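+     * By default this is the generic thread pool.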
+ * + * @return the executor service + */ + protected ExecutorService getExecutorService() { + return threadPool.generic(); + } + + void setTracerLogInclude(List tracerLogInclude) { + this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY); + } + + void setTracerLogExclude(List tracerLogExclude) { + this.tracerLogExclude = tracerLogExclude.toArray(Strings.EMPTY_ARRAY); + } + + @Override + protected void doStart() { + transport.setTransportService(this); + transport.start(); + + if (transport.boundAddress() != null && logger.isInfoEnabled()) { + logger.info("{}", transport.boundAddress()); + for (Map.Entry entry : transport.profileBoundAddresses().entrySet()) { + logger.info("profile [{}]: {}", entry.getKey(), entry.getValue()); + } + } + localNode = localNodeFactory.apply(transport.boundAddress()); + registerRequestHandler( + HANDSHAKE_ACTION_NAME, + () -> HandshakeRequest.INSTANCE, + ThreadPool.Names.SAME, + false, false, + (request, channel) -> channel.sendResponse( + new HandshakeResponse(localNode, clusterName, localNode.getVersion()))); + if (connectToRemoteCluster) { + // here we start to connect to the remote clusters + remoteClusterService.initializeRemoteClusters(); + } + } + + @Override + protected void doStop() { + try { + transport.stop(); + } finally { + // in case the transport is not connected to our local node (thus cleaned on node disconnect) + // make sure to clean any leftover on going handles + for (Map.Entry entry : clientHandlers.entrySet()) { + final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey()); + if (holderToNotify != null) { + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows + getExecutorService().execute(new AbstractRunnable() { + @Override + public void onRejection(Exception e) { + // if we get rejected during node shutdown we don't wanna bubble it up + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "failed to notify response handler on rejection, action: {}", + holderToNotify.action()), + e); + } + @Override + public void onFailure(Exception e) { + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify response handler on exception, action: {}", + holderToNotify.action()), + e); + } + @Override + public void doRun() { + TransportException ex = new TransportException("transport stopped, action: " + holderToNotify.action()); + holderToNotify.handler().handleException(ex); + } + }); + } + } + } + } + + @Override + protected void doClose() throws IOException { + IOUtils.close(remoteClusterService, transport); + } + + /** + * start accepting incoming requests. + * when the transport layer starts up it will block any incoming requests until + * this method is called + */ + public final void acceptIncomingRequests() { + blockIncomingRequestsLatch.countDown(); + } + + public TransportInfo info() { + BoundTransportAddress boundTransportAddress = boundAddress(); + if (boundTransportAddress == null) { + return null; + } + return new TransportInfo(boundTransportAddress, transport.profileBoundAddresses()); + } + + public TransportStats stats() { + return transport.getStats(); + } + + public BoundTransportAddress boundAddress() { + return transport.boundAddress(); + } + + public List getLocalAddresses() { + return transport.getLocalAddresses(); + } + + /** + * Returns true iff the given node is already connected. 
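+     * The local node is always considered connected.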
+ */ + public boolean nodeConnected(DiscoveryNode node) { + return isLocalNode(node) || transport.nodeConnected(node); + } + + public void connectToNode(DiscoveryNode node) throws ConnectTransportException { + connectToNode(node, null); + } + + /** + * Connect to the specified node with the given connection profile + * + * @param node the node to connect to + * @param connectionProfile the connection profile to use when connecting to this node + */ + public void connectToNode(final DiscoveryNode node, ConnectionProfile connectionProfile) { + if (isLocalNode(node)) { + return; + } + transport.connectToNode(node, connectionProfile, (newConnection, actualProfile) -> { + // We don't validate cluster names to allow for tribe node connections. + final DiscoveryNode remote = handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true); + // removed for TransportClient + //if (node.equals(remote) == false) { + // throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); + //} + }); + } + + /** + * Establishes and returns a new connection to the given node. The connection is NOT maintained by this service, it's the callers + * responsibility to close the connection once it goes out of scope. + * @param node the node to connect to + * @param profile the connection profile to use + */ + public Transport.Connection openConnection(final DiscoveryNode node, ConnectionProfile profile) throws IOException { + if (isLocalNode(node)) { + return localNodeConnection; + } else { + return transport.openConnection(node, profile); + } + } + + /** + * Executes a high-level handshake using the given connection + * and returns the discovery node of the node the connection + * was established with. The handshake will fail if the cluster + * name on the target node mismatches the local cluster name. + * + * @param connection the connection to a specific node + * @param handshakeTimeout handshake timeout + * @return the connected node + * @throws ConnectTransportException if the connection failed + * @throws IllegalStateException if the handshake failed + */ + public DiscoveryNode handshake( + final Transport.Connection connection, + final long handshakeTimeout) throws ConnectTransportException { + return handshake(connection, handshakeTimeout, clusterName::equals); + } + + /** + * Executes a high-level handshake using the given connection + * and returns the discovery node of the node the connection + * was established with. The handshake will fail if the cluster + * name on the target node doesn't match the local cluster name. 
+ * + * @param connection the connection to a specific node + * @param handshakeTimeout handshake timeout + * @param clusterNamePredicate cluster name validation predicate + * @return the connected node + * @throws ConnectTransportException if the connection failed + * @throws IllegalStateException if the handshake failed + */ + public DiscoveryNode handshake( + final Transport.Connection connection, + final long handshakeTimeout, Predicate clusterNamePredicate) throws ConnectTransportException { + final HandshakeResponse response; + final DiscoveryNode node = connection.getNode(); + try { + PlainTransportFuture futureHandler = new PlainTransportFuture<>( + new FutureTransportResponseHandler() { + @Override + public HandshakeResponse newInstance() { + return new HandshakeResponse(); + } + }); + sendRequest(connection, HANDSHAKE_ACTION_NAME, HandshakeRequest.INSTANCE, + TransportRequestOptions.builder().withTimeout(handshakeTimeout).build(), futureHandler); + response = futureHandler.txGet(); + } catch (Exception e) { + throw new IllegalStateException("handshake failed with " + node, e); + } + + if (!clusterNamePredicate.test(response.clusterName)) { + throw new IllegalStateException("handshake failed, mismatched cluster name [" + response.clusterName + "] - " + node); + } else if (response.version.isCompatible(localNode.getVersion()) == false) { + throw new IllegalStateException("handshake failed, incompatible version [" + response.version + "] - " + node); + } + logger.info("handshake: success with node {}", response.discoveryNode); + return response.discoveryNode; + } + + static class HandshakeRequest extends TransportRequest { + + public static final HandshakeRequest INSTANCE = new HandshakeRequest(); + + private HandshakeRequest() { + } + + } + + public static class HandshakeResponse extends TransportResponse { + private DiscoveryNode discoveryNode; + private ClusterName clusterName; + private Version version; + + HandshakeResponse() { + } + + public HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, Version version) { + this.discoveryNode = discoveryNode; + this.version = version; + this.clusterName = clusterName; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + discoveryNode = in.readOptionalWriteable(DiscoveryNode::new); + clusterName = new ClusterName(in); + version = Version.readVersion(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalWriteable(discoveryNode); + clusterName.writeTo(out); + Version.writeVersion(version, out); + } + } + + public void disconnectFromNode(DiscoveryNode node) { + if (isLocalNode(node)) { + return; + } + transport.disconnectFromNode(node); + } + + public void addConnectionListener(TransportConnectionListener listener) { + connectionListeners.add(listener); + } + + public void removeConnectionListener(TransportConnectionListener listener) { + connectionListeners.remove(listener); + } + + public TransportFuture submitRequest(DiscoveryNode node, String action, TransportRequest request, + TransportResponseHandler handler) throws TransportException { + return submitRequest(node, action, request, TransportRequestOptions.EMPTY, handler); + } + + public TransportFuture submitRequest(DiscoveryNode node, String action, TransportRequest request, + TransportRequestOptions options, + TransportResponseHandler handler) throws TransportException { + PlainTransportFuture futureHandler = new PlainTransportFuture<>(handler); 
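+        // if the node is not connected, the exception is delivered to the returned future instead of being thrown to the caller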
+ try { + Transport.Connection connection = getConnection(node); + sendRequest(connection, action, request, options, futureHandler); + } catch (NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + futureHandler.handleException(ex); + } + return futureHandler; + } + + public void sendRequest(final DiscoveryNode node, final String action, + final TransportRequest request, + final TransportResponseHandler handler) { + try { + Transport.Connection connection = getConnection(node); + sendRequest(connection, action, request, TransportRequestOptions.EMPTY, handler); + } catch (NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + handler.handleException(ex); + } + } + + public final void sendRequest(final DiscoveryNode node, final String action, + final TransportRequest request, + final TransportRequestOptions options, + TransportResponseHandler handler) { + try { + Transport.Connection connection = getConnection(node); + sendRequest(connection, action, request, options, handler); + } catch (NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + handler.handleException(ex); + } + } + + public final void sendRequest(final Transport.Connection connection, final String action, + final TransportRequest request, + final TransportRequestOptions options, + TransportResponseHandler handler) { + + asyncSender.sendRequest(connection, action, request, options, handler); + } + + /** + * Returns either a real transport connection or a local node connection if we are using the local node optimization. + * @throws NodeNotConnectedException if the given node is not connected + */ + public Transport.Connection getConnection(DiscoveryNode node) { + if (isLocalNode(node)) { + return localNodeConnection; + } else { + return transport.getConnection(node); + } + } + + public final void sendChildRequest(final DiscoveryNode node, final String action, + final TransportRequest request, final Task parentTask, + final TransportRequestOptions options, + final TransportResponseHandler handler) { + try { + Transport.Connection connection = getConnection(node); + sendChildRequest(connection, action, request, parentTask, options, handler); + } catch (NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + handler.handleException(ex); + } + } + + public void sendChildRequest(final Transport.Connection connection, final String action, + final TransportRequest request, final Task parentTask, + final TransportResponseHandler handler) { + sendChildRequest(connection, action, request, parentTask, TransportRequestOptions.EMPTY, handler); + } + + public void sendChildRequest(final Transport.Connection connection, final String action, + final TransportRequest request, final Task parentTask, + final TransportRequestOptions options, + final TransportResponseHandler handler) { + request.setParentTask(localNode.getId(), parentTask.getId()); + try { + sendRequest(connection, action, request, options, handler); + } catch (TaskCancelledException ex) { + // The parent task is already cancelled - just fail the request + handler.handleException(new TransportException(ex)); + } catch (NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + handler.handleException(ex); + } + + } + + private void sendRequestInternal(final Transport.Connection connection, final String action, + final TransportRequest request, + final TransportRequestOptions 
options, + TransportResponseHandler handler) { + if (connection == null) { + throw new IllegalStateException("can't send request to a null connection"); + } + DiscoveryNode node = connection.getNode(); + final long requestId = transport.newRequestId(); + final TimeoutHandler timeoutHandler; + try { + + if (options.timeout() == null) { + timeoutHandler = null; + } else { + timeoutHandler = new TimeoutHandler(requestId); + } + Supplier storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true); + TransportResponseHandler responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler); + clientHandlers.put(requestId, new RequestHolder<>(responseHandler, connection, action, timeoutHandler)); + if (lifecycle.stoppedOrClosed()) { + // if we are not started the exception handling will remove the RequestHolder again and calls the handler to notify + // the caller. It will only notify if the toStop code hasn't done the work yet. + throw new TransportException("TransportService is closed stopped can't send request"); + } + if (timeoutHandler != null) { + assert options.timeout() != null; + timeoutHandler.future = threadPool.schedule(options.timeout(), ThreadPool.Names.GENERIC, timeoutHandler); + } + connection.sendRequest(requestId, action, request, options); // local node optimization happens upstream + } catch (final Exception e) { + // usually happen either because we failed to connect to the node + // or because we failed serializing the message + final RequestHolder holderToNotify = clientHandlers.remove(requestId); + // If holderToNotify == null then handler has already been taken care of. + if (holderToNotify != null) { + holderToNotify.cancelTimeout(); + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows + final SendRequestTransportException sendRequestException = new SendRequestTransportException(node, action, e); + threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() { + @Override + public void onRejection(Exception e) { + // if we get rejected during node shutdown we don't wanna bubble it up + logger.debug( + (Supplier) () -> new ParameterizedMessage( + "failed to notify response handler on rejection, action: {}", + holderToNotify.action()), + e); + } + @Override + public void onFailure(Exception e) { + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify response handler on exception, action: {}", + holderToNotify.action()), + e); + } + @Override + protected void doRun() throws Exception { + holderToNotify.handler().handleException(sendRequestException); + } + }); + } else { + logger.debug("Exception while sending request, handler likely already notified due to timeout", e); + } + } + } + + private void sendLocalRequest(long requestId, final String action, final TransportRequest request, TransportRequestOptions options) { + final DirectResponseChannel channel = new DirectResponseChannel(logger, localNode, action, requestId, this, threadPool); + try { + onRequestSent(localNode, requestId, action, request, options); + onRequestReceived(requestId, action); + final RequestHandlerRegistry reg = getRequestHandler(action); + if (reg == null) { + throw new ActionNotFoundTransportException("Action [" + action + "] not found"); + } + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + //noinspection unchecked + reg.processMessageReceived(request, channel); + } else { + 
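+                // fork to the handler's registered executor; failures are reported back through the direct response channel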
threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + //noinspection unchecked + reg.processMessageReceived(request, channel); + } + + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } + + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify channel of error message for action [{}]", action), inner); + } + } + }); + } + + } catch (Exception e) { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn( + (Supplier) () -> new ParameterizedMessage( + "failed to notify channel of error message for action [{}]", action), inner); + } + } + } + + private boolean shouldTraceAction(String action) { + if (tracerLogInclude.length > 0) { + if (Regex.simpleMatch(tracerLogInclude, action) == false) { + return false; + } + } + if (tracerLogExclude.length > 0) { + return !Regex.simpleMatch(tracerLogExclude, action); + } + return true; + } + + public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + return transport.addressesFromString(address, perAddressLimit); + } + + /** + * Registers a new request handler + * + * @param action The action the request handler is associated with + * @param requestFactory a callable to be used construct new instances for streaming + * @param executor The executor the request handling will be executed on + * @param handler The handler itself that implements the request handling + */ + public void registerRequestHandler(String action, Supplier requestFactory, + String executor, TransportRequestHandler handler) { + handler = interceptor.interceptHandler(action, executor, false, handler); + RequestHandlerRegistry reg = new RequestHandlerRegistry<>( + action, Streamable.newWriteableReader(requestFactory), taskManager, handler, executor, false, true); + registerRequestHandler(reg); + } + + /** + * Registers a new request handler + * + * @param action The action the request handler is associated with + * @param requestReader a callable to be used construct new instances for streaming + * @param executor The executor the request handling will be executed on + * @param handler The handler itself that implements the request handling + */ + public void registerRequestHandler(String action, String executor, + Writeable.Reader requestReader, + TransportRequestHandler handler) { + handler = interceptor.interceptHandler(action, executor, false, handler); + RequestHandlerRegistry reg = new RequestHandlerRegistry<>( + action, requestReader, taskManager, handler, executor, false, true); + registerRequestHandler(reg); + } + + /** + * Registers a new request handler + * + * @param action The action the request handler is associated with + * @param request The request class that will be used to construct new instances for streaming + * @param executor The executor the request handling will be executed on + * @param forceExecution Force execution on the executor queue and never reject it + * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached. 
+ * @param handler The handler itself that implements the request handling + */ + public void registerRequestHandler(String action, Supplier request, + String executor, boolean forceExecution, + boolean canTripCircuitBreaker, + TransportRequestHandler handler) { + handler = interceptor.interceptHandler(action, executor, forceExecution, handler); + RequestHandlerRegistry reg = new RequestHandlerRegistry<>( + action, Streamable.newWriteableReader(request), taskManager, handler, executor, forceExecution, canTripCircuitBreaker); + registerRequestHandler(reg); + } + + /** + * Registers a new request handler + * + * @param action The action the request handler is associated with + * @param requestReader The request class that will be used to construct new instances for streaming + * @param executor The executor the request handling will be executed on + * @param forceExecution Force execution on the executor queue and never reject it + * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached. + * @param handler The handler itself that implements the request handling + */ + public void registerRequestHandler(String action, + String executor, boolean forceExecution, + boolean canTripCircuitBreaker, + Writeable.Reader requestReader, + TransportRequestHandler handler) { + handler = interceptor.interceptHandler(action, executor, forceExecution, handler); + RequestHandlerRegistry reg = new RequestHandlerRegistry<>( + action, requestReader, taskManager, handler, executor, forceExecution, canTripCircuitBreaker); + registerRequestHandler(reg); + } + + private void registerRequestHandler(RequestHandlerRegistry reg) { + synchronized (requestHandlerMutex) { + if (requestHandlers.containsKey(reg.getAction())) { + throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); + } + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + } + } + + /** called by the {@link Transport} implementation once a request has been sent */ + void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, + TransportRequestOptions options) { + if (traceEnabled() && shouldTraceAction(action)) { + traceRequestSent(node, requestId, action, options); + } + } + + protected boolean traceEnabled() { + return tracerLog.isTraceEnabled(); + } + + /** called by the {@link Transport} implementation once a response was sent to calling node */ + void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions options) { + if (traceEnabled() && shouldTraceAction(action)) { + traceResponseSent(requestId, action); + } + } + + /** called by the {@link Transport} implementation after an exception was sent as a response to an incoming request */ + void onResponseSent(long requestId, String action, Exception e) { + if (traceEnabled() && shouldTraceAction(action)) { + traceResponseSent(requestId, action, e); + } + } + + protected void traceResponseSent(long requestId, String action, Exception e) { + tracerLog.trace( + (org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); + } + + /** + * called by the {@link Transport} implementation when an incoming request arrives but before + * any parsing of it has happened (with the exception of the requestId and action) + */ + void onRequestReceived(long requestId, String action) { + try { + 
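+            // block until acceptIncomingRequests() has been called, so no request is handled before all handlers are registered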
blockIncomingRequestsLatch.await(); + } catch (InterruptedException e) { + logger.trace("interrupted while waiting for incoming requests block to be removed"); + } + if (traceEnabled() && shouldTraceAction(action)) { + traceReceivedRequest(requestId, action); + } + } + + public RequestHandlerRegistry getRequestHandler(String action) { + return requestHandlers.get(action); + } + + /** + * called by the {@link Transport} implementation when a response or an exception has been received for a previously + * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not + * found. + */ + public TransportResponseHandler onResponseReceived(final long requestId) { + RequestHolder holder = clientHandlers.remove(requestId); + + if (holder == null) { + checkForTimeout(requestId); + return null; + } + holder.cancelTimeout(); + if (traceEnabled() && shouldTraceAction(holder.action())) { + traceReceivedResponse(requestId, holder.connection().getNode(), holder.action()); + } + return holder.handler(); + } + + private void checkForTimeout(long requestId) { + // lets see if its in the timeout holder, but sync on mutex to make sure any ongoing timeout handling has finished + final DiscoveryNode sourceNode; + final String action; + assert clientHandlers.get(requestId) == null; + TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId); + if (timeoutInfoHolder != null) { + long time = System.currentTimeMillis(); + logger.warn("Received response for a request that has timed out, sent [{}ms] ago, timed out [{}ms] ago, " + + "action [{}], node [{}], id [{}]", time - timeoutInfoHolder.sentTime(), time - timeoutInfoHolder.timeoutTime(), + timeoutInfoHolder.action(), timeoutInfoHolder.node(), requestId); + action = timeoutInfoHolder.action(); + sourceNode = timeoutInfoHolder.node(); + } else { + logger.warn("Transport response handler not found of id [{}]", requestId); + action = null; + sourceNode = null; + } + // call tracer out of lock + if (traceEnabled() == false) { + return; + } + if (action == null) { + assert sourceNode == null; + traceUnresolvedResponse(requestId); + } else if (shouldTraceAction(action)) { + traceReceivedResponse(requestId, sourceNode, action); + } + } + + void onNodeConnected(final DiscoveryNode node) { + // capture listeners before spawning the background callback so the following pattern won't trigger a call + // connectToNode(); connection is completed successfully + // addConnectionListener(); this listener shouldn't be called + final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); + getExecutorService().execute(() -> listenersToNotify.forEach(listener -> listener.onNodeConnected(node))); + } + + void onConnectionOpened(Transport.Connection connection) { + // capture listeners before spawning the background callback so the following pattern won't trigger a call + // connectToNode(); connection is completed successfully + // addConnectionListener(); this listener shouldn't be called + final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); + getExecutorService().execute(() -> listenersToNotify.forEach(listener -> listener.onConnectionOpened(connection))); + } + + public void onNodeDisconnected(final DiscoveryNode node) { + try { + getExecutorService().execute( () -> { + for (final TransportConnectionListener connectionListener : connectionListeners) { + connectionListener.onNodeDisconnected(node); + } + }); + } catch (EsRejectedExecutionException ex) 
{ + logger.debug("Rejected execution on NodeDisconnected", ex); + } + } + + void onConnectionClosed(Transport.Connection connection) { + try { + for (Map.Entry entry : clientHandlers.entrySet()) { + RequestHolder holder = entry.getValue(); + if (holder.connection().getCacheKey().equals(connection.getCacheKey())) { + final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey()); + if (holderToNotify != null) { + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows + getExecutorService().execute(() -> holderToNotify.handler().handleException(new NodeDisconnectedException( + connection.getNode(), holderToNotify.action()))); + } + } + } + } catch (EsRejectedExecutionException ex) { + logger.debug("Rejected execution on onConnectionClosed", ex); + } + } + + protected void traceReceivedRequest(long requestId, String action) { + tracerLog.trace("[{}][{}] received request", requestId, action); + } + + protected void traceResponseSent(long requestId, String action) { + tracerLog.trace("[{}][{}] sent response", requestId, action); + } + + protected void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String action) { + tracerLog.trace("[{}][{}] received response from [{}]", requestId, action, sourceNode); + } + + protected void traceUnresolvedResponse(long requestId) { + tracerLog.trace("[{}] received response but can't resolve it to a request", requestId); + } + + protected void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { + tracerLog.trace("[{}][{}] sent to [{}] (timeout: [{}])", requestId, action, node, options.timeout()); + } + + class TimeoutHandler implements Runnable { + + private final long requestId; + + private final long sentTime = System.currentTimeMillis(); + + volatile ScheduledFuture future; + + TimeoutHandler(long requestId) { + this.requestId = requestId; + } + + @Override + public void run() { + // we get first to make sure we only add the TimeoutInfoHandler if needed. + final RequestHolder holder = clientHandlers.get(requestId); + if (holder != null) { + // add it to the timeout information holder, in case we are going to get a response later + long timeoutTime = System.currentTimeMillis(); + timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(holder.connection().getNode(), holder.action(), sentTime, + timeoutTime)); + // now that we have the information visible via timeoutInfoHandlers, we try to remove the request id + final RequestHolder removedHolder = clientHandlers.remove(requestId); + if (removedHolder != null) { + assert removedHolder == holder : "two different holder instances for request [" + requestId + "]"; + removedHolder.handler().handleException( + new ReceiveTimeoutTransportException(holder.connection().getNode(), holder.action(), + "request_id [" + requestId + "] timed out after [" + (timeoutTime - sentTime) + "ms]")); + } else { + // response was processed, remove timeout info. + timeoutInfoHandlers.remove(requestId); + } + } + } + + /** + * cancels timeout handling. this is a best effort only to avoid running it. remove the requestId from {@link #clientHandlers} + * to make sure this doesn't run. 
+ */ + public void cancel() { + assert clientHandlers.get(requestId) == null : + "cancel must be called after the requestId [" + requestId + "] has been removed from clientHandlers"; + FutureUtils.cancel(future); + } + } + + static class TimeoutInfoHolder { + + private final DiscoveryNode node; + private final String action; + private final long sentTime; + private final long timeoutTime; + + TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) { + this.node = node; + this.action = action; + this.sentTime = sentTime; + this.timeoutTime = timeoutTime; + } + + public DiscoveryNode node() { + return node; + } + + public String action() { + return action; + } + + public long sentTime() { + return sentTime; + } + + public long timeoutTime() { + return timeoutTime; + } + } + + static class RequestHolder { + + private final TransportResponseHandler handler; + + private final Transport.Connection connection; + + private final String action; + + private final TimeoutHandler timeoutHandler; + + RequestHolder(TransportResponseHandler handler, Transport.Connection connection, String action, TimeoutHandler timeoutHandler) { + this.handler = handler; + this.connection = connection; + this.action = action; + this.timeoutHandler = timeoutHandler; + } + + public TransportResponseHandler handler() { + return handler; + } + + public Transport.Connection connection() { + return this.connection; + } + + public String action() { + return this.action; + } + + public void cancelTimeout() { + if (timeoutHandler != null) { + timeoutHandler.cancel(); + } + } + } + + /** + * This handler wrapper ensures that the response thread executes with the correct thread context. Before any of the handle methods + * are invoked we restore the context. + */ + public static final class ContextRestoreResponseHandler implements TransportResponseHandler { + + private final TransportResponseHandler delegate; + private final Supplier contextSupplier; + + public ContextRestoreResponseHandler(Supplier contextSupplier, TransportResponseHandler delegate) { + this.delegate = delegate; + this.contextSupplier = contextSupplier; + } + + @Override + public T read(StreamInput in) throws IOException { + return delegate.read(in); + } + + @Override + public void handleResponse(T response) { + try (ThreadContext.StoredContext ignore = contextSupplier.get()) { + delegate.handleResponse(response); + } + } + + @Override + public void handleException(TransportException exp) { + try (ThreadContext.StoredContext ignore = contextSupplier.get()) { + delegate.handleException(exp); + } + } + + @Override + public String executor() { + return delegate.executor(); + } + + @Override + public String toString() { + return getClass().getName() + "/" + delegate.toString(); + } + + } + + static class DirectResponseChannel implements TransportChannel { + final Logger logger; + final DiscoveryNode localNode; + private final String action; + private final long requestId; + final TransportService service; + final ThreadPool threadPool; + + DirectResponseChannel(Logger logger, DiscoveryNode localNode, String action, long requestId, TransportService service, + ThreadPool threadPool) { + this.logger = logger; + this.localNode = localNode; + this.action = action; + this.requestId = requestId; + this.service = service; + this.threadPool = threadPool; + } + + @Override + public String getProfileName() { + return DIRECT_RESPONSE_PROFILE; + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + 
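+            // direct (local) responses bypass serialization and are handed straight to the registered response handler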
sendResponse(response, TransportResponseOptions.EMPTY); + } + + @Override + public void sendResponse(final TransportResponse response, TransportResponseOptions options) throws IOException { + service.onResponseSent(requestId, action, response, options); + final TransportResponseHandler handler = service.onResponseReceived(requestId); + // ignore if its null, the service logs it + if (handler != null) { + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + processResponse(handler, response); + } else { + threadPool.executor(executor).execute(() -> processResponse(handler, response)); + } + } + } + + @SuppressWarnings("unchecked") + protected void processResponse(TransportResponseHandler handler, TransportResponse response) { + try { + handler.handleResponse(response); + } catch (Exception e) { + processException(handler, wrapInRemote(new ResponseHandlerFailureTransportException(e))); + } + } + + @Override + public void sendResponse(Exception exception) throws IOException { + service.onResponseSent(requestId, action, exception); + final TransportResponseHandler handler = service.onResponseReceived(requestId); + // ignore if its null, the service logs it + if (handler != null) { + final RemoteTransportException rtx = wrapInRemote(exception); + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + processException(handler, rtx); + } else { + threadPool.executor(handler.executor()).execute(() -> processException(handler, rtx)); + } + } + } + + protected RemoteTransportException wrapInRemote(Exception e) { + if (e instanceof RemoteTransportException) { + return (RemoteTransportException) e; + } + return new RemoteTransportException(localNode.getName(), localNode.getAddress(), action, e); + } + + protected void processException(final TransportResponseHandler handler, final RemoteTransportException rtx) { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "failed to handle exception for action [{}], handler [{}]", action, handler), e); + } + } + + @Override + public String getChannelType() { + return "direct"; + } + + @Override + public Version getVersion() { + return localNode.getVersion(); + } + } + + /** + * Returns the internal thread pool + */ + public ThreadPool getThreadPool() { + return threadPool; + } + + private boolean isLocalNode(DiscoveryNode discoveryNode) { + return Objects.requireNonNull(discoveryNode, "discovery node must not be null").equals(localNode); + } +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportStatus.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportStatus.java new file mode 100644 index 0000000..ed69ad5 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportStatus.java @@ -0,0 +1,52 @@ +package org.xbib.elasticsearch.client.transport; + +public final class TransportStatus { + + private static final byte STATUS_REQRES = 1 << 0; + private static final byte STATUS_ERROR = 1 << 1; + private static final byte STATUS_COMPRESS = 1 << 2; + private static final byte STATUS_HANDSHAKE = 1 << 3; + + public static boolean isRequest(byte value) { + return (value & STATUS_REQRES) == 0; + } + + public static byte setRequest(byte value) { + value &= ~STATUS_REQRES; + return value; + } + + public static byte setResponse(byte value) { + value |= STATUS_REQRES; + return value; + } + + public static boolean isError(byte 
value) { + return (value & STATUS_ERROR) != 0; + } + + public static byte setError(byte value) { + value |= STATUS_ERROR; + return value; + } + + public static boolean isCompress(byte value) { + return (value & STATUS_COMPRESS) != 0; + } + + public static byte setCompress(byte value) { + value |= STATUS_COMPRESS; + return value; + } + + static boolean isHandshake(byte value) { // pkg private since it's only used internally + return (value & STATUS_HANDSHAKE) != 0; + } + + static byte setHandshake(byte value) { // pkg private since it's only used internally + value |= STATUS_HANDSHAKE; + return value; + } + + +} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/package-info.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/package-info.java new file mode 100644 index 0000000..50220b5 --- /dev/null +++ b/transport/src/main/java/org/xbib/elasticsearch/client/transport/package-info.java @@ -0,0 +1,4 @@ +/** + * Classes for Elasticsearch transport client. + */ +package org.xbib.elasticsearch.client.transport; diff --git a/transport/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods b/transport/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods new file mode 100644 index 0000000..c94ea28 --- /dev/null +++ b/transport/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods @@ -0,0 +1 @@ +org.xbib.elasticsearch.client.transport.TransportBulkClient \ No newline at end of file diff --git a/transport/src/main/resources/extra-security.policy b/transport/src/main/resources/extra-security.policy new file mode 100644 index 0000000..24db998 --- /dev/null +++ b/transport/src/main/resources/extra-security.policy @@ -0,0 +1,15 @@ + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; + // netty makes and accepts socket connections + permission java.net.SocketPermission "*", "accept,connect,resolve"; + // 4.1.24 io.netty.util.concurrent.GlobalEventExecutor$2.run(GlobalEventExecutor.java:228) + permission java.lang.RuntimePermission "setContextClassLoader"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! 
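+  // under the security manager, writing this system property requires an explicit permission for the netty-transport codebase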
+ permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TestRunnerThreadsFilter.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TestRunnerThreadsFilter.java new file mode 100644 index 0000000..0ad52e3 --- /dev/null +++ b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TestRunnerThreadsFilter.java @@ -0,0 +1,11 @@ +package org.xbib.elasticsearch.client.transport; + +import com.carrotsearch.randomizedtesting.ThreadFilter; + +public class TestRunnerThreadsFilter implements ThreadFilter { + + @Override + public boolean reject(Thread thread) { + return thread.getName().startsWith("ObjectCleanerThread"); + } +} diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientDuplicateIDTests.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientDuplicateIDTests.java new file mode 100644 index 0000000..52bb8df --- /dev/null +++ b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientDuplicateIDTests.java @@ -0,0 +1,107 @@ +package org.xbib.elasticsearch.client.transport; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.transport.Netty4Plugin; +import org.junit.Before; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; + +import java.util.Collection; +import java.util.Collections; + +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +public class TransportBulkClientDuplicateIDTests extends ESSingleNodeTestCase { + + private static final Logger logger = LogManager.getLogger(TransportBulkClientDuplicateIDTests.class.getName()); + + private static final long MAX_ACTIONS = 100L; + + private static final long NUM_ACTIONS = 12345L; + + private TransportAddress address; + + @Before + public void fetchTransportAddress() { + NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); + address = nodeInfo.getTransport().getAddress().publishAddress(); + } + + @Override + protected Collection> getPlugins() { + return Collections.singletonList(Netty4Plugin.class); + } + + @Override + public Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "test-cluster") + .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) + .build(); + } + + private Settings transportClientSettings() { + return Settings.builder() + 
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "test-cluster") + .put("host", address.address().getHostString() + ":" + address.getPort()) + .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created + .build(); + } + + public void testDuplicateDocIDs() throws Exception { + final TransportBulkClient client = ClientBuilder.builder() + .put(transportClientSettings()) + .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, 2) // avoid EsRejectedExecutionException + .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) + .setMetric(new SimpleBulkMetric()) + .setControl(new SimpleBulkControl()) + .getClient(TransportBulkClient.class); + try { + client.newIndex("test"); + for (int i = 0; i < NUM_ACTIONS; i++) { + client.index("test", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); + } + client.flushIngest(); + client.waitForResponses(TimeValue.timeValueSeconds(30)); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) + .setIndices("test") + .setTypes("test") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("hits = {}", hits); + assertTrue(hits < NUM_ACTIONS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.shutdown(); + if (client.hasThrowable()) { + logger.error("error", client.getThrowable()); + } + assertFalse(client.hasThrowable()); + logger.info("numactions = {}, submitted = {}, succeeded= {}, failed = {}", NUM_ACTIONS, + client.getMetric().getSubmitted().getCount(), + client.getMetric().getSucceeded().getCount(), + client.getMetric().getFailed().getCount()); + assertEquals(NUM_ACTIONS, client.getMetric().getSubmitted().getCount()); + assertEquals(NUM_ACTIONS, client.getMetric().getSucceeded().getCount()); + } + } +} diff --git a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeReplicaTest.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientReplicaTests.java similarity index 54% rename from src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeReplicaTest.java rename to transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientReplicaTests.java index b4fec6b..7168304 100644 --- a/src/integration-test/java/org/xbib/elasticsearch/extras/client/node/BulkNodeReplicaTest.java +++ b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientReplicaTests.java @@ -1,5 +1,11 @@ -package org.xbib.elasticsearch.extras.client.node; +package org.xbib.elasticsearch.client.transport; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.IndexStats; @@ -9,61 +15,76 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.transport.NoNodeAvailableException; -import 
org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.indexing.IndexingStats; -import org.junit.Test; -import org.xbib.elasticsearch.NodeTestUtils; -import org.xbib.elasticsearch.extras.client.Clients; -import org.xbib.elasticsearch.extras.client.SimpleBulkControl; -import org.xbib.elasticsearch.extras.client.SimpleBulkMetric; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.index.shard.IndexingStats; +import org.elasticsearch.test.ESIntegTestCase; +import org.junit.Before; +import org.xbib.elasticsearch.client.ClientBuilder; +import org.xbib.elasticsearch.client.SimpleBulkControl; +import org.xbib.elasticsearch.client.SimpleBulkMetric; import java.util.Map; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; +@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) +@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) +public class TransportBulkClientReplicaTests extends ESIntegTestCase { -public class BulkNodeReplicaTest extends NodeTestUtils { + private static final Logger logger = LogManager.getLogger(TransportBulkClientTests.class.getName()); - private final static ESLogger logger = ESLoggerFactory.getLogger(BulkNodeReplicaTest.class.getSimpleName()); + private String clusterName; + + private TransportAddress address; + + @Before + public void fetchTransportAddress() { + clusterName = client().admin().cluster().prepareClusterStats().get().getClusterName().value(); + NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); + address = nodeInfo.getTransport().getAddress().publishAddress(); + } + + private Settings ourTransportClientSettings() { + return Settings.builder() + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName) + .put("host", address.address().getHostString() + ":" + address.getPort()) + .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created + .build(); + } - @Test public void testReplicaLevel() throws Exception { - // we need nodes for replica levels - startNode("2"); - startNode("3"); - startNode("4"); + //ensureStableCluster(4); - Settings settingsTest1 = Settings.settingsBuilder() - .put("index.number_of_shards", 2) - .put("index.number_of_replicas", 3) + Settings settingsTest1 = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 2) .build(); - Settings settingsTest2 = Settings.settingsBuilder() - .put("index.number_of_shards", 2) + Settings settingsTest2 = Settings.builder() + .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1) .build(); - final BulkNodeClient client = Clients.builder() + final TransportBulkClient client = ClientBuilder.builder() + .put(ourTransportClientSettings()) .setMetric(new SimpleBulkMetric()) .setControl(new SimpleBulkControl()) - .toBulkNodeClient(client("1")); - + .getClient(TransportBulkClient.class); try { client.newIndex("test1", settingsTest1, null) .newIndex("test2", settingsTest2, null); - client.waitForCluster("GREEN", "30s"); + client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30)); for (int i = 0; i < 1234; 
i++) {
-                client.index("test1", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
+                client.index("test1", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}");
             }
             for (int i = 0; i < 1234; i++) {
-                client.index("test2", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
+                client.index("test2", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}");
             }
             client.flushIngest();
-            client.waitForResponses("30s");
+            client.waitForResponses(TimeValue.timeValueSeconds(60));
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
@@ -76,8 +97,8 @@ public class BulkNodeReplicaTest extends NodeTestUtils {
         long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits();
         logger.info("query total hits={}", hits);
         assertEquals(2468, hits);
-        IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(), IndicesStatsAction.INSTANCE)
-                .all();
+        IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(),
+                IndicesStatsAction.INSTANCE).all();
         IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet();
         for (Map.Entry<String, IndexStats> m : response.getIndices().entrySet()) {
             IndexStats indexStats = m.getValue();
@@ -105,5 +126,4 @@ public class BulkNodeReplicaTest extends NodeTestUtils {
             assertFalse(client.hasThrowable());
         }
     }
-
 }
diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientTests.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientTests.java
new file mode 100644
index 0000000..5b9375e
--- /dev/null
+++ b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientTests.java
@@ -0,0 +1,255 @@
+package org.xbib.elasticsearch.client.transport;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.transport.Netty4Plugin;
+import org.junit.Before;
+import org.xbib.elasticsearch.client.ClientBuilder;
+import org.xbib.elasticsearch.client.SimpleBulkControl;
+import org.xbib.elasticsearch.client.SimpleBulkMetric;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class})
+public class TransportBulkClientTests extends ESSingleNodeTestCase {
+
+    private static final Logger logger = LogManager.getLogger(TransportBulkClientTests.class.getName());
+
+    private static final Long MAX_ACTIONS = 10L;
+
+    private static final Long NUM_ACTIONS = 1234L;
+
+    private TransportAddress address;
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return Collections.singletonList(Netty4Plugin.class);
+    }
+
+    @Override
+    public Settings nodeSettings() {
+        return Settings.builder()
+                .put(super.nodeSettings())
+                .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "test-cluster")
+                .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME)
+                .build();
+    }
+
+    private Settings transportClientSettings() {
+        return Settings.builder()
+                .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "test-cluster")
+                .put("host", address.address().getHostString() + ":" + address.getPort())
+                .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created
+                .build();
+    }
+
+    @Before
+    public void fetchTransportAddress() {
+        NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0);
+        address = nodeInfo.getTransport().getAddress().publishAddress();
+    }
+
+    public void testBulkTransportClientNewIndex() throws Exception {
+        logger.info("firing up BulkTransportClient");
+        final TransportBulkClient client = ClientBuilder.builder()
+                .put(transportClientSettings())
+                .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))
+                .setMetric(new SimpleBulkMetric())
+                .setControl(new SimpleBulkControl())
+                .getClient(TransportBulkClient.class);
+        try {
+            logger.info("creating index");
+            client.newIndex("test");
+            if (client.hasThrowable()) {
+                logger.error("error", client.getThrowable());
+            }
+            assertFalse(client.hasThrowable());
+            logger.info("deleting/creating index: start");
+            client.deleteIndex("test")
+                    .newIndex("test")
+                    .deleteIndex("test");
+            logger.info("deleting/creating index: end");
+        } catch (NoNodeAvailableException e) {
+            logger.error("no node available");
+        } finally {
+            if (client.hasThrowable()) {
+                logger.error("error", client.getThrowable());
+            }
+            assertFalse(client.hasThrowable());
+            client.shutdown();
+        }
+    }
+
+    public void testBulkTransportClientMapping() throws Exception {
+        final TransportBulkClient client = ClientBuilder.builder()
+                .put(transportClientSettings())
+                .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5))
+                .setMetric(new SimpleBulkMetric())
+                .setControl(new SimpleBulkControl())
+                .getClient(TransportBulkClient.class);
+        XContentBuilder builder = XContentFactory.jsonBuilder()
+                .startObject()
+                .startObject("test")
+                .startObject("properties")
+                .startObject("location")
+                .field("type", "geo_point")
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject();
+        client.mapping("test", builder.string());
+        client.newIndex("test");
+        GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test");
+        GetMappingsResponse getMappingsResponse =
+                client.client().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet();
+        logger.info("mappings={}", getMappingsResponse.getMappings());
+        if (client.hasThrowable()) {
+            logger.error("error", client.getThrowable());
+        }
+        assertFalse(client.hasThrowable());
+        client.shutdown();
+    }
+
+    public void testBulkTransportClientSingleDoc() throws IOException {
+        logger.info("firing up BulkTransportClient");
+        final TransportBulkClient client = ClientBuilder.builder()
+                .put(transportClientSettings())
+                .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS)
+                .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))
+                .setMetric(new SimpleBulkMetric())
+                .setControl(new SimpleBulkControl())
+                .getClient(TransportBulkClient.class);
+        try {
+            logger.info("creating index");
+            client.newIndex("test");
+            logger.info("indexing one doc");
+            client.index("test", "test", "1", false, "{ \"name\" : \"Hello World\"}"); // single doc ingest
+            logger.info("flush");
+            client.flushIngest();
+            logger.info("wait for responses");
+            client.waitForResponses(TimeValue.timeValueSeconds(30));
+            logger.info("waited for responses");
+        } catch (InterruptedException e) {
+            // ignore
+        } catch (ExecutionException e) {
+            logger.error(e.getMessage(), e);
+        } catch (NoNodeAvailableException e) {
+            logger.warn("skipping, no node available");
+        } finally {
+            assertEquals(1, client.getMetric().getSucceeded().getCount());
+            if (client.hasThrowable()) {
+                logger.error("error", client.getThrowable());
+            }
+            assertFalse(client.hasThrowable());
+            client.shutdown();
+        }
+    }
+
+    public void testBulkTransportClientRandomDocs() throws Exception {
+        long numactions = NUM_ACTIONS;
+        final TransportBulkClient client = ClientBuilder.builder()
+                .put(transportClientSettings())
+                .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS)
+                .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))
+                .setMetric(new SimpleBulkMetric())
+                .setControl(new SimpleBulkControl())
+                .getClient(TransportBulkClient.class);
+        try {
+            client.newIndex("test");
+            for (int i = 0; i < NUM_ACTIONS; i++) {
+                client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}");
+            }
+            client.flushIngest();
+            client.waitForResponses(TimeValue.timeValueSeconds(30));
+        } catch (NoNodeAvailableException e) {
+            logger.warn("skipping, no node available");
+        } finally {
+            if (client.hasThrowable()) {
+                logger.error("error", client.getThrowable());
+            }
+            logger.info("assuring {} == {}", numactions, client.getMetric().getSucceeded().getCount());
+            assertEquals(numactions, client.getMetric().getSucceeded().getCount());
+            assertFalse(client.hasThrowable());
+            client.shutdown();
+        }
+    }
+
+    public void testBulkTransportClientThreadedRandomDocs() throws Exception {
+        int maxthreads = Runtime.getRuntime().availableProcessors();
+        long maxactions = MAX_ACTIONS;
+        final long maxloop = NUM_ACTIONS;
+        logger.info("TransportClient max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop);
+        final TransportBulkClient client = ClientBuilder.builder()
+                .put(transportClientSettings())
+                .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions)
+                .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) // = effectively disables autoflush for this test
+                .setMetric(new SimpleBulkMetric())
+                .setControl(new SimpleBulkControl())
+                .getClient(TransportBulkClient.class);
+        try {
+            client.newIndex("test").startBulk("test", 30 * 1000, 1000);
+            ExecutorService executorService = Executors.newFixedThreadPool(maxthreads);
+            final CountDownLatch latch = new CountDownLatch(maxthreads);
+            for (int i = 0; i < maxthreads; i++) {
+                executorService.execute(() -> {
+                    for (int i1 = 0; i1 < maxloop; i1++) {
+                        client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}");
+                    }
+                    latch.countDown();
+                });
+            }
+            logger.info("waiting for max 30 seconds...");
+            latch.await(30, TimeUnit.SECONDS);
+            logger.info("client flush ...");
+            client.flushIngest();
+            client.waitForResponses(TimeValue.timeValueSeconds(30));
+            logger.info("executor service to be shut down ...");
+            executorService.shutdown();
+            logger.info("executor service is shut down");
+            client.stopBulk("test");
+        } catch (NoNodeAvailableException e) {
+            logger.warn("skipping, no node available");
+        } finally {
+            logger.info("assuring {} == {}", maxthreads * maxloop, client.getMetric().getSucceeded().getCount());
+            assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount());
+            if (client.hasThrowable()) {
+                logger.error("error", client.getThrowable());
+            }
+            assertFalse(client.hasThrowable());
+            client.refreshIndex("test");
+            SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE)
+                    .setIndices("test")
+                    .setQuery(QueryBuilders.matchAllQuery())
+                    .setSize(0);
+            assertEquals(maxthreads * maxloop,
+                    searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
+            client.shutdown();
+        }
+    }
+}
diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientUpdateReplicaLevelTests.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientUpdateReplicaLevelTests.java
new file mode 100644
index 0000000..d47ba30
--- /dev/null
+++ b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientUpdateReplicaLevelTests.java
@@ -0,0 +1,82 @@
+package org.xbib.elasticsearch.client.transport;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.junit.Before;
+import org.xbib.elasticsearch.client.ClientBuilder;
+import org.xbib.elasticsearch.client.SimpleBulkControl;
+import org.xbib.elasticsearch.client.SimpleBulkMetric;
+
+@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class})
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 3)
+public class TransportBulkClientUpdateReplicaLevelTests extends ESIntegTestCase {
+
+    private static final Logger logger = LogManager.getLogger(TransportBulkClientUpdateReplicaLevelTests.class.getName());
+
+    private String clusterName;
+
+    private TransportAddress address;
+
+    @Before
+    public void fetchClusterInfo() {
+        clusterName = client().admin().cluster().prepareClusterStats().get().getClusterName().value();
+        NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0);
+        address = nodeInfo.getTransport().getAddress().publishAddress();
+    }
+
+    private Settings ourTransportClientSettings() {
+        return Settings.builder()
+                .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName)
+                .put("host", address.address().getHostString() + ":" + address.getPort())
+                .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created
+                .build();
+    }
+
+    public void testUpdateReplicaLevel() throws Exception {
+
+        //ensureStableCluster(3);
+
+        int shardsAfterReplica;
+
+        Settings settings = Settings.builder()
+                .put("index.number_of_shards", 2)
+                .put("index.number_of_replicas", 0)
+                .build();
+
+        final TransportBulkClient client = ClientBuilder.builder()
+                .put(ourTransportClientSettings())
+                .setMetric(new SimpleBulkMetric())
+                .setControl(new SimpleBulkControl())
+                .getClient(TransportBulkClient.class);
+
+        try {
+            client.newIndex("replicatest", settings, null);
+            client.waitForCluster("GREEN", TimeValue.timeValueSeconds(30));
+            for (int i = 0; i < 12345; i++) {
+                client.index("replicatest", "replicatest", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}");
+            }
+            client.flushIngest();
+            client.waitForResponses(TimeValue.timeValueSeconds(30));
+            shardsAfterReplica = client.updateReplicaLevel("replicatest", 3);
+            assertEquals(shardsAfterReplica, 2 * (3 + 1));
+        } catch (NoNodeAvailableException e) {
+            logger.warn("skipping, no node available");
+        } finally {
+            client.shutdown();
+            if (client.hasThrowable()) {
+                logger.error("error", client.getThrowable());
+            }
+            assertFalse(client.hasThrowable());
+        }
+    }
+
+}
diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/package-info.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/package-info.java
new file mode 100644
index 0000000..3b21564
--- /dev/null
+++ b/transport/src/test/java/org/xbib/elasticsearch/client/transport/package-info.java
@@ -0,0 +1,4 @@
+/**
+ * Classes for testing the transport client.
+ */
+package org.xbib.elasticsearch.client.transport;
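
For readers of this patch, here is a minimal usage sketch (not part of the diff) of the bulk client API that the tests above exercise. The builder keys and client methods (MAX_ACTIONS_PER_REQUEST, FLUSH_INTERVAL, newIndex, index, flushIngest, waitForResponses, shutdown) are taken from the test code in this patch; the cluster name, host, and index/type names are illustrative assumptions.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.xbib.elasticsearch.client.ClientBuilder;
import org.xbib.elasticsearch.client.SimpleBulkControl;
import org.xbib.elasticsearch.client.SimpleBulkMetric;
import org.xbib.elasticsearch.client.transport.TransportBulkClient;

public class BulkClientUsageSketch {

    public static void main(String[] args) throws Exception {
        // assumed cluster name and transport endpoint; adjust to your cluster
        Settings settings = Settings.builder()
                .put("cluster.name", "my-cluster")
                .put("host", "localhost:9300")
                .build();
        // build the bulk client the same way the tests do
        TransportBulkClient client = ClientBuilder.builder()
                .put(settings)
                .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, 1000L)
                .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5))
                .setMetric(new SimpleBulkMetric())
                .setControl(new SimpleBulkControl())
                .getClient(TransportBulkClient.class);
        try {
            client.newIndex("example");
            // index a single document (create flag false, as in the tests)
            client.index("example", "doc", null, false, "{ \"name\" : \"Hello World\" }");
            client.flushIngest();
            client.waitForResponses(TimeValue.timeValueSeconds(30));
        } finally {
            // always release the bulk processor and transport resources
            client.shutdown();
        }
    }
}

As in the tests, the client is always shut down in a finally block so the bulk processor and transport threads are released even when indexing fails.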