diff --git a/.travis.yml b/.travis.yml index ee1dfd1..dd9c325 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,7 @@ language: java sudo: required jdk: - - oraclejdk8 + - openjdk11 cache: directories: - $HOME/.m2 diff --git a/api/build.gradle b/api/build.gradle deleted file mode 100644 index 6344343..0000000 --- a/api/build.gradle +++ /dev/null @@ -1,11 +0,0 @@ - -dependencies { - compile "io.netty:netty-buffer:${rootProject.property('netty.version')}" - compile "io.netty:netty-codec-http:${rootProject.property('netty.version')}" - compile "io.netty:netty-handler:${rootProject.property('netty.version')}" - compile "org.xbib.elasticsearch:elasticsearch:${rootProject.property('elasticsearch-server.version')}" -} - -jar { - baseName "${rootProject.name}-api" -} diff --git a/api/config/checkstyle/checkstyle.xml b/api/config/checkstyle/checkstyle.xml deleted file mode 100644 index 8cb4438..0000000 --- a/api/config/checkstyle/checkstyle.xml +++ /dev/null @@ -1,321 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/api/src/docs/asciidoc/css/foundation.css b/api/src/docs/asciidoc/css/foundation.css deleted file mode 100644 index 27be611..0000000 --- a/api/src/docs/asciidoc/css/foundation.css +++ /dev/null @@ -1,684 +0,0 @@ -/*! normalize.css v2.1.2 | MIT License | git.io/normalize */ -/* ========================================================================== HTML5 display definitions ========================================================================== */ -/** Correct `block` display not defined in IE 8/9. 
*/ -article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; } - -/** Correct `inline-block` display not defined in IE 8/9. */ -audio, canvas, video { display: inline-block; } - -/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */ -audio:not([controls]) { display: none; height: 0; } - -/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */ -[hidden], template { display: none; } - -script { display: none !important; } - -/* ========================================================================== Base ========================================================================== */ -/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */ -html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ } - -/** Remove default margin. */ -body { margin: 0; } - -/* ========================================================================== Links ========================================================================== */ -/** Remove the gray background color from active links in IE 10. */ -a { background: transparent; } - -/** Address `outline` inconsistency between Chrome and other browsers. */ -a:focus { outline: thin dotted; } - -/** Improve readability when focused and also mouse hovered in all browsers. */ -a:active, a:hover { outline: 0; } - -/* ========================================================================== Typography ========================================================================== */ -/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */ -h1 { font-size: 2em; margin: 0.67em 0; } - -/** Address styling not present in IE 8/9, Safari 5, and Chrome. 
*/ -abbr[title] { border-bottom: 1px dotted; } - -/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ -b, strong { font-weight: bold; } - -/** Address styling not present in Safari 5 and Chrome. */ -dfn { font-style: italic; } - -/** Address differences between Firefox and other browsers. */ -hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; } - -/** Address styling not present in IE 8/9. */ -mark { background: #ff0; color: #000; } - -/** Correct font family set oddly in Safari 5 and Chrome. */ -code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; } - -/** Improve readability of pre-formatted text in all browsers. */ -pre { white-space: pre-wrap; } - -/** Set consistent quote types. */ -q { quotes: "\201C" "\201D" "\2018" "\2019"; } - -/** Address inconsistent and variable font size in all browsers. */ -small { font-size: 80%; } - -/** Prevent `sub` and `sup` affecting `line-height` in all browsers. */ -sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } - -sup { top: -0.5em; } - -sub { bottom: -0.25em; } - -/* ========================================================================== Embedded content ========================================================================== */ -/** Remove border when inside `a` element in IE 8/9. */ -img { border: 0; } - -/** Correct overflow displayed oddly in IE 9. */ -svg:not(:root) { overflow: hidden; } - -/* ========================================================================== Figures ========================================================================== */ -/** Address margin not present in IE 8/9 and Safari 5. */ -figure { margin: 0; } - -/* ========================================================================== Forms ========================================================================== */ -/** Define consistent border, margin, and padding. 
*/ -fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } - -/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */ -legend { border: 0; /* 1 */ padding: 0; /* 2 */ } - -/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ -button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ } - -/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ -button, input { line-height: normal; } - -/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */ -button, select { text-transform: none; } - -/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */ -button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ } - -/** Re-set default cursor for disabled elements. */ -button[disabled], html input[disabled] { cursor: default; } - -/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ -input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } - -/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). 
*/ -input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; } - -/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ -input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } - -/** Remove inner padding and border in Firefox 4+. */ -button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } - -/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */ -textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ } - -/* ========================================================================== Tables ========================================================================== */ -/** Remove most spacing between table cells. */ -table { border-collapse: collapse; border-spacing: 0; } - -meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; } - -meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; } - -meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; } - -*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } - -html, body { font-size: 100%; } - -body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; } - -a:hover { cursor: pointer; } - -img, object, embed { max-width: 100%; height: auto; } - -object, embed { height: 100%; } - -img { -ms-interpolation-mode: bicubic; } - -#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; } - -.left { float: left !important; } - 
-.right { float: right !important; } - -.text-left { text-align: left !important; } - -.text-right { text-align: right !important; } - -.text-center { text-align: center !important; } - -.text-justify { text-align: justify !important; } - -.hide { display: none; } - -.antialiased { -webkit-font-smoothing: antialiased; } - -img { display: inline-block; vertical-align: middle; } - -textarea { height: auto; min-height: 50px; } - -select { width: 100%; } - -object, svg { display: inline-block; vertical-align: middle; } - -.center { margin-left: auto; margin-right: auto; } - -.spread { width: 100%; } - -p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; } - -.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; } - -/* Typography resets */ -div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; } - -/* Default Link Styles */ -a { color: #2ba6cb; text-decoration: none; line-height: inherit; } -a:hover, a:focus { color: #2795b6; } -a img { border: none; } - -/* Default paragraph styles */ -p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } -p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } - -/* Default header styles */ -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", 
Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; } -h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; } - -h1 { font-size: 2.125em; } - -h2 { font-size: 1.6875em; } - -h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.375em; } - -h4 { font-size: 1.125em; } - -h5 { font-size: 1.125em; } - -h6 { font-size: 1em; } - -hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; } - -/* Helpful Typography Defaults */ -em, i { font-style: italic; line-height: inherit; } - -strong, b { font-weight: bold; line-height: inherit; } - -small { font-size: 60%; line-height: inherit; } - -code { font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; } - -/* Lists */ -ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } - -ul, ol { margin-left: 1.5em; } -ul.no-bullet, ol.no-bullet { margin-left: 1.5em; } - -/* Unordered Lists */ -ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ } -ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; } -ul.square { list-style-type: square; } -ul.circle { list-style-type: circle; } -ul.disc { list-style-type: disc; } -ul.no-bullet { list-style: none; } - -/* Ordered Lists */ -ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } - -/* Definition Lists */ -dl dt { margin-bottom: 0.3125em; font-weight: bold; } -dl dd { margin-bottom: 1.25em; } - -/* Abbreviations */ -abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; } - -abbr { text-transform: none; } - -/* Blockquotes */ 
-blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; } -blockquote cite { display: block; font-size: 0.8125em; color: #555555; } -blockquote cite:before { content: "\2014 \0020"; } -blockquote cite a, blockquote cite a:visited { color: #555555; } - -blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; } - -/* Microformats */ -.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; } -.vcard li { margin: 0; display: block; } -.vcard .fn { font-weight: bold; font-size: 0.9375em; } - -.vevent .summary { font-weight: bold; } -.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; } - -@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } - h1 { font-size: 2.75em; } - h2 { font-size: 2.3125em; } - h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } - h4 { font-size: 1.4375em; } } -/* Tables */ -table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; } -table thead, table tfoot { background: whitesmoke; font-weight: bold; } -table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; } -table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; } -table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; } -table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; } - -body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; } - -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } - -.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; } 
-.clearfix:after, .float-group:after { clear: both; } - -*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; } -*:not(pre) > code.nobreak { word-wrap: normal; } -*:not(pre) > code.nowrap { white-space: nowrap; } - -pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; } - -em em { font-style: normal; } - -strong strong { font-weight: normal; } - -.keyseq { color: #555555; } - -kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } - -.keyseq kbd:first-child { margin-left: 0; } - -.keyseq kbd:last-child { margin-right: 0; } - -.menuseq, .menu { color: #090909; } - -b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; } - -b.button:before { content: "["; padding: 0 3px 0 2px; } - -b.button:after { content: "]"; padding: 0 2px 0 3px; } - -#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } -#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; } -#header:after, #content:after, #footnotes:after, #footer:after { clear: both; } - -#content { margin-top: 1.25em; } - -#content:before { content: none; } - -#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; 
} -#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; } -#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; } -#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; } -#header .details span:first-child { margin-left: -0.125em; } -#header .details span.email a { color: #6f6f6f; } -#header .details br { display: none; } -#header .details br + span:before { content: "\00a0\2013\00a0"; } -#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; } -#header .details br + span#revremark:before { content: "\00a0|\00a0"; } -#header #revnumber { text-transform: capitalize; } -#header #revnumber:after { content: "\00a0"; } - -#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } - -#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; } -#toc > ul { margin-left: 0.125em; } -#toc ul.sectlevel0 > li > a { font-style: italic; } -#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } -#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; } -#toc li { line-height: 1.3334; margin-top: 0.3334em; } -#toc a { text-decoration: none; } -#toc a:active { text-decoration: underline; } - -#toctitle { color: #6f6f6f; font-size: 1.2em; } - -@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } - body.toc2 { padding-left: 15em; padding-right: 0; } - #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; 
border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } - #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } - #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } - #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } - #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } - body.toc2.toc-right { padding-left: 0; padding-right: 15em; } - body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } } -@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } - #toc.toc2 { width: 20em; } - #toc.toc2 #toctitle { font-size: 1.375em; } - #toc.toc2 > ul { font-size: 0.95em; } - #toc.toc2 ul ul { padding-left: 1.25em; } - body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } -#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } -#content #toc > :first-child { margin-top: 0; } -#content #toc > :last-child { margin-bottom: 0; } - -#footer { max-width: 100%; background-color: #222222; padding: 1.25em; } - -#footer-text { color: #dddddd; line-height: 1.44; } - -.sect1 { padding-bottom: 0.625em; } - -@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } } -.sect1 + .sect1 { border-top: 1px solid #dddddd; } - -#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; } -#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > 
a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } -#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } -#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; } -#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; } - -.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } - -.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; } - -table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; } - -.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; } - -table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; } - -.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } -.admonitionblock 
> table td.icon { text-align: center; width: 80px; } -.admonitionblock > table td.icon img { max-width: initial; } -.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; } -.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; } -.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } - -.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; } -.exampleblock > .content > :first-child { margin-top: 0; } -.exampleblock > .content > :last-child { margin-bottom: 0; } - -.sidebarblock { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } -.sidebarblock > :first-child { margin-top: 0; } -.sidebarblock > :last-child { margin-bottom: 0; } -.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; } - -.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } - -.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; } -.sidebarblock .literalblock pre, .sidebarblock .listingblock 
pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; } - -.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; } -.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; } -@media only screen and (min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } } -@media only screen and (min-width: 1280px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } } - -.literalblock.output pre { color: #eeeeee; background-color: black; } - -.listingblock pre.highlightjs { padding: 0; } -.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; } - -.listingblock > .content { position: relative; } - -.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; } - -.listingblock:hover code[data-lang]:before { display: block; } - -.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; } - -.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; } - -table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } - -table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; } - -table.pyhltable td.code { padding-left: .75em; 
padding-right: 0; } - -pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; } - -pre.pygments .lineno { display: inline-block; margin-right: .25em; } - -table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; } - -.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } -.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; } -.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } -.quoteblock blockquote { margin: 0; padding: 0; border: 0; } -.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); } -.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; } -.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; } -.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; } -.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; } -.quoteblock .quoteblock blockquote:before { display: none; } - -.verseblock { margin: 0 1em 1.25em 1em; } -.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; } -.verseblock pre strong { font-weight: 400; } -.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } - -.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; } -.quoteblock .attribution br, .verseblock .attribution br { display: none; } -.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; } - -.quoteblock.abstract { margin: 0 0 1.25em 
0; display: block; } -.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; } -.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; } - -table.tableblock { max-width: 100%; border-collapse: separate; } -table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; } - -table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; } - -table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; } - -table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; } - -table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; } - -table.grid-all * > tr > .tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; } - -table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; } - -table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; } - -table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; } - -table.frame-all { border-width: 1px; } - -table.frame-sides { border-width: 0 1px; } - -table.frame-topbot { border-width: 1px 0; } - -th.halign-left, td.halign-left { text-align: left; } - -th.halign-right, td.halign-right { text-align: right; } - -th.halign-center, td.halign-center { text-align: center; } - -th.valign-top, td.valign-top { vertical-align: top; } - -th.valign-bottom, td.valign-bottom { vertical-align: bottom; } - 
-th.valign-middle, td.valign-middle { vertical-align: middle; } - -table thead th, table tfoot th { font-weight: bold; } - -tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; } - -tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; } - -p.tableblock > code:only-child { background: none; padding: 0; } - -p.tableblock { font-size: 1em; } - -td > div.verse { white-space: pre; } - -ol { margin-left: 1.75em; } - -ul li ol { margin-left: 1.5em; } - -dl dd { margin-left: 1.125em; } - -dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } - -ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } - -ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; } - -ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; } - -ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; } - -ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; } - -ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; } -ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; } -ul.inline > li > * { display: block; } - -.unstyled dl dt { font-weight: normal; font-style: normal; } - -ol.arabic { list-style-type: decimal; } - -ol.decimal { list-style-type: decimal-leading-zero; } - -ol.loweralpha { list-style-type: lower-alpha; } - -ol.upperalpha { list-style-type: upper-alpha; } - -ol.lowerroman { list-style-type: lower-roman; } - -ol.upperroman { list-style-type: upper-roman; } - -ol.lowergreek { list-style-type: lower-greek; } - -.hdlist > table, .colist > table { border: 0; background: none; } -.hdlist > table > tbody > tr, .colist > table > tbody > tr { 
background: none; } - -td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } - -td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } - -.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } - -.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; } -.colist > table tr > td:first-of-type img { max-width: initial; } -.colist > table tr > td:last-of-type { padding: 0.25em 0; } - -.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; } - -.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; } -.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; } -.imageblock > .title { margin-bottom: 0; } -.imageblock.thumb, .imageblock.th { border-width: 6px; } -.imageblock.thumb > .title, .imageblock.th > .title { padding: 0 0.125em; } - -.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } -.image.left { margin-right: 0.625em; } -.image.right { margin-left: 0.625em; } - -a.image { text-decoration: none; display: inline-block; } -a.image object { pointer-events: none; } - -sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } -sup.footnote a, sup.footnoteref a { text-decoration: none; } -sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } - -#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } -#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; } -#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; } -#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; } -#footnotes .footnote:last-of-type { margin-bottom: 0; } -#content 
#footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } - -.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; } -.gist .file-data > table td.line-data { width: 99%; } - -div.unbreakable { page-break-inside: avoid; } - -.big { font-size: larger; } - -.small { font-size: smaller; } - -.underline { text-decoration: underline; } - -.overline { text-decoration: overline; } - -.line-through { text-decoration: line-through; } - -.aqua { color: #00bfbf; } - -.aqua-background { background-color: #00fafa; } - -.black { color: black; } - -.black-background { background-color: black; } - -.blue { color: #0000bf; } - -.blue-background { background-color: #0000fa; } - -.fuchsia { color: #bf00bf; } - -.fuchsia-background { background-color: #fa00fa; } - -.gray { color: #606060; } - -.gray-background { background-color: #7d7d7d; } - -.green { color: #006000; } - -.green-background { background-color: #007d00; } - -.lime { color: #00bf00; } - -.lime-background { background-color: #00fa00; } - -.maroon { color: #600000; } - -.maroon-background { background-color: #7d0000; } - -.navy { color: #000060; } - -.navy-background { background-color: #00007d; } - -.olive { color: #606000; } - -.olive-background { background-color: #7d7d00; } - -.purple { color: #600060; } - -.purple-background { background-color: #7d007d; } - -.red { color: #bf0000; } - -.red-background { background-color: #fa0000; } - -.silver { color: #909090; } - -.silver-background { background-color: #bcbcbc; } - -.teal { color: #006060; } - -.teal-background { background-color: #007d7d; } - -.white { color: #bfbfbf; } - -.white-background { background-color: #fafafa; } - -.yellow { color: #bfbf00; } - -.yellow-background { background-color: #fafa00; } - -span.icon > .fa { cursor: default; } - -.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } -.admonitionblock td.icon .icon-note:before { 
content: "\f05a"; color: #207c98; } -.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } -.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; } -.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; } -.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; } - -.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } -.conum[data-value] * { color: #fff !important; } -.conum[data-value] + b { display: none; } -.conum[data-value]:after { content: attr(data-value); } -pre .conum[data-value] { position: relative; top: -0.125em; } - -b.conum * { color: inherit !important; } - -.conum:not([data-value]):empty { display: none; } - -.literalblock pre, .listingblock pre { background: #eeeeee; } diff --git a/api/src/docs/asciidoclet/overview.adoc b/api/src/docs/asciidoclet/overview.adoc deleted file mode 100644 index 7947331..0000000 --- a/api/src/docs/asciidoclet/overview.adoc +++ /dev/null @@ -1,4 +0,0 @@ -= Elasticsearch Java client -Jörg Prante -Version 5.4.0.0 - diff --git a/backup/XbibTransportService.java b/backup/XbibTransportService.java deleted file mode 100644 index c2dc502..0000000 --- a/backup/XbibTransportService.java +++ /dev/null @@ -1,1047 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import 
org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.metrics.MeanMetric; -import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.FutureTransportResponseHandler; -import org.elasticsearch.transport.NodeDisconnectedException; -import org.elasticsearch.transport.NodeNotConnectedException; -import org.elasticsearch.transport.PlainTransportFuture; -import org.elasticsearch.transport.ReceiveTimeoutTransportException; -import org.elasticsearch.transport.RemoteTransportException; -import org.elasticsearch.transport.RequestHandlerRegistry; -import org.elasticsearch.transport.ResponseHandlerFailureTransportException; -import org.elasticsearch.transport.SendRequestTransportException; -import 
org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportFuture; -import org.elasticsearch.transport.TransportInterceptor; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportResponseOptions; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ScheduledFuture; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.function.Supplier; - -/** - * - */ -public class XbibTransportService extends AbstractLifecycleComponent { - - private static final String HANDSHAKE_ACTION_NAME = "internal:transport/handshake"; - - private static final Setting> TRACE_LOG_INCLUDE_SETTING = - Setting.listSetting("transport.tracer.include", Collections.emptyList(), Function.identity(), - Property.Dynamic, Property.NodeScope); - - private static final Setting> TRACE_LOG_EXCLUDE_SETTING = - Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", - TransportLivenessAction.NAME), Function.identity(), Property.Dynamic, Property.NodeScope); - - private final CountDownLatch blockIncomingRequestsLatch = new CountDownLatch(1); - - private final Transport transport; - - private final ThreadPool threadPool; - - private final ClusterName clusterName; - - private final TaskManager taskManager; - - private final TransportInterceptor.AsyncSender asyncSender; - - private final Function localNodeFactory; - 
- private volatile Map> requestHandlers = Collections.emptyMap(); - - private final Object requestHandlerMutex = new Object(); - - private final ConcurrentMapLong> clientHandlers = - ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); - - private final TransportInterceptor interceptor; - - // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they - // do show up, we can print more descriptive information about them - private final Map timeoutInfoHandlers = - Collections.synchronizedMap(new LinkedHashMap(100, .75F, true) { - private static final long serialVersionUID = 9174428975922394994L; - - @Override - protected boolean removeEldestEntry(Map.Entry eldest) { - return size() > 100; - } - }); - - private final Logger tracerLog; - - private volatile String[] tracerLogInclude; - - private volatile String[] tracerLogExclude; - - private volatile DiscoveryNode localNode = null; - - private final Transport.Connection localNodeConnection = new Transport.Connection() { - @Override - public DiscoveryNode getNode() { - return localNode; - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { - sendLocalRequest(requestId, action, request, options); - } - - @Override - public void close() throws IOException { - } - }; - - /** - * Build the service. - * - * @param clusterSettings if non null the the {@linkplain XbibTransportService} will register - * with the {@link ClusterSettings} for settings updates for - * {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. 
- */ - XbibTransportService(Settings settings, Transport transport, ThreadPool threadPool, - TransportInterceptor transportInterceptor, - Function localNodeFactory, - @Nullable ClusterSettings clusterSettings) { - super(settings); - this.transport = transport; - this.threadPool = threadPool; - this.localNodeFactory = localNodeFactory; - this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - setTracerLogInclude(TRACE_LOG_INCLUDE_SETTING.get(settings)); - setTracerLogExclude(TRACE_LOG_EXCLUDE_SETTING.get(settings)); - tracerLog = Loggers.getLogger(logger, ".tracer"); - taskManager = createTaskManager(); - this.interceptor = transportInterceptor; - this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); - if (clusterSettings != null) { - clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude); - clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude); - } - } - - private TaskManager createTaskManager() { - return new TaskManager(settings); - } - - private void setTracerLogInclude(List tracerLogInclude) { - this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY); - } - - private void setTracerLogExclude(List tracerLogExclude) { - this.tracerLogExclude = tracerLogExclude.toArray(Strings.EMPTY_ARRAY); - } - - @Override - protected void doStart() { - rxMetric.clear(); - txMetric.clear(); - transport.setTransportService(this); - transport.start(); - if (transport.boundAddress() != null && logger.isInfoEnabled()) { - logger.info("{}", transport.boundAddress()); - for (Map.Entry entry : transport.profileBoundAddresses().entrySet()) { - logger.info("profile [{}]: {}", entry.getKey(), entry.getValue()); - } - } - localNode = localNodeFactory.apply(transport.boundAddress()); - registerRequestHandler(HANDSHAKE_ACTION_NAME, - () -> HandshakeRequest.INSTANCE, - ThreadPool.Names.SAME, - (request, channel) -> channel.sendResponse(new 
HandshakeResponse(localNode, clusterName, - localNode.getVersion()))); - } - - @Override - protected void doStop() { - try { - transport.stop(); - } finally { - // in case the transport is not connected to our local node (thus cleaned on node disconnect) - // make sure to clean any leftover on going handles - for (Map.Entry> entry : clientHandlers.entrySet()) { - final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey()); - if (holderToNotify != null) { - // callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows - threadPool.generic().execute(new AbstractRunnable() { - @Override - public void onRejection(Exception e) { - // if we get rejected during node shutdown we don't wanna bubble it up - logger.debug((Supplier) () -> new ParameterizedMessage( - "failed to notify response handler on rejection, action: {}", - holderToNotify.action()), - e); - } - @Override - public void onFailure(Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage( - "failed to notify response handler on exception, action: {}", - holderToNotify.action()), - e); - } - @Override - public void doRun() { - TransportException ex = new TransportException("transport stopped, action: " + - holderToNotify.action()); - holderToNotify.handler().handleException(ex); - } - }); - } - } - } - } - - @Override - protected void doClose() { - transport.close(); - } - - /** - * Start accepting incoming requests. - * when the transport layer starts up it will block any incoming requests until - * this method is called - */ - final void acceptIncomingRequests() { - blockIncomingRequestsLatch.countDown(); - } - - /** - * Returns true iff the given node is already connected. - */ - boolean nodeConnected(DiscoveryNode node) { - return isLocalNode(node) || transport.nodeConnected(node); - } - - /** - * Connect to the specified node. 
- * - * @param node the node to connect to - */ - void connectToNode(final DiscoveryNode node) { - if (isLocalNode(node)) { - return; - } - transport.connectToNode(node, null, (newConnection, actualProfile) -> - handshake(newConnection, actualProfile.getHandshakeTimeout().millis())); - } - - /** - * Executes a high-level handshake using the given connection - * and returns the discovery node of the node the connection - * was established with. The handshake will fail if the cluster - * name on the target node mismatches the local cluster name. - * - * @param connection the connection to a specific node - * @param handshakeTimeout handshake timeout - * @return the connected node - * @throws ConnectTransportException if the connection failed - * @throws IllegalStateException if the handshake failed - */ - private DiscoveryNode handshake(final Transport.Connection connection, - final long handshakeTimeout) throws ConnectTransportException { - return handshake(connection, handshakeTimeout, clusterName::equals); - } - - /** - * Executes a high-level handshake using the given connection - * and returns the discovery node of the node the connection - * was established with. The handshake will fail if the cluster - * name on the target node doesn't match the local cluster name. 
- * - * @param connection the connection to a specific node - * @param handshakeTimeout handshake timeout - * @param clusterNamePredicate cluster name validation predicate - * @return the connected node - * @throws ConnectTransportException if the connection failed - * @throws IllegalStateException if the handshake failed - */ - private DiscoveryNode handshake(final Transport.Connection connection, - final long handshakeTimeout, Predicate clusterNamePredicate) - throws ConnectTransportException { - final HandshakeResponse response; - final DiscoveryNode node = connection.getNode(); - try { - PlainTransportFuture futureHandler = new PlainTransportFuture<>( - new FutureTransportResponseHandler() { - @Override - public HandshakeResponse newInstance() { - return new HandshakeResponse(); - } - }); - sendRequest(connection, HANDSHAKE_ACTION_NAME, HandshakeRequest.INSTANCE, - TransportRequestOptions.builder().withTimeout(handshakeTimeout).build(), futureHandler); - response = futureHandler.txGet(); - } catch (Exception e) { - throw new IllegalStateException("handshake failed with " + node, e); - } - if (!clusterNamePredicate.test(response.clusterName)) { - throw new IllegalStateException("handshake failed, mismatched cluster name [" + - response.clusterName + "] - " + node); - } else if (!response.version.isCompatible(localNode.getVersion())) { - throw new IllegalStateException("handshake failed, incompatible version [" + - response.version + "] - " + node); - } - return response.discoveryNode; - } - - void disconnectFromNode(DiscoveryNode node) { - if (isLocalNode(node)) { - return; - } - transport.disconnectFromNode(node); - } - - TransportFuture submitRequest(DiscoveryNode node, String action, - TransportRequest request, - TransportRequestOptions options, - TransportResponseHandler handler) - throws TransportException { - PlainTransportFuture futureHandler = new PlainTransportFuture<>(handler); - try { - Transport.Connection connection = getConnection(node); - 
sendRequest(connection, action, request, options, futureHandler); - } catch (NodeNotConnectedException ex) { - futureHandler.handleException(ex); - } - return futureHandler; - } - - final void sendRequest(final DiscoveryNode node, final String action, - final TransportRequest request, - final TransportRequestOptions options, - TransportResponseHandler handler) { - try { - Transport.Connection connection = getConnection(node); - sendRequest(connection, action, request, options, handler); - } catch (NodeNotConnectedException ex) { - handler.handleException(ex); - } - } - - private void sendRequest(final Transport.Connection connection, final String action, - final TransportRequest request, - final TransportRequestOptions options, - TransportResponseHandler handler) { - - asyncSender.sendRequest(connection, action, request, options, handler); - } - - /** - * Returns either a real transport connection or a local node connection - * if we are using the local node optimization. - * @throws NodeNotConnectedException if the given node is not connected - */ - private Transport.Connection getConnection(DiscoveryNode node) { - if (isLocalNode(node)) { - return localNodeConnection; - } else { - return transport.getConnection(node); - } - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - private void sendRequestInternal(final Transport.Connection connection, - final String action, - final TransportRequest request, - final TransportRequestOptions options, - TransportResponseHandler handler) { - if (connection == null) { - throw new IllegalStateException("can't send request to a null connection"); - } - DiscoveryNode node = connection.getNode(); - final long requestId = transport.newRequestId(); - final TimeoutHandler timeoutHandler; - try { - if (options.timeout() == null) { - timeoutHandler = null; - } else { - timeoutHandler = new TimeoutHandler(requestId); - } - Supplier storedContextSupplier = - threadPool.getThreadContext().newRestorableContext(true); - 
TransportResponseHandler responseHandler = - new ContextRestoreResponseHandler<>(storedContextSupplier, handler); - clientHandlers.put(requestId, - new RequestHolder(responseHandler, connection.getNode(), action, timeoutHandler)); - if (lifecycle.stoppedOrClosed()) { - // if we are not started the exception handling will remove the RequestHolder again - // and calls the handler to notify the caller. It will only notify if the toStop code - // hasn't done the work yet. - throw new TransportException("TransportService is closed stopped can't send request"); - } - if (timeoutHandler != null) { - assert options.timeout() != null; - timeoutHandler.future = threadPool.schedule(options.timeout(), ThreadPool.Names.GENERIC, timeoutHandler); - } - connection.sendRequest(requestId, action, request, options); - } catch (final Exception e) { - // usually happen either because we failed to connect to the node - // or because we failed serializing the message - final RequestHolder holderToNotify = clientHandlers.remove(requestId); - // If holderToNotify == null then handler has already been taken care of. 
- if (holderToNotify != null) { - holderToNotify.cancelTimeout(); - // callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows - final SendRequestTransportException sendRequestException = - new SendRequestTransportException(node, action, e); - threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() { - @Override - public void onRejection(Exception e) { - // if we get rejected during node shutdown we don't wanna bubble it up - logger.debug((Supplier) () -> new ParameterizedMessage( - "failed to notify response handler on rejection, action: {}", - holderToNotify.action()), e); - } - @Override - public void onFailure(Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage( - "failed to notify response handler on exception, action: {}", - holderToNotify.action()), e); - } - @Override - protected void doRun() throws Exception { - holderToNotify.handler().handleException(sendRequestException); - } - }); - } else { - logger.debug("Exception while sending request, handler likely already notified due to timeout", e); - } - } - } - - private void sendLocalRequest(long requestId, final String action, final TransportRequest request, - TransportRequestOptions options) { - final DirectResponseChannel channel = new DirectResponseChannel(logger, localNode, action, requestId, adapter, - threadPool); - try { - adapter.onRequestSent(localNode, requestId, action, request, options); - adapter.onRequestReceived(requestId, action); - final RequestHandlerRegistry reg = adapter.getRequestHandler(action); - if (reg == null) { - throw new ActionNotFoundTransportException("Action [" + action + "] not found"); - } - final String executor = reg.getExecutor(); - if (ThreadPool.Names.SAME.equals(executor)) { - reg.processMessageReceived(request, channel); - } else { - threadPool.executor(executor).execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - 
reg.processMessageReceived(request, channel); - } - - @Override - public boolean isForceExecution() { - return reg.isForceExecution(); - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn((Supplier) () -> - new ParameterizedMessage("failed to notify channel of error message for action [{}]", - action), inner); - } - } - }); - } - - } catch (Exception e) { - try { - channel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "failed to notify channel of error message for action [{}]", action), inner); - } - } - } - - private boolean shouldTraceAction(String action) { - if (tracerLogInclude.length > 0) { - if (!Regex.simpleMatch(tracerLogInclude, action)) { - return false; - } - } - return tracerLogExclude.length <= 0 || !Regex.simpleMatch(tracerLogExclude, action); - } - - /** - * Registers a new request handler. 
- * - * @param action the action the request handler is associated with - * @param request the request class that will be used to construct new instances for streaming - * @param executor the executor the request handling will be executed on - * @param handler the handler itself that implements the request handling - */ - private void registerRequestHandler(String action, Supplier request, - String executor, - TransportRequestHandler handler) { - handler = interceptor.interceptHandler(action, executor, false, handler); - RequestHandlerRegistry reg = new RequestHandlerRegistry<>( - action, request, taskManager, handler, executor, false, false); - registerRequestHandler(reg); - } - - @SuppressWarnings("unchecked") - private void registerRequestHandler(RequestHandlerRegistry reg) { - synchronized (requestHandlerMutex) { - if (requestHandlers.containsKey(reg.getAction())) { - throw new IllegalArgumentException("transport handlers for action " + - reg.getAction() + " is already registered"); - } - requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), - (RequestHandlerRegistry) reg).immutableMap(); - } - } - - private boolean isLocalNode(DiscoveryNode discoveryNode) { - return Objects.requireNonNull(discoveryNode, "discovery node must not be null").equals(localNode); - } - - static class HandshakeRequest extends TransportRequest { - - static final HandshakeRequest INSTANCE = new HandshakeRequest(); - - private HandshakeRequest() { - } - - } - - /** - * - */ - public static class HandshakeResponse extends TransportResponse { - - private DiscoveryNode discoveryNode; - - private ClusterName clusterName; - - private Version version; - - /** - * For extern construction. 
- */ - public HandshakeResponse() { - } - - HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, Version version) { - this.discoveryNode = discoveryNode; - this.version = version; - this.clusterName = clusterName; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - discoveryNode = in.readOptionalWriteable(DiscoveryNode::new); - clusterName = new ClusterName(in); - version = Version.readVersion(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalWriteable(discoveryNode); - clusterName.writeTo(out); - Version.writeVersion(version, out); - } - } - - private final class Adapter implements TransportServiceAdapter { - - final MeanMetric rxMetric = new MeanMetric(); - - final MeanMetric txMetric = new MeanMetric(); - - @Override - public void addBytesReceived(long size) { - rxMetric.inc(size); - } - - @Override - public void addBytesSent(long size) { - txMetric.inc(size); - } - - @Override - public void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, - TransportRequestOptions options) { - if (traceEnabled() && shouldTraceAction(action)) { - traceRequestSent(node, requestId, action, options); - } - } - - boolean traceEnabled() { - return tracerLog.isTraceEnabled(); - } - - @Override - public void onResponseSent(long requestId, String action, TransportResponse response, - TransportResponseOptions options) { - if (traceEnabled() && shouldTraceAction(action)) { - traceResponseSent(requestId, action); - } - } - - @Override - public void onResponseSent(long requestId, String action, Exception e) { - if (traceEnabled() && shouldTraceAction(action)) { - traceResponseSent(requestId, action, e); - } - } - - void traceResponseSent(long requestId, String action, Exception e) { - tracerLog.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("[{}][{}] sent error response", 
requestId, action), e); - } - - @Override - public void onRequestReceived(long requestId, String action) { - try { - blockIncomingRequestsLatch.await(); - } catch (InterruptedException e) { - logger.trace("interrupted while waiting for incoming requests block to be removed"); - } - if (traceEnabled() && shouldTraceAction(action)) { - traceReceivedRequest(requestId, action); - } - } - - @Override - public RequestHandlerRegistry getRequestHandler(String action) { - return requestHandlers.get(action); - } - - @Override - public TransportResponseHandler onResponseReceived(final long requestId) { - RequestHolder holder = clientHandlers.remove(requestId); - if (holder == null) { - checkForTimeout(requestId); - return null; - } - holder.cancelTimeout(); - if (traceEnabled() && shouldTraceAction(holder.action())) { - traceReceivedResponse(requestId, holder.node(), holder.action()); - } - return holder.handler(); - } - - void checkForTimeout(long requestId) { - // lets see if its in the timeout holder, but sync on mutex to make sure any ongoing timeout - // handling has finished - final DiscoveryNode sourceNode; - final String action; - if (clientHandlers.get(requestId) != null) { - throw new IllegalStateException(); - } - TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId); - if (timeoutInfoHolder != null) { - long time = System.currentTimeMillis(); - logger.warn("Received response for a request that has timed out, sent [{}ms] ago, timed out [{}ms] ago, " + - "action [{}], node [{}], id [{}]", time - timeoutInfoHolder.sentTime(), - time - timeoutInfoHolder.timeoutTime(), - timeoutInfoHolder.action(), timeoutInfoHolder.node(), requestId); - action = timeoutInfoHolder.action(); - sourceNode = timeoutInfoHolder.node(); - } else { - logger.warn("Transport response handler not found of id [{}]", requestId); - action = null; - sourceNode = null; - } - // call tracer out of lock - if (!traceEnabled()) { - return; - } - if (action == null) { - assert 
sourceNode == null; - traceUnresolvedResponse(requestId); - } else if (shouldTraceAction(action)) { - traceReceivedResponse(requestId, sourceNode, action); - } - } - - @Override - public void onNodeConnected(final DiscoveryNode node) { - } - - @Override - public void onConnectionOpened(DiscoveryNode node) { - } - - @Override - public void onNodeDisconnected(final DiscoveryNode node) { - try { - for (Map.Entry> entry : clientHandlers.entrySet()) { - RequestHolder holder = entry.getValue(); - if (holder.node().equals(node)) { - final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey()); - if (holderToNotify != null) { - // callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows - threadPool.generic().execute(() -> holderToNotify.handler() - .handleException(new NodeDisconnectedException(node, - holderToNotify.action()))); - } - } - } - } catch (EsRejectedExecutionException ex) { - logger.debug("Rejected execution on NodeDisconnected", ex); - } - } - - void traceReceivedRequest(long requestId, String action) { - tracerLog.trace("[{}][{}] received request", requestId, action); - } - - void traceResponseSent(long requestId, String action) { - tracerLog.trace("[{}][{}] sent response", requestId, action); - } - - void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String action) { - tracerLog.trace("[{}][{}] received response from [{}]", requestId, action, sourceNode); - } - - void traceUnresolvedResponse(long requestId) { - tracerLog.trace("[{}] received response but can't resolve it to a request", requestId); - } - - void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { - tracerLog.trace("[{}][{}] sent to [{}] (timeout: [{}])", requestId, action, node, options.timeout()); - } - } - - private final class TimeoutHandler implements Runnable { - - private final long requestId; - - private final long sentTime = 
System.currentTimeMillis(); - - volatile ScheduledFuture future; - - TimeoutHandler(long requestId) { - this.requestId = requestId; - } - - @Override - public void run() { - // we get first to make sure we only add the TimeoutInfoHandler if needed. - final RequestHolder holder = clientHandlers.get(requestId); - if (holder != null) { - // add it to the timeout information holder, in case we are going to get a response later - long timeoutTime = System.currentTimeMillis(); - timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(holder.node(), holder.action(), sentTime, - timeoutTime)); - // now that we have the information visible via timeoutInfoHandlers, we try to remove the request id - final RequestHolder removedHolder = clientHandlers.remove(requestId); - if (removedHolder != null) { - assert removedHolder == holder : "two different holder instances for request [" + requestId + "]"; - removedHolder.handler().handleException( - new ReceiveTimeoutTransportException(holder.node(), holder.action(), - "request_id [" + requestId + "] timed out after [" + (timeoutTime - sentTime) + "ms]")); - } else { - // response was processed, remove timeout info. - timeoutInfoHandlers.remove(requestId); - } - } - } - - /** - * Cancels timeout handling. This is a best effort only to avoid running it. - * Remove the requestId from {@link #clientHandlers} to make sure this doesn't run. 
- */ - void cancel() { - if (clientHandlers.get(requestId) != null) { - throw new IllegalStateException("cancel must be called after the requestId [" + - requestId + "] has been removed from clientHandlers"); - } - FutureUtils.cancel(future); - } - } - - private static class TimeoutInfoHolder { - - private final DiscoveryNode node; - private final String action; - private final long sentTime; - private final long timeoutTime; - - TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) { - this.node = node; - this.action = action; - this.sentTime = sentTime; - this.timeoutTime = timeoutTime; - } - - public DiscoveryNode node() { - return node; - } - - String action() { - return action; - } - - long sentTime() { - return sentTime; - } - - long timeoutTime() { - return timeoutTime; - } - } - - private static class RequestHolder { - - private final TransportResponseHandler handler; - - private final DiscoveryNode node; - - private final String action; - - private final TimeoutHandler timeoutHandler; - - RequestHolder(TransportResponseHandler handler, DiscoveryNode node, String action, - TimeoutHandler timeoutHandler) { - this.handler = handler; - this.node = node; - this.action = action; - this.timeoutHandler = timeoutHandler; - } - - TransportResponseHandler handler() { - return handler; - } - - public DiscoveryNode node() { - return this.node; - } - - String action() { - return this.action; - } - - void cancelTimeout() { - if (timeoutHandler != null) { - timeoutHandler.cancel(); - } - } - } - - /** - * This handler wrapper ensures that the response thread executes with the correct thread context. - * Before any of the handle methods are invoked we restore the context. 
- * @param thr transport response type - */ - public static final class ContextRestoreResponseHandler - implements TransportResponseHandler { - - private final TransportResponseHandler delegate; - - private final Supplier contextSupplier; - - ContextRestoreResponseHandler(Supplier contextSupplier, - TransportResponseHandler delegate) { - this.delegate = delegate; - this.contextSupplier = contextSupplier; - } - - @Override - public T newInstance() { - return delegate.newInstance(); - } - - @SuppressWarnings("try") - @Override - public void handleResponse(T response) { - try (ThreadContext.StoredContext ignore = contextSupplier.get()) { - delegate.handleResponse(response); - } - } - - @SuppressWarnings("try") - @Override - public void handleException(TransportException exp) { - try (ThreadContext.StoredContext ignore = contextSupplier.get()) { - delegate.handleException(exp); - } - } - - @Override - public String executor() { - return delegate.executor(); - } - - @Override - public String toString() { - return getClass().getName() + "/" + delegate.toString(); - } - - } - - static class DirectResponseChannel implements TransportChannel { - - private static final String DIRECT_RESPONSE_PROFILE = ".direct"; - - private final Logger logger; - - private final DiscoveryNode localNode; - - private final String action; - - private final long requestId; - - private final TransportServiceAdapter adapter; - - private final ThreadPool threadPool; - - DirectResponseChannel(Logger logger, DiscoveryNode localNode, String action, long requestId, - TransportServiceAdapter adapter, ThreadPool threadPool) { - this.logger = logger; - this.localNode = localNode; - this.action = action; - this.requestId = requestId; - this.adapter = adapter; - this.threadPool = threadPool; - } - - @Override - public String action() { - return action; - } - - @Override - public String getProfileName() { - return DIRECT_RESPONSE_PROFILE; - } - - @Override - public void sendResponse(TransportResponse 
response) throws IOException { - sendResponse(response, TransportResponseOptions.EMPTY); - } - - @SuppressWarnings("unchecked") - @Override - public void sendResponse(final TransportResponse response, TransportResponseOptions options) - throws IOException { - adapter.onResponseSent(requestId, action, response, options); - final TransportResponseHandler handler = adapter.onResponseReceived(requestId); - if (handler != null) { - final String executor = handler.executor(); - if (ThreadPool.Names.SAME.equals(executor)) { - processResponse(handler, response); - } else { - threadPool.executor(executor).execute(() -> processResponse(handler, response)); - } - } - } - - void processResponse(TransportResponseHandler handler, TransportResponse response) { - try { - handler.handleResponse(response); - } catch (Exception e) { - processException(handler, wrapInRemote(new ResponseHandlerFailureTransportException(e))); - } - } - - @SuppressWarnings("unchecked") - @Override - public void sendResponse(Exception exception) throws IOException { - adapter.onResponseSent(requestId, action, exception); - final TransportResponseHandler handler = adapter.onResponseReceived(requestId); - if (handler != null) { - final RemoteTransportException rtx = wrapInRemote(exception); - final String executor = handler.executor(); - if (ThreadPool.Names.SAME.equals(executor)) { - processException(handler, rtx); - } else { - threadPool.executor(handler.executor()).execute(() -> processException(handler, rtx)); - } - } - } - - RemoteTransportException wrapInRemote(Exception e) { - if (e instanceof RemoteTransportException) { - return (RemoteTransportException) e; - } - return new RemoteTransportException(localNode.getName(), localNode.getAddress(), action, e); - } - - void processException(final TransportResponseHandler handler, final RemoteTransportException rtx) { - try { - handler.handleException(rtx); - } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage( - "failed to 
handle exception for action [{}], handler [{}]", action, handler), e); - } - } - - @Override - public long getRequestId() { - return requestId; - } - - @Override - public String getChannelType() { - return "direct"; - } - - @Override - public Version getVersion() { - return localNode.getVersion(); - } - } -} diff --git a/build.gradle b/build.gradle index 712808f..121df15 100644 --- a/build.gradle +++ b/build.gradle @@ -1,27 +1,13 @@ -import java.time.ZonedDateTime -import java.time.format.DateTimeFormatter - -buildscript { - repositories { - jcenter() - maven { - url 'http://xbib.org/repository' - } - } - dependencies { - classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4" - } -} plugins { id "org.sonarqube" version "2.6.1" id "io.codearte.nexus-staging" version "0.11.0" + id "com.github.spotbugs" version "1.6.9" id "org.xbib.gradle.plugin.asciidoctor" version "1.6.0.1" } -printf "Date: %s\nHost: %s\nOS: %s %s %s\nJava: %s %s %s %s\nGradle: %s Groovy: %s Java: %s\n" + +printf "Host: %s\nOS: %s %s %s\nJava: %s %s %s %s\nGradle: %s Groovy: %s Java: %s\n" + "Build: group: ${project.group} name: ${project.name} version: ${project.version}\n", - ZonedDateTime.now().format(DateTimeFormatter.ISO_DATE_TIME), InetAddress.getLocalHost(), System.getProperty("os.name"), System.getProperty("os.arch"), @@ -33,31 +19,28 @@ printf "Date: %s\nHost: %s\nOS: %s %s %s\nJava: %s %s %s %s\nGradle: %s Groovy: gradle.gradleVersion, GroovySystem.getVersion(), JavaVersion.current() -apply plugin: "io.codearte.nexus-staging" -apply plugin: 'org.xbib.gradle.plugin.asciidoctor' - -ext { - user = 'jprante' - name = 'elx' - description = 'Elasticsearch extensions' - scmUrl = 'https://github.com/' + user + '/' + name - scmConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git' - scmDeveloperConnection = 'scm:git:git://github.com/' + user + '/' + name + '.git' +if (JavaVersion.current() < JavaVersion.VERSION_11) { + throw new GradleException("This build must 
be run with java 11 or higher") } subprojects { apply plugin: 'java' apply plugin: 'maven' apply plugin: 'signing' + apply plugin: 'com.github.spotbugs' + apply plugin: 'pmd' + apply plugin: 'checkstyle' + apply plugin: 'org.xbib.gradle.plugin.asciidoctor' configurations { - wagon - alpnagent asciidoclet + wagon } dependencies { - alpnagent "org.mortbay.jetty.alpn:jetty-alpn-agent:${project.property('alpnagent.version')}" + testCompile "junit:junit:${project.property('junit.version')}" + testCompile "org.apache.logging.log4j:log4j-core:${project.property('log4j.version')}" + testCompile "org.apache.logging.log4j:log4j-slf4j-impl:${project.property('log4j.version')}" asciidoclet "org.xbib:asciidoclet:${project.property('asciidoclet.version')}" wagon "org.apache.maven.wagon:wagon-ssh:${project.property('wagon.version')}" } @@ -71,10 +54,32 @@ subprojects { targetCompatibility = JavaVersion.VERSION_11 } - jar { - baseName "${rootProject.name}-${project.name}" + tasks.withType(JavaCompile) { + options.compilerArgs << "-Xlint:all" + if (!options.compilerArgs.contains("-processor")) { + options.compilerArgs << '-proc:none' + } } + test { + jvmArgs =[ + '--add-exports=java.base/jdk.internal.ref=ALL-UNNAMED', + '--add-exports=java.base/jdk.internal.misc=ALL-UNNAMED', + '--add-opens=java.base/java.nio=ALL-UNNAMED' + ] + systemProperty 'jna.debug_load', 'true' + testLogging { + showStandardStreams = true + exceptionFormat = 'full' + } + } + + clean { + delete "data" + delete "logs" + delete "out" + } + /*javadoc { options.docletpath = configurations.asciidoclet.files.asType(List) options.doclet = 'org.xbib.asciidoclet.Asciidoclet' @@ -105,16 +110,76 @@ subprojects { archives javadocJar, sourcesJar }*/ + task javadocJar(type: Jar, dependsOn: javadoc) { + classifier 'javadoc' + } + + task sourcesJar(type: Jar, dependsOn: classes) { + from sourceSets.main.allSource + classifier 'sources' + } + + artifacts { + archives javadocJar, sourcesJar + } + if 
(project.hasProperty('signing.keyId')) { signing { sign configurations.archives } } - apply from: "${rootProject.projectDir}/gradle/ext.gradle" apply from: "${rootProject.projectDir}/gradle/publish.gradle" - //apply from: "${rootProject.projectDir}/gradle/sonarqube.gradle" + spotbugs { + effort = "max" + reportLevel = "low" + //includeFilter = file("findbugs-exclude.xml") + } + + tasks.withType(com.github.spotbugs.SpotBugsTask) { + ignoreFailures = true + reports { + xml.enabled = false + html.enabled = true + } + } + + tasks.withType(Pmd) { + ignoreFailures = true + reports { + xml.enabled = true + html.enabled = true + } + } + tasks.withType(Checkstyle) { + ignoreFailures = true + reports { + xml.enabled = true + html.enabled = true + } + } + + pmd { + toolVersion = '6.11.0' + ruleSets = ['category/java/bestpractices.xml'] + } + + checkstyle { + configFile = rootProject.file('config/checkstyle/checkstyle.xml') + ignoreFailures = true + showViolations = true + } + + sonarqube { + properties { + property "sonar.projectName", "${project.group} ${project.name}" + property "sonar.sourceEncoding", "UTF-8" + property "sonar.tests", "src/test/java" + property "sonar.scm.provider", "git" + property "sonar.junit.reportsPath", "build/test-results/test/" + } + } } /*asciidoctor { diff --git a/common/build.gradle b/common/build.gradle deleted file mode 100644 index 5e961d5..0000000 --- a/common/build.gradle +++ /dev/null @@ -1,70 +0,0 @@ -buildscript { - repositories { - jcenter() - maven { - url 'http://xbib.org/repository' - } - } - dependencies { - classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4" - } -} - -apply plugin: 'org.xbib.gradle.plugin.elasticsearch.build' - -configurations { - main - tests -} - -dependencies { - compile project(':api') - compile "org.xbib:metrics:${project.property('xbib-metrics.version')}" - compileOnly "org.apache.logging.log4j:log4j-api:${project.property('log4j.version')}" - testCompile 
"org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" - testRuntime "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" -} - -jar { - baseName "${rootProject.name}-common" -} - -/* -task testJar(type: Jar, dependsOn: testClasses) { - baseName = "${project.archivesBaseName}-tests" - from sourceSets.test.output -} -*/ - -artifacts { - main jar - tests testJar - archives sourcesJar, javadocJar -} - -test { - enabled = false - //jvmArgs "-javaagent:" + configurations.alpnagent.asPath - systemProperty 'path.home', project.buildDir.absolutePath - testLogging { - showStandardStreams = true - exceptionFormat = 'full' - } -} - -randomizedTest { - enabled = false -} - -esTest { - // test with the jars, not the classes, for security manager - // classpath = files(configurations.testRuntime) + configurations.main.artifacts.files + configurations.tests.artifacts.files - systemProperty 'tests.security.manager', 'true' -} -esTest.dependsOn jar, testJar - -dependencyLicenses.enabled = false - -// we not like to examine Netty -thirdPartyAudit.enabled = false - diff --git a/common/config/checkstyle/checkstyle.xml b/common/config/checkstyle/checkstyle.xml deleted file mode 100644 index 8cb4438..0000000 --- a/common/config/checkstyle/checkstyle.xml +++ /dev/null @@ -1,321 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/common/licenses/classloader-6.3.2.1.jar.sha1 b/common/licenses/classloader-6.3.2.1.jar.sha1 deleted file mode 100644 index 
c959ad5..0000000 --- a/common/licenses/classloader-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f14124d1557cd7c21742f09cd18913a861125e56 \ No newline at end of file diff --git a/common/licenses/elasticsearch-6.3.2.1.jar.sha1 b/common/licenses/elasticsearch-6.3.2.1.jar.sha1 deleted file mode 100644 index 7f6a7c3..0000000 --- a/common/licenses/elasticsearch-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2bc144784abc748426b125a948b0bdd4fc4dd7d6 \ No newline at end of file diff --git a/common/licenses/elx-api-6.3.2.0.jar.sha1 b/common/licenses/elx-api-6.3.2.0.jar.sha1 deleted file mode 100644 index 06a07c6..0000000 --- a/common/licenses/elx-api-6.3.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af8cf6c3e7de988bbb7e6e441a2235ba1df8eaf8 \ No newline at end of file diff --git a/common/licenses/elx-api-LICENSE.txt b/common/licenses/elx-api-LICENSE.txt deleted file mode 100644 index d645695..0000000 --- a/common/licenses/elx-api-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/common/licenses/hdrhistogram-6.3.2.1.jar.sha1 b/common/licenses/hdrhistogram-6.3.2.1.jar.sha1 deleted file mode 100644 index 72d7e23..0000000 --- a/common/licenses/hdrhistogram-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -436454f1e6e821f6f18def7a2e4b467eeb341430 \ No newline at end of file diff --git a/common/licenses/hppc-6.3.2.1.jar.sha1 b/common/licenses/hppc-6.3.2.1.jar.sha1 deleted file mode 100644 index 55b3ead..0000000 --- a/common/licenses/hppc-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80ef947c9edfaacb261ee27e2c7fa5968b3eeaa6 \ No newline at end of file diff --git a/common/licenses/jackson-6.3.2.1.jar.sha1 b/common/licenses/jackson-6.3.2.1.jar.sha1 deleted file mode 100644 index d2c2967..0000000 --- a/common/licenses/jackson-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abf31b393745f2a6e133819ee7485420d6bc5160 \ No newline at end of file diff --git a/common/licenses/jna-6.3.2.1.jar.sha1 b/common/licenses/jna-6.3.2.1.jar.sha1 deleted file mode 100644 index 5142c47..0000000 --- a/common/licenses/jna-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68463acec824eb54989fcecbe44074a41ee639e3 \ No newline at end of file diff --git a/common/licenses/joda-6.3.2.1.jar.sha1 b/common/licenses/joda-6.3.2.1.jar.sha1 deleted file mode 100644 index aade01f..0000000 --- a/common/licenses/joda-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c232fdaf23b8c7b1ff1ca1ba9b91fcc0fa01938 \ 
No newline at end of file diff --git a/common/licenses/joptsimple-6.3.2.1.jar.sha1 b/common/licenses/joptsimple-6.3.2.1.jar.sha1 deleted file mode 100644 index 6059409..0000000 --- a/common/licenses/joptsimple-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7834ee69f91a3360f17a31cf6a27b245a3a2f668 \ No newline at end of file diff --git a/common/licenses/jts-6.3.2.1.jar.sha1 b/common/licenses/jts-6.3.2.1.jar.sha1 deleted file mode 100644 index 8d0ab58..0000000 --- a/common/licenses/jts-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b10c7f51ab98e6f6f252c931534edbb632cb108e \ No newline at end of file diff --git a/common/licenses/log4j-6.3.2.1.jar.sha1 b/common/licenses/log4j-6.3.2.1.jar.sha1 deleted file mode 100644 index c6f346a..0000000 --- a/common/licenses/log4j-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -824c180dc70fda00b70a146d2f2be9a8f36cfdbb \ No newline at end of file diff --git a/common/licenses/lucene-6.3.2.1.jar.sha1 b/common/licenses/lucene-6.3.2.1.jar.sha1 deleted file mode 100644 index bee6197..0000000 --- a/common/licenses/lucene-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68fba4b570c4717cda49a3f187e2bfb909697fc8 \ No newline at end of file diff --git a/common/licenses/metrics-1.1.0.jar.sha1 b/common/licenses/metrics-1.1.0.jar.sha1 deleted file mode 100644 index 959a34a..0000000 --- a/common/licenses/metrics-1.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e8949a50a223ab837edc312e34ee597febe86464 \ No newline at end of file diff --git a/common/licenses/netty-buffer-4.1.33.Final.jar.sha1 b/common/licenses/netty-buffer-4.1.33.Final.jar.sha1 deleted file mode 100644 index 6bab3bb..0000000 --- a/common/licenses/netty-buffer-4.1.33.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0d4fdb13d5832a0f348e4d855c71201a2b15d560 \ No newline at end of file diff --git a/common/licenses/netty-codec-4.1.33.Final.jar.sha1 b/common/licenses/netty-codec-4.1.33.Final.jar.sha1 deleted file mode 100644 index e103a84..0000000 --- a/common/licenses/netty-codec-4.1.33.Final.jar.sha1 
+++ /dev/null @@ -1 +0,0 @@ -600762bf6861fa62b061782debb6fcdeff1f1984 \ No newline at end of file diff --git a/common/licenses/netty-codec-http-4.1.33.Final.jar.sha1 b/common/licenses/netty-codec-http-4.1.33.Final.jar.sha1 deleted file mode 100644 index b5a8826..0000000 --- a/common/licenses/netty-codec-http-4.1.33.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad557dffc0777b1b24558d6c57b77b0198dbb58d \ No newline at end of file diff --git a/common/licenses/netty-common-4.1.33.Final.jar.sha1 b/common/licenses/netty-common-4.1.33.Final.jar.sha1 deleted file mode 100644 index 22d10fa..0000000 --- a/common/licenses/netty-common-4.1.33.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -398b725cbaca8c691b74759ae6c3d69b8eeb0574 \ No newline at end of file diff --git a/common/licenses/netty-handler-4.1.33.Final.jar.sha1 b/common/licenses/netty-handler-4.1.33.Final.jar.sha1 deleted file mode 100644 index 8d86585..0000000 --- a/common/licenses/netty-handler-4.1.33.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3bcc2db64f7b0ebacba552aff319b41962c2df96 \ No newline at end of file diff --git a/common/licenses/netty-resolver-4.1.33.Final.jar.sha1 b/common/licenses/netty-resolver-4.1.33.Final.jar.sha1 deleted file mode 100644 index 3b12aa7..0000000 --- a/common/licenses/netty-resolver-4.1.33.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3873f5ed509b5c169fb7cbaf34b694d8c748926 \ No newline at end of file diff --git a/common/licenses/netty-transport-4.1.33.Final.jar.sha1 b/common/licenses/netty-transport-4.1.33.Final.jar.sha1 deleted file mode 100644 index fdad609..0000000 --- a/common/licenses/netty-transport-4.1.33.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b9f9af72dfcd8464c16169670d52c6dc5fe65897 \ No newline at end of file diff --git a/common/licenses/noggit-6.3.2.1.jar.sha1 b/common/licenses/noggit-6.3.2.1.jar.sha1 deleted file mode 100644 index ac01817..0000000 --- a/common/licenses/noggit-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe9c516ca4ead60f713eceb398e6f636b83d0a5b \ No newline 
at end of file diff --git a/common/licenses/s2geo-6.3.2.1.jar.sha1 b/common/licenses/s2geo-6.3.2.1.jar.sha1 deleted file mode 100644 index 2ec741a..0000000 --- a/common/licenses/s2geo-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1bd19c1f50b6764f104cdcbfa3f01b1b3bb2045 \ No newline at end of file diff --git a/common/licenses/securesm-6.3.2.1.jar.sha1 b/common/licenses/securesm-6.3.2.1.jar.sha1 deleted file mode 100644 index 9632107..0000000 --- a/common/licenses/securesm-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ce2c501f3c72eb1099467d708b9c134ed0b7bb2a \ No newline at end of file diff --git a/common/licenses/snakeyaml-6.3.2.1.jar.sha1 b/common/licenses/snakeyaml-6.3.2.1.jar.sha1 deleted file mode 100644 index 2f3cc62..0000000 --- a/common/licenses/snakeyaml-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -62a48b60b17e6d2a823439a5e68f31ef196f11e7 \ No newline at end of file diff --git a/common/licenses/spatial4j-6.3.2.1.jar.sha1 b/common/licenses/spatial4j-6.3.2.1.jar.sha1 deleted file mode 100644 index 5c26ca0..0000000 --- a/common/licenses/spatial4j-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02855ff60b4cecf9dd15e6e91e3cc0902d2e7eac \ No newline at end of file diff --git a/common/licenses/tdigest-6.3.2.1.jar.sha1 b/common/licenses/tdigest-6.3.2.1.jar.sha1 deleted file mode 100644 index 9ae2373..0000000 --- a/common/licenses/tdigest-6.3.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d28517afc71abe5b7f224944280d5f03ed2f2cc \ No newline at end of file diff --git a/common/src/docs/asciidoc/css/foundation.css b/common/src/docs/asciidoc/css/foundation.css deleted file mode 100644 index 27be611..0000000 --- a/common/src/docs/asciidoc/css/foundation.css +++ /dev/null @@ -1,684 +0,0 @@ -/*! 
normalize.css v2.1.2 | MIT License | git.io/normalize */ -/* ========================================================================== HTML5 display definitions ========================================================================== */ -/** Correct `block` display not defined in IE 8/9. */ -article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; } - -/** Correct `inline-block` display not defined in IE 8/9. */ -audio, canvas, video { display: inline-block; } - -/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */ -audio:not([controls]) { display: none; height: 0; } - -/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */ -[hidden], template { display: none; } - -script { display: none !important; } - -/* ========================================================================== Base ========================================================================== */ -/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */ -html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ } - -/** Remove default margin. */ -body { margin: 0; } - -/* ========================================================================== Links ========================================================================== */ -/** Remove the gray background color from active links in IE 10. */ -a { background: transparent; } - -/** Address `outline` inconsistency between Chrome and other browsers. */ -a:focus { outline: thin dotted; } - -/** Improve readability when focused and also mouse hovered in all browsers. 
*/ -a:active, a:hover { outline: 0; } - -/* ========================================================================== Typography ========================================================================== */ -/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */ -h1 { font-size: 2em; margin: 0.67em 0; } - -/** Address styling not present in IE 8/9, Safari 5, and Chrome. */ -abbr[title] { border-bottom: 1px dotted; } - -/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ -b, strong { font-weight: bold; } - -/** Address styling not present in Safari 5 and Chrome. */ -dfn { font-style: italic; } - -/** Address differences between Firefox and other browsers. */ -hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; } - -/** Address styling not present in IE 8/9. */ -mark { background: #ff0; color: #000; } - -/** Correct font family set oddly in Safari 5 and Chrome. */ -code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; } - -/** Improve readability of pre-formatted text in all browsers. */ -pre { white-space: pre-wrap; } - -/** Set consistent quote types. */ -q { quotes: "\201C" "\201D" "\2018" "\2019"; } - -/** Address inconsistent and variable font size in all browsers. */ -small { font-size: 80%; } - -/** Prevent `sub` and `sup` affecting `line-height` in all browsers. */ -sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } - -sup { top: -0.5em; } - -sub { bottom: -0.25em; } - -/* ========================================================================== Embedded content ========================================================================== */ -/** Remove border when inside `a` element in IE 8/9. */ -img { border: 0; } - -/** Correct overflow displayed oddly in IE 9. 
*/ -svg:not(:root) { overflow: hidden; } - -/* ========================================================================== Figures ========================================================================== */ -/** Address margin not present in IE 8/9 and Safari 5. */ -figure { margin: 0; } - -/* ========================================================================== Forms ========================================================================== */ -/** Define consistent border, margin, and padding. */ -fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } - -/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */ -legend { border: 0; /* 1 */ padding: 0; /* 2 */ } - -/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ -button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ } - -/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ -button, input { line-height: normal; } - -/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */ -button, select { text-transform: none; } - -/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. 
*/ -button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ } - -/** Re-set default cursor for disabled elements. */ -button[disabled], html input[disabled] { cursor: default; } - -/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ -input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } - -/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */ -input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; } - -/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ -input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } - -/** Remove inner padding and border in Firefox 4+. */ -button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } - -/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */ -textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ } - -/* ========================================================================== Tables ========================================================================== */ -/** Remove most spacing between table cells. 
*/ -table { border-collapse: collapse; border-spacing: 0; } - -meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; } - -meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; } - -meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; } - -*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } - -html, body { font-size: 100%; } - -body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; } - -a:hover { cursor: pointer; } - -img, object, embed { max-width: 100%; height: auto; } - -object, embed { height: 100%; } - -img { -ms-interpolation-mode: bicubic; } - -#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; } - -.left { float: left !important; } - -.right { float: right !important; } - -.text-left { text-align: left !important; } - -.text-right { text-align: right !important; } - -.text-center { text-align: center !important; } - -.text-justify { text-align: justify !important; } - -.hide { display: none; } - -.antialiased { -webkit-font-smoothing: antialiased; } - -img { display: inline-block; vertical-align: middle; } - -textarea { height: auto; min-height: 50px; } - -select { width: 100%; } - -object, svg { display: inline-block; vertical-align: middle; } - -.center { margin-left: auto; margin-right: auto; } - -.spread { width: 100%; } - -p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; } - -.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, 
.stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; } - -/* Typography resets */ -div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; } - -/* Default Link Styles */ -a { color: #2ba6cb; text-decoration: none; line-height: inherit; } -a:hover, a:focus { color: #2795b6; } -a img { border: none; } - -/* Default paragraph styles */ -p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } -p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } - -/* Default header styles */ -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; } -h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; } - -h1 { font-size: 2.125em; } - -h2 { font-size: 1.6875em; } - -h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.375em; } - -h4 { font-size: 1.125em; } - -h5 { font-size: 1.125em; } - -h6 { font-size: 1em; } - -hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; } - -/* Helpful Typography Defaults */ -em, i { font-style: italic; line-height: inherit; } - -strong, b { font-weight: bold; line-height: inherit; } - -small { font-size: 60%; line-height: inherit; } - -code { 
font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; } - -/* Lists */ -ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } - -ul, ol { margin-left: 1.5em; } -ul.no-bullet, ol.no-bullet { margin-left: 1.5em; } - -/* Unordered Lists */ -ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ } -ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; } -ul.square { list-style-type: square; } -ul.circle { list-style-type: circle; } -ul.disc { list-style-type: disc; } -ul.no-bullet { list-style: none; } - -/* Ordered Lists */ -ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } - -/* Definition Lists */ -dl dt { margin-bottom: 0.3125em; font-weight: bold; } -dl dd { margin-bottom: 1.25em; } - -/* Abbreviations */ -abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; } - -abbr { text-transform: none; } - -/* Blockquotes */ -blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; } -blockquote cite { display: block; font-size: 0.8125em; color: #555555; } -blockquote cite:before { content: "\2014 \0020"; } -blockquote cite a, blockquote cite a:visited { color: #555555; } - -blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; } - -/* Microformats */ -.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; } -.vcard li { margin: 0; display: block; } -.vcard .fn { font-weight: bold; font-size: 0.9375em; } - -.vevent .summary { font-weight: bold; } -.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; } - -@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } - h1 { font-size: 
2.75em; } - h2 { font-size: 2.3125em; } - h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } - h4 { font-size: 1.4375em; } } -/* Tables */ -table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; } -table thead, table tfoot { background: whitesmoke; font-weight: bold; } -table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; } -table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; } -table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; } -table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; } - -body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; } - -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } - -.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; } -.clearfix:after, .float-group:after { clear: both; } - -*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; } -*:not(pre) > code.nobreak { word-wrap: normal; } -*:not(pre) > code.nowrap { white-space: nowrap; } - -pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; } - -em em { font-style: normal; } - -strong strong { font-weight: normal; } - -.keyseq { color: #555555; } - -kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; 
margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } - -.keyseq kbd:first-child { margin-left: 0; } - -.keyseq kbd:last-child { margin-right: 0; } - -.menuseq, .menu { color: #090909; } - -b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; } - -b.button:before { content: "["; padding: 0 3px 0 2px; } - -b.button:after { content: "]"; padding: 0 2px 0 3px; } - -#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } -#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; } -#header:after, #content:after, #footnotes:after, #footer:after { clear: both; } - -#content { margin-top: 1.25em; } - -#content:before { content: none; } - -#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; } -#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; } -#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; } -#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; } -#header .details span:first-child { margin-left: -0.125em; } -#header .details span.email a { color: #6f6f6f; } -#header .details br { display: none; } -#header .details br + span:before { content: "\00a0\2013\00a0"; } -#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; } -#header .details br + span#revremark:before { content: "\00a0|\00a0"; } -#header 
#revnumber { text-transform: capitalize; } -#header #revnumber:after { content: "\00a0"; } - -#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } - -#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; } -#toc > ul { margin-left: 0.125em; } -#toc ul.sectlevel0 > li > a { font-style: italic; } -#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } -#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; } -#toc li { line-height: 1.3334; margin-top: 0.3334em; } -#toc a { text-decoration: none; } -#toc a:active { text-decoration: underline; } - -#toctitle { color: #6f6f6f; font-size: 1.2em; } - -@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } - body.toc2 { padding-left: 15em; padding-right: 0; } - #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } - #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } - #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } - #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } - #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } - body.toc2.toc-right { padding-left: 0; padding-right: 15em; } - body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } } -@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } - #toc.toc2 { width: 20em; } - #toc.toc2 #toctitle { font-size: 1.375em; } - #toc.toc2 > ul { font-size: 0.95em; } - #toc.toc2 ul ul { padding-left: 1.25em; } - body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } 
-#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } -#content #toc > :first-child { margin-top: 0; } -#content #toc > :last-child { margin-bottom: 0; } - -#footer { max-width: 100%; background-color: #222222; padding: 1.25em; } - -#footer-text { color: #dddddd; line-height: 1.44; } - -.sect1 { padding-bottom: 0.625em; } - -@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } } -.sect1 + .sect1 { border-top: 1px solid #dddddd; } - -#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; } -#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } -#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } -#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; } -#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, 
.sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; } - -.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } - -.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; } - -table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; } - -.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; } - -table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; } - -.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } -.admonitionblock > table td.icon { text-align: center; width: 80px; } -.admonitionblock > table td.icon img { max-width: initial; } -.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; } -.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; } -.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } - -.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; } -.exampleblock > .content > :first-child { margin-top: 0; } -.exampleblock > .content > :last-child { margin-bottom: 0; } - -.sidebarblock { border-style: solid; border-width: 1px; border-color: 
#d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } -.sidebarblock > :first-child { margin-top: 0; } -.sidebarblock > :last-child { margin-bottom: 0; } -.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; } - -.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } - -.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; } -.sidebarblock .literalblock pre, .sidebarblock .listingblock pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; } - -.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; } -.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; } -@media only screen and (min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } } -@media only screen and (min-width: 1280px) { .literalblock pre, 
.literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } } - -.literalblock.output pre { color: #eeeeee; background-color: black; } - -.listingblock pre.highlightjs { padding: 0; } -.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; } - -.listingblock > .content { position: relative; } - -.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; } - -.listingblock:hover code[data-lang]:before { display: block; } - -.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; } - -.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; } - -table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } - -table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; } - -table.pyhltable td.code { padding-left: .75em; padding-right: 0; } - -pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; } - -pre.pygments .lineno { display: inline-block; margin-right: .25em; } - -table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; } - -.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } -.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; } -.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } -.quoteblock blockquote { margin: 0; padding: 0; border: 0; } -.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px 
rgba(0, 0, 0, 0.1); } -.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; } -.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; } -.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; } -.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; } -.quoteblock .quoteblock blockquote:before { display: none; } - -.verseblock { margin: 0 1em 1.25em 1em; } -.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; } -.verseblock pre strong { font-weight: 400; } -.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } - -.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; } -.quoteblock .attribution br, .verseblock .attribution br { display: none; } -.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; } - -.quoteblock.abstract { margin: 0 0 1.25em 0; display: block; } -.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; } -.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; } - -table.tableblock { max-width: 100%; border-collapse: separate; } -table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; } - -table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; } - -table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; } - -table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; } - -table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; } - -table.grid-all * > tr > 
.tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; } - -table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; } - -table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; } - -table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; } - -table.frame-all { border-width: 1px; } - -table.frame-sides { border-width: 0 1px; } - -table.frame-topbot { border-width: 1px 0; } - -th.halign-left, td.halign-left { text-align: left; } - -th.halign-right, td.halign-right { text-align: right; } - -th.halign-center, td.halign-center { text-align: center; } - -th.valign-top, td.valign-top { vertical-align: top; } - -th.valign-bottom, td.valign-bottom { vertical-align: bottom; } - -th.valign-middle, td.valign-middle { vertical-align: middle; } - -table thead th, table tfoot th { font-weight: bold; } - -tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; } - -tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; } - -p.tableblock > code:only-child { background: none; padding: 0; } - -p.tableblock { font-size: 1em; } - -td > div.verse { white-space: pre; } - -ol { margin-left: 1.75em; } - -ul li ol { margin-left: 1.5em; } - -dl dd { margin-left: 1.125em; } - -dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } - -ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } - -ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; } - -ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; 
} - -ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; } - -ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; } - -ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; } -ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; } -ul.inline > li > * { display: block; } - -.unstyled dl dt { font-weight: normal; font-style: normal; } - -ol.arabic { list-style-type: decimal; } - -ol.decimal { list-style-type: decimal-leading-zero; } - -ol.loweralpha { list-style-type: lower-alpha; } - -ol.upperalpha { list-style-type: upper-alpha; } - -ol.lowerroman { list-style-type: lower-roman; } - -ol.upperroman { list-style-type: upper-roman; } - -ol.lowergreek { list-style-type: lower-greek; } - -.hdlist > table, .colist > table { border: 0; background: none; } -.hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; } - -td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } - -td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } - -.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } - -.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; } -.colist > table tr > td:first-of-type img { max-width: initial; } -.colist > table tr > td:last-of-type { padding: 0.25em 0; } - -.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; } - -.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; } -.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; } -.imageblock > .title { margin-bottom: 0; } -.imageblock.thumb, .imageblock.th { border-width: 6px; } -.imageblock.thumb > .title, 
.imageblock.th > .title { padding: 0 0.125em; } - -.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } -.image.left { margin-right: 0.625em; } -.image.right { margin-left: 0.625em; } - -a.image { text-decoration: none; display: inline-block; } -a.image object { pointer-events: none; } - -sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } -sup.footnote a, sup.footnoteref a { text-decoration: none; } -sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } - -#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } -#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; } -#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; } -#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; } -#footnotes .footnote:last-of-type { margin-bottom: 0; } -#content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } - -.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; } -.gist .file-data > table td.line-data { width: 99%; } - -div.unbreakable { page-break-inside: avoid; } - -.big { font-size: larger; } - -.small { font-size: smaller; } - -.underline { text-decoration: underline; } - -.overline { text-decoration: overline; } - -.line-through { text-decoration: line-through; } - -.aqua { color: #00bfbf; } - -.aqua-background { background-color: #00fafa; } - -.black { color: black; } - -.black-background { background-color: black; } - -.blue { color: #0000bf; } - -.blue-background { background-color: #0000fa; } - -.fuchsia { color: #bf00bf; } - -.fuchsia-background { background-color: #fa00fa; } - -.gray { color: #606060; } - -.gray-background { background-color: #7d7d7d; } - -.green { color: #006000; } 
- -.green-background { background-color: #007d00; } - -.lime { color: #00bf00; } - -.lime-background { background-color: #00fa00; } - -.maroon { color: #600000; } - -.maroon-background { background-color: #7d0000; } - -.navy { color: #000060; } - -.navy-background { background-color: #00007d; } - -.olive { color: #606000; } - -.olive-background { background-color: #7d7d00; } - -.purple { color: #600060; } - -.purple-background { background-color: #7d007d; } - -.red { color: #bf0000; } - -.red-background { background-color: #fa0000; } - -.silver { color: #909090; } - -.silver-background { background-color: #bcbcbc; } - -.teal { color: #006060; } - -.teal-background { background-color: #007d7d; } - -.white { color: #bfbfbf; } - -.white-background { background-color: #fafafa; } - -.yellow { color: #bfbf00; } - -.yellow-background { background-color: #fafa00; } - -span.icon > .fa { cursor: default; } - -.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } -.admonitionblock td.icon .icon-note:before { content: "\f05a"; color: #207c98; } -.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } -.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; } -.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; } -.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; } - -.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } -.conum[data-value] * { color: #fff !important; } -.conum[data-value] + b { display: none; } -.conum[data-value]:after { content: attr(data-value); } -pre 
.conum[data-value] { position: relative; top: -0.125em; } - -b.conum * { color: inherit !important; } - -.conum:not([data-value]):empty { display: none; } - -.literalblock pre, .listingblock pre { background: #eeeeee; } diff --git a/common/src/docs/asciidoclet/overview.adoc b/common/src/docs/asciidoclet/overview.adoc deleted file mode 100644 index 7947331..0000000 --- a/common/src/docs/asciidoclet/overview.adoc +++ /dev/null @@ -1,4 +0,0 @@ -= Elasticsearch Java client -Jörg Prante -Version 5.4.0.0 - diff --git a/common/src/main/java/org/xbib/elasticsearch/client/AbstractClient.java b/common/src/main/java/org/xbib/elasticsearch/client/AbstractClient.java deleted file mode 100644 index 6db0452..0000000 --- a/common/src/main/java/org/xbib/elasticsearch/client/AbstractClient.java +++ /dev/null @@ -1,925 +0,0 @@ -package org.xbib.elasticsearch.client; - -import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; -import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; -import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; -import 
org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; -import org.elasticsearch.action.admin.indices.create.CreateIndexAction; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; -import org.elasticsearch.action.admin.indices.flush.FlushAction; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexAction; -import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; -import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; -import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; -import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshAction; -import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchAction; -import 
org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.sort.SortBuilder; -import org.elasticsearch.search.sort.SortBuilders; -import org.elasticsearch.search.sort.SortOrder; - -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.StringWriter; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -public abstract class AbstractClient implements ClientMethods { - - private static final Logger logger = LogManager.getLogger(AbstractClient.class.getName()); - - private Settings.Builder settingsBuilder; - - private Settings settings; - - private Map mappings; - - private ElasticsearchClient client; - - protected BulkProcessor bulkProcessor; - - protected BulkMetric metric; - - protected BulkControl control; - - protected Throwable throwable; - - protected boolean closed; - - protected int maxActionsPerRequest = 
DEFAULT_MAX_ACTIONS_PER_REQUEST; - - protected int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS; - - protected String maxVolumePerRequest = DEFAULT_MAX_VOLUME_PER_REQUEST; - - protected String flushIngestInterval = DEFAULT_FLUSH_INTERVAL; - - @Override - public AbstractClient init(ElasticsearchClient client, Settings settings, - final BulkMetric metric, final BulkControl control) { - this.client = client; - this.mappings = new HashMap<>(); - if (settings == null) { - settings = findSettings(); - } - if (client == null && settings != null) { - try { - this.client = createClient(settings); - } catch (IOException e) { - logger.error(e.getMessage(), e); - } - } - this.metric = metric; - this.control = control; - if (metric != null) { - metric.start(); - } - resetSettings(); - BulkProcessor.Listener listener = new BulkProcessor.Listener() { - - private final Logger logger = LogManager.getLogger(getClass().getName() + ".Listener"); - - @Override - public void beforeBulk(long executionId, BulkRequest request) { - long l = -1; - if (metric != null) { - metric.getCurrentIngest().inc(); - l = metric.getCurrentIngest().getCount(); - int n = request.numberOfActions(); - metric.getSubmitted().inc(n); - metric.getCurrentIngestNumDocs().inc(n); - metric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes()); - } - logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]", - executionId, - request.numberOfActions(), - request.estimatedSizeInBytes(), - l); - } - - @Override - public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { - long l = -1; - if (metric != null) { - metric.getCurrentIngest().dec(); - l = metric.getCurrentIngest().getCount(); - metric.getSucceeded().inc(response.getItems().length); - } - int n = 0; - for (BulkItemResponse itemResponse : response.getItems()) { - if (metric != null) { - metric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId()); - } 
- if (itemResponse.isFailed()) { - n++; - if (metric != null) { - metric.getSucceeded().dec(1); - metric.getFailed().inc(1); - } - } - } - if (metric != null) { - logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests", - executionId, - metric.getSucceeded().getCount(), - metric.getFailed().getCount(), - response.getTook().millis(), - l); - } - if (n > 0) { - logger.error("bulk [{}] failed with {} failed items, failure message = {}", - executionId, n, response.buildFailureMessage()); - } else { - if (metric != null) { - metric.getCurrentIngestNumDocs().dec(response.getItems().length); - } - } - } - - @Override - public void afterBulk(long executionId, BulkRequest request, Throwable failure) { - if (metric != null) { - metric.getCurrentIngest().dec(); - } - throwable = failure; - closed = true; - logger.error("after bulk [" + executionId + "] error", failure); - } - }; - if (this.client != null) { - BulkProcessor.Builder builder = BulkProcessor.builder(this.client, listener) - .setBulkActions(maxActionsPerRequest) - .setConcurrentRequests(maxConcurrentRequests) - .setFlushInterval(TimeValue.parseTimeValue(flushIngestInterval, "flushIngestInterval")); - if (maxVolumePerRequest != null) { - builder.setBulkSize(ByteSizeValue.parseBytesSizeValue(maxVolumePerRequest, "maxVolumePerRequest")); - } - this.bulkProcessor = builder.build(); - } - this.closed = false; - return this; - } - - protected abstract ElasticsearchClient createClient(Settings settings) throws IOException; - - @Override - public ElasticsearchClient client() { - return client; - } - - @Override - public ClientMethods maxActionsPerRequest(int maxActionsPerRequest) { - this.maxActionsPerRequest = maxActionsPerRequest; - return this; - } - - @Override - public ClientMethods maxConcurrentRequests(int maxConcurrentRequests) { - this.maxConcurrentRequests = maxConcurrentRequests; - return this; - } - - @Override - public ClientMethods maxVolumePerRequest(String 
maxVolumePerRequest) { - this.maxVolumePerRequest = maxVolumePerRequest; - return this; - } - - @Override - public ClientMethods flushIngestInterval(String flushIngestInterval) { - this.flushIngestInterval = flushIngestInterval; - return this; - } - - @Override - public BulkMetric getMetric() { - return metric; - } - - public void resetSettings() { - this.settingsBuilder = Settings.builder(); - settings = null; - mappings = new HashMap<>(); - } - - public void setSettings(Settings settings) { - this.settings = settings; - } - - public void setting(String key, String value) { - if (settingsBuilder == null) { - settingsBuilder = Settings.builder(); - } - settingsBuilder.put(key, value); - } - - public void setting(String key, Boolean value) { - if (settingsBuilder == null) { - settingsBuilder = Settings.builder(); - } - settingsBuilder.put(key, value); - } - - public void setting(String key, Integer value) { - if (settingsBuilder == null) { - settingsBuilder = Settings.builder(); - } - settingsBuilder.put(key, value); - } - - public void setting(InputStream in) throws IOException { - settingsBuilder = Settings.builder().loadFromStream(".json", in, true); - } - - public Settings.Builder settingsBuilder() { - return settingsBuilder != null ? 
settingsBuilder : Settings.builder(); - } - - public Settings settings() { - if (settings != null) { - return settings; - } - if (settingsBuilder == null) { - settingsBuilder = Settings.builder(); - } - return settingsBuilder.build(); - } - - @Override - public void mapping(String type, String mapping) throws IOException { - mappings.put(type, mapping); - } - - @Override - public void mapping(String type, InputStream in) throws IOException { - if (type == null) { - return; - } - StringWriter sw = new StringWriter(); - Streams.copy(new InputStreamReader(in, StandardCharsets.UTF_8), sw); - mappings.put(type, sw.toString()); - } - - @Override - public ClientMethods index(String index, String type, String id, boolean create, BytesReference source) { - return indexRequest(new IndexRequest(index).type(type).id(id).create(create).source(source, XContentType.JSON)); - } - - @Override - public ClientMethods index(String index, String type, String id, boolean create, String source) { - return indexRequest(new IndexRequest(index).type(type).id(id).create(create).source(source, XContentType.JSON)); - } - - @Override - public ClientMethods indexRequest(IndexRequest indexRequest) { - if (closed) { - throwClose(); - } - try { - if (metric != null) { - metric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id()); - } - bulkProcessor.add(indexRequest); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of index request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public ClientMethods delete(String index, String type, String id) { - return deleteRequest(new DeleteRequest(index).type(type).id(id)); - } - - @Override - public ClientMethods deleteRequest(DeleteRequest deleteRequest) { - if (closed) { - throwClose(); - } - try { - if (metric != null) { - metric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); - } - bulkProcessor.add(deleteRequest); - } catch 
(Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of delete failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public ClientMethods update(String index, String type, String id, BytesReference source) { - return updateRequest(new UpdateRequest().index(index).type(type).id(id).upsert(source, XContentType.JSON)); - } - - @Override - public ClientMethods update(String index, String type, String id, String source) { - return updateRequest(new UpdateRequest().index(index).type(type).id(id).upsert(source, XContentType.JSON)); - } - - @Override - public ClientMethods updateRequest(UpdateRequest updateRequest) { - if (closed) { - throwClose(); - } - try { - if (metric != null) { - metric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id()); - } - bulkProcessor.add(updateRequest); - } catch (Exception e) { - throwable = e; - closed = true; - logger.error("bulk add of update request failed: " + e.getMessage(), e); - } - return this; - } - - @Override - public ClientMethods startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds) - throws IOException { - if (control == null) { - return this; - } - if (!control.isBulk(index) && startRefreshIntervalSeconds > 0L && stopRefreshIntervalSeconds > 0L) { - control.startBulk(index, startRefreshIntervalSeconds, stopRefreshIntervalSeconds); - updateIndexSetting(index, "refresh_interval", startRefreshIntervalSeconds + "s"); - } - return this; - } - - @Override - public ClientMethods stopBulk(String index) throws IOException { - if (control == null) { - return this; - } - if (control.isBulk(index)) { - long secs = control.getStopBulkRefreshIntervals().get(index); - if (secs > 0L) { - updateIndexSetting(index, "refresh_interval", secs + "s"); - } - control.finishBulk(index); - } - return this; - } - - @Override - public ClientMethods flushIngest() { - if (closed) { - throwClose(); - } - logger.debug("flushing bulk processor"); - 
bulkProcessor.flush(); - return this; - } - - @Override - public synchronized void shutdown() throws IOException { - if (closed) { - throwClose(); - } - if (bulkProcessor != null) { - logger.info("closing bulk processor..."); - bulkProcessor.close(); - } - if (metric != null) { - logger.info("stopping metric"); - metric.stop(); - } - if (control != null && control.indices() != null && !control.indices().isEmpty()) { - logger.info("stopping bulk mode for indices {}...", control.indices()); - for (String index : control.indices()) { - stopBulk(index); - } - } - } - - @Override - public ClientMethods newIndex(String index) { - if (closed) { - throwClose(); - } - return newIndex(index, null, null); - } - - @Override - public ClientMethods newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException { - resetSettings(); - setting(settings); - mapping(type, mappings); - return newIndex(index, settings(), this.mappings); - } - - @Override - public ClientMethods newIndex(String index, Settings settings, Map mappings) { - if (closed) { - throwClose(); - } - if (client() == null) { - logger.warn("no client for create index"); - return this; - } - if (index == null) { - logger.warn("no index name given to create index"); - return this; - } - CreateIndexRequestBuilder createIndexRequestBuilder = - new CreateIndexRequestBuilder(client(), CreateIndexAction.INSTANCE).setIndex(index); - if (settings != null) { - logger.info("found settings {}", settings.toString()); - createIndexRequestBuilder.setSettings(settings); - } - if (mappings != null) { - for (Map.Entry entry : mappings.entrySet()) { - String type = entry.getKey(); - String mapping = entry.getValue(); - logger.info("found mapping for {}", type); - createIndexRequestBuilder.addMapping(type, mapping, XContentType.JSON); - } - } - CreateIndexResponse createIndexResponse = createIndexRequestBuilder.execute().actionGet(); - logger.info("index {} created: {}", index, createIndexResponse); - 
return this; - } - - - @Override - public ClientMethods newMapping(String index, String type, Map mapping) { - PutMappingRequestBuilder putMappingRequestBuilder = - new PutMappingRequestBuilder(client(), PutMappingAction.INSTANCE) - .setIndices(index) - .setType(type) - .setSource(mapping); - putMappingRequestBuilder.execute().actionGet(); - logger.info("mapping created for index {} and type {}", index, type); - return this; - } - - @Override - public ClientMethods deleteIndex(String index) { - if (closed) { - throwClose(); - } - if (client == null) { - logger.warn("no client"); - return this; - } - if (index == null) { - logger.warn("no index name given to delete index"); - return this; - } - DeleteIndexRequestBuilder deleteIndexRequestBuilder = - new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, index); - deleteIndexRequestBuilder.execute().actionGet(); - return this; - } - - @Override - public ClientMethods waitForResponses(String maxWaitTime) throws InterruptedException, ExecutionException { - if (closed) { - throwClose(); - } - long millis = TimeValue.parseTimeValue(maxWaitTime, "millis").getMillis(); - while (!bulkProcessor.awaitClose(millis, TimeUnit.MILLISECONDS)) { - logger.warn("still waiting for responses"); - } - return this; - } - - public void waitForRecovery() throws IOException { - if (client() == null) { - return; - } - client().execute(RecoveryAction.INSTANCE, new RecoveryRequest()).actionGet(); - } - - @Override - public int waitForRecovery(String index) throws IOException { - if (client() == null) { - return -1; - } - if (index == null) { - throw new IOException("unable to waitfor recovery, index not set"); - } - RecoveryResponse response = client().execute(RecoveryAction.INSTANCE, new RecoveryRequest(index)).actionGet(); - int shards = response.getTotalShards(); - client().execute(ClusterHealthAction.INSTANCE, new ClusterHealthRequest(index) - .waitForActiveShards(shards)).actionGet(); - return shards; - } - - @Override - 
public void waitForCluster(String statusString, String timeout) throws IOException { - if (client() == null) { - return; - } - ClusterHealthStatus status = ClusterHealthStatus.fromString(statusString); - ClusterHealthResponse healthResponse = - client().execute(ClusterHealthAction.INSTANCE, new ClusterHealthRequest() - .waitForStatus(status).timeout(timeout)).actionGet(); - if (healthResponse != null && healthResponse.isTimedOut()) { - throw new IOException("cluster state is " + healthResponse.getStatus().name() - + " and not " + status.name() - + ", from here on, everything will fail!"); - } - } - - public String fetchClusterName() { - if (client() == null) { - return null; - } - try { - ClusterStateRequestBuilder clusterStateRequestBuilder = - new ClusterStateRequestBuilder(client(), ClusterStateAction.INSTANCE).all(); - ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet(); - String name = clusterStateResponse.getClusterName().value(); - int nodeCount = clusterStateResponse.getState().getNodes().getSize(); - return name + " (" + nodeCount + " nodes connected)"; - } catch (ElasticsearchTimeoutException e) { - logger.warn(e.getMessage(), e); - return "TIMEOUT"; - } catch (NoNodeAvailableException e) { - logger.warn(e.getMessage(), e); - return "DISCONNECTED"; - } catch (Exception e) { - logger.warn(e.getMessage(), e); - return "[" + e.getMessage() + "]"; - } - } - - public String healthColor() { - if (client() == null) { - return null; - } - try { - ClusterHealthResponse healthResponse = - client().execute(ClusterHealthAction.INSTANCE, - new ClusterHealthRequest().timeout(TimeValue.timeValueSeconds(30))).actionGet(); - ClusterHealthStatus status = healthResponse.getStatus(); - return status.name(); - } catch (ElasticsearchTimeoutException e) { - logger.warn(e.getMessage(), e); - return "TIMEOUT"; - } catch (NoNodeAvailableException e) { - logger.warn(e.getMessage(), e); - return "DISCONNECTED"; - } catch (Exception e) { - 
logger.warn(e.getMessage(), e); - return "[" + e.getMessage() + "]"; - } - } - - public int updateReplicaLevel(String index, int level) throws IOException { - waitForCluster("YELLOW","30s"); - updateIndexSetting(index, "number_of_replicas", level); - return waitForRecovery(index); - } - - public void flushIndex(String index) { - if (client() == null) { - return; - } - if (index != null) { - client().execute(FlushAction.INSTANCE, new FlushRequest(index)).actionGet(); - } - } - - public void refreshIndex(String index) { - if (client() == null) { - return; - } - if (index != null) { - client().execute(RefreshAction.INSTANCE, new RefreshRequest(index)).actionGet(); - } - } - - public void putMapping(String index) { - if (client() == null) { - return; - } - if (!mappings.isEmpty()) { - for (Map.Entry me : mappings.entrySet()) { - client().execute(PutMappingAction.INSTANCE, - new PutMappingRequest(index).type(me.getKey()).source(me.getValue(), XContentType.JSON)).actionGet(); - } - } - } - - public String resolveAlias(String alias) { - if (client() == null) { - return alias; - } - GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE); - GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet(); - if (!getAliasesResponse.getAliases().isEmpty()) { - return getAliasesResponse.getAliases().keys().iterator().next().value; - } - return alias; - } - - public String resolveMostRecentIndex(String alias) { - if (client() == null) { - return alias; - } - if (alias == null) { - return null; - } - GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE); - GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet(); - Pattern pattern = Pattern.compile("^(.*?)(\\d+)$"); - Set indices = new TreeSet<>(Collections.reverseOrder()); - for (ObjectCursor indexName : 
getAliasesResponse.getAliases().keys()) { - Matcher m = pattern.matcher(indexName.value); - if (m.matches() && alias.equals(m.group(1))) { - indices.add(indexName.value); - } - } - return indices.isEmpty() ? alias : indices.iterator().next(); - } - - public Map getAliasFilters(String alias) { - GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE); - return getFilters(getAliasesRequestBuilder.setIndices(resolveAlias(alias)).execute().actionGet()); - } - - public Map getIndexFilters(String index) { - GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), GetAliasesAction.INSTANCE); - return getFilters(getAliasesRequestBuilder.setIndices(index).execute().actionGet()); - } - - - @Override - public void switchAliases(String index, String concreteIndex, List extraAliases) { - switchAliases(index, concreteIndex, extraAliases, null); - } - - @Override - public void switchAliases(String index, String concreteIndex, - List extraAliases, IndexAliasAdder adder) { - if (client() == null) { - return; - } - if (index.equals(concreteIndex)) { - return; - } - // two situations: 1. there is a new alias 2. there is already an old index with the alias - String oldIndex = resolveAlias(index); - final Map oldFilterMap = oldIndex.equals(index) ? 
null : getIndexFilters(oldIndex); - final List newAliases = new LinkedList<>(); - final List switchAliases = new LinkedList<>(); - IndicesAliasesRequestBuilder requestBuilder = new IndicesAliasesRequestBuilder(client(), IndicesAliasesAction.INSTANCE); - if (oldFilterMap == null || !oldFilterMap.containsKey(index)) { - // never apply a filter for trunk index name - requestBuilder.addAlias(concreteIndex, index); - newAliases.add(index); - } - // switch existing aliases - if (oldFilterMap != null) { - for (Map.Entry entry : oldFilterMap.entrySet()) { - String alias = entry.getKey(); - String filter = entry.getValue(); - requestBuilder.removeAlias(oldIndex, alias); - if (filter != null) { - requestBuilder.addAlias(concreteIndex, alias, filter); - } else { - requestBuilder.addAlias(concreteIndex, alias); - } - switchAliases.add(alias); - } - } - // a list of aliases that should be added, check if new or old - if (extraAliases != null) { - for (String extraAlias : extraAliases) { - if (oldFilterMap == null || !oldFilterMap.containsKey(extraAlias)) { - // index alias adder only active on extra aliases, and if alias is new - if (adder != null) { - adder.addIndexAlias(requestBuilder, concreteIndex, extraAlias); - } else { - requestBuilder.addAlias(concreteIndex, extraAlias); - } - newAliases.add(extraAlias); - } else { - String filter = oldFilterMap.get(extraAlias); - requestBuilder.removeAlias(oldIndex, extraAlias); - if (filter != null) { - requestBuilder.addAlias(concreteIndex, extraAlias, filter); - } else { - requestBuilder.addAlias(concreteIndex, extraAlias); - } - switchAliases.add(extraAlias); - } - } - } - if (!newAliases.isEmpty() || !switchAliases.isEmpty()) { - logger.info("new aliases = {}, switch aliases = {}", newAliases, switchAliases); - requestBuilder.execute().actionGet(); - } - } - - @Override - public void performRetentionPolicy(String index, String concreteIndex, int timestampdiff, int mintokeep) { - if (client() == null) { - return; - } - if 
(index.equals(concreteIndex)) { - return; - } - GetIndexRequestBuilder getIndexRequestBuilder = new GetIndexRequestBuilder(client(), GetIndexAction.INSTANCE); - GetIndexResponse getIndexResponse = getIndexRequestBuilder.execute().actionGet(); - Pattern pattern = Pattern.compile("^(.*?)(\\d+)$"); - Set indices = new TreeSet<>(); - logger.info("{} indices", getIndexResponse.getIndices().length); - for (String s : getIndexResponse.getIndices()) { - Matcher m = pattern.matcher(s); - if (m.matches() && index.equals(m.group(1)) && !s.equals(concreteIndex)) { - indices.add(s); - } - } - if (indices.isEmpty()) { - logger.info("no indices found, retention policy skipped"); - return; - } - if (mintokeep > 0 && indices.size() <= mintokeep) { - logger.info("{} indices found, not enough for retention policy ({}), skipped", - indices.size(), mintokeep); - return; - } else { - logger.info("candidates for deletion = {}", indices); - } - List indicesToDelete = new ArrayList<>(); - // our index - Matcher m1 = pattern.matcher(concreteIndex); - if (m1.matches()) { - Integer i1 = Integer.parseInt(m1.group(2)); - for (String s : indices) { - Matcher m2 = pattern.matcher(s); - if (m2.matches()) { - Integer i2 = Integer.parseInt(m2.group(2)); - int kept = indices.size() - indicesToDelete.size(); - if ((timestampdiff == 0 || (timestampdiff > 0 && i1 - i2 > timestampdiff)) && mintokeep <= kept) { - indicesToDelete.add(s); - } - } - } - } - logger.info("indices to delete = {}", indicesToDelete); - if (indicesToDelete.isEmpty()) { - logger.info("not enough indices found to delete, retention policy complete"); - return; - } - String[] s = indicesToDelete.toArray(new String[indicesToDelete.size()]); - DeleteIndexRequestBuilder requestBuilder = new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, s); - DeleteIndexResponse response = requestBuilder.execute().actionGet(); - if (!response.isAcknowledged()) { - logger.warn("retention delete index operation was not acknowledged"); - } 
- } - - @Override - public Long mostRecentDocument(String index, String timestampfieldname) { - if (client() == null) { - return null; - } - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client(), SearchAction.INSTANCE); - SortBuilder sort = SortBuilders.fieldSort(timestampfieldname).order(SortOrder.DESC); - SearchResponse searchResponse = searchRequestBuilder.setIndices(index) - .addStoredField(timestampfieldname) - .setSize(1) - .addSort(sort) - .execute().actionGet(); - if (searchResponse.getHits().getHits().length == 1) { - SearchHit hit = searchResponse.getHits().getHits()[0]; - if (hit.getFields().get(timestampfieldname) != null) { - return hit.getFields().get(timestampfieldname).getValue(); - } else { - return 0L; - } - } - return null; - } - - @Override - public boolean hasThrowable() { - return throwable != null; - } - - @Override - public Throwable getThrowable() { - return throwable; - } - - protected static void throwClose() { - throw new ElasticsearchException("client is closed"); - } - - - protected void updateIndexSetting(String index, String key, Object value) throws IOException { - if (client() == null) { - return; - } - if (index == null) { - throw new IOException("no index name given"); - } - if (key == null) { - throw new IOException("no key given"); - } - if (value == null) { - throw new IOException("no value given"); - } - Settings.Builder updateSettingsBuilder = Settings.builder(); - updateSettingsBuilder.put(key, value.toString()); - UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(index) - .settings(updateSettingsBuilder); - client().execute(UpdateSettingsAction.INSTANCE, updateSettingsRequest).actionGet(); - } - - private Map getFilters(GetAliasesResponse getAliasesResponse) { - Map result = new HashMap<>(); - for (ObjectObjectCursor> object : getAliasesResponse.getAliases()) { - List aliasMetaDataList = object.value; - for (AliasMetaData aliasMetaData : aliasMetaDataList) { - if 
(aliasMetaData.filteringRequired()) { - String metaData = new String(aliasMetaData.getFilter().uncompressed(), StandardCharsets.UTF_8); - result.put(aliasMetaData.alias(), metaData); - } else { - result.put(aliasMetaData.alias(), null); - } - } - } - return result; - } - - private Settings findSettings() { - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put("host", "localhost"); - try { - String hostname = NetworkUtils.getLocalAddress().getHostName(); - logger.debug("the hostname is {}", hostname); - settingsBuilder.put("host", hostname) - .put("port", 9300); - } catch (Exception e) { - logger.warn(e.getMessage(), e); - } - return settingsBuilder.build(); - } -} diff --git a/common/src/main/java/org/xbib/elasticsearch/client/BulkControl.java b/common/src/main/java/org/xbib/elasticsearch/client/BulkControl.java deleted file mode 100644 index fc9c1fd..0000000 --- a/common/src/main/java/org/xbib/elasticsearch/client/BulkControl.java +++ /dev/null @@ -1,19 +0,0 @@ -package org.xbib.elasticsearch.client; - -import java.util.Map; -import java.util.Set; - -public interface BulkControl { - - void startBulk(String indexName, long startRefreshInterval, long stopRefreshInterval); - - boolean isBulk(String indexName); - - void finishBulk(String indexName); - - Set indices(); - - Map getStartBulkRefreshIntervals(); - - Map getStopBulkRefreshIntervals(); -} diff --git a/common/src/main/java/org/xbib/elasticsearch/client/ClientBuilder.java b/common/src/main/java/org/xbib/elasticsearch/client/ClientBuilder.java deleted file mode 100644 index 2865266..0000000 --- a/common/src/main/java/org/xbib/elasticsearch/client/ClientBuilder.java +++ /dev/null @@ -1,100 +0,0 @@ -package org.xbib.elasticsearch.client; - -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; - -import java.util.HashMap; -import java.util.Map; 
-import java.util.ServiceLoader; - -public final class ClientBuilder implements Parameters { - - private final Settings.Builder settingsBuilder; - - private Map, ClientMethods> clientMethodsMap; - - private BulkMetric metric; - - private BulkControl control; - - public ClientBuilder() { - this(Thread.currentThread().getContextClassLoader()); - } - - public ClientBuilder(ClassLoader classLoader) { - this.settingsBuilder = Settings.builder(); - //settingsBuilder.put("node.name", "clientnode"); - this.clientMethodsMap = new HashMap<>(); - ServiceLoader serviceLoader = ServiceLoader.load(ClientMethods.class, - classLoader != null ? classLoader : Thread.currentThread().getContextClassLoader()); - for (ClientMethods clientMethods : serviceLoader) { - clientMethodsMap.put(clientMethods.getClass(), clientMethods); - } - } - - public static ClientBuilder builder() { - return new ClientBuilder(); - } - - public ClientBuilder put(String key, String value) { - settingsBuilder.put(key, value); - return this; - } - - public ClientBuilder put(String key, Integer value) { - settingsBuilder.put(key, value); - return this; - } - - public ClientBuilder put(String key, Long value) { - settingsBuilder.put(key, value); - return this; - } - - public ClientBuilder put(String key, Double value) { - settingsBuilder.put(key, value); - return this; - } - - public ClientBuilder put(String key, ByteSizeValue value) { - settingsBuilder.put(key, value); - return this; - } - - public ClientBuilder put(String key, TimeValue value) { - settingsBuilder.put(key, value); - return this; - } - - public ClientBuilder put(Settings settings) { - settingsBuilder.put(settings); - return this; - } - - public ClientBuilder setMetric(BulkMetric metric) { - this.metric = metric; - return this; - } - - public ClientBuilder setControl(BulkControl control) { - this.control = control; - return this; - } - - public C getClient(Class clientClass) { - return getClient(null, clientClass); - } - - 
@SuppressWarnings("unchecked") - public C getClient(Client client, Class clientClass) { - Settings settings = settingsBuilder.build(); - return (C) clientMethodsMap.get(clientClass) - .maxActionsPerRequest(settings.getAsInt(MAX_ACTIONS_PER_REQUEST, DEFAULT_MAX_ACTIONS_PER_REQUEST)) - .maxConcurrentRequests(settings.getAsInt(MAX_CONCURRENT_REQUESTS, DEFAULT_MAX_CONCURRENT_REQUESTS)) - .maxVolumePerRequest(settings.get(MAX_VOLUME_PER_REQUEST, DEFAULT_MAX_VOLUME_PER_REQUEST)) - .flushIngestInterval(settings.get(FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL)) - .init(client, settings, metric, control); - } -} diff --git a/common/src/main/java/org/xbib/elasticsearch/client/ClientMethods.java b/common/src/main/java/org/xbib/elasticsearch/client/ClientMethods.java deleted file mode 100644 index 4057994..0000000 --- a/common/src/main/java/org/xbib/elasticsearch/client/ClientMethods.java +++ /dev/null @@ -1,402 +0,0 @@ -package org.xbib.elasticsearch.client; - -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Settings; - -import java.io.IOException; -import java.io.InputStream; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; - -/** - * Interface for providing convenient administrative methods for ingesting data into Elasticsearch. - */ -public interface ClientMethods extends Parameters { - - ClientMethods init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control); - - /** - * Return Elasticsearch client. - * - * @return Elasticsearch client - */ - ElasticsearchClient client(); - - /** - * Bulked index request. Each request will be added to a queue for bulking requests. - * Submitting request will be done when bulk limits are exceeded. 
- * - * @param index the index - * @param type the type - * @param id the id - * @param create true if document must be created - * @param source the source - * @return this - */ - ClientMethods index(String index, String type, String id, boolean create, BytesReference source); - - /** - * Bulked index request. Each request will be added to a queue for bulking requests. - * Submitting request will be done when bulk limits are exceeded. - * - * @param index the index - * @param type the type - * @param id the id - * @param create true if document must be created - * @param source the source - * @return this - */ - ClientMethods index(String index, String type, String id, boolean create, String source); - - /** - * Bulked index request. Each request will be added to a queue for bulking requests. - * Submitting request will be done when bulk limits are exceeded. - * - * @param indexRequest the index request to add - * @return this ingest - */ - ClientMethods indexRequest(IndexRequest indexRequest); - - /** - * Delete document. - * - * @param index the index - * @param type the type - * @param id the id - * @return this ingest - */ - ClientMethods delete(String index, String type, String id); - - /** - * Bulked delete request. Each request will be added to a queue for bulking requests. - * Submitting request will be done when bulk limits are exceeded. - * - * @param deleteRequest the delete request to add - * @return this ingest - */ - ClientMethods deleteRequest(DeleteRequest deleteRequest); - - /** - * Bulked update request. Each request will be added to a queue for bulking requests. - * Submitting request will be done when bulk limits are exceeded. - * Note that updates only work correctly when all operations between nodes are synchronized. 
- * - * @param index the index - * @param type the type - * @param id the id - * @param source the source - * @return this - */ - ClientMethods update(String index, String type, String id, BytesReference source); - - /** - * Bulked update request. Each request will be added to a queue for bulking requests. - * Submitting request will be done when bulk limits are exceeded. - * Note that updates only work correctly when all operations between nodes are synchronized. - * - * @param index the index - * @param type the type - * @param id the id - * @param source the source - * @return this - */ - ClientMethods update(String index, String type, String id, String source); - - /** - * Bulked update request. Each request will be added to a queue for bulking requests. - * Submitting request will be done when bulk limits are exceeded. - * Note that updates only work correctly when all operations between nodes are synchronized. - * - * @param updateRequest the update request to add - * @return this ingest - */ - ClientMethods updateRequest(UpdateRequest updateRequest); - - /** - * Set the maximum number of actions per request. - * - * @param maxActionsPerRequest maximum number of actions per request - * @return this ingest - */ - ClientMethods maxActionsPerRequest(int maxActionsPerRequest); - - /** - * Set the maximum concurent requests. - * - * @param maxConcurentRequests maximum number of concurrent ingest requests - * @return this Ingest - */ - ClientMethods maxConcurrentRequests(int maxConcurentRequests); - - /** - * Set the maximum volume for request before flush. - * - * @param maxVolume maximum volume - * @return this ingest - */ - ClientMethods maxVolumePerRequest(String maxVolume); - - /** - * Set the flush interval for automatic flushing outstanding ingest requests. - * - * @param flushInterval the flush interval, default is 30 seconds - * @return this ingest - */ - ClientMethods flushIngestInterval(String flushInterval); - - /** - * Set mapping. 
- * - * @param type mapping type - * @param in mapping definition as input stream - * @throws IOException if mapping could not be added - */ - void mapping(String type, InputStream in) throws IOException; - - /** - * Set mapping. - * - * @param type mapping type - * @param mapping mapping definition as input stream - * @throws IOException if mapping could not be added - */ - void mapping(String type, String mapping) throws IOException; - - /** - * Put mapping. - * - * @param index index - */ - void putMapping(String index); - - /** - * Create a new index. - * - * @param index index - * @return this ingest - */ - ClientMethods newIndex(String index); - - /** - * Create a new index. - * - * @param index index - * @param type type - * @param settings settings - * @param mappings mappings - * @return this ingest - * @throws IOException if new index creation fails - */ - ClientMethods newIndex(String index, String type, InputStream settings, InputStream mappings) throws IOException; - - /** - * Create a new index. - * - * @param index index - * @param settings settings - * @param mappings mappings - * @return this ingest - */ - ClientMethods newIndex(String index, Settings settings, Map mappings); - - /** - * Create new mapping. - * - * @param index index - * @param type index type - * @param mapping mapping - * @return this ingest - */ - ClientMethods newMapping(String index, String type, Map mapping); - - /** - * Delete index. - * - * @param index index - * @return this ingest - */ - ClientMethods deleteIndex(String index); - - /** - * Start bulk mode. - * - * @param index index - * @param startRefreshIntervalSeconds refresh interval before bulk - * @param stopRefreshIntervalSeconds refresh interval after bulk - * @return this ingest - * @throws IOException if bulk could not be started - */ - ClientMethods startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds) throws IOException; - - /** - * Stops bulk mode. 
- * - * @param index index - * @return this Ingest - * @throws IOException if bulk could not be stopped - */ - ClientMethods stopBulk(String index) throws IOException; - - /** - * Flush ingest, move all pending documents to the cluster. - * - * @return this - */ - ClientMethods flushIngest(); - - /** - * Wait for all outstanding responses. - * - * @param maxWaitTime maximum wait time - * @return this ingest - * @throws InterruptedException if wait is interrupted - * @throws ExecutionException if execution failed - */ - ClientMethods waitForResponses(String maxWaitTime) throws InterruptedException, ExecutionException; - - /** - * Refresh the index. - * - * @param index index - */ - void refreshIndex(String index); - - /** - * Flush the index. - * - * @param index index - */ - void flushIndex(String index); - - /** - * Update replica level. - * - * @param index index - * @param level the replica level - * @return number of shards after updating replica level - * @throws IOException if replica could not be updated - */ - int updateReplicaLevel(String index, int level) throws IOException; - - /** - * Wait for cluster being healthy. - * - * @param healthColor cluster health color to wait for - * @param timeValue time value - * @throws IOException if wait failed - */ - void waitForCluster(String healthColor, String timeValue) throws IOException; - - /** - * Get current health color. - * - * @return the cluster health color - */ - String healthColor(); - - /** - * Wait for index recovery (after replica change). - * - * @param index index - * @return number of shards found - * @throws IOException if wait failed - */ - int waitForRecovery(String index) throws IOException; - - /** - * Resolve alias. 
- * - * @param alias the alias - * @return one index name behind the alias or the alias if there is no index - */ - String resolveAlias(String alias); - - /** - * Resolve alias to all connected indices, sort index names with most recent timestamp on top, return this index - * name. - * - * @param alias the alias - * @return the most recent index name pointing to the alias - */ - String resolveMostRecentIndex(String alias); - - /** - * Get all alias filters. - * - * @param index index - * @return map of alias filters - */ - Map getAliasFilters(String index); - - /** - * Switch aliases from one index to another. - * - * @param index the index name - * @param concreteIndex the index name with timestamp - * @param extraAliases a list of names that should be set as index aliases - */ - void switchAliases(String index, String concreteIndex, List extraAliases); - - /** - * Switch aliases from one index to another. - * - * @param index the index name - * @param concreteIndex the index name with timestamp - * @param extraAliases a list of names that should be set as index aliases - * @param adder an adder method to create alias term queries - */ - void switchAliases(String index, String concreteIndex, List extraAliases, IndexAliasAdder adder); - - /** - * Retention policy for an index. All indices before timestampdiff should be deleted, - * but mintokeep indices must be kept. - * - * @param index index name - * @param concreteIndex index name with timestamp - * @param timestampdiff timestamp delta (for index timestamps) - * @param mintokeep minimum number of indices to keep - */ - void performRetentionPolicy(String index, String concreteIndex, int timestampdiff, int mintokeep); - - /** - * Find the timestamp of the most recently indexed document in the index. 
- * - * @param index the index name - * @param timestampfieldname the timestamp field name - * @return millis UTC millis of the most recent document - * @throws IOException if most rcent document can not be found - */ - Long mostRecentDocument(String index, String timestampfieldname) throws IOException; - - /** - * Get metric. - * - * @return metric - */ - BulkMetric getMetric(); - - /** - * Returns true is a throwable exists. - * - * @return true if a Throwable exists - */ - boolean hasThrowable(); - - /** - * Return last throwable if exists. - * - * @return last throwable - */ - Throwable getThrowable(); - - /** - * Shutdown the ingesting. - * @throws IOException is shutdown fails - */ - void shutdown() throws IOException; -} diff --git a/common/src/main/java/org/xbib/elasticsearch/client/IndexAliasAdder.java b/common/src/main/java/org/xbib/elasticsearch/client/IndexAliasAdder.java deleted file mode 100644 index 4c6fbb8..0000000 --- a/common/src/main/java/org/xbib/elasticsearch/client/IndexAliasAdder.java +++ /dev/null @@ -1,9 +0,0 @@ -package org.xbib.elasticsearch.client; - -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; - -@FunctionalInterface -public interface IndexAliasAdder { - - void addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias); -} diff --git a/common/src/main/java/org/xbib/elasticsearch/client/Parameters.java b/common/src/main/java/org/xbib/elasticsearch/client/Parameters.java deleted file mode 100644 index 2146977..0000000 --- a/common/src/main/java/org/xbib/elasticsearch/client/Parameters.java +++ /dev/null @@ -1,20 +0,0 @@ -package org.xbib.elasticsearch.client; - -public interface Parameters { - - int DEFAULT_MAX_ACTIONS_PER_REQUEST = 1000; - - int DEFAULT_MAX_CONCURRENT_REQUESTS = Runtime.getRuntime().availableProcessors(); - - String DEFAULT_MAX_VOLUME_PER_REQUEST = "10mb"; - - String DEFAULT_FLUSH_INTERVAL = "30s"; - - String MAX_ACTIONS_PER_REQUEST = "max_actions_per_request"; - 
- String MAX_CONCURRENT_REQUESTS = "max_concurrent_requests"; - - String MAX_VOLUME_PER_REQUEST = "max_volume_per_request"; - - String FLUSH_INTERVAL = "flush_interval"; -} diff --git a/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkControl.java b/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkControl.java deleted file mode 100644 index c12ecc1..0000000 --- a/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkControl.java +++ /dev/null @@ -1,52 +0,0 @@ -package org.xbib.elasticsearch.client; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -public class SimpleBulkControl implements BulkControl { - - private final Set indexNames = new HashSet<>(); - - private final Map startBulkRefreshIntervals = new HashMap<>(); - - private final Map stopBulkRefreshIntervals = new HashMap<>(); - - @Override - public void startBulk(String indexName, long startRefreshInterval, long stopRefreshInterval) { - synchronized (indexNames) { - indexNames.add(indexName); - startBulkRefreshIntervals.put(indexName, startRefreshInterval); - stopBulkRefreshIntervals.put(indexName, stopRefreshInterval); - } - } - - @Override - public boolean isBulk(String indexName) { - return indexNames.contains(indexName); - } - - @Override - public void finishBulk(String indexName) { - synchronized (indexNames) { - indexNames.remove(indexName); - } - } - - @Override - public Set indices() { - return indexNames; - } - - @Override - public Map getStartBulkRefreshIntervals() { - return startBulkRefreshIntervals; - } - - @Override - public Map getStopBulkRefreshIntervals() { - return stopBulkRefreshIntervals; - } - -} diff --git a/common/src/main/java/org/xbib/elasticsearch/client/package-info.java b/common/src/main/java/org/xbib/elasticsearch/client/package-info.java deleted file mode 100644 index 941a500..0000000 --- a/common/src/main/java/org/xbib/elasticsearch/client/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** 
- * Classes for Elasticsearch client. - */ -package org.xbib.elasticsearch.client; diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/SearchTests.java b/common/src/test/java/org/xbib/elasticsearch/client/common/SearchTests.java deleted file mode 100644 index bd1c16d..0000000 --- a/common/src/test/java/org/xbib/elasticsearch/client/common/SearchTests.java +++ /dev/null @@ -1,60 +0,0 @@ -package org.xbib.elasticsearch.client.common; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.bulk.BulkAction; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.testframework.ESSingleNodeTestCase; - -public class SearchTests extends ESSingleNodeTestCase { - - private static final Logger logger = LogManager.getLogger(SearchTests.class.getName()); - - public void testSearch() throws Exception { - long t0 = System.currentTimeMillis(); - BulkRequestBuilder builder = new BulkRequestBuilder(client(), BulkAction.INSTANCE); - for (int i = 0; i < 1000; i++) { - builder.add(Requests.indexRequest() - .index("pages").type("row") - .source(XContentFactory.jsonBuilder() - .startObject() - .field("user1", "kimchy") - .field("user2", "kimchy") - .field("user3", "kimchy") - .field("user4", "kimchy") - .field("user5", "kimchy") - .field("user6", "kimchy") - .field("user7", "kimchy") - .field("user8", "kimchy") - .field("user9", "kimchy") - .field("rowcount", i) - .field("rs", 1234) - .endObject())); - } - client().bulk(builder.request()).actionGet(); - 
client().admin().indices().refresh(Requests.refreshRequest()).actionGet(); - long t1 = System.currentTimeMillis(); - logger.info("t1-t0 = {}", t1 - t0); - for (int i = 0; i < 100; i++) { - t1 = System.currentTimeMillis(); - QueryBuilder queryStringBuilder = - QueryBuilders.queryStringQuery("rs:" + 1234); - SearchRequestBuilder requestBuilder = client().prepareSearch() - .setIndices("pages") - .setTypes("row") - .setQuery(queryStringBuilder) - .addSort("rowcount", SortOrder.DESC) - .setFrom(i * 10).setSize(10); - SearchResponse response = requestBuilder.execute().actionGet(); - long t2 = System.currentTimeMillis(); - logger.info("t2-t1 = {}", t2 - t1); - } - } -} diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/WildcardTests.java b/common/src/test/java/org/xbib/elasticsearch/client/common/WildcardTests.java deleted file mode 100644 index aeade4b..0000000 --- a/common/src/test/java/org/xbib/elasticsearch/client/common/WildcardTests.java +++ /dev/null @@ -1,51 +0,0 @@ -package org.xbib.elasticsearch.client.common; - -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.testframework.ESSingleNodeTestCase; - -import java.io.IOException; - -public class WildcardTests extends ESSingleNodeTestCase { - - public void testWildcard() throws Exception { - index("1", "010"); - index("2", "0*0"); - // exact - validateCount(QueryBuilders.queryStringQuery("010").defaultField("field"), 1); - validateCount(QueryBuilders.queryStringQuery("0\\*0").defaultField("field"), 1); - // pattern - validateCount(QueryBuilders.queryStringQuery("0*0").defaultField("field"), 1); // 2? - validateCount(QueryBuilders.queryStringQuery("0?0").defaultField("field"), 1); // 2? 
- validateCount(QueryBuilders.queryStringQuery("0**0").defaultField("field"), 1); // 2? - validateCount(QueryBuilders.queryStringQuery("0??0").defaultField("field"), 0); - validateCount(QueryBuilders.queryStringQuery("*10").defaultField("field"), 1); - validateCount(QueryBuilders.queryStringQuery("*1*").defaultField("field"), 1); - validateCount(QueryBuilders.queryStringQuery("*\\*0").defaultField("field"), 0); // 1? - validateCount(QueryBuilders.queryStringQuery("*\\**").defaultField("field"), 0); // 1? - } - - private void index(String id, String fieldValue) throws IOException { - client().index(Requests.indexRequest() - .index("index").type("type").id(id) - .source(XContentFactory.jsonBuilder().startObject().field("field", fieldValue).endObject()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)) - .actionGet(); - } - - private void validateCount(QueryBuilder queryBuilder, long expectedHits) { - final long actualHits = count(queryBuilder); - if (actualHits != expectedHits) { - throw new RuntimeException("actualHits=" + actualHits + ", expectedHits=" + expectedHits); - } - } - - private long count(QueryBuilder queryBuilder) { - return client().prepareSearch("index").setTypes("type") - .setQuery(queryBuilder) - .execute().actionGet().getHits().getTotalHits(); - } -} diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/package-info.java b/common/src/test/java/org/xbib/elasticsearch/client/common/package-info.java deleted file mode 100644 index af3209f..0000000 --- a/common/src/test/java/org/xbib/elasticsearch/client/common/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** - * Classes to test Elasticsearch clients. 
- */ -package org.xbib.elasticsearch.client.common; diff --git a/elx-api/build.gradle b/elx-api/build.gradle new file mode 100644 index 0000000..6ef61b9 --- /dev/null +++ b/elx-api/build.gradle @@ -0,0 +1,4 @@ +dependencies { + compile "org.xbib:metrics:${project.property('xbib-metrics.version')}" + compile "org.xbib.elasticsearch:elasticsearch:${rootProject.property('elasticsearch-server.version')}" +} \ No newline at end of file diff --git a/elx-api/src/main/java/org/xbib/elx/api/BulkController.java b/elx-api/src/main/java/org/xbib/elx/api/BulkController.java new file mode 100644 index 0000000..69906ca --- /dev/null +++ b/elx-api/src/main/java/org/xbib/elx/api/BulkController.java @@ -0,0 +1,36 @@ +package org.xbib.elx.api; + +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.settings.Settings; + +import java.io.Closeable; +import java.io.Flushable; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +public interface BulkController extends Closeable, Flushable { + + void init(Settings settings); + + Throwable getLastBulkError(); + + void startBulkMode(IndexDefinition indexDefinition) throws IOException; + + void startBulkMode(String indexName, long startRefreshIntervalInSeconds, + long stopRefreshIntervalInSeconds) throws IOException; + + void index(IndexRequest indexRequest); + + void delete(DeleteRequest deleteRequest); + + void update(UpdateRequest updateRequest); + + boolean waitForResponses(long timeout, TimeUnit timeUnit); + + void stopBulkMode(IndexDefinition indexDefinition) throws IOException; + + void stopBulkMode(String index, long timeout, TimeUnit timeUnit) throws IOException; + +} diff --git a/common/src/main/java/org/xbib/elasticsearch/client/BulkMetric.java b/elx-api/src/main/java/org/xbib/elx/api/BulkMetric.java similarity index 64% rename from 
common/src/main/java/org/xbib/elasticsearch/client/BulkMetric.java rename to elx-api/src/main/java/org/xbib/elx/api/BulkMetric.java index 8ed03bb..3a406fb 100644 --- a/common/src/main/java/org/xbib/elasticsearch/client/BulkMetric.java +++ b/elx-api/src/main/java/org/xbib/elx/api/BulkMetric.java @@ -1,9 +1,14 @@ -package org.xbib.elasticsearch.client; +package org.xbib.elx.api; +import org.elasticsearch.common.settings.Settings; import org.xbib.metrics.Count; import org.xbib.metrics.Metered; -public interface BulkMetric { +import java.io.Closeable; + +public interface BulkMetric extends Closeable { + + void init(Settings settings); Metered getTotalIngest(); @@ -19,9 +24,9 @@ public interface BulkMetric { Count getFailed(); + long elapsed(); + void start(); void stop(); - - long elapsed(); } diff --git a/elx-api/src/main/java/org/xbib/elx/api/BulkProcessor.java b/elx-api/src/main/java/org/xbib/elx/api/BulkProcessor.java new file mode 100644 index 0000000..4d38d1c --- /dev/null +++ b/elx-api/src/main/java/org/xbib/elx/api/BulkProcessor.java @@ -0,0 +1,64 @@ +package org.xbib.elx.api; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; + +import java.io.Closeable; +import java.io.Flushable; +import java.util.concurrent.TimeUnit; + +public interface BulkProcessor extends Closeable, Flushable { + + BulkProcessor add(ActionRequest request); + + BulkProcessor add(ActionRequest request, Object payload); + + boolean awaitFlush(long timeout, TimeUnit unit) throws InterruptedException; + + boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException; + + interface BulkRequestHandler { + + void execute(BulkRequest bulkRequest, long executionId); + + boolean close(long timeout, TimeUnit unit) throws InterruptedException; + + } + + /** + * A listener for the execution. + */ + public interface Listener { + + /** + * Callback before the bulk is executed. 
+ * + * @param executionId execution ID + * @param request request + */ + void beforeBulk(long executionId, BulkRequest request); + + /** + * Callback after a successful execution of bulk request. + * + * @param executionId execution ID + * @param request request + * @param response response + */ + void afterBulk(long executionId, BulkRequest request, BulkResponse response); + + /** + * Callback after a failed execution of bulk request. + * + * Note that in case an instance of InterruptedException is passed, which means that request + * processing has been + * cancelled externally, the thread's interruption status has been restored prior to calling this method. + * + * @param executionId execution ID + * @param request request + * @param failure failure + */ + void afterBulk(long executionId, BulkRequest request, Throwable failure); + } +} diff --git a/elx-api/src/main/java/org/xbib/elx/api/ExtendedClient.java b/elx-api/src/main/java/org/xbib/elx/api/ExtendedClient.java new file mode 100644 index 0000000..e08f90a --- /dev/null +++ b/elx-api/src/main/java/org/xbib/elx/api/ExtendedClient.java @@ -0,0 +1,480 @@ +package org.xbib.elx.api; + +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; + +import java.io.Closeable; +import java.io.Flushable; +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Interface for extended managing and indexing methods of an Elasticsearch client. + */ +public interface ExtendedClient extends Flushable, Closeable { + + /** + * Set an Elasticsearch client to extend from it. May be null for TransportClient. 
+ * @param client client + * @return this client + */ + ExtendedClient setClient(ElasticsearchClient client); + + /** + * Return Elasticsearch client. + * + * @return Elasticsearch client + */ + ElasticsearchClient getClient(); + + /** + * Get bulk metric. + * @return the bulk metric + */ + BulkMetric getBulkMetric(); + + /** + * Get bulk controller. + * @return the bulk control + */ + BulkController getBulkController(); + + /** + * Initialize the extended client, the bulk metric and bulk controller, + * creates instances and connect to cluster, if required. + * + * @param settings settings + * @return this client + * @throws IOException if init fails + */ + ExtendedClient init(Settings settings) throws IOException; + + /** + * Build index definition from settings. + * + * @param index the index name + * @param settings the settings for the index + * @return index definition + * @throws IOException if settings/mapping URL is invalid/malformed + */ + IndexDefinition buildIndexDefinitionFromSettings(String index, Settings settings) throws IOException; + + /** + * Add index request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when limits are exceeded. + * + * @param index the index + * @param id the id + * @param create true if document must be created + * @param source the source + * @return this + */ + ExtendedClient index(String index, String id, boolean create, BytesReference source); + + /** + * Index request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when limits are exceeded. + * + * @param index the index + * @param id the id + * @param create true if document is to be created, false otherwise + * @param source the source + * @return this client + */ + ExtendedClient index(String index, String id, boolean create, String source); + + /** + * Index request. Each request will be added to a queue for bulking requests. 
+ * Submitting request will be done when bulk limits are exceeded. + * + * @param indexRequest the index request to add + * @return this + */ + ExtendedClient index(IndexRequest indexRequest); + + /** + * Delete request. + * + * @param index the index + * @param id the id + * @return this + */ + ExtendedClient delete(String index, String id); + + /** + * Delete request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * + * @param deleteRequest the delete request to add + * @return this + */ + ExtendedClient delete(DeleteRequest deleteRequest); + + /** + * Bulked update request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * Note that updates only work correctly when all operations between nodes are synchronized. + * + * @param index the index + * @param id the id + * @param source the source + * @return this + */ + ExtendedClient update(String index, String id, BytesReference source); + + /** + * Update document. Use with precaution! Does not work in all cases. + * + * @param index the index + * @param id the id + * @param source the source + * @return this + */ + ExtendedClient update(String index, String id, String source); + + /** + * Bulked update request. Each request will be added to a queue for bulking requests. + * Submitting request will be done when bulk limits are exceeded. + * Note that updates only work correctly when all operations between nodes are synchronized. + * + * @param updateRequest the update request to add + * @return this + */ + ExtendedClient update(UpdateRequest updateRequest); + + /** + * Create a new index. + * + * @param index index + * @return this + * @throws IOException if new index creation fails + */ + ExtendedClient newIndex(String index) throws IOException; + + /** + * Create a new index. 
+ * + * @param index index + * @param settings settings + * @param mapping mapping + * @return this + * @throws IOException if settings/mapping is invalid or index creation fails + */ + ExtendedClient newIndex(String index, InputStream settings, InputStream mapping) throws IOException; + + /** + * Create a new index. + * + * @param index index + * @param settings settings + * @return this + * @throws IOException if settings is invalid or index creation fails + */ + ExtendedClient newIndex(String index, Settings settings) throws IOException; + + /** + * Create a new index. + * + * @param index index + * @param settings settings + * @param mapping mapping + * @return this + * @throws IOException if settings/mapping is invalid or index creation fails + */ + ExtendedClient newIndex(String index, Settings settings, String mapping) throws IOException; + + /** + * Create a new index. + * + * @param index index + * @param settings settings + * @param mapping mapping + * @return this + * @throws IOException if settings/mapping is invalid or index creation fails + */ + ExtendedClient newIndex(String index, Settings settings, Map mapping) throws IOException; + + /** + * Create a new index. + * @param indexDefinition the index definition + * @return this + * @throws IOException if settings/mapping is invalid or index creation fails + */ + ExtendedClient newIndex(IndexDefinition indexDefinition) throws IOException; + + /** + * Delete an index. + * @param indexDefinition the index definition + * @return this + */ + ExtendedClient deleteIndex(IndexDefinition indexDefinition); + + /** + * Delete an index. + * + * @param index index + * @return this + */ + ExtendedClient deleteIndex(String index); + + /** + * Start bulk mode for indexes. + * @param indexDefinition index definition + * @return this + * @throws IOException if bulk could not be started + */ + ExtendedClient startBulk(IndexDefinition indexDefinition) throws IOException; + + /** + * Start bulk mode. 
+ * + * @param index index + * @param startRefreshIntervalSeconds refresh interval before bulk + * @param stopRefreshIntervalSeconds refresh interval after bulk + * @return this + * @throws IOException if bulk could not be started + */ + ExtendedClient startBulk(String index, long startRefreshIntervalSeconds, + long stopRefreshIntervalSeconds) throws IOException; + + /** + * Stop bulk mode. + * + * @param indexDefinition index definition + * @return this + * @throws IOException if bulk could not be started + */ + ExtendedClient stopBulk(IndexDefinition indexDefinition) throws IOException; + + /** + * Stops bulk mode. + * + * @param index index + * @param timeout maximum wait time + * @param timeUnit time unit for timeout + * @return this + * @throws IOException if bulk could not be stopped + */ + ExtendedClient stopBulk(String index, long timeout, TimeUnit timeUnit) throws IOException; + + /** + * Update replica level. + * @param indexDefinition the index definition + * @param level the replica level + * @return this + * @throws IOException if replica setting could not be updated + */ + ExtendedClient updateReplicaLevel(IndexDefinition indexDefinition, int level) throws IOException; + + /** + * Update replica level. + * + * @param index index + * @param level the replica level + * @param maxWaitTime maximum wait time + * @param timeUnit time unit + * @return this + * @throws IOException if replica setting could not be updated + */ + ExtendedClient updateReplicaLevel(String index, int level, long maxWaitTime, TimeUnit timeUnit) throws IOException; + + /** + * Get replica level. + * @param indexDefinition the index definition + * @return the replica level of the index + */ + int getReplicaLevel(IndexDefinition indexDefinition); + + /** + * Get replica level. + * @param index the index name + * @return the replica level of the index + */ + int getReplicaLevel(String index); + + /** + * Refresh the index. 
+ * + * @param index index + * @return this + */ + ExtendedClient refreshIndex(String index); + + /** + * Flush the index. The cluster clears cache and completes indexing. + * + * @param index index + * @return this + */ + ExtendedClient flushIndex(String index); + + /** + * Force segment merge of an index. + * @param indexDefinition the index definition + * @return true if the force merge succeeded, false otherwise + */ + boolean forceMerge(IndexDefinition indexDefinition); + + /** + * Force segment merge of an index. + * @param index the index + * @param maxWaitTime maximum wait time + * @param timeUnit time unit + * @return true if the force merge succeeded, false otherwise + */ + boolean forceMerge(String index, long maxWaitTime, TimeUnit timeUnit); + + /** + * Wait for all outstanding bulk responses. + * + * @param timeout maximum wait time + * @param timeUnit unit of timeout value + * @return true if wait succeeded, false if wait timed out + */ + boolean waitForResponses(long timeout, TimeUnit timeUnit); + + /** + * Wait for cluster being healthy. + * + * @param healthColor cluster health color to wait for + * @param maxWaitTime time value + * @param timeUnit time unit + * @return true if wait succeeded, false if wait timed out + */ + boolean waitForCluster(String healthColor, long maxWaitTime, TimeUnit timeUnit); + + /** + * Get current health color. + * + * @param maxWaitTime maximum wait time + * @param timeUnit time unit + * @return the cluster health color + */ + String getHealthColor(long maxWaitTime, TimeUnit timeUnit); + + /** + * Wait for index recovery (after replica change). + * + * @param index index + * @param maxWaitTime maximum wait time + * @param timeUnit time unit + * @return true if wait succeeded, false if wait timed out + */ + boolean waitForRecovery(String index, long maxWaitTime, TimeUnit timeUnit); + + /** + * Update index setting. 
+ * @param index the index + * @param key the key of the value to be updated + * @param value the new value + * @param timeout timeout + * @param timeUnit time unit + * @throws IOException if update index setting failed + */ + void updateIndexSetting(String index, String key, Object value, long timeout, TimeUnit timeUnit) throws IOException; + + /** + * Resolve alias. + * + * @param alias the alias + * @return this index name behind the alias or the alias if there is no index + */ + String resolveAlias(String alias); + + /** + * Resolve alias to all connected indices, sort index names with most recent timestamp on top, return this index + * name. + * + * @param alias the alias + * @return the most recent index name pointing to the alias + */ + String resolveMostRecentIndex(String alias); + + /** + * Get all index filters. + * @param index the index + * @return map of index filters + */ + Map getAliases(String index); + + /** + * Shift from one index to another. + * @param indexDefinition the index definition + * @param additionalAliases new aliases + * @return this + */ + IndexShiftResult shiftIndex(IndexDefinition indexDefinition, List additionalAliases); + + /** + * Shift from one index to another. + * @param indexDefinition the index definition + * @param additionalAliases new aliases + * @param indexAliasAdder method to add aliases + * @return this + */ + IndexShiftResult shiftIndex(IndexDefinition indexDefinition, List additionalAliases, + IndexAliasAdder indexAliasAdder); + + /** + * Shift from one index to another. + * @param index the index name + * @param fullIndexName the index name with timestamp + * @param additionalAliases a list of names that should be set as index aliases + * @return this + */ + IndexShiftResult shiftIndex(String index, String fullIndexName, List additionalAliases); + + /** + * Shift from one index to another. 
+ * @param index the index name + * @param fullIndexName the index name with timestamp + * @param additionalAliases a list of names that should be set as index aliases + * @param adder an adder method to create alias term queries + * @return this + */ + IndexShiftResult shiftIndex(String index, String fullIndexName, List additionalAliases, + IndexAliasAdder adder); + + /** + * Prune index. + * @param indexDefinition the index definition + * @return the index prune result + */ + IndexPruneResult pruneIndex(IndexDefinition indexDefinition); + + /** + * Apply retention policy to prune indices. All indices before delta should be deleted, + * but the number of mintokeep indices must be kept. + * + * @param index index name + * @param fullIndexName index name with timestamp + * @param delta timestamp delta (for index timestamps) + * @param mintokeep minimum number of indices to keep + * @param perform true if pruning should be executed, false if not + * @return the index prune result + */ + IndexPruneResult pruneIndex(String index, String fullIndexName, int delta, int mintokeep, boolean perform); + + /** + * Find the timestamp of the most recently indexed document in the index. + * + * @param index the index name + * @param timestampfieldname the timestamp field name + * @return millis UTC millis of the most recent document + * @throws IOException if the most recent document cannot be found + */ + Long mostRecentDocument(String index, String timestampfieldname) throws IOException; + + /** + * Get cluster name. 
+ * @return the cluster name + */ + String getClusterName(); +} diff --git a/elx-api/src/main/java/org/xbib/elx/api/ExtendedClientProvider.java b/elx-api/src/main/java/org/xbib/elx/api/ExtendedClientProvider.java new file mode 100644 index 0000000..2a8904a --- /dev/null +++ b/elx-api/src/main/java/org/xbib/elx/api/ExtendedClientProvider.java @@ -0,0 +1,7 @@ +package org.xbib.elx.api; + +@FunctionalInterface +public interface ExtendedClientProvider { + + C getExtendedClient(); +} diff --git a/elx-api/src/main/java/org/xbib/elx/api/IndexAliasAdder.java b/elx-api/src/main/java/org/xbib/elx/api/IndexAliasAdder.java new file mode 100644 index 0000000..03dd6e6 --- /dev/null +++ b/elx-api/src/main/java/org/xbib/elx/api/IndexAliasAdder.java @@ -0,0 +1,9 @@ +package org.xbib.elx.api; + +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; + +@FunctionalInterface +public interface IndexAliasAdder { + + void addIndexAlias(IndicesAliasesRequest request, String index, String alias); +} diff --git a/elx-api/src/main/java/org/xbib/elx/api/IndexDefinition.java b/elx-api/src/main/java/org/xbib/elx/api/IndexDefinition.java new file mode 100644 index 0000000..49544a7 --- /dev/null +++ b/elx-api/src/main/java/org/xbib/elx/api/IndexDefinition.java @@ -0,0 +1,70 @@ +package org.xbib.elx.api; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.concurrent.TimeUnit; + +public interface IndexDefinition { + + IndexDefinition setIndex(String index); + + String getIndex(); + + IndexDefinition setFullIndexName(String fullIndexName); + + String getFullIndexName(); + + IndexDefinition setSettingsUrl(String settingsUrlString) throws MalformedURLException; + + IndexDefinition setSettingsUrl(URL settingsUrl); + + URL getSettingsUrl(); + + IndexDefinition setMappingsUrl(String mappingsUrlString) throws MalformedURLException; + + IndexDefinition setMappingsUrl(URL mappingsUrl); + + URL getMappingsUrl(); + + IndexDefinition setDateTimePattern(String 
timeWindow); + + String getDateTimePattern(); + + IndexDefinition setEnabled(boolean enabled); + + boolean isEnabled(); + + IndexDefinition setIgnoreErrors(boolean ignoreErrors); + + boolean ignoreErrors(); + + IndexDefinition setShift(boolean shift); + + boolean isShiftEnabled(); + + IndexDefinition setForceMerge(boolean hasForceMerge); + + boolean hasForceMerge(); + + IndexDefinition setReplicaLevel(int replicaLevel); + + int getReplicaLevel(); + + IndexDefinition setRetention(IndexRetention indexRetention); + + IndexRetention getRetention(); + + IndexDefinition setMaxWaitTime(long maxWaitTime, TimeUnit timeUnit); + + long getMaxWaitTime(); + + TimeUnit getMaxWaitTimeUnit(); + + IndexDefinition setStartRefreshInterval(long seconds); + + long getStartRefreshInterval(); + + IndexDefinition setStopRefreshInterval(long seconds); + + long getStopRefreshInterval(); +} diff --git a/elx-api/src/main/java/org/xbib/elx/api/IndexPruneResult.java b/elx-api/src/main/java/org/xbib/elx/api/IndexPruneResult.java new file mode 100644 index 0000000..0c118f8 --- /dev/null +++ b/elx-api/src/main/java/org/xbib/elx/api/IndexPruneResult.java @@ -0,0 +1,16 @@ +package org.xbib.elx.api; + +import java.util.List; + +public interface IndexPruneResult { + + enum State { NOTHING_TO_DO, SUCCESS, NONE }; + + State getState(); + + List getCandidateIndices(); + + List getDeletedIndices(); + + boolean isAcknowledged(); +} diff --git a/elx-api/src/main/java/org/xbib/elx/api/IndexRetention.java b/elx-api/src/main/java/org/xbib/elx/api/IndexRetention.java new file mode 100644 index 0000000..44116e2 --- /dev/null +++ b/elx-api/src/main/java/org/xbib/elx/api/IndexRetention.java @@ -0,0 +1,13 @@ +package org.xbib.elx.api; + +public interface IndexRetention { + + IndexRetention setDelta(int delta); + + int getDelta(); + + IndexRetention setMinToKeep(int minToKeep); + + int getMinToKeep(); + +} diff --git a/elx-api/src/main/java/org/xbib/elx/api/IndexShiftResult.java 
b/elx-api/src/main/java/org/xbib/elx/api/IndexShiftResult.java new file mode 100644 index 0000000..02a2e8c --- /dev/null +++ b/elx-api/src/main/java/org/xbib/elx/api/IndexShiftResult.java @@ -0,0 +1,10 @@ +package org.xbib.elx.api; + +import java.util.List; + +public interface IndexShiftResult { + + List getMovedAliases(); + + List getNewAliases(); +} diff --git a/elx-api/src/main/java/org/xbib/elx/api/package-info.java b/elx-api/src/main/java/org/xbib/elx/api/package-info.java new file mode 100644 index 0000000..03fd0e3 --- /dev/null +++ b/elx-api/src/main/java/org/xbib/elx/api/package-info.java @@ -0,0 +1,4 @@ +/** + * The API of the extended Elasticsearch clients. + */ +package org.xbib.elx.api; diff --git a/elx-common/build.gradle b/elx-common/build.gradle new file mode 100644 index 0000000..4336a23 --- /dev/null +++ b/elx-common/build.gradle @@ -0,0 +1,5 @@ +dependencies{ + compile project(':elx-api') + testCompile "org.xbib.elasticsearch:elasticsearch-analysis-common:${rootProject.property('elasticsearch-server.version')}" + testCompile "org.xbib.elasticsearch:transport-netty4:${rootProject.property('elasticsearch-server.version')}" +} diff --git a/elx-common/src/main/java/org/xbib/elx/common/AbstractExtendedClient.java b/elx-common/src/main/java/org/xbib/elx/common/AbstractExtendedClient.java new file mode 100644 index 0000000..78cdce0 --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/AbstractExtendedClient.java @@ -0,0 +1,1097 @@ +package org.xbib.elx.common; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import 
org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.flush.FlushAction; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; +import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; 
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import 
org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.xbib.elx.api.BulkController; +import org.xbib.elx.api.BulkMetric; +import org.xbib.elx.api.ExtendedClient; +import org.xbib.elx.api.IndexAliasAdder; +import org.xbib.elx.api.IndexDefinition; +import org.xbib.elx.api.IndexPruneResult; +import org.xbib.elx.api.IndexRetention; +import org.xbib.elx.api.IndexShiftResult; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.time.LocalDate; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +public abstract class AbstractExtendedClient implements ExtendedClient { + + private static final Logger logger = LogManager.getLogger(AbstractExtendedClient.class.getName()); + + /** + * The one and only index type name used in the extended client. + * Note that all Elasticsearch versions < 6.2.0 do not allow a prepended "_". + */ + private static final String TYPE_NAME = "doc"; + + /** + * The Elasticsearch client. 
+ */ + private ElasticsearchClient client; + + private BulkMetric bulkMetric; + + private BulkController bulkController; + + private AtomicBoolean closed; + + private static final IndexShiftResult EMPTY_INDEX_SHIFT_RESULT = new IndexShiftResult() { + @Override + public List getMovedAliases() { + return Collections.emptyList(); + } + + @Override + public List getNewAliases() { + return Collections.emptyList(); + } + }; + + private static final IndexPruneResult EMPTY_INDEX_PRUNE_RESULT = new IndexPruneResult() { + @Override + public State getState() { + return State.NONE; + } + + @Override + public List getCandidateIndices() { + return Collections.emptyList(); + } + + @Override + public List getDeletedIndices() { + return Collections.emptyList(); + } + + @Override + public boolean isAcknowledged() { + return false; + } + }; + + protected abstract ElasticsearchClient createClient(Settings settings) throws IOException; + + protected abstract void closeClient() throws IOException; + + protected AbstractExtendedClient() { + closed = new AtomicBoolean(false); + } + + @Override + public AbstractExtendedClient setClient(ElasticsearchClient client) { + this.client = client; + return this; + } + + @Override + public ElasticsearchClient getClient() { + return client; + } + + @Override + public BulkMetric getBulkMetric() { + return bulkMetric; + } + + @Override + public BulkController getBulkController() { + return bulkController; + } + + @Override + public AbstractExtendedClient init(Settings settings) throws IOException { + if (client == null) { + client = createClient(settings); + } + if (bulkMetric == null) { + this.bulkMetric = new DefaultBulkMetric(); + this.bulkMetric.init(settings); + } + if (bulkController == null) { + this.bulkController = new DefaultBulkController(this, bulkMetric); + this.bulkController.init(settings); + } + return this; + } + + @Override + public void flush() throws IOException { + if (bulkController != null) { + bulkController.flush(); + } + } + + 
@Override + public void close() throws IOException { + ensureActive(); + if (closed.compareAndSet(false, true)) { + if (bulkMetric != null) { + logger.info("closing bulk metric"); + bulkMetric.close(); + } + if (bulkController != null) { + logger.info("closing bulk controller"); + bulkController.close(); + } + closeClient(); + } + } + + @Override + public String getClusterName() { + ensureActive(); + try { + ClusterStateRequest clusterStateRequest = new ClusterStateRequest().all(); + ClusterStateResponse clusterStateResponse = + client.execute(ClusterStateAction.INSTANCE, clusterStateRequest).actionGet(); + return clusterStateResponse.getClusterName().value(); + } catch (ElasticsearchTimeoutException e) { + logger.warn(e.getMessage(), e); + return "TIMEOUT"; + } catch (NoNodeAvailableException e) { + logger.warn(e.getMessage(), e); + return "DISCONNECTED"; + } catch (Exception e) { + logger.warn(e.getMessage(), e); + return "[" + e.getMessage() + "]"; + } + } + + @Override + public ExtendedClient newIndex(IndexDefinition indexDefinition) throws IOException { + ensureActive(); + waitForCluster("YELLOW", 30L, TimeUnit.SECONDS); + URL indexSettings = indexDefinition.getSettingsUrl(); + if (indexSettings == null) { + logger.warn("warning while creating index '{}', no settings/mappings", + indexDefinition.getFullIndexName()); + newIndex(indexDefinition.getFullIndexName()); + return this; + } + URL indexMappings = indexDefinition.getMappingsUrl(); + if (indexMappings == null) { + logger.warn("warning while creating index '{}', no mappings", + indexDefinition.getFullIndexName()); + newIndex(indexDefinition.getFullIndexName(), indexSettings.openStream(), null); + return this; + } + try (InputStream indexSettingsInput = indexSettings.openStream(); + InputStream indexMappingsInput = indexMappings.openStream()) { + newIndex(indexDefinition.getFullIndexName(), indexSettingsInput, indexMappingsInput); + } catch (IOException e) { + if (indexDefinition.ignoreErrors()) { + 
logger.warn(e.getMessage(), e); + logger.warn("warning while creating index '{}' with settings at {} and mappings at {}", + indexDefinition.getFullIndexName(), indexSettings, indexMappings); + } else { + logger.error("error while creating index '{}' with settings at {} and mappings at {}", + indexDefinition.getFullIndexName(), indexSettings, indexMappings); + throw new IOException(e); + } + } + return this; + } + + @Override + public ExtendedClient newIndex(String index) throws IOException { + return newIndex(index, Settings.EMPTY, (Map) null); + } + + @Override + public ExtendedClient newIndex(String index, InputStream settings, InputStream mapping) throws IOException { + return newIndex(index, + Settings.builder().loadFromStream(".json", settings, true).build(), + mapping != null ? JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, mapping).mapOrdered() : null); + } + + @Override + public ExtendedClient newIndex(String index, Settings settings) throws IOException { + return newIndex(index, settings, (Map) null); + } + + @Override + public ExtendedClient newIndex(String index, Settings settings, String mapping) throws IOException { + return newIndex(index, settings, + mapping != null ? 
JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, mapping).mapOrdered() : null); + } + + @Override + public ExtendedClient newIndex(String index, Settings settings, Map mapping) throws IOException { + ensureActive(); + if (index == null) { + logger.warn("no index name given to create index"); + return this; + } + CreateIndexRequest createIndexRequest = new CreateIndexRequest().index(index); + if (settings != null) { + createIndexRequest.settings(settings); + } + if (mapping != null) { + createIndexRequest.mapping(TYPE_NAME, mapping); + } + CreateIndexResponse createIndexResponse = client.execute(CreateIndexAction.INSTANCE, createIndexRequest).actionGet(); + XContentBuilder builder = XContentFactory.jsonBuilder(); + logger.info("index {} created: {}", index, + Strings.toString(createIndexResponse.toXContent(builder, ToXContent.EMPTY_PARAMS))); + return this; + } + + @Override + public ExtendedClient deleteIndex(IndexDefinition indexDefinition) { + return deleteIndex(indexDefinition.getFullIndexName()); + } + + @Override + public ExtendedClient deleteIndex(String index) { + ensureActive(); + if (index == null) { + logger.warn("no index name given to delete index"); + return this; + } + DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest().indices(index); + client.execute(DeleteIndexAction.INSTANCE, deleteIndexRequest).actionGet(); + return this; + } + + @Override + public ExtendedClient startBulk(IndexDefinition indexDefinition) throws IOException { + startBulk(indexDefinition.getFullIndexName(), -1, 1); + return this; + } + + @Override + public ExtendedClient startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds) + throws IOException { + if (bulkController != null) { + ensureActive(); + bulkController.startBulkMode(index, startRefreshIntervalSeconds, stopRefreshIntervalSeconds); + } + return this; + } + + @Override + public ExtendedClient 
stopBulk(IndexDefinition indexDefinition) throws IOException { + if (bulkController != null) { + ensureActive(); + bulkController.stopBulkMode(indexDefinition); + } + return this; + } + + @Override + public ExtendedClient stopBulk(String index, long timeout, TimeUnit timeUnit) throws IOException { + if (bulkController != null) { + ensureActive(); + bulkController.stopBulkMode(index, timeout, timeUnit); + } + return this; + } + + @Override + public ExtendedClient index(String index, String id, boolean create, String source) { + return index(new IndexRequest(index, TYPE_NAME, id).create(create) + .source(source.getBytes(StandardCharsets.UTF_8), XContentType.JSON)); + } + + @Override + public ExtendedClient index(String index, String id, boolean create, BytesReference source) { + return index(new IndexRequest(index, TYPE_NAME, id).create(create) + .source(source, XContentType.JSON)); + } + + @Override + public ExtendedClient index(IndexRequest indexRequest) { + ensureActive(); + bulkController.index(indexRequest); + return this; + } + + @Override + public ExtendedClient delete(String index, String id) { + return delete(new DeleteRequest(index, TYPE_NAME, id)); + } + + @Override + public ExtendedClient delete(DeleteRequest deleteRequest) { + ensureActive(); + bulkController.delete(deleteRequest); + return this; + } + + @Override + public ExtendedClient update(String index, String id, BytesReference source) { + return update(new UpdateRequest(index, TYPE_NAME, id) + .doc(source, XContentType.JSON)); + } + + @Override + public ExtendedClient update(String index, String id, String source) { + return update(new UpdateRequest(index, TYPE_NAME, id) + .doc(source.getBytes(StandardCharsets.UTF_8), XContentType.JSON)); + } + + @Override + public ExtendedClient update(UpdateRequest updateRequest) { + ensureActive(); + bulkController.update(updateRequest); + return this; + } + + @Override + public boolean waitForResponses(long timeout, TimeUnit timeUnit) { + ensureActive(); + 
return bulkController.waitForResponses(timeout, timeUnit); + } + + @Override + public boolean waitForRecovery(String index, long maxWaitTime, TimeUnit timeUnit) { + ensureActive(); + ensureIndexGiven(index); + RecoveryRequest recoveryRequest = new RecoveryRequest(); + recoveryRequest.indices(index); + recoveryRequest.activeOnly(true); + RecoveryResponse response = client.execute(RecoveryAction.INSTANCE, recoveryRequest).actionGet(); + int shards = response.getTotalShards(); + TimeValue timeout = toTimeValue(maxWaitTime, timeUnit); + ClusterHealthRequest clusterHealthRequest = new ClusterHealthRequest() + .indices(index) + .waitForActiveShards(shards).timeout(timeout); + ClusterHealthResponse healthResponse = + client.execute(ClusterHealthAction.INSTANCE, clusterHealthRequest).actionGet(); + if (healthResponse != null && healthResponse.isTimedOut()) { + logger.error("timeout waiting for recovery"); + return false; + } + return true; + } + + @Override + public boolean waitForCluster(String statusString, long maxWaitTime, TimeUnit timeUnit) { + ensureActive(); + ClusterHealthStatus status = ClusterHealthStatus.fromString(statusString); + TimeValue timeout = toTimeValue(maxWaitTime, timeUnit); + ClusterHealthResponse healthResponse = client.execute(ClusterHealthAction.INSTANCE, + new ClusterHealthRequest().timeout(timeout).waitForStatus(status)).actionGet(); + if (healthResponse != null && healthResponse.isTimedOut()) { + if (logger.isErrorEnabled()) { + logger.error("timeout, cluster state is " + healthResponse.getStatus().name() + " and not " + status.name()); + } + return false; + } + return true; + } + + @Override + public String getHealthColor(long maxWaitTime, TimeUnit timeUnit) { + ensureActive(); + try { + TimeValue timeout = toTimeValue(maxWaitTime, timeUnit); + ClusterHealthResponse healthResponse = client.execute(ClusterHealthAction.INSTANCE, + new ClusterHealthRequest().timeout(timeout)).actionGet(); + ClusterHealthStatus status = 
healthResponse.getStatus(); + return status.name(); + } catch (ElasticsearchTimeoutException e) { + logger.warn(e.getMessage(), e); + return "TIMEOUT"; + } catch (NoNodeAvailableException e) { + logger.warn(e.getMessage(), e); + return "DISCONNECTED"; + } catch (Exception e) { + logger.warn(e.getMessage(), e); + return "[" + e.getMessage() + "]"; + } + } + + @Override + public ExtendedClient updateReplicaLevel(IndexDefinition indexDefinition, int level) throws IOException { + return updateReplicaLevel(indexDefinition.getFullIndexName(), level, + indexDefinition.getMaxWaitTime(), indexDefinition.getMaxWaitTimeUnit()); + } + + @Override + public ExtendedClient updateReplicaLevel(String index, int level, long maxWaitTime, TimeUnit timeUnit) throws IOException { + waitForCluster("YELLOW", maxWaitTime, timeUnit); // let cluster settle down from critical operations + if (level > 0) { + updateIndexSetting(index, "number_of_replicas", level, maxWaitTime, timeUnit); + waitForRecovery(index, maxWaitTime, timeUnit); + } + return this; + } + + @Override + public int getReplicaLevel(IndexDefinition indexDefinition) { + return getReplicaLevel(indexDefinition.getFullIndexName()); + } + + @Override + public int getReplicaLevel(String index) { + GetSettingsRequest request = new GetSettingsRequest().indices(index); + GetSettingsResponse response = client.execute(GetSettingsAction.INSTANCE, request).actionGet(); + int replica = -1; + for (ObjectObjectCursor cursor : response.getIndexToSettings()) { + Settings settings = cursor.value; + if (index.equals(cursor.key)) { + replica = settings.getAsInt("index.number_of_replicas", null); + } + } + return replica; + } + + @Override + public ExtendedClient flushIndex(String index) { + if (index != null) { + ensureActive(); + client.execute(FlushAction.INSTANCE, new FlushRequest(index)).actionGet(); + } + return this; + } + + @Override + public ExtendedClient refreshIndex(String index) { + if (index != null) { + ensureActive(); + 
client.execute(RefreshAction.INSTANCE, new RefreshRequest(index)).actionGet(); + } + return this; + } + + @Override + public String resolveMostRecentIndex(String alias) { + ensureActive(); + if (alias == null) { + return null; + } + GetAliasesRequest getAliasesRequest = new GetAliasesRequest().aliases(alias); + GetAliasesResponse getAliasesResponse = client.execute(GetAliasesAction.INSTANCE, getAliasesRequest).actionGet(); + Pattern pattern = Pattern.compile("^(.*?)(\\d+)$"); + Set indices = new TreeSet<>(Collections.reverseOrder()); + for (ObjectCursor indexName : getAliasesResponse.getAliases().keys()) { + Matcher m = pattern.matcher(indexName.value); + if (m.matches() && alias.equals(m.group(1))) { + indices.add(indexName.value); + } + } + return indices.isEmpty() ? alias : indices.iterator().next(); + } + + @Override + public Map getAliases(String index) { + if (index == null) { + return Collections.emptyMap(); + } + GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices(index); + return getFilters(client.execute(GetAliasesAction.INSTANCE, getAliasesRequest).actionGet()); + } + + @Override + public String resolveAlias(String alias) { + ensureActive(); + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.metaData(true); + ClusterStateResponse clusterStateResponse = + client.execute(ClusterStateAction.INSTANCE, clusterStateRequest).actionGet(); + SortedMap map = clusterStateResponse.getState().getMetaData().getAliasAndIndexLookup(); + AliasOrIndex aliasOrIndex = map.get(alias); + return aliasOrIndex != null ? 
aliasOrIndex.getIndices().iterator().next().getIndex().getName() : null; + } + + @Override + public IndexShiftResult shiftIndex(IndexDefinition indexDefinition, List additionalAliases) { + return shiftIndex(indexDefinition, additionalAliases, null); + } + + @Override + public IndexShiftResult shiftIndex(IndexDefinition indexDefinition, + List additionalAliases, IndexAliasAdder indexAliasAdder) { + if (additionalAliases == null) { + return EMPTY_INDEX_SHIFT_RESULT; + } + if (indexDefinition.isShiftEnabled()) { + return shiftIndex(indexDefinition.getIndex(), + indexDefinition.getFullIndexName(), additionalAliases.stream() + .filter(a -> a != null && !a.isEmpty()) + .collect(Collectors.toList()), indexAliasAdder); + } + return EMPTY_INDEX_SHIFT_RESULT; + } + + @Override + public IndexShiftResult shiftIndex(String index, String fullIndexName, List additionalAliases) { + return shiftIndex(index, fullIndexName, additionalAliases, null); + } + + @Override + public IndexShiftResult shiftIndex(String index, String fullIndexName, + List additionalAliases, IndexAliasAdder adder) { + ensureActive(); + if (index == null) { + return EMPTY_INDEX_SHIFT_RESULT; // nothing to shift to + } + if (index.equals(fullIndexName)) { + return EMPTY_INDEX_SHIFT_RESULT; // nothing to shift to + } + waitForCluster("YELLOW", 30L, TimeUnit.SECONDS); + // two situations: 1. a new alias 2. there is already an old index with the alias + String oldIndex = resolveAlias(index); + Map oldAliasMap = index.equals(oldIndex) ? 
null : getAliases(oldIndex); + logger.debug("old index = {} old alias map = {}", oldIndex, oldAliasMap); + final List newAliases = new ArrayList<>(); + final List moveAliases = new ArrayList<>(); + IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); + if (oldAliasMap == null || !oldAliasMap.containsKey(index)) { + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .index(fullIndexName).alias(index)); + newAliases.add(index); + } + // move existing aliases + if (oldAliasMap != null) { + for (Map.Entry entry : oldAliasMap.entrySet()) { + String alias = entry.getKey(); + String filter = entry.getValue(); + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.remove() + .indices(oldIndex).alias(alias)); + if (filter != null) { + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .index(fullIndexName).alias(alias).filter(filter)); + } else { + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .index(fullIndexName).alias(alias)); + } + moveAliases.add(alias); + } + } + // a list of aliases that should be added, check if new or old + if (additionalAliases != null) { + for (String additionalAlias : additionalAliases) { + if (oldAliasMap == null || !oldAliasMap.containsKey(additionalAlias)) { + // index alias adder only active on extra aliases, and if alias is new + if (adder != null) { + adder.addIndexAlias(indicesAliasesRequest, fullIndexName, additionalAlias); + } else { + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .index(fullIndexName).alias(additionalAlias)); + } + newAliases.add(additionalAlias); + } else { + String filter = oldAliasMap.get(additionalAlias); + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.remove() + .indices(oldIndex).alias(additionalAlias)); + if (filter != null) { + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() + 
.index(fullIndexName).alias(additionalAlias).filter(filter)); + } else { + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .index(fullIndexName).alias(additionalAlias)); + } + moveAliases.add(additionalAlias); + } + } + } + if (!indicesAliasesRequest.getAliasActions().isEmpty()) { + logger.debug("indices alias request = {}", indicesAliasesRequest.getAliasActions().toString()); + IndicesAliasesResponse indicesAliasesResponse = + client.execute(IndicesAliasesAction.INSTANCE, indicesAliasesRequest).actionGet(); + logger.debug("response isAcknowledged = {} isFragment = {}", + indicesAliasesResponse.isAcknowledged(), indicesAliasesResponse.isFragment()); + } + return new SuccessIndexShiftResult(moveAliases, newAliases); + } + + @Override + public IndexPruneResult pruneIndex(IndexDefinition indexDefinition) { + return pruneIndex(indexDefinition.getIndex(), indexDefinition.getFullIndexName(), + indexDefinition.getRetention().getDelta(), indexDefinition.getRetention().getMinToKeep(), true); + } + + @Override + public IndexPruneResult pruneIndex(String index, String fullIndexName, int delta, int mintokeep, boolean perform) { + if (delta == 0 && mintokeep == 0) { + return EMPTY_INDEX_PRUNE_RESULT; + } + if (index.equals(fullIndexName)) { + return EMPTY_INDEX_PRUNE_RESULT; + } + ensureActive(); + GetIndexRequestBuilder getIndexRequestBuilder = new GetIndexRequestBuilder(client, GetIndexAction.INSTANCE); + GetIndexResponse getIndexResponse = getIndexRequestBuilder.execute().actionGet(); + Pattern pattern = Pattern.compile("^(.*?)(\\d+)$"); + logger.info("{} indices", getIndexResponse.getIndices().length); + List candidateIndices = new ArrayList<>(); + for (String s : getIndexResponse.getIndices()) { + Matcher m = pattern.matcher(s); + if (m.matches() && index.equals(m.group(1)) && !s.equals(fullIndexName)) { + candidateIndices.add(s); + } + } + if (candidateIndices.isEmpty()) { + return EMPTY_INDEX_PRUNE_RESULT; + } + if (mintokeep > 0 && 
candidateIndices.size() <= mintokeep) { + return new NothingToDoPruneResult(candidateIndices, Collections.emptyList()); + } + List indicesToDelete = new ArrayList<>(); + Matcher m1 = pattern.matcher(fullIndexName); + if (m1.matches()) { + Integer i1 = Integer.parseInt(m1.group(2)); + for (String s : candidateIndices) { + Matcher m2 = pattern.matcher(s); + if (m2.matches()) { + Integer i2 = Integer.parseInt(m2.group(2)); + int kept = candidateIndices.size() - indicesToDelete.size(); + if ((delta == 0 || (delta > 0 && i1 - i2 > delta)) && mintokeep <= kept) { + indicesToDelete.add(s); + } + } + } + } + if (indicesToDelete.isEmpty()) { + return new NothingToDoPruneResult(candidateIndices, indicesToDelete); + } + String[] s = new String[indicesToDelete.size()]; + DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest() + .indices(indicesToDelete.toArray(s)); + DeleteIndexResponse response = client.execute(DeleteIndexAction.INSTANCE, deleteIndexRequest).actionGet(); + return new SuccessPruneResult(candidateIndices, indicesToDelete, response); + } + + @Override + public Long mostRecentDocument(String index, String timestampfieldname) { + ensureActive(); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE); + SortBuilder sort = SortBuilders.fieldSort(timestampfieldname).order(SortOrder.DESC); + SearchResponse searchResponse = searchRequestBuilder.setIndices(index) + .addStoredField(timestampfieldname) + .setSize(1) + .addSort(sort) + .execute().actionGet(); + if (searchResponse.getHits().getHits().length == 1) { + SearchHit hit = searchResponse.getHits().getHits()[0]; + if (hit.getFields().get(timestampfieldname) != null) { + return hit.getFields().get(timestampfieldname).getValue(); + } else { + return 0L; + } + } + return null; + } + + @Override + public boolean forceMerge(IndexDefinition indexDefinition) { + if (indexDefinition.hasForceMerge()) { + return forceMerge(indexDefinition.getFullIndexName(), 
indexDefinition.getMaxWaitTime(), + indexDefinition.getMaxWaitTimeUnit()); + } + return false; + } + + @Override + public boolean forceMerge(String index, long maxWaitTime, TimeUnit timeUnit) { + TimeValue timeout = toTimeValue(maxWaitTime, timeUnit); + ForceMergeRequest forceMergeRequest = new ForceMergeRequest(); + forceMergeRequest.indices(index); + try { + client.execute(ForceMergeAction.INSTANCE, forceMergeRequest).get(timeout.getMillis(), TimeUnit.MILLISECONDS); + return true; + } catch (TimeoutException e) { + logger.error("timeout"); + } catch (ExecutionException e) { + logger.error(e.getMessage(), e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.error(e.getMessage(), e); + } + return false; + } + + @Override + public IndexDefinition buildIndexDefinitionFromSettings(String index, Settings settings) + throws IOException { + boolean isEnabled = settings.getAsBoolean("enabled", !(client instanceof MockExtendedClient)); + String indexName = settings.get("name", index); + String fullIndexName; + String dateTimePattern = settings.get("dateTimePattern"); + if (dateTimePattern != null) { + // check if index name with current date already exists, resolve to it + fullIndexName = resolveAlias(indexName + DateTimeFormatter.ofPattern(dateTimePattern) + .withZone(ZoneId.systemDefault()) // not GMT + .format(LocalDate.now())); + } else { + // check if index name already exists, resolve to it + fullIndexName = resolveMostRecentIndex(indexName); + } + IndexRetention indexRetention = new DefaultIndexRetention() + .setMinToKeep(settings.getAsInt("retention.mintokeep", 0)) + .setDelta(settings.getAsInt("retention.delta", 0)); + return new DefaultIndexDefinition() + .setEnabled(isEnabled) + .setIndex(indexName) + .setFullIndexName(fullIndexName) + .setSettingsUrl(settings.get("settings")) + .setMappingsUrl(settings.get("mapping")) + .setDateTimePattern(dateTimePattern) + .setIgnoreErrors(settings.getAsBoolean("skiperrors", false)) + 
.setShift(settings.getAsBoolean("shift", true)) + .setReplicaLevel(settings.getAsInt("replica", 0)) + .setMaxWaitTime(settings.getAsLong("timeout", 30L), TimeUnit.SECONDS) + .setRetention(indexRetention) + .setStartRefreshInterval(settings.getAsLong("bulk.startrefreshinterval", -1L)) + .setStopRefreshInterval(settings.getAsLong("bulk.stoprefreshinterval", -1L)); + } + + @Override + public void updateIndexSetting(String index, String key, Object value, long timeout, TimeUnit timeUnit) throws IOException { + ensureActive(); + if (index == null) { + throw new IOException("no index name given"); + } + if (key == null) { + throw new IOException("no key given"); + } + if (value == null) { + throw new IOException("no value given"); + } + Settings.Builder updateSettingsBuilder = Settings.builder(); + updateSettingsBuilder.put(key, value.toString()); + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(index) + .settings(updateSettingsBuilder).timeout(toTimeValue(timeout, timeUnit)); + client.execute(UpdateSettingsAction.INSTANCE, updateSettingsRequest).actionGet(); + } + + private void ensureActive() { + if (this instanceof MockExtendedClient) { + return; + } + if (client == null) { + throw new IllegalStateException("no client"); + } + } + + private void ensureIndexGiven(String index) { + if (index == null) { + throw new IllegalArgumentException("no index given"); + } + } + + private Map getFilters(GetAliasesResponse getAliasesResponse) { + Map result = new HashMap<>(); + for (ObjectObjectCursor> object : getAliasesResponse.getAliases()) { + List aliasMetaDataList = object.value; + for (AliasMetaData aliasMetaData : aliasMetaDataList) { + if (aliasMetaData.filteringRequired()) { + result.put(aliasMetaData.alias(), + new String(aliasMetaData.getFilter().uncompressed(), StandardCharsets.UTF_8)); + } else { + result.put(aliasMetaData.alias(), null); + } + } + } + return result; + } + + public void checkMapping(String index) { + ensureActive(); + 
GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices(index); + GetMappingsResponse getMappingsResponse = client.execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet(); + ImmutableOpenMap> map = getMappingsResponse.getMappings(); + map.keys().forEach((Consumer>) stringObjectCursor -> { + ImmutableOpenMap mappings = map.get(stringObjectCursor.value); + for (ObjectObjectCursor cursor : mappings) { + String mappingName = cursor.key; + MappingMetaData mappingMetaData = cursor.value; + checkMapping(index, mappingName, mappingMetaData); + } + }); + } + + private void checkMapping(String index, String type, MappingMetaData mappingMetaData) { + try { + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE); + SearchResponse searchResponse = searchRequestBuilder.setSize(0) + .setIndices(index) + .setTypes(type) + .setQuery(QueryBuilders.matchAllQuery()) + .execute() + .actionGet(); + long total = searchResponse.getHits().getTotalHits(); + if (total > 0L) { + Map fields = new TreeMap<>(); + Map root = mappingMetaData.getSourceAsMap(); + checkMapping(index, type, "", "", root, fields); + AtomicInteger empty = new AtomicInteger(); + Map map = sortByValue(fields); + map.forEach((key, value) -> { + logger.info("{} {} {}", + key, + value, + (double) value * 100 / total); + if (value == 0) { + empty.incrementAndGet(); + } + }); + logger.info("index={} type={} numfields={} fieldsnotused={}", + index, type, map.size(), empty.get()); + } + } catch (Exception e) { + logger.error(e.getMessage(), e); + } + } + + @SuppressWarnings("unchecked") + private void checkMapping(String index, String type, + String pathDef, String fieldName, Map map, + Map fields) { + String path = pathDef; + if (!path.isEmpty() && !path.endsWith(".")) { + path = path + "."; + } + if (!"properties".equals(fieldName)) { + path = path + fieldName; + } + if (map.containsKey("index")) { + String mode = (String) map.get("index"); + if 
("no".equals(mode)) { + return; + } + } + for (Map.Entry entry : map.entrySet()) { + String key = entry.getKey(); + Object o = entry.getValue(); + if (o instanceof Map) { + Map child = (Map) o; + o = map.get("type"); + String fieldType = o instanceof String ? o.toString() : null; + // do not recurse into our custom field mapper + if (!"standardnumber".equals(fieldType) && !"ref".equals(fieldType)) { + checkMapping(index, type, path, key, child, fields); + } + } else if ("type".equals(key)) { + QueryBuilder filterBuilder = QueryBuilders.existsQuery(path); + QueryBuilder queryBuilder = QueryBuilders.constantScoreQuery(filterBuilder); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client, SearchAction.INSTANCE); + SearchResponse searchResponse = searchRequestBuilder.setSize(0) + .setIndices(index) + .setTypes(type) + .setQuery(queryBuilder) + .execute() + .actionGet(); + fields.put(path, searchResponse.getHits().getTotalHits()); + } + } + } + + private static > Map sortByValue(Map map) { + Map result = new LinkedHashMap<>(); + map.entrySet().stream().sorted(Comparator.comparing(Map.Entry::getValue)) + .forEachOrdered(e -> result.put(e.getKey(), e.getValue())); + return result; + } + + private static TimeValue toTimeValue(long timeValue, TimeUnit timeUnit) { + switch (timeUnit) { + case DAYS: + return TimeValue.timeValueHours(24 * timeValue); + case HOURS: + return TimeValue.timeValueHours(timeValue); + case MINUTES: + return TimeValue.timeValueMinutes(timeValue); + case SECONDS: + return TimeValue.timeValueSeconds(timeValue); + case MILLISECONDS: + return TimeValue.timeValueMillis(timeValue); + case MICROSECONDS: + return TimeValue.timeValueNanos(1000 * timeValue); + case NANOSECONDS: + return TimeValue.timeValueNanos(timeValue); + default: + throw new IllegalArgumentException("unknown time unit: " + timeUnit); + } + } + + private static class SuccessIndexShiftResult implements IndexShiftResult { + + List movedAliases; + + List newAliases; + + 
SuccessIndexShiftResult(List movedAliases, List newAliases) { + this.movedAliases = movedAliases; + this.newAliases = newAliases; + } + + @Override + public List getMovedAliases() { + return movedAliases; + } + + @Override + public List getNewAliases() { + return newAliases; + } + } + + private static class SuccessPruneResult implements IndexPruneResult { + + List candidateIndices; + + List indicesToDelete; + + DeleteIndexResponse response; + + SuccessPruneResult(List candidateIndices, List indicesToDelete, + DeleteIndexResponse response) { + this.candidateIndices = candidateIndices; + this.indicesToDelete = indicesToDelete; + this.response = response; + } + + @Override + public IndexPruneResult.State getState() { + return IndexPruneResult.State.SUCCESS; + } + + @Override + public List getCandidateIndices() { + return candidateIndices; + } + + @Override + public List getDeletedIndices() { + return indicesToDelete; + } + + @Override + public boolean isAcknowledged() { + return response.isAcknowledged(); + } + } + + private static class NothingToDoPruneResult implements IndexPruneResult { + + List candidateIndices; + + List indicesToDelete; + + NothingToDoPruneResult(List candidateIndices, List indicesToDelete) { + this.candidateIndices = candidateIndices; + this.indicesToDelete = indicesToDelete; + } + + @Override + public IndexPruneResult.State getState() { + return IndexPruneResult.State.SUCCESS; + } + + @Override + public List getCandidateIndices() { + return candidateIndices; + } + + @Override + public List getDeletedIndices() { + return indicesToDelete; + } + + @Override + public boolean isAcknowledged() { + return false; + } + } +} diff --git a/elx-common/src/main/java/org/xbib/elx/common/ClientBuilder.java b/elx-common/src/main/java/org/xbib/elx/common/ClientBuilder.java new file mode 100644 index 0000000..ba9150f --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/ClientBuilder.java @@ -0,0 +1,102 @@ +package org.xbib.elx.common; + +import 
org.elasticsearch.Version; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.xbib.elx.api.ExtendedClient; +import org.xbib.elx.api.ExtendedClientProvider; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.ServiceLoader; + +@SuppressWarnings("rawtypes") +public class ClientBuilder { + + private final ElasticsearchClient client; + + private final Settings.Builder settingsBuilder; + + private Map, ExtendedClientProvider> providerMap; + + private Class provider; + + public ClientBuilder() { + this(null); + } + + public ClientBuilder(ElasticsearchClient client) { + this(client, Thread.currentThread().getContextClassLoader()); + } + + public ClientBuilder(ElasticsearchClient client, ClassLoader classLoader) { + this.client = client; + this.settingsBuilder = Settings.builder(); + settingsBuilder.put("node.name", "elx-client-" + Version.CURRENT); + this.providerMap = new HashMap<>(); + ServiceLoader serviceLoader = ServiceLoader.load(ExtendedClientProvider.class, + classLoader != null ? 
classLoader : Thread.currentThread().getContextClassLoader()); + for (ExtendedClientProvider provider : serviceLoader) { + providerMap.put(provider.getClass(), provider); + } + } + + public static ClientBuilder builder() { + return new ClientBuilder(); + } + + public static ClientBuilder builder(ElasticsearchClient client) { + return new ClientBuilder(client); + } + + public ClientBuilder provider(Class provider) { + this.provider = provider; + return this; + } + + public ClientBuilder put(String key, String value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, Integer value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, Long value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, Double value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, ByteSizeValue value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(String key, TimeValue value) { + settingsBuilder.put(key, value); + return this; + } + + public ClientBuilder put(Settings settings) { + settingsBuilder.put(settings); + return this; + } + + @SuppressWarnings("unchecked") + public C build() throws IOException { + if (provider == null) { + throw new IllegalArgumentException("no provider"); + } + return (C) providerMap.get(provider).getExtendedClient().setClient(client).init(settingsBuilder.build()); + } +} diff --git a/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkController.java b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkController.java new file mode 100644 index 0000000..30d5b52 --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkController.java @@ -0,0 +1,312 @@ +package org.xbib.elx.common; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import 
org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.xbib.elx.api.BulkController; +import org.xbib.elx.api.BulkMetric; +import org.xbib.elx.api.BulkProcessor; +import org.xbib.elx.api.ExtendedClient; +import org.xbib.elx.api.IndexDefinition; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +public class DefaultBulkController implements BulkController { + + private static final Logger logger = LogManager.getLogger(DefaultBulkController.class); + + private final ExtendedClient client; + + private final BulkMetric bulkMetric; + + private final List indexNames; + + private final Map startBulkRefreshIntervals; + + private final Map stopBulkRefreshIntervals; + + private long maxWaitTime; + + private TimeUnit maxWaitTimeUnit; + + private BulkProcessor bulkProcessor; + + private BulkListener bulkListener; + + private AtomicBoolean active; + + public DefaultBulkController(ExtendedClient client, BulkMetric bulkMetric) { + this.client = client; + this.bulkMetric = bulkMetric; + this.indexNames = new ArrayList<>(); + this.active = new AtomicBoolean(false); + this.startBulkRefreshIntervals = new HashMap<>(); + this.stopBulkRefreshIntervals = new HashMap<>(); + this.maxWaitTime = 30L; + this.maxWaitTimeUnit = TimeUnit.SECONDS; + } + + @Override + public Throwable getLastBulkError() { + return bulkListener.getLastBulkError(); + } + + @Override + 
public void init(Settings settings) { + int maxActionsPerRequest = settings.getAsInt(Parameters.MAX_ACTIONS_PER_REQUEST.name(), + Parameters.DEFAULT_MAX_ACTIONS_PER_REQUEST.getNum()); + int maxConcurrentRequests = settings.getAsInt(Parameters.MAX_CONCURRENT_REQUESTS.name(), + Parameters.DEFAULT_MAX_CONCURRENT_REQUESTS.getNum()); + TimeValue flushIngestInterval = settings.getAsTime(Parameters.FLUSH_INTERVAL.name(), + TimeValue.timeValueSeconds(Parameters.DEFAULT_FLUSH_INTERVAL.getNum())); + ByteSizeValue maxVolumePerRequest = settings.getAsBytesSize(Parameters.MAX_VOLUME_PER_REQUEST.name(), + ByteSizeValue.parseBytesSizeValue(Parameters.DEFAULT_MAX_VOLUME_PER_REQUEST.getString(), + "maxVolumePerRequest")); + if (logger.isInfoEnabled()) { + logger.info("bulk processor up with maxActionsPerRequest = {} maxConcurrentRequests = {} " + + "flushIngestInterval = {} maxVolumePerRequest = {}", + maxActionsPerRequest, maxConcurrentRequests, flushIngestInterval, maxVolumePerRequest); + } + this.bulkListener = new BulkListener(); + DefaultBulkProcessor.Builder builder = DefaultBulkProcessor.builder((Client) client.getClient(), bulkListener) + .setBulkActions(maxActionsPerRequest) + .setConcurrentRequests(maxConcurrentRequests) + .setFlushInterval(flushIngestInterval) + .setBulkSize(maxVolumePerRequest); + this.bulkProcessor = builder.build(); + this.active.set(true); + } + + @Override + public void startBulkMode(IndexDefinition indexDefinition) throws IOException { + startBulkMode(indexDefinition.getFullIndexName(), indexDefinition.getStartRefreshInterval(), + indexDefinition.getStopRefreshInterval()); + } + + @Override + public void startBulkMode(String indexName, + long startRefreshIntervalInSeconds, + long stopRefreshIntervalInSeconds) throws IOException { + if (!indexNames.contains(indexName)) { + indexNames.add(indexName); + startBulkRefreshIntervals.put(indexName, startRefreshIntervalInSeconds); + stopBulkRefreshIntervals.put(indexName, stopRefreshIntervalInSeconds); + if 
(startRefreshIntervalInSeconds != 0L) { + client.updateIndexSetting(indexName, "refresh_interval", startRefreshIntervalInSeconds + "s", + 30L, TimeUnit.SECONDS); + } + } + } + + @Override + public void index(IndexRequest indexRequest) { + if (!active.get()) { + throw new IllegalStateException("inactive"); + } + try { + if (bulkMetric != null) { + bulkMetric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id()); + } + bulkProcessor.add(indexRequest); + } catch (Exception e) { + bulkListener.lastBulkError = e; + active.set(false); + if (logger.isErrorEnabled()) { + logger.error("bulk add of index failed: " + e.getMessage(), e); + } + } + } + + @Override + public void delete(DeleteRequest deleteRequest) { + if (!active.get()) { + throw new IllegalStateException("inactive"); + } + try { + if (bulkMetric != null) { + bulkMetric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); + } + bulkProcessor.add(deleteRequest); + } catch (Exception e) { + bulkListener.lastBulkError = e; + active.set(false); + if (logger.isErrorEnabled()) { + logger.error("bulk add of delete failed: " + e.getMessage(), e); + } + } + } + + @Override + public void update(UpdateRequest updateRequest) { + if (!active.get()) { + throw new IllegalStateException("inactive"); + } + try { + if (bulkMetric != null) { + bulkMetric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id()); + } + bulkProcessor.add(updateRequest); + } catch (Exception e) { + bulkListener.lastBulkError = e; + active.set(false); + if (logger.isErrorEnabled()) { + logger.error("bulk add of update failed: " + e.getMessage(), e); + } + } + } + + @Override + public boolean waitForResponses(long timeout, TimeUnit timeUnit) { + try { + return bulkProcessor.awaitFlush(timeout, timeUnit); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.error("interrupted"); + return false; + } + } + + @Override + public 
void stopBulkMode(IndexDefinition indexDefinition) throws IOException { + stopBulkMode(indexDefinition.getFullIndexName(), + indexDefinition.getMaxWaitTime(), indexDefinition.getMaxWaitTimeUnit()); + } + + @Override + public void stopBulkMode(String index, long timeout, TimeUnit timeUnit) throws IOException { + flush(); + if (waitForResponses(timeout, timeUnit)) { + if (indexNames.contains(index)) { + Long secs = stopBulkRefreshIntervals.get(index); + if (secs != null && secs != 0L) { + client.updateIndexSetting(index, "refresh_interval", secs + "s", + 30L, TimeUnit.SECONDS); + } + indexNames.remove(index); + } + } + } + + @Override + public void flush() throws IOException { + if (bulkProcessor != null) { + bulkProcessor.flush(); + } + } + + @Override + public void close() throws IOException { + flush(); + if (client.waitForResponses(maxWaitTime, maxWaitTimeUnit)) { + for (String index : indexNames) { + Long secs = stopBulkRefreshIntervals.get(index); + if (secs != null && secs != 0L) + client.updateIndexSetting(index, "refresh_interval", secs + "s", + 30L, TimeUnit.SECONDS); + } + indexNames.clear(); + } + if (bulkProcessor != null) { + bulkProcessor.close(); + } + } + + private class BulkListener implements DefaultBulkProcessor.Listener { + + private final Logger logger = LogManager.getLogger("org.xbib.elx.BulkProcessor.Listener"); + + private Throwable lastBulkError = null; + + @Override + public void beforeBulk(long executionId, BulkRequest request) { + long l = 0; + if (bulkMetric != null) { + l = bulkMetric.getCurrentIngest().getCount(); + bulkMetric.getCurrentIngest().inc(); + int n = request.numberOfActions(); + bulkMetric.getSubmitted().inc(n); + bulkMetric.getCurrentIngestNumDocs().inc(n); + bulkMetric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes()); + } + if (logger.isDebugEnabled()) { + logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]", + executionId, + request.numberOfActions(), + 
request.estimatedSizeInBytes(), + l); + } + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + long l = 0; + if (bulkMetric != null) { + l = bulkMetric.getCurrentIngest().getCount(); + bulkMetric.getCurrentIngest().dec(); + bulkMetric.getSucceeded().inc(response.getItems().length); + } + int n = 0; + for (BulkItemResponse itemResponse : response.getItems()) { + if (bulkMetric != null) { + bulkMetric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId()); + } + if (itemResponse.isFailed()) { + n++; + if (bulkMetric != null) { + bulkMetric.getSucceeded().dec(1); + bulkMetric.getFailed().inc(1); + } + } + } + if (bulkMetric != null && logger.isDebugEnabled()) { + logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests", + executionId, + bulkMetric.getSucceeded().getCount(), + bulkMetric.getFailed().getCount(), + response.getTook().millis(), + l); + } + if (n > 0) { + if (logger.isErrorEnabled()) { + logger.error("bulk [{}] failed with {} failed items, failure message = {}", + executionId, n, response.buildFailureMessage()); + } + } else { + if (bulkMetric != null) { + bulkMetric.getCurrentIngestNumDocs().dec(response.getItems().length); + } + } + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + if (bulkMetric != null) { + bulkMetric.getCurrentIngest().dec(); + } + lastBulkError = failure; + active.set(false); + if (logger.isErrorEnabled()) { + logger.error("after bulk [" + executionId + "] error", failure); + } + } + + Throwable getLastBulkError() { + return lastBulkError; + } + } +} diff --git a/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkMetric.java b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkMetric.java similarity index 74% rename from common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkMetric.java rename to 
elx-common/src/main/java/org/xbib/elx/common/DefaultBulkMetric.java index 1a181cb..a956c4d 100644 --- a/common/src/main/java/org/xbib/elasticsearch/client/SimpleBulkMetric.java +++ b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkMetric.java @@ -1,16 +1,15 @@ -package org.xbib.elasticsearch.client; +package org.xbib.elx.common; +import org.elasticsearch.common.settings.Settings; +import org.xbib.elx.api.BulkMetric; import org.xbib.metrics.Count; import org.xbib.metrics.CountMetric; import org.xbib.metrics.Meter; import org.xbib.metrics.Metered; import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -public class SimpleBulkMetric implements BulkMetric { - - private final ScheduledExecutorService executorService; +public class DefaultBulkMetric implements BulkMetric { private final Meter totalIngest; @@ -30,13 +29,8 @@ public class SimpleBulkMetric implements BulkMetric { private Long stopped; - public SimpleBulkMetric() { - this(Executors.newSingleThreadScheduledExecutor()); - } - - public SimpleBulkMetric(ScheduledExecutorService executorService) { - this.executorService = executorService; - totalIngest = new Meter(executorService); + public DefaultBulkMetric() { + totalIngest = new Meter(Executors.newSingleThreadScheduledExecutor()); totalIngestSizeInBytes = new CountMetric(); currentIngest = new CountMetric(); currentIngestNumDocs = new CountMetric(); @@ -45,6 +39,11 @@ public class SimpleBulkMetric implements BulkMetric { failed = new CountMetric(); } + @Override + public void init(Settings settings) { + start(); + } + @Override public Metered getTotalIngest() { return totalIngest; @@ -80,6 +79,11 @@ public class SimpleBulkMetric implements BulkMetric { return failed; } + @Override + public long elapsed() { + return started != null ? ((stopped != null ? 
stopped : System.nanoTime()) - started) : -1L; + } + @Override public void start() { this.started = System.nanoTime(); @@ -90,12 +94,11 @@ public class SimpleBulkMetric implements BulkMetric { public void stop() { this.stopped = System.nanoTime(); totalIngest.stop(); - executorService.shutdownNow(); } @Override - public long elapsed() { - return (stopped != null ? stopped : System.nanoTime()) - started; + public void close() { + stop(); + totalIngest.shutdown(); } - } diff --git a/common/src/main/java/org/xbib/elasticsearch/client/BulkProcessor.java b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkProcessor.java similarity index 64% rename from common/src/main/java/org/xbib/elasticsearch/client/BulkProcessor.java rename to elx-common/src/main/java/org/xbib/elx/common/DefaultBulkProcessor.java index 59ea5b2..99a72ec 100644 --- a/common/src/main/java/org/xbib/elasticsearch/client/BulkProcessor.java +++ b/elx-common/src/main/java/org/xbib/elx/common/DefaultBulkProcessor.java @@ -1,19 +1,21 @@ -package org.xbib.elasticsearch.client; +package org.xbib.elx.common; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.Nullable; +import org.elasticsearch.client.Client; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.xbib.elx.api.BulkProcessor; -import java.io.Closeable; import 
java.util.concurrent.Executors; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledThreadPoolExecutor; @@ -27,36 +29,38 @@ import java.util.concurrent.atomic.AtomicLong; * requests allowed to be executed in parallel. * In order to create a new bulk processor, use the {@link Builder}. */ -public class BulkProcessor implements Closeable { +public class DefaultBulkProcessor implements BulkProcessor { - private final int maximumBulkActionsPerRequest; + private final int bulkActions; - private final long maximumBulkRequestByteSize; + private final long bulkSize; private final ScheduledThreadPoolExecutor scheduler; private final ScheduledFuture scheduledFuture; - private final AtomicLong executionIdGen = new AtomicLong(); + private final AtomicLong executionIdGen; - private final BulkExecutor bulkExecutor; + private final BulkRequestHandler bulkRequestHandler; private BulkRequest bulkRequest; - private volatile boolean closed = false; + private volatile boolean closed; - private BulkProcessor(ElasticsearchClient client, Listener listener, int maximumConcurrentBulkRequests, - int maximumBulkActionsPerRequest, ByteSizeValue maximumBulkRequestByteSize, - @Nullable TimeValue flushInterval) { - this.maximumBulkActionsPerRequest = maximumBulkActionsPerRequest; - this.maximumBulkRequestByteSize = maximumBulkRequestByteSize.getBytes(); + private DefaultBulkProcessor(Client client, Listener listener, String name, int concurrentRequests, + int bulkActions, ByteSizeValue bulkSize, TimeValue flushInterval) { + this.executionIdGen = new AtomicLong(); + this.closed = false; + this.bulkActions = bulkActions; + this.bulkSize = bulkSize.getBytes(); this.bulkRequest = new BulkRequest(); - this.bulkExecutor = maximumConcurrentBulkRequests == 0 ? - new SyncBulkExecutor(client, listener) : - new AsyncBulkExecutor(client, listener, maximumConcurrentBulkRequests); - + this.bulkRequestHandler = concurrentRequests == 0 ? 
+ new SyncBulkRequestHandler(client, listener) : + new AsyncBulkRequestHandler(client, listener, concurrentRequests); if (flushInterval != null) { - this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1); + this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, + EsExecutors.daemonThreadFactory(client.settings(), + name != null ? "[" + name + "]" : "" + "bulk_processor")); this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false); this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(), @@ -67,7 +71,7 @@ public class BulkProcessor implements Closeable { } } - public static Builder builder(ElasticsearchClient client, Listener listener) { + public static Builder builder(Client client, Listener listener) { if (client == null) { throw new NullPointerException("The client you specified while building a BulkProcessor is null"); } @@ -75,20 +79,28 @@ public class BulkProcessor implements Closeable { } /** - * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are flushed. + * Wait for bulk request handler with flush. + * @param timeout the timeout value + * @param unit the timeout unit + * @return true is method was successful, false if timeout + * @throws InterruptedException if timeout */ @Override - public void close() { - try { - awaitClose(0, TimeUnit.NANOSECONDS); - } catch (InterruptedException exc) { - Thread.currentThread().interrupt(); + public synchronized boolean awaitFlush(long timeout, TimeUnit unit) throws InterruptedException { + if (closed) { + return true; } + // flush + if (bulkRequest.numberOfActions() > 0) { + execute(); + } + // wait for all bulk responses + return this.bulkRequestHandler.close(timeout, unit); } /** - * Closes the processor. If flushing by time is enabled, then it's shutdown. 
Any remaining bulk actions are - * flushed. + * Closes the processor. Any remaining bulk actions are flushed and then closed. This emthod can only be called + * once as the last action of a bulk processor. * * If concurrent requests are not enabled, returns {@code true} immediately. * If concurrent requests are enabled, waits for up to the specified timeout for all bulk requests to complete then @@ -101,98 +113,50 @@ public class BulkProcessor implements Closeable { * bulk requests completed * @throws InterruptedException If the current thread is interrupted */ + @Override public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { if (closed) { return true; } closed = true; if (this.scheduledFuture != null) { - this.scheduledFuture.cancel(false); + FutureUtils.cancel(this.scheduledFuture); this.scheduler.shutdown(); } if (bulkRequest.numberOfActions() > 0) { execute(); } - return bulkExecutor.awaitClose(timeout, unit); + return this.bulkRequestHandler.close(timeout, unit); } /** - * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest} - * (for example, if no id is provided, one will be generated, or usage of the create flag). + * Adds either a delete or an index request. * * @param request request * @return his bulk processor */ - public synchronized BulkProcessor add(IndexRequest request) { - if (request == null) { - return this; - } - ensureOpen(); - bulkRequest.add(request); - if (isOverTheLimit()) { - execute(); - } - return this; + @Override + public DefaultBulkProcessor add(ActionRequest request) { + return add(request, null); } /** - * Adds an {@link DeleteRequest} to the list of actions to execute. + * Adds either a delete or an index request with a payload. 
* * @param request request + * @param payload payload * @return his bulk processor */ - public synchronized BulkProcessor add(DeleteRequest request) { - if (request == null) { - return this; - } - ensureOpen(); - bulkRequest.add(request); - if (isOverTheLimit()) { - execute(); - } + @Override + public DefaultBulkProcessor add(ActionRequest request, Object payload) { + internalAdd(request, payload); return this; } - /** - * Adds an {@link UpdateRequest} to the list of actions to execute. - * - * @param request request - * @return his bulk processor - */ - public synchronized BulkProcessor add(UpdateRequest request) { - if (request == null) { - return this; - } - ensureOpen(); - bulkRequest.add(request); - if (isOverTheLimit()) { - execute(); - } - return this; - } - - private void ensureOpen() { - if (closed) { - throw new IllegalStateException("bulk process already closed"); - } - } - - private boolean isOverTheLimit() { - final int count = bulkRequest.numberOfActions(); - return count > 0 && - (maximumBulkActionsPerRequest != -1 && count >= maximumBulkActionsPerRequest) || - (maximumBulkRequestByteSize != -1 && bulkRequest.estimatedSizeInBytes() >= maximumBulkRequestByteSize); - } - - private void execute() { - final BulkRequest myBulkRequest = this.bulkRequest; - bulkExecutor.execute(myBulkRequest, executionIdGen.incrementAndGet()); - this.bulkRequest = new BulkRequest(); - } - /** * Flush pending delete or index requests. */ + @Override public synchronized void flush() { ensureOpen(); if (bulkRequest.numberOfActions() > 0) { @@ -201,39 +165,58 @@ public class BulkProcessor implements Closeable { } /** - * A listener for the execution. + * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are flushed. 
*/ - public interface Listener { + @Override + public void close() { + try { + // 0 = immediate close + awaitClose(0, TimeUnit.NANOSECONDS); + } catch (InterruptedException exc) { + Thread.currentThread().interrupt(); + } + } - /** - * Callback before the bulk is executed. - * - * @param executionId execution ID - * @param request request - */ - void beforeBulk(long executionId, BulkRequest request); + private void ensureOpen() { + if (closed) { + throw new IllegalStateException("bulk processor already closed"); + } + } - /** - * Callback after a successful execution of bulk request. - * - * @param executionId execution ID - * @param request request - * @param response response - */ - void afterBulk(long executionId, BulkRequest request, BulkResponse response); + private synchronized void internalAdd(ActionRequest request, Object payload) { + ensureOpen(); + if (request instanceof IndexRequest) { + bulkRequest.add((IndexRequest) request, payload); + } else if (request instanceof DeleteRequest) { + bulkRequest.add((DeleteRequest) request, payload); + } else if (request instanceof UpdateRequest) { + bulkRequest.add((UpdateRequest) request, payload); + } else { + throw new UnsupportedOperationException(); + } + executeIfNeeded(); + } - /** - * Callback after a failed execution of bulk request. - * - * Note that in case an instance of InterruptedException is passed, which means that request - * processing has been - * cancelled externally, the thread's interruption status has been restored prior to calling this method. 
- * - * @param executionId execution ID - * @param request request - * @param failure failure - */ - void afterBulk(long executionId, BulkRequest request, Throwable failure); + private void executeIfNeeded() { + ensureOpen(); + if (!isOverTheLimit()) { + return; + } + execute(); + } + + private void execute() { + final BulkRequest myBulkRequest = this.bulkRequest; + final long executionId = executionIdGen.incrementAndGet(); + this.bulkRequest = new BulkRequest(); + this.bulkRequestHandler.execute(myBulkRequest, executionId); + } + + private boolean isOverTheLimit() { + return bulkActions != -1 && + bulkRequest.numberOfActions() >= bulkActions || + bulkSize != -1 && + bulkRequest.estimatedSizeInBytes() >= bulkSize; } /** @@ -241,11 +224,18 @@ public class BulkProcessor implements Closeable { */ public static class Builder { - private final ElasticsearchClient client; + private final Client client; + private final Listener listener; + + private String name; + private int concurrentRequests = 1; + private int bulkActions = 1000; - private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); + + private ByteSizeValue bulkSize = new ByteSizeValue(10, ByteSizeUnit.MB); + private TimeValue flushInterval = null; /** @@ -255,11 +245,22 @@ public class BulkProcessor implements Closeable { * @param client the client * @param listener the listener */ - Builder(ElasticsearchClient client, Listener listener) { + Builder(Client client, Listener listener) { this.client = client; this.listener = listener; } + /** + * Sets an optional name to identify this bulk processor. + * + * @param name name + * @return this builder + */ + public Builder setName(String name) { + this.name = name; + return this; + } + /** * Sets the number of concurrent requests allowed to be executed. A value of 0 means that only a single * request will be allowed to be executed. 
A value of 1 means 1 concurrent request is allowed to be executed @@ -277,7 +278,7 @@ public class BulkProcessor implements Closeable { * Sets when to flush a new bulk request based on the number of actions currently added. Defaults to * {@code 1000}. Can be set to {@code -1} to disable it. * - * @param bulkActions mbulk actions + * @param bulkActions bulk actions * @return this builder */ public Builder setBulkActions(int bulkActions) { @@ -299,7 +300,7 @@ public class BulkProcessor implements Closeable { /** * Sets a flush interval flushing *any* bulk actions pending if the interval passes. Defaults to not set. - * Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(ByteSizeValue)} + * Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(org.elasticsearch.common.unit.ByteSizeValue)} * can be set to {@code -1} with the flush interval set allowing for complete async processing of bulk actions. * * @param flushInterval flush interval @@ -315,8 +316,8 @@ public class BulkProcessor implements Closeable { * * @return a bulk processor */ - public BulkProcessor build() { - return new BulkProcessor(client, listener, concurrentRequests, bulkActions, bulkSize, flushInterval); + public DefaultBulkProcessor build() { + return new DefaultBulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval); } } @@ -324,32 +325,25 @@ public class BulkProcessor implements Closeable { @Override public void run() { - synchronized (BulkProcessor.this) { + synchronized (DefaultBulkProcessor.this) { if (closed) { return; } - if (bulkRequest.numberOfActions() > 0) { - execute(); + if (bulkRequest.numberOfActions() == 0) { + return; } + execute(); } } } - interface BulkExecutor { + private static class SyncBulkRequestHandler implements BulkRequestHandler { - void execute(BulkRequest bulkRequest, long executionId); + private final Client client; - boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException; + private final 
DefaultBulkProcessor.Listener listener; - } - - private static class SyncBulkExecutor implements BulkExecutor { - - private final ElasticsearchClient client; - - private final BulkProcessor.Listener listener; - - SyncBulkExecutor(ElasticsearchClient client, BulkProcessor.Listener listener) { + SyncBulkRequestHandler(Client client, DefaultBulkProcessor.Listener listener) { this.client = client; this.listener = listener; } @@ -370,22 +364,22 @@ public class BulkProcessor implements Closeable { } @Override - public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + public boolean close(long timeout, TimeUnit unit) { return true; } } - private static class AsyncBulkExecutor implements BulkExecutor { + private static class AsyncBulkRequestHandler implements BulkRequestHandler { - private final ElasticsearchClient client; + private final Client client; - private final BulkProcessor.Listener listener; + private final DefaultBulkProcessor.Listener listener; private final Semaphore semaphore; private final int concurrentRequests; - private AsyncBulkExecutor(ElasticsearchClient client, BulkProcessor.Listener listener, int concurrentRequests) { + private AsyncBulkRequestHandler(Client client, DefaultBulkProcessor.Listener listener, int concurrentRequests) { this.client = client; this.listener = listener; this.concurrentRequests = concurrentRequests; @@ -400,7 +394,7 @@ public class BulkProcessor implements Closeable { listener.beforeBulk(executionId, bulkRequest); semaphore.acquire(); acquired = true; - client.execute(BulkAction.INSTANCE, bulkRequest, new ActionListener() { + client.execute(BulkAction.INSTANCE, bulkRequest, new ActionListener<>() { @Override public void onResponse(BulkResponse response) { try { @@ -433,9 +427,9 @@ public class BulkProcessor implements Closeable { } @Override - public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { - if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) { - 
semaphore.release(this.concurrentRequests); + public boolean close(long timeout, TimeUnit unit) throws InterruptedException { + if (semaphore.tryAcquire(concurrentRequests, timeout, unit)) { + semaphore.release(concurrentRequests); return true; } return false; diff --git a/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexDefinition.java b/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexDefinition.java new file mode 100644 index 0000000..52127e1 --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexDefinition.java @@ -0,0 +1,214 @@ +package org.xbib.elx.common; + +import org.xbib.elx.api.IndexDefinition; +import org.xbib.elx.api.IndexRetention; + +import java.net.MalformedURLException; +import java.net.URL; +import java.util.concurrent.TimeUnit; + +public class DefaultIndexDefinition implements IndexDefinition { + + private String index; + + private String fullIndexName; + + private String dateTimePattern; + + private URL settingsUrl; + + private URL mappingsUrl; + + private boolean enabled; + + private boolean ignoreErrors; + + private boolean switchAliases; + + private boolean hasForceMerge; + + private int replicaLevel; + + private IndexRetention indexRetention; + + private long maxWaitTime; + + private TimeUnit maxWaitTimeUnit; + + private long startRefreshInterval; + + private long stopRefreshInterval; + + @Override + public IndexDefinition setIndex(String index) { + this.index = index; + return this; + } + + @Override + public String getIndex() { + return index; + } + + @Override + public IndexDefinition setFullIndexName(String fullIndexName) { + this.fullIndexName = fullIndexName; + return this; + } + + @Override + public String getFullIndexName() { + return fullIndexName; + } + + @Override + public IndexDefinition setSettingsUrl(String settingsUrlString) throws MalformedURLException { + this.settingsUrl = settingsUrlString != null ? 
new URL(settingsUrlString) : null; + return this; + } + + @Override + public IndexDefinition setSettingsUrl(URL settingsUrl) { + this.settingsUrl = settingsUrl; + return this; + } + + @Override + public URL getSettingsUrl() { + return settingsUrl; + } + + @Override + public IndexDefinition setMappingsUrl(String mappingsUrlString) throws MalformedURLException { + this.mappingsUrl = mappingsUrlString != null ? new URL(mappingsUrlString) : null; + return this; + } + + @Override + public IndexDefinition setMappingsUrl(URL mappingsUrl) { + this.mappingsUrl = mappingsUrl; + return this; + } + + @Override + public URL getMappingsUrl() { + return mappingsUrl; + } + + @Override + public IndexDefinition setDateTimePattern(String timeWindow) { + this.dateTimePattern = timeWindow; + return this; + } + + @Override + public String getDateTimePattern() { + return dateTimePattern; + } + + @Override + public IndexDefinition setEnabled(boolean enabled) { + this.enabled = enabled; + return this; + } + + @Override + public boolean isEnabled() { + return enabled; + } + + @Override + public IndexDefinition setIgnoreErrors(boolean ignoreErrors) { + this.ignoreErrors = ignoreErrors; + return this; + } + + @Override + public boolean ignoreErrors() { + return ignoreErrors; + } + + @Override + public IndexDefinition setShift(boolean switchAliases) { + this.switchAliases = switchAliases; + return this; + } + + @Override + public boolean isShiftEnabled() { + return switchAliases; + } + + @Override + public IndexDefinition setForceMerge(boolean hasForceMerge) { + this.hasForceMerge = hasForceMerge; + return this; + } + + @Override + public boolean hasForceMerge() { + return hasForceMerge; + } + + @Override + public IndexDefinition setReplicaLevel(int replicaLevel) { + this.replicaLevel = replicaLevel; + return this; + } + + @Override + public int getReplicaLevel() { + return replicaLevel; + } + + @Override + public IndexDefinition setRetention(IndexRetention indexRetention) { + 
this.indexRetention = indexRetention; + return this; + } + + @Override + public IndexRetention getRetention() { + return indexRetention; + } + + @Override + public IndexDefinition setMaxWaitTime(long maxWaitTime, TimeUnit timeUnit) { + this.maxWaitTime = maxWaitTime; + this.maxWaitTimeUnit = timeUnit; + return this; + } + + @Override + public long getMaxWaitTime() { + return maxWaitTime; + } + + @Override + public TimeUnit getMaxWaitTimeUnit() { + return maxWaitTimeUnit; + } + + @Override + public IndexDefinition setStartRefreshInterval(long seconds) { + this.startRefreshInterval = seconds; + return this; + } + + @Override + public long getStartRefreshInterval() { + return startRefreshInterval; + } + + @Override + public IndexDefinition setStopRefreshInterval(long seconds) { + this.stopRefreshInterval = seconds; + return this; + } + + @Override + public long getStopRefreshInterval() { + return stopRefreshInterval; + } + +} diff --git a/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexRetention.java b/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexRetention.java new file mode 100644 index 0000000..4e49be3 --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/DefaultIndexRetention.java @@ -0,0 +1,32 @@ +package org.xbib.elx.common; + +import org.xbib.elx.api.IndexRetention; + +public class DefaultIndexRetention implements IndexRetention { + + private int delta; + + private int minToKeep; + + @Override + public IndexRetention setDelta(int delta) { + this.delta = delta; + return this; + } + + @Override + public int getDelta() { + return delta; + } + + @Override + public IndexRetention setMinToKeep(int minToKeep) { + this.minToKeep = minToKeep; + return this; + } + + @Override + public int getMinToKeep() { + return minToKeep; + } +} diff --git a/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClient.java b/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClient.java new file mode 100644 index 0000000..647894b --- /dev/null 
+++ b/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClient.java @@ -0,0 +1,129 @@ +package org.xbib.elx.common; + +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.settings.Settings; + +import java.util.concurrent.TimeUnit; + +/** + * A mocked client, it does not perform any actions on a cluster. Useful for testing. + */ +public class MockExtendedClient extends AbstractExtendedClient { + + @Override + public ElasticsearchClient getClient() { + return null; + } + + @Override + public MockExtendedClient init(Settings settings) { + return this; + } + + @Override + protected ElasticsearchClient createClient(Settings settings) { + return null; + } + + @Override + protected void closeClient() { + } + + @Override + public MockExtendedClient index(String index, String id, boolean create, String source) { + return this; + } + + @Override + public MockExtendedClient delete(String index, String id) { + return this; + } + + @Override + public MockExtendedClient update(String index, String id, String source) { + return this; + } + + @Override + public MockExtendedClient index(IndexRequest indexRequest) { + return this; + } + + @Override + public MockExtendedClient delete(DeleteRequest deleteRequest) { + return this; + } + + @Override + public MockExtendedClient update(UpdateRequest updateRequest) { + return this; + } + + @Override + public MockExtendedClient startBulk(String index, long startRefreshInterval, long stopRefreshIterval) { + return this; + } + + @Override + public MockExtendedClient stopBulk(String index, long maxWaitTime, TimeUnit timeUnit) { + return this; + } + + @Override + public MockExtendedClient newIndex(String index) { + return this; + } + + @Override + public MockExtendedClient deleteIndex(String index) { + return this; + } + + @Override + public 
MockExtendedClient refreshIndex(String index) { + return this; + } + + @Override + public MockExtendedClient flushIndex(String index) { + return this; + } + + @Override + public boolean forceMerge(String index, long maxWaitTime, TimeUnit timeUnit) { + return true; + } + + @Override + public boolean waitForCluster(String healthColor, long timeValue, TimeUnit timeUnit) { + return true; + } + + @Override + public boolean waitForResponses(long maxWaitTime, TimeUnit timeUnit) { + return true; + } + + @Override + public boolean waitForRecovery(String index, long maxWaitTime, TimeUnit timeUnit) { + return true; + } + + @Override + public MockExtendedClient updateReplicaLevel(String index, int level, long maxWaitTime, TimeUnit timeUnit) { + return this; + } + + @Override + public void flush() { + // nothing to do + } + + @Override + public void close() { + // nothing to do + } +} diff --git a/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClientProvider.java b/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClientProvider.java new file mode 100644 index 0000000..87e65cc --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/MockExtendedClientProvider.java @@ -0,0 +1,10 @@ +package org.xbib.elx.common; + +import org.xbib.elx.api.ExtendedClientProvider; + +public class MockExtendedClientProvider implements ExtendedClientProvider { + @Override + public MockExtendedClient getExtendedClient() { + return new MockExtendedClient(); + } +} diff --git a/elx-common/src/main/java/org/xbib/elx/common/Parameters.java b/elx-common/src/main/java/org/xbib/elx/common/Parameters.java new file mode 100644 index 0000000..28d10d7 --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/Parameters.java @@ -0,0 +1,40 @@ +package org.xbib.elx.common; + +public enum Parameters { + + DEFAULT_MAX_ACTIONS_PER_REQUEST(1000), + + DEFAULT_MAX_CONCURRENT_REQUESTS(Runtime.getRuntime().availableProcessors()), + + DEFAULT_MAX_VOLUME_PER_REQUEST("10mb"), + + 
DEFAULT_FLUSH_INTERVAL(30), + + MAX_ACTIONS_PER_REQUEST ("max_actions_per_request"), + + MAX_CONCURRENT_REQUESTS("max_concurrent_requests"), + + MAX_VOLUME_PER_REQUEST("max_volume_per_request"), + + FLUSH_INTERVAL("flush_interval"); + + int num; + + String string; + + Parameters(int num) { + this.num = num; + } + + Parameters(String string) { + this.string = string; + } + + int getNum() { + return num; + } + + String getString() { + return string; + } +} diff --git a/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandler.java b/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandler.java new file mode 100644 index 0000000..e7d8727 --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandler.java @@ -0,0 +1,25 @@ +package org.xbib.elx.common.io; + +import java.io.IOException; +import java.net.URL; +import java.net.URLConnection; +import java.net.URLStreamHandler; + +public class ClasspathURLStreamHandler extends URLStreamHandler { + + private final ClassLoader classLoader; + + public ClasspathURLStreamHandler() { + this.classLoader = getClass().getClassLoader(); + } + + public ClasspathURLStreamHandler(ClassLoader classLoader) { + this.classLoader = classLoader; + } + + @Override + protected URLConnection openConnection(URL u) throws IOException { + final URL resourceUrl = classLoader.getResource(u.getPath()); + return resourceUrl != null ? 
resourceUrl.openConnection() : null; + } +} diff --git a/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandlerFactory.java b/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandlerFactory.java new file mode 100644 index 0000000..00c7c83 --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/io/ClasspathURLStreamHandlerFactory.java @@ -0,0 +1,12 @@ +package org.xbib.elx.common.io; + +import java.net.URLStreamHandler; +import java.net.URLStreamHandlerFactory; + +public class ClasspathURLStreamHandlerFactory implements URLStreamHandlerFactory { + + @Override + public URLStreamHandler createURLStreamHandler(String protocol) { + return "classpath".equals(protocol) ? new ClasspathURLStreamHandler() : null; + } +} diff --git a/elx-common/src/main/java/org/xbib/elx/common/io/package-info.java b/elx-common/src/main/java/org/xbib/elx/common/io/package-info.java new file mode 100644 index 0000000..492a3e2 --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/io/package-info.java @@ -0,0 +1,4 @@ +/** + * + */ +package org.xbib.elx.common.io; diff --git a/elx-common/src/main/java/org/xbib/elx/common/package-info.java b/elx-common/src/main/java/org/xbib/elx/common/package-info.java new file mode 100644 index 0000000..ced4357 --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/package-info.java @@ -0,0 +1,4 @@ +/** + * + */ +package org.xbib.elx.common; diff --git a/common/src/main/java/org/xbib/elasticsearch/client/NetworkUtils.java b/elx-common/src/main/java/org/xbib/elx/common/util/NetworkUtils.java similarity index 95% rename from common/src/main/java/org/xbib/elasticsearch/client/NetworkUtils.java rename to elx-common/src/main/java/org/xbib/elx/common/util/NetworkUtils.java index 63a6fdf..11dd014 100644 --- a/common/src/main/java/org/xbib/elasticsearch/client/NetworkUtils.java +++ b/elx-common/src/main/java/org/xbib/elx/common/util/NetworkUtils.java @@ -1,4 +1,4 @@ -package org.xbib.elasticsearch.client; 
+package org.xbib.elx.common.util; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -16,6 +16,9 @@ import java.util.Enumeration; import java.util.List; import java.util.Locale; +/** + * + */ public class NetworkUtils { private static final Logger logger = LogManager.getLogger(NetworkUtils.class.getName()); @@ -100,10 +103,8 @@ public class NetworkUtils { NetworkInterface networkInterface = interfaces.nextElement(); allInterfaces.add(networkInterface); Enumeration subInterfaces = networkInterface.getSubInterfaces(); - if (subInterfaces.hasMoreElements()) { - while (subInterfaces.hasMoreElements()) { - allInterfaces.add(subInterfaces.nextElement()); - } + while (subInterfaces.hasMoreElements()) { + allInterfaces.add(subInterfaces.nextElement()); } } sortInterfaces(allInterfaces); @@ -221,10 +222,8 @@ public class NetworkUtils { NetworkInterface networkInterface = interfaces.nextElement(); networkInterfaces.add(networkInterface); Enumeration subInterfaces = networkInterface.getSubInterfaces(); - if (subInterfaces.hasMoreElements()) { - while (subInterfaces.hasMoreElements()) { - networkInterfaces.add(subInterfaces.nextElement()); - } + while (subInterfaces.hasMoreElements()) { + networkInterfaces.add(subInterfaces.nextElement()); } } sortInterfaces(networkInterfaces); @@ -250,6 +249,9 @@ public class NetworkUtils { return left.length - right.length; } + /** + * + */ public enum ProtocolVersion { IPV4, IPV6, IPV46, NONE } diff --git a/elx-common/src/main/java/org/xbib/elx/common/util/package-info.java b/elx-common/src/main/java/org/xbib/elx/common/util/package-info.java new file mode 100644 index 0000000..20a7cbb --- /dev/null +++ b/elx-common/src/main/java/org/xbib/elx/common/util/package-info.java @@ -0,0 +1,4 @@ +/** + * + */ +package org.xbib.elx.common.util; diff --git a/elx-common/src/main/resources/META-INF/services/java.net.URLStreamHandlerFactory 
b/elx-common/src/main/resources/META-INF/services/java.net.URLStreamHandlerFactory new file mode 100644 index 0000000..bb6d620 --- /dev/null +++ b/elx-common/src/main/resources/META-INF/services/java.net.URLStreamHandlerFactory @@ -0,0 +1 @@ +org.xbib.elx.common.io.ClasspathURLStreamHandlerFactory \ No newline at end of file diff --git a/elx-common/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider b/elx-common/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider new file mode 100644 index 0000000..9729b83 --- /dev/null +++ b/elx-common/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider @@ -0,0 +1 @@ +org.xbib.elx.common.MockExtendedClientProvider \ No newline at end of file diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/AliasTests.java b/elx-common/src/test/java/org/xbib/elx/common/test/AliasTest.java similarity index 56% rename from common/src/test/java/org/xbib/elasticsearch/client/common/AliasTests.java rename to elx-common/src/test/java/org/xbib/elx/common/test/AliasTest.java index e0ef8d5..9fa4a40 100644 --- a/common/src/test/java/org/xbib/elasticsearch/client/common/AliasTests.java +++ b/elx-common/src/test/java/org/xbib/elx/common/test/AliasTest.java @@ -1,4 +1,7 @@ -package org.xbib.elasticsearch.client.common; +package org.xbib.elx.common.test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; @@ -9,8 +12,9 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; -import 
org.elasticsearch.testframework.ESSingleNodeTestCase; +import org.junit.Test; import java.util.Collections; import java.util.Iterator; @@ -19,58 +23,72 @@ import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; -public class AliasTests extends ESSingleNodeTestCase { +public class AliasTest extends TestBase { - private static final Logger logger = LogManager.getLogger(AliasTests.class.getName()); + private static final Logger logger = LogManager.getLogger(AliasTest.class.getName()); + @Test public void testAlias() { + Client client = client("1"); CreateIndexRequest indexRequest = new CreateIndexRequest("test"); - client().admin().indices().create(indexRequest).actionGet(); + client.admin().indices().create(indexRequest).actionGet(); // put alias IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() - .index("test").alias("test_alias") - ); - client().admin().indices().aliases(indicesAliasesRequest).actionGet(); + String[] indices = new String[]{"test"}; + String[] aliases = new String[]{"test_alias"}; + IndicesAliasesRequest.AliasActions aliasAction = + new IndicesAliasesRequest.AliasActions(IndicesAliasesRequest.AliasActions.Type.ADD) + .indices(indices) + .aliases(aliases); + indicesAliasesRequest.addAliasAction(aliasAction); + client.admin().indices().aliases(indicesAliasesRequest).actionGet(); // get alias GetAliasesRequest getAliasesRequest = new GetAliasesRequest(Strings.EMPTY_ARRAY); long t0 = System.nanoTime(); - GetAliasesResponse getAliasesResponse = client().admin().indices().getAliases(getAliasesRequest).actionGet(); + GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(getAliasesRequest).actionGet(); long t1 = (System.nanoTime() - t0) / 1000000; logger.info("{} time(ms) = {}", getAliasesResponse.getAliases(), t1); assertTrue(t1 >= 0); } + @Test public void testMostRecentIndex() { + Client client = 
client("1"); String alias = "test"; CreateIndexRequest indexRequest = new CreateIndexRequest("test20160101"); - client().admin().indices().create(indexRequest).actionGet(); + client.admin().indices().create(indexRequest).actionGet(); indexRequest = new CreateIndexRequest("test20160102"); - client().admin().indices().create(indexRequest).actionGet(); + client.admin().indices().create(indexRequest).actionGet(); indexRequest = new CreateIndexRequest("test20160103"); - client().admin().indices().create(indexRequest).actionGet(); + client.admin().indices().create(indexRequest).actionGet(); IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() - .indices("test20160101", "test20160102", "test20160103") - .alias(alias) - ); - client().admin().indices().aliases(indicesAliasesRequest).actionGet(); + String[] indices = new String[]{"test20160101", "test20160102", "test20160103"}; + String[] aliases = new String[]{alias}; + IndicesAliasesRequest.AliasActions aliasAction = + new IndicesAliasesRequest.AliasActions(IndicesAliasesRequest.AliasActions.Type.ADD) + .indices(indices) + .aliases(aliases); + indicesAliasesRequest.addAliasAction(aliasAction); + client.admin().indices().aliases(indicesAliasesRequest).actionGet(); - GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client(), + GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client, GetAliasesAction.INSTANCE); GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.setAliases(alias).execute().actionGet(); Pattern pattern = Pattern.compile("^(.*?)(\\d+)$"); Set result = new TreeSet<>(Collections.reverseOrder()); for (ObjectCursor indexName : getAliasesResponse.getAliases().keys()) { Matcher m = pattern.matcher(indexName.value); - if (m.matches() && alias.equals(m.group(1))) { - result.add(indexName.value); + if (m.matches()) { + if 
(alias.equals(m.group(1))) { + result.add(indexName.value); + } } } Iterator it = result.iterator(); assertEquals("test20160103", it.next()); assertEquals("test20160102", it.next()); assertEquals("test20160101", it.next()); - logger.info("result={}", result); + logger.info("success: result={}", result); } + } diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/ClusterBlockTest.java b/elx-common/src/test/java/org/xbib/elx/common/test/ClusterBlockTest.java new file mode 100644 index 0000000..243ed93 --- /dev/null +++ b/elx-common/src/test/java/org/xbib/elx/common/test/ClusterBlockTest.java @@ -0,0 +1,50 @@ +package org.xbib.elx.common.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; + +@Ignore +public class ClusterBlockTest extends TestBase { + + private static final Logger logger = LogManager.getLogger("test"); + + @Before + public void startNodes() { + try { + setClusterName("test-cluster"); + startNode("1"); + // do not wait for green health state + logger.info("ready"); + } catch (Throwable t) { + logger.error("startNodes failed", t); + } + } + + @Override + protected Settings getNodeSettings() { + return Settings.builder() + .put(super.getNodeSettings()) + .put("discovery.zen.minimum_master_nodes", 2) // block until we have two nodes + .build(); + } + + @Test(expected = ClusterBlockException.class) + public void testClusterBlock() throws Exception { + Client client = client("1"); + XContentBuilder builder = 
XContentFactory.jsonBuilder().startObject().field("field1", "value1").endObject(); + IndexRequestBuilder irb = client.prepareIndex("test", "test", "1").setSource(builder); + BulkRequestBuilder brb = client.prepareBulk(); + brb.add(irb); + brb.execute().actionGet(); + } +} diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/MockExtendedClientProviderTest.java b/elx-common/src/test/java/org/xbib/elx/common/test/MockExtendedClientProviderTest.java new file mode 100644 index 0000000..cbe7972 --- /dev/null +++ b/elx-common/src/test/java/org/xbib/elx/common/test/MockExtendedClientProviderTest.java @@ -0,0 +1,19 @@ +package org.xbib.elx.common.test; + +import org.junit.Test; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.common.MockExtendedClient; +import org.xbib.elx.common.MockExtendedClientProvider; + +import java.io.IOException; + +import static org.junit.Assert.assertNotNull; + +public class MockExtendedClientProviderTest { + + @Test + public void testMockExtendedProvider() throws IOException { + MockExtendedClient client = ClientBuilder.builder().provider(MockExtendedClientProvider.class).build(); + assertNotNull(client); + } +} diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/MockNode.java b/elx-common/src/test/java/org/xbib/elx/common/test/MockNode.java new file mode 100644 index 0000000..b83aa8a --- /dev/null +++ b/elx-common/src/test/java/org/xbib/elx/common/test/MockNode.java @@ -0,0 +1,15 @@ +package org.xbib.elx.common.test; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.InternalSettingsPreparer; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; + +import java.util.List; + +public class MockNode extends Node { + + public MockNode(Settings settings, List> classpathPlugins) { + super(InternalSettingsPreparer.prepareEnvironment(settings, null), classpathPlugins); + } +} diff --git 
a/common/src/test/java/org/xbib/elasticsearch/client/common/NetworkTest.java b/elx-common/src/test/java/org/xbib/elx/common/test/NetworkTest.java similarity index 81% rename from common/src/test/java/org/xbib/elasticsearch/client/common/NetworkTest.java rename to elx-common/src/test/java/org/xbib/elx/common/test/NetworkTest.java index 0ed4fc8..7933343 100644 --- a/common/src/test/java/org/xbib/elasticsearch/client/common/NetworkTest.java +++ b/elx-common/src/test/java/org/xbib/elx/common/test/NetworkTest.java @@ -1,4 +1,4 @@ -package org.xbib.elasticsearch.client.common; +package org.xbib.elx.common.test; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -13,17 +13,12 @@ public class NetworkTest { private static final Logger logger = LogManager.getLogger(NetworkTest.class); - /** - * Demonstrates the slowness oj Java network interface lookup on certain environments. - * May be a killer for ES node startup - so avoid automatic traversal of NICs at all costs. 
- * - * @throws Exception if test fails - */ @Test public void testNetwork() throws Exception { + // walk over all found interfaces (this is slow - multicast/pings are performed) Enumeration nets = NetworkInterface.getNetworkInterfaces(); for (NetworkInterface netint : Collections.list(nets)) { - logger.info("checking network interface = " + netint.getName()); + System.out.println("checking network interface = " + netint.getName()); Enumeration inetAddresses = netint.getInetAddresses(); for (InetAddress addr : Collections.list(inetAddresses)) { logger.info("found address = " + addr.getHostAddress() diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/SearchTest.java b/elx-common/src/test/java/org/xbib/elx/common/test/SearchTest.java new file mode 100644 index 0000000..5d7420a --- /dev/null +++ b/elx-common/src/test/java/org/xbib/elx/common/test/SearchTest.java @@ -0,0 +1,58 @@ +package org.xbib.elx.common.test; + +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.junit.Test; + +import static org.junit.Assert.assertTrue; + +public class SearchTest extends TestBase { + + @Test + public void testSearch() throws Exception { + Client client = client("1"); + BulkRequestBuilder builder = new BulkRequestBuilder(client, BulkAction.INSTANCE); + for (int i = 0; i < 1000; i++) { + IndexRequest indexRequest = new IndexRequest("pages", "row") + .source(XContentFactory.jsonBuilder() + .startObject() + 
.field("user1", "joerg") + .field("user2", "joerg") + .field("user3", "joerg") + .field("user4", "joerg") + .field("user5", "joerg") + .field("user6", "joerg") + .field("user7", "joerg") + .field("user8", "joerg") + .field("user9", "joerg") + .field("rowcount", i) + .field("rs", 1234) + .endObject() + ); + builder.add(indexRequest); + } + client.bulk(builder.request()).actionGet(); + client.admin().indices().refresh(new RefreshRequest()).actionGet(); + + for (int i = 0; i < 100; i++) { + QueryBuilder queryStringBuilder = QueryBuilders.queryStringQuery("rs:" + 1234); + SearchRequestBuilder requestBuilder = client.prepareSearch() + .setIndices("pages") + .setTypes("row") + .setQuery(queryStringBuilder) + .addSort("rowcount", SortOrder.DESC) + .setFrom(i * 10).setSize(10); + SearchResponse searchResponse = requestBuilder.execute().actionGet(); + assertTrue(searchResponse.getHits().getTotalHits() > 0); + } + } +} diff --git a/common/src/test/java/org/xbib/elasticsearch/client/common/SimpleTests.java b/elx-common/src/test/java/org/xbib/elx/common/test/SimpleTest.java similarity index 56% rename from common/src/test/java/org/xbib/elasticsearch/client/common/SimpleTests.java rename to elx-common/src/test/java/org/xbib/elx/common/test/SimpleTest.java index 6e2dd8a..ba20e63 100644 --- a/common/src/test/java/org/xbib/elasticsearch/client/common/SimpleTests.java +++ b/elx-common/src/test/java/org/xbib/elx/common/test/SimpleTest.java @@ -1,53 +1,53 @@ -package org.xbib.elasticsearch.client.common; +package org.xbib.elx.common.test; + +import static org.junit.Assert.assertEquals; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; +import 
org.elasticsearch.action.admin.indices.refresh.RefreshAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.testframework.ESSingleNodeTestCase; +import org.junit.Test; -public class SimpleTests extends ESSingleNodeTestCase { - - private static final Logger logger = LogManager.getLogger(SimpleTests.class.getName()); +public class SimpleTest extends TestBase { + @Test public void test() throws Exception { try { DeleteIndexRequestBuilder deleteIndexRequestBuilder = - new DeleteIndexRequestBuilder(client(), DeleteIndexAction.INSTANCE, "test"); + new DeleteIndexRequestBuilder(client("1"), DeleteIndexAction.INSTANCE, "test"); deleteIndexRequestBuilder.execute().actionGet(); - } catch (Exception e) { - logger.warn(e.getMessage(), e); + } catch (IndexNotFoundException e) { + // ignore if index not found } - CreateIndexRequestBuilder createIndexRequestBuilder = new CreateIndexRequestBuilder(client(), - CreateIndexAction.INSTANCE) - .setIndex("test") - .setSettings(Settings.builder() - .put("index.analysis.analyzer.default.filter.0", "lowercase") - // where is the trim token filter??? 
- //.put("index.analysis.analyzer.default.filter.1", "trim") - .put("index.analysis.analyzer.default.tokenizer", "keyword") - .build()); - createIndexRequestBuilder.execute().actionGet(); + Settings indexSettings = Settings.builder() + .put("index.analysis.analyzer.default.filter.0", "lowercase") + .put("index.analysis.analyzer.default.filter.1", "trim") + .put("index.analysis.analyzer.default.tokenizer", "keyword") + .build(); + CreateIndexRequestBuilder createIndexRequestBuilder = new CreateIndexRequestBuilder(client("1"), CreateIndexAction.INSTANCE); + createIndexRequestBuilder.setIndex("test") + .setSettings(indexSettings).execute().actionGet(); - IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(client(), IndexAction.INSTANCE); + IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(client("1"), IndexAction.INSTANCE); indexRequestBuilder .setIndex("test") .setType("test") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().field("field", "1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8").endObject()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .execute() .actionGet(); - String doc = client().prepareSearch("test") + RefreshRequestBuilder refreshRequestBuilder = new RefreshRequestBuilder(client("1"), RefreshAction.INSTANCE); + refreshRequestBuilder.setIndices("test").execute().actionGet(); + String doc = client("1").prepareSearch("test") .setTypes("test") .setQuery(QueryBuilders.matchQuery("field", "1%2fPJJP3JV2C24iDfEu9XpHBaYxXh%2fdHTbmchB35SDznXO2g8Vz4D7GTIvY54iMiX_149c95f02a8")) diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/TestBase.java b/elx-common/src/test/java/org/xbib/elx/common/test/TestBase.java new file mode 100644 index 0000000..f2a290f --- /dev/null +++ b/elx-common/src/test/java/org/xbib/elx/common/test/TestBase.java @@ -0,0 +1,206 @@ +package org.xbib.elx.common.test; + +import org.apache.logging.log4j.LogManager; +import 
org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.client.support.AbstractClient; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.transport.netty4.Netty4Plugin; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; + +public class TestBase { + + private static final Logger logger = LogManager.getLogger("test"); + + private static final Random random = new Random(); + + private static final char[] numbersAndLetters = ("0123456789abcdefghijklmnopqrstuvwxyz").toCharArray(); + + private Map nodes = new HashMap<>(); + + private Map clients = new 
HashMap<>(); + + private String cluster; + + private String host; + + private int port; + + @Before + public void startNodes() { + try { + logger.info("starting"); + setClusterName("test-cluster"); + startNode("1"); + findNodeAddress(); + try { + ClusterHealthResponse healthResponse = client("1").execute(ClusterHealthAction.INSTANCE, + new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN) + .timeout(TimeValue.timeValueSeconds(30))).actionGet(); + if (healthResponse != null && healthResponse.isTimedOut()) { + throw new IOException("cluster state is " + healthResponse.getStatus().name() + + ", from here on, everything will fail!"); + } + } catch (ElasticsearchTimeoutException e) { + throw new IOException("cluster does not respond to health request, cowardly refusing to continue"); + } + ClusterStateRequestBuilder clusterStateRequestBuilder = + new ClusterStateRequestBuilder(client("1"), ClusterStateAction.INSTANCE).all(); + ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet(); + logger.info("cluster name = {}", clusterStateResponse.getClusterName().value()); + logger.info("host = {} port = {}", host, port); + } catch (Throwable t) { + logger.error(t.getMessage(), t); + } + } + + @After + public void stopNodes() { + try { + closeNodes(); + } catch (Exception e) { + logger.error("can not close nodes", e); + } finally { + try { + deleteFiles(); + logger.info("data files wiped"); + Thread.sleep(2000L); // let OS commit changes + } catch (IOException e) { + logger.error(e.getMessage(), e); + } catch (InterruptedException e) { + // ignore + } + } + } + + protected Settings getTransportSettings() { + return Settings.builder() + .put("host", host) + .put("port", port) + .put("cluster.name", cluster) + .put("path.home", getHome()) + .build(); + } + + protected Settings getNodeSettings() { + return Settings.builder() + .put("cluster.name", cluster) + .put("transport.type", Netty4Plugin.NETTY_TRANSPORT_NAME) + 
.put("path.home", getHome()) + .build(); + } + + protected static String getHome() { + return System.getProperty("path.home", System.getProperty("user.dir")); + } + + protected void startNode(String id) throws NodeValidationException { + buildNode(id).start(); + } + + protected AbstractClient client(String id) { + return clients.get(id); + } + + protected void setClusterName(String cluster) { + this.cluster = cluster; + } + + protected String getClusterName() { + return cluster; + } + + protected String randomString(int len) { + final char[] buf = new char[len]; + final int n = numbersAndLetters.length - 1; + for (int i = 0; i < buf.length; i++) { + buf[i] = numbersAndLetters[random.nextInt(n)]; + } + return new String(buf); + } + + private void closeNodes() throws IOException { + logger.info("closing all clients"); + for (AbstractClient client : clients.values()) { + client.close(); + } + clients.clear(); + logger.info("closing all nodes"); + for (Node node : nodes.values()) { + if (node != null) { + node.close(); + } + } + nodes.clear(); + logger.info("all nodes closed"); + } + + private void findNodeAddress() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().transport(true); + NodesInfoResponse response = client("1").admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + TransportAddress address= response.getNodes().iterator().next().getTransport().getAddress() + .publishAddress(); + host = address.address().getHostName(); + port = address.address().getPort(); + } + + private Node buildNode(String id) { + Settings nodeSettings = Settings.builder() + .put(getNodeSettings()) + .put("node.name", id) + .build(); + List> plugins = Arrays.asList(CommonAnalysisPlugin.class, Netty4Plugin.class); + Node node = new MockNode(nodeSettings, plugins); + AbstractClient client = (AbstractClient) node.client(); + nodes.put(id, node); + clients.put(id, client); + logger.info("clients={}", clients); + return node; + } + + private static void deleteFiles() throws 
IOException { + Path directory = Paths.get(getHome() + "/data"); + Files.walkFileTree(directory, new SimpleFileVisitor<>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + Files.delete(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + }); + } +} diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/WildcardTest.java b/elx-common/src/test/java/org/xbib/elx/common/test/WildcardTest.java new file mode 100644 index 0000000..d3d2b95 --- /dev/null +++ b/elx-common/src/test/java/org/xbib/elx/common/test/WildcardTest.java @@ -0,0 +1,52 @@ +package org.xbib.elx.common.test; + +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.junit.Test; + +import java.io.IOException; + +public class WildcardTest extends TestBase { + + @Test + public void testWildcard() throws Exception { + index(client("1"), "1", "010"); + index(client("1"), "2", "0*0"); + // exact + validateCount(client("1"), QueryBuilders.queryStringQuery("010").defaultField("field"), 1); + validateCount(client("1"), QueryBuilders.queryStringQuery("0\\*0").defaultField("field"), 1); + // pattern + validateCount(client("1"), QueryBuilders.queryStringQuery("0*0").defaultField("field"), 1); // 2? + validateCount(client("1"), QueryBuilders.queryStringQuery("0?0").defaultField("field"), 1); // 2? + validateCount(client("1"), QueryBuilders.queryStringQuery("0**0").defaultField("field"), 1); // 2? 
+ validateCount(client("1"), QueryBuilders.queryStringQuery("0??0").defaultField("field"), 0); + validateCount(client("1"), QueryBuilders.queryStringQuery("*10").defaultField("field"), 1); + validateCount(client("1"), QueryBuilders.queryStringQuery("*1*").defaultField("field"), 1); + validateCount(client("1"), QueryBuilders.queryStringQuery("*\\*0").defaultField("field"), 0); // 1? + validateCount(client("1"), QueryBuilders.queryStringQuery("*\\**").defaultField("field"), 0); // 1? + } + + private void index(Client client, String id, String fieldValue) throws IOException { + client.index(new IndexRequest("index", "type", id) + .source(XContentFactory.jsonBuilder().startObject().field("field", fieldValue).endObject())) + .actionGet(); + client.admin().indices().refresh(new RefreshRequest()).actionGet(); + } + + private long count(Client client, QueryBuilder queryBuilder) { + return client.prepareSearch("index").setTypes("type") + .setQuery(queryBuilder) + .execute().actionGet().getHits().getTotalHits(); + } + + private void validateCount(Client client, QueryBuilder queryBuilder, long expectedHits) { + final long actualHits = count(client, queryBuilder); + if (actualHits != expectedHits) { + throw new RuntimeException("actualHits=" + actualHits + ", expectedHits=" + expectedHits); + } + } +} diff --git a/elx-common/src/test/java/org/xbib/elx/common/test/package-info.java b/elx-common/src/test/java/org/xbib/elx/common/test/package-info.java new file mode 100644 index 0000000..9d006c1 --- /dev/null +++ b/elx-common/src/test/java/org/xbib/elx/common/test/package-info.java @@ -0,0 +1,4 @@ +/** + * + */ +package org.xbib.elx.common.test; diff --git a/common/src/test/resources/log4j2.xml b/elx-common/src/test/resources/log4j2.xml similarity index 72% rename from common/src/test/resources/log4j2.xml rename to elx-common/src/test/resources/log4j2.xml index b175dfc..6c323f8 100644 --- a/common/src/test/resources/log4j2.xml +++ b/elx-common/src/test/resources/log4j2.xml @@ 
-2,11 +2,11 @@ - + - + diff --git a/elx-http/build.gradle b/elx-http/build.gradle new file mode 100644 index 0000000..39534d0 --- /dev/null +++ b/elx-http/build.gradle @@ -0,0 +1,6 @@ +dependencies{ + compile project(':elx-common') + compile "org.xbib.elasticsearch:transport-netty4:${rootProject.property('elasticsearch-server.version')}" + compile "org.xbib:netty-http-client:${project.property('xbib-netty-http.version')}" + testCompile "org.xbib.elasticsearch:elasticsearch-analysis-common:${rootProject.property('elasticsearch-server.version')}" +} diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpClient.java b/elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClient.java similarity index 50% rename from http/src/main/java/org/xbib/elasticsearch/client/http/HttpClient.java rename to elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClient.java index 31e5d95..c8faf35 100644 --- a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpClient.java +++ b/elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClient.java @@ -1,4 +1,4 @@ -package org.xbib.elasticsearch.client.http; +package org.xbib.elx.http; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -13,21 +13,16 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.node.Node; import org.elasticsearch.threadpool.ThreadPool; -import org.xbib.elasticsearch.client.AbstractClient; -import org.xbib.elasticsearch.client.BulkControl; -import org.xbib.elasticsearch.client.BulkMetric; +import org.xbib.elx.common.AbstractExtendedClient; import org.xbib.netty.http.client.Client; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import 
java.util.ServiceLoader; -import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -35,52 +30,44 @@ import java.util.stream.Stream; /** * Elasticsearch HTTP client. */ -public class HttpClient extends AbstractClient implements ElasticsearchClient { +public class ExtendedHttpClient extends AbstractExtendedClient implements ElasticsearchClient { - private static final Logger logger = LogManager.getLogger(HttpClient.class); + private static final Logger logger = LogManager.getLogger(ExtendedHttpClient.class); - private Client client; + private Client nettyHttpClient; - private NamedXContentRegistry registry; + private final ClassLoader classLoader; + + private final NamedXContentRegistry registry; @SuppressWarnings("rawtypes") - private Map actionMap; + private final Map actionMap; - private List urls; + private String url; - //private ThreadPool threadPool; - - @Override - public HttpClient init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control) { - init(client, settings, metric, control, null, Collections.emptyList()); - return this; + public ExtendedHttpClient(List namedXContentEntries, ClassLoader classLoader) { + this.registry = new NamedXContentRegistry(Stream.of(getNamedXContents().stream(), + namedXContentEntries.stream()).flatMap(Function.identity()).collect(Collectors.toList())); + this.classLoader = classLoader != null ? 
classLoader : Thread.currentThread().getContextClassLoader(); + this.actionMap = new HashMap<>(); } + @Override @SuppressWarnings({"unchecked", "rawtypes"}) - private void init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control, - ClassLoader classLoader, List namedXContentEntries) { - //super.init(client, settings, metric, control); - this.urls = settings.getAsList("urls"); - if (urls.isEmpty()) { - throw new IllegalArgumentException("no urls given"); + public ExtendedHttpClient init(Settings settings) throws IOException { + super.init(settings); + if (settings == null) { + return null; } - this.registry = new NamedXContentRegistry(Stream.of(getNamedXContents().stream(), - namedXContentEntries.stream() - ).flatMap(Function.identity()).collect(Collectors.toList())); - this.actionMap = new HashMap<>(); - ServiceLoader httpActionServiceLoader = ServiceLoader.load(HttpAction.class, - classLoader != null ? classLoader : Thread.currentThread().getContextClassLoader()); + this.url = settings.get("url"); + ServiceLoader httpActionServiceLoader = ServiceLoader.load(HttpAction.class, classLoader); for (HttpAction httpAction : httpActionServiceLoader) { httpAction.setSettings(settings); actionMap.put(httpAction.getActionInstance(), httpAction); } - this.client = Client.builder().enableDebug().build(); - Settings threadPoolsettings = Settings.builder() - .put(settings) - .put(Node.NODE_NAME_SETTING.getKey(), "httpclient") - .build(); - //this.threadPool = threadPool != null ? 
threadPool : new ThreadPool(threadPoolsettings); - logger.info("HTTP client initialized with {} actions", actionMap.size()); + this.nettyHttpClient = Client.builder().enableDebug().build(); + logger.info("extended HTTP client initialized with {} actions", actionMap.size()); + return this; } private static List getNamedXContents() { @@ -91,28 +78,23 @@ public class HttpClient extends AbstractClient implements ElasticsearchClient { return registry; } - public static Builder builder() { - return new Builder(); - } - public Client internalClient() { - return client; + return nettyHttpClient; } @Override - public ElasticsearchClient client() { + public ElasticsearchClient getClient() { return this; } @Override - protected ElasticsearchClient createClient(Settings settings) throws IOException { + protected ElasticsearchClient createClient(Settings settings) { return this; } @Override - public void shutdown() throws IOException { - client.shutdownGracefully(); - //threadPool.close(); + protected void closeClient() throws IOException { + nettyHttpClient.shutdownGracefully(); } @Override @@ -142,68 +124,22 @@ public class HttpClient extends AbstractClient implements ElasticsearchClient { @Override public ThreadPool threadPool() { logger.info("returning null for threadPool() request"); - return null; //threadPool; + return null; } @SuppressWarnings({"unchecked", "rawtypes"}) - public > + private > void doExecute(Action action, R request, ActionListener listener) { HttpAction httpAction = actionMap.get(action); if (httpAction == null) { throw new IllegalStateException("failed to find http action [" + action + "] to execute"); } - logger.info("http action = " + httpAction); - String url = urls.get(0); // TODO try { - logger.info("submitting to URL {}", url); HttpActionContext httpActionContext = new HttpActionContext(this, request, url); httpAction.execute(httpActionContext, listener); - logger.info("submitted to URL {}", url); + logger.debug("submitted to URL {}", url); } 
catch (Exception e) { logger.error(e.getMessage(), e); } } - - /** - * The Builder for HTTP client. - */ - public static class Builder { - - private final Settings.Builder settingsBuilder = Settings.builder(); - - private ClassLoader classLoader; - - private List namedXContentEntries; - - private ThreadPool threadPool = null; - - public Builder settings(Settings settings) { - this.settingsBuilder.put(settings); - return this; - } - - public Builder classLoader(ClassLoader classLoader) { - this.classLoader = classLoader; - return this; - } - - public Builder namedXContentEntries(List namedXContentEntries) { - this.namedXContentEntries = namedXContentEntries; - return this; - } - - public Builder threadPool(ThreadPool threadPool) { - this.threadPool = threadPool; - return this; - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - public HttpClient build() { - Settings settings = settingsBuilder.build(); - HttpClient httpClient = new HttpClient(); - httpClient.init(null, settings, null, null, - classLoader, namedXContentEntries); - return httpClient; - } - } } diff --git a/elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClientProvider.java b/elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClientProvider.java new file mode 100644 index 0000000..628ba4f --- /dev/null +++ b/elx-http/src/main/java/org/xbib/elx/http/ExtendedHttpClientProvider.java @@ -0,0 +1,12 @@ +package org.xbib.elx.http; + +import org.xbib.elx.api.ExtendedClientProvider; + +import java.util.Collections; + +public class ExtendedHttpClientProvider implements ExtendedClientProvider { + @Override + public ExtendedHttpClient getExtendedClient() { + return new ExtendedHttpClient(Collections.emptyList(), Thread.currentThread().getContextClassLoader()); + } +} diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpAction.java b/elx-http/src/main/java/org/xbib/elx/http/HttpAction.java similarity index 95% rename from 
http/src/main/java/org/xbib/elasticsearch/client/http/HttpAction.java rename to elx-http/src/main/java/org/xbib/elx/http/HttpAction.java index 674ee6d..844dae3 100644 --- a/http/src/main/java/org/xbib/elasticsearch/client/http/HttpAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/HttpAction.java @@ -1,4 +1,4 @@ -package org.xbib.elasticsearch.client.http; +package org.xbib.elx.http; import io.netty.buffer.ByteBuf; import io.netty.handler.codec.http.HttpHeaderNames; @@ -68,7 +68,7 @@ public abstract class HttpAction { - private final HttpClient httpClient; + private final ExtendedHttpClient extendedHttpClient; private final R request; @@ -23,14 +23,14 @@ public class HttpActionContext extends BaseFuture implements ActionFuture, ActionListener { private Transport httpClientTransport; diff --git a/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/health/HttpClusterHealthAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/health/HttpClusterHealthAction.java new file mode 100644 index 0000000..5ed27be --- /dev/null +++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/health/HttpClusterHealthAction.java @@ -0,0 +1,135 @@ +package org.xbib.elx.http.action.admin.cluster.health; + +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elx.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; +import 
java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class HttpClusterHealthAction extends HttpAction { + + @Override + public ClusterHealthAction getActionInstance() { + return ClusterHealthAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, ClusterHealthRequest request) { + return newPutRequest(url, "/_cluster/health"); + } + + @Override + protected CheckedFunction entityParser() { + throw new UnsupportedOperationException(); + } + + private static final String CLUSTER_NAME = "cluster_name"; + private static final String STATUS = "status"; + private static final String TIMED_OUT = "timed_out"; + private static final String NUMBER_OF_NODES = "number_of_nodes"; + private static final String NUMBER_OF_DATA_NODES = "number_of_data_nodes"; + private static final String NUMBER_OF_PENDING_TASKS = "number_of_pending_tasks"; + private static final String NUMBER_OF_IN_FLIGHT_FETCH = "number_of_in_flight_fetch"; + private static final String DELAYED_UNASSIGNED_SHARDS = "delayed_unassigned_shards"; + private static final String TASK_MAX_WAIT_TIME_IN_QUEUE = "task_max_waiting_in_queue"; + private static final String TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS = "task_max_waiting_in_queue_millis"; + private static final String ACTIVE_SHARDS_PERCENT_AS_NUMBER = "active_shards_percent_as_number"; + private static final String ACTIVE_SHARDS_PERCENT = "active_shards_percent"; + private static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards"; + private static final String ACTIVE_SHARDS = "active_shards"; + private static final String RELOCATING_SHARDS = "relocating_shards"; + private static final String INITIALIZING_SHARDS = "initializing_shards"; + private static final String UNASSIGNED_SHARDS = "unassigned_shards"; + private static final String INDICES = "indices"; + + 
private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("cluster_health_response", true, + parsedObjects -> { + int i = 0; + // ClusterStateHealth fields + int numberOfNodes = (int) parsedObjects[i++]; + int numberOfDataNodes = (int) parsedObjects[i++]; + int activeShards = (int) parsedObjects[i++]; + int relocatingShards = (int) parsedObjects[i++]; + int activePrimaryShards = (int) parsedObjects[i++]; + int initializingShards = (int) parsedObjects[i++]; + int unassignedShards = (int) parsedObjects[i++]; + double activeShardsPercent = (double) parsedObjects[i++]; + String statusStr = (String) parsedObjects[i++]; + ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr); + @SuppressWarnings("unchecked") List indexList = + (List) parsedObjects[i++]; + final Map indices; + if (indexList == null || indexList.isEmpty()) { + indices = emptyMap(); + } else { + indices = new HashMap<>(indexList.size()); + for (ClusterIndexHealth indexHealth : indexList) { + indices.put(indexHealth.getIndex(), indexHealth); + } + } + /*ClusterStateHealth stateHealth = new ClusterStateHealth(activePrimaryShards, activeShards, relocatingShards, + initializingShards, unassignedShards, numberOfNodes, numberOfDataNodes, activeShardsPercent, status, + indices);*/ + //ClusterState clusterState = new ClusterState(); + //ClusterStateHealth clusterStateHealth = new ClusterStateHealth(clusterState, concreteIndices); + + // ClusterHealthResponse fields + String clusterName = (String) parsedObjects[i++]; + int numberOfPendingTasks = (int) parsedObjects[i++]; + int numberOfInFlightFetch = (int) parsedObjects[i++]; + int delayedUnassignedShards = (int) parsedObjects[i++]; + long taskMaxWaitingTimeMillis = (long) parsedObjects[i++]; + boolean timedOut = (boolean) parsedObjects[i]; + + return new ClusterHealthResponse(clusterName, null, null, numberOfPendingTasks, + numberOfInFlightFetch, delayedUnassignedShards, + 
TimeValue.timeValueMillis(taskMaxWaitingTimeMillis)); + /*return new ClusterHealthResponse(clusterName, numberOfPendingTasks, numberOfInFlightFetch, + delayedUnassignedShards, + TimeValue.timeValueMillis(taskMaxWaitingTimeMillis), timedOut, stateHealth);*/ + }); + + + // private static final ObjectParser.NamedObjectParser INDEX_PARSER = + // (XContentParser parser, Void context, String index) -> ClusterIndexHealth.innerFromXContent(parser, index); + + static { + // ClusterStateHealth fields + PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_NODES)); + PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_DATA_NODES)); + PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(RELOCATING_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_PRIMARY_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(INITIALIZING_SHARDS)); + PARSER.declareInt(constructorArg(), new ParseField(UNASSIGNED_SHARDS)); + PARSER.declareDouble(constructorArg(), new ParseField(ACTIVE_SHARDS_PERCENT_AS_NUMBER)); + PARSER.declareString(constructorArg(), new ParseField(STATUS)); + // Can be absent if LEVEL == 'cluster' + //PARSER.declareNamedObjects(optionalConstructorArg(), INDEX_PARSER, new ParseField(INDICES)); + + // ClusterHealthResponse fields + PARSER.declareString(constructorArg(), new ParseField(CLUSTER_NAME)); + PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_PENDING_TASKS)); + PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_IN_FLIGHT_FETCH)); + PARSER.declareInt(constructorArg(), new ParseField(DELAYED_UNASSIGNED_SHARDS)); + PARSER.declareLong(constructorArg(), new ParseField(TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS)); + PARSER.declareBoolean(constructorArg(), new ParseField(TIMED_OUT)); + } + +} diff --git a/http/src/main/java/org/elasticsearch/action/admin/cluster/node/info/HttpNodesInfoAction.java 
b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/node/info/HttpNodesInfoAction.java similarity index 87% rename from http/src/main/java/org/elasticsearch/action/admin/cluster/node/info/HttpNodesInfoAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/node/info/HttpNodesInfoAction.java index e50358b..619f80a 100644 --- a/http/src/main/java/org/elasticsearch/action/admin/cluster/node/info/HttpNodesInfoAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/node/info/HttpNodesInfoAction.java @@ -1,8 +1,13 @@ -package org.elasticsearch.action.admin.cluster.node.info; +package org.xbib.elx.http.action.admin.cluster.node.info; import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.CheckedFunction; @@ -16,8 +21,8 @@ import org.elasticsearch.monitor.os.OsInfo; import org.elasticsearch.monitor.process.ProcessInfo; import org.elasticsearch.threadpool.ThreadPoolInfo; import org.elasticsearch.transport.TransportInfo; -import org.xbib.elasticsearch.client.http.HttpAction; -import org.xbib.elasticsearch.client.http.HttpActionContext; +import org.xbib.elx.http.HttpAction; +import org.xbib.elx.http.HttpActionContext; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; @@ -106,11 +111,11 @@ public class HttpNodesInfoAction extends HttpAction map2 = (Map) entry.getValue(); - String nodeName = (String)map2.get("name"); - String hostName 
= (String)map2.get("host"); - String hostAddress = (String)map2.get("ip"); + String nodeName = (String) map2.get("name"); + String hostName = (String) map2.get("host"); + String hostAddress = (String) map2.get("ip"); // [/][:] - String transportAddressString = (String)map2.get("transport_address"); + String transportAddressString = (String) map2.get("transport_address"); int pos = transportAddressString.indexOf(':'); String host = pos > 0 ? transportAddressString.substring(0, pos) : transportAddressString; int port = Integer.parseInt(pos > 0 ? transportAddressString.substring(pos + 1) : "0"); @@ -121,8 +126,8 @@ public class HttpNodesInfoAction extends HttpAction attributes = Collections.emptyMap(); Set roles = new HashSet<>(); Version version = Version.fromString((String) map2.get("version")); diff --git a/http/src/main/java/org/elasticsearch/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java similarity index 79% rename from http/src/main/java/org/elasticsearch/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java index b66675c..f5d9631 100644 --- a/http/src/main/java/org/elasticsearch/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/settings/HttpClusterUpdateSettingsAction.java @@ -1,11 +1,14 @@ -package org.elasticsearch.action.admin.cluster.settings; +package org.xbib.elx.http.action.admin.cluster.settings; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.common.CheckedFunction; import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; @@ -13,9 +16,6 @@ import java.io.UncheckedIOException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -/** - * - */ public class HttpClusterUpdateSettingsAction extends HttpAction { @Override @@ -41,9 +41,6 @@ public class HttpClusterUpdateSettingsAction extends HttpAction entityParser() { - return parser -> { - // TODO(jprante) - return new ClusterUpdateSettingsResponse(); - }; + return ClusterUpdateSettingsResponse::fromXContent; } } diff --git a/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/state/HttpClusterStateAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/state/HttpClusterStateAction.java new file mode 100644 index 0000000..5adf01c --- /dev/null +++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/cluster/state/HttpClusterStateAction.java @@ -0,0 +1,29 @@ +package org.xbib.elx.http.action.admin.cluster.state; + +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elx.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +public class HttpClusterStateAction extends HttpAction { + + @Override + public ClusterStateAction getActionInstance() { + return ClusterStateAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, 
ClusterStateRequest request) { + return newPutRequest(url, "/_cluster/state"); + } + + @Override + protected CheckedFunction entityParser() { + throw new UnsupportedOperationException(); + } +} diff --git a/http/src/main/java/org/elasticsearch/action/admin/indices/create/HttpCreateIndexAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/create/HttpCreateIndexAction.java similarity index 75% rename from http/src/main/java/org/elasticsearch/action/admin/indices/create/HttpCreateIndexAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/create/HttpCreateIndexAction.java index da64f8b..1b9410b 100644 --- a/http/src/main/java/org/elasticsearch/action/admin/indices/create/HttpCreateIndexAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/create/HttpCreateIndexAction.java @@ -1,12 +1,15 @@ -package org.elasticsearch.action.admin.indices.create; +package org.xbib.elx.http.action.admin.indices.create; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; @@ -27,9 +30,6 @@ public class HttpCreateIndexAction extends HttpAction entityParser() { - return parser -> { - // TODO(jprante) build real create index response - return new CreateIndexResponse(); - }; + return CreateIndexResponse::fromXContent; } } diff --git 
a/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/delete/HttpDeleteIndexAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/delete/HttpDeleteIndexAction.java new file mode 100644 index 0000000..c791444 --- /dev/null +++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/delete/HttpDeleteIndexAction.java @@ -0,0 +1,29 @@ +package org.xbib.elx.http.action.admin.indices.delete; + +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.xcontent.XContentParser; +import org.xbib.elx.http.HttpAction; +import org.xbib.netty.http.client.RequestBuilder; + +import java.io.IOException; + +public class HttpDeleteIndexAction extends HttpAction { + + @Override + public DeleteIndexAction getActionInstance() { + return DeleteIndexAction.INSTANCE; + } + + @Override + protected RequestBuilder createHttpRequest(String url, DeleteIndexRequest deleteIndexRequest) { + return newPutRequest(url, "/" + String.join(",", deleteIndexRequest.indices())); + } + + @Override + protected CheckedFunction entityParser() { + return DeleteIndexResponse::fromXContent; + } +} diff --git a/http/src/main/java/org/elasticsearch/action/admin/indices/refresh/HttpRefreshIndexAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/refresh/HttpRefreshIndexAction.java similarity index 68% rename from http/src/main/java/org/elasticsearch/action/admin/indices/refresh/HttpRefreshIndexAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/refresh/HttpRefreshIndexAction.java index 88f76ea..a6e37c5 100644 --- a/http/src/main/java/org/elasticsearch/action/admin/indices/refresh/HttpRefreshIndexAction.java +++ 
b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/refresh/HttpRefreshIndexAction.java @@ -1,15 +1,15 @@ -package org.elasticsearch.action.admin.indices.refresh; +package org.xbib.elx.http.action.admin.indices.refresh; +import org.elasticsearch.action.admin.indices.refresh.RefreshAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.XContentParser; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; -/** - * - */ public class HttpRefreshIndexAction extends HttpAction { @Override @@ -25,6 +25,6 @@ public class HttpRefreshIndexAction extends HttpAction entityParser() { - return parser -> new RefreshResponse(); + return RefreshResponse::fromXContent; } } diff --git a/http/src/main/java/org/elasticsearch/action/admin/indices/settings/put/HttpUpdateSettingsAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/settings/put/HttpUpdateSettingsAction.java similarity index 71% rename from http/src/main/java/org/elasticsearch/action/admin/indices/settings/put/HttpUpdateSettingsAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/settings/put/HttpUpdateSettingsAction.java index b8facce..f6dc7e8 100644 --- a/http/src/main/java/org/elasticsearch/action/admin/indices/settings/put/HttpUpdateSettingsAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/admin/indices/settings/put/HttpUpdateSettingsAction.java @@ -1,21 +1,21 @@ -package org.elasticsearch.action.admin.indices.settings.put; +package org.xbib.elx.http.action.admin.indices.settings.put; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import 
org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; import java.io.UncheckedIOException; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -/** - * - */ public class HttpUpdateSettingsAction extends HttpAction { @Override @@ -26,7 +26,7 @@ public class HttpUpdateSettingsAction extends HttpAction entityParser() { - return parser -> new UpdateSettingsResponse(); + return UpdateSettingsResponse::fromXContent; } } diff --git a/http/src/main/java/org/elasticsearch/action/bulk/HttpBulkAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/bulk/HttpBulkAction.java similarity index 93% rename from http/src/main/java/org/elasticsearch/action/bulk/HttpBulkAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/bulk/HttpBulkAction.java index 050d608..6a07321 100644 --- a/http/src/main/java/org/elasticsearch/action/bulk/HttpBulkAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/bulk/HttpBulkAction.java @@ -1,18 +1,18 @@ -package org.elasticsearch.action.bulk; +package org.xbib.elx.http.action.bulk; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import 
org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.XContentParser; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; -/** - * - */ public class HttpBulkAction extends HttpAction { @Override diff --git a/http/src/main/java/org/elasticsearch/action/get/HttpExistsAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/get/HttpExistsAction.java similarity index 78% rename from http/src/main/java/org/elasticsearch/action/get/HttpExistsAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/get/HttpExistsAction.java index fd2443e..bb1d5df 100644 --- a/http/src/main/java/org/elasticsearch/action/get/HttpExistsAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/get/HttpExistsAction.java @@ -1,9 +1,12 @@ -package org.elasticsearch.action.get; +package org.xbib.elx.http.action.get; import org.elasticsearch.action.GenericAction; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.XContentParser; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; diff --git a/http/src/main/java/org/elasticsearch/action/get/HttpGetAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/get/HttpGetAction.java similarity index 78% rename from http/src/main/java/org/elasticsearch/action/get/HttpGetAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/get/HttpGetAction.java index 3a72116..b700961 100644 --- a/http/src/main/java/org/elasticsearch/action/get/HttpGetAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/get/HttpGetAction.java @@ -1,9 +1,12 @@ -package 
org.elasticsearch.action.get; +package org.xbib.elx.http.action.get; import org.elasticsearch.action.GenericAction; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.XContentParser; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; diff --git a/http/src/main/java/org/elasticsearch/action/index/HttpIndexAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/index/HttpIndexAction.java similarity index 78% rename from http/src/main/java/org/elasticsearch/action/index/HttpIndexAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/index/HttpIndexAction.java index 5352682..be7aba2 100644 --- a/http/src/main/java/org/elasticsearch/action/index/HttpIndexAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/index/HttpIndexAction.java @@ -1,9 +1,12 @@ -package org.elasticsearch.action.index; +package org.xbib.elx.http.action.index; import org.elasticsearch.action.GenericAction; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.XContentParser; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; diff --git a/http/src/main/java/org/elasticsearch/action/main/HttpMainAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/main/HttpMainAction.java similarity index 76% rename from http/src/main/java/org/elasticsearch/action/main/HttpMainAction.java rename to 
elx-http/src/main/java/org/xbib/elx/http/action/main/HttpMainAction.java index ee5dc8c..0ee995b 100644 --- a/http/src/main/java/org/elasticsearch/action/main/HttpMainAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/main/HttpMainAction.java @@ -1,15 +1,16 @@ -package org.elasticsearch.action.main; +package org.xbib.elx.http.action.main; import org.elasticsearch.action.GenericAction; +import org.elasticsearch.action.main.MainAction; +import org.elasticsearch.action.main.MainRequest; +import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.XContentParser; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; -/** - */ public class HttpMainAction extends HttpAction { @Override diff --git a/http/src/main/java/org/elasticsearch/action/search/HttpSearchAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/search/HttpSearchAction.java similarity index 71% rename from http/src/main/java/org/elasticsearch/action/search/HttpSearchAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/search/HttpSearchAction.java index 4c637b7..0cd6a15 100644 --- a/http/src/main/java/org/elasticsearch/action/search/HttpSearchAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/search/HttpSearchAction.java @@ -1,15 +1,15 @@ -package org.elasticsearch.action.search; +package org.xbib.elx.http.action.search; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.xcontent.XContentParser; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; import java.io.IOException; -/** 
- * - */ public class HttpSearchAction extends HttpAction { @Override @@ -20,14 +20,11 @@ public class HttpSearchAction extends HttpAction @Override protected RequestBuilder createHttpRequest(String url, SearchRequest request) { String index = request.indices() != null ? "/" + String.join(",", request.indices()) : ""; - return newPostRequest(url, index + "/_search", request.source().toString() ); + return newPostRequest(url, index + "/_search", request.source().toString()); } @Override protected CheckedFunction entityParser() { - return parser -> { - // TODO(jprante) build search response - return new SearchResponse(); - }; + return SearchResponse::fromXContent; } } diff --git a/http/src/main/java/org/elasticsearch/action/update/HttpUpdateAction.java b/elx-http/src/main/java/org/xbib/elx/http/action/update/HttpUpdateAction.java similarity index 85% rename from http/src/main/java/org/elasticsearch/action/update/HttpUpdateAction.java rename to elx-http/src/main/java/org/xbib/elx/http/action/update/HttpUpdateAction.java index c703075..134dbb8 100644 --- a/http/src/main/java/org/elasticsearch/action/update/HttpUpdateAction.java +++ b/elx-http/src/main/java/org/xbib/elx/http/action/update/HttpUpdateAction.java @@ -1,13 +1,16 @@ -package org.elasticsearch.action.update; +package org.xbib.elx.http.action.update; import org.elasticsearch.action.GenericAction; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.xbib.elasticsearch.client.http.HttpAction; +import org.xbib.elx.http.HttpAction; import org.xbib.netty.http.client.RequestBuilder; 
import java.io.IOException; @@ -35,8 +38,8 @@ public class HttpUpdateAction extends HttpAction if (updateRequest.upsertRequest() != null) { XContentType upsertContentType = updateRequest.upsertRequest().getContentType(); if ((xContentType != null) && (xContentType != upsertContentType)) { - throw new IllegalStateException("update request cannot have different content types for doc [" + xContentType + "]" + - " and upsert [" + upsertContentType + "] documents"); + throw new IllegalStateException("update request cannot have different content types for doc [" + + xContentType + "]" + " and upsert [" + upsertContentType + "] documents"); } else { xContentType = upsertContentType; } diff --git a/http/src/main/java/org/xbib/elasticsearch/client/http/package-info.java b/elx-http/src/main/java/org/xbib/elx/http/package-info.java similarity index 53% rename from http/src/main/java/org/xbib/elasticsearch/client/http/package-info.java rename to elx-http/src/main/java/org/xbib/elx/http/package-info.java index a9c3ded..ef5876c 100644 --- a/http/src/main/java/org/xbib/elasticsearch/client/http/package-info.java +++ b/elx-http/src/main/java/org/xbib/elx/http/package-info.java @@ -1,4 +1,4 @@ /** * Classes for Elasticsearch HTTP client. 
*/ -package org.xbib.elasticsearch.client.http; +package org.xbib.elx.http; diff --git a/elx-http/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider b/elx-http/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider new file mode 100644 index 0000000..0c75f14 --- /dev/null +++ b/elx-http/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider @@ -0,0 +1 @@ +org.xbib.elx.http.ExtendedHttpClientProvider \ No newline at end of file diff --git a/elx-http/src/main/resources/META-INF/services/org.xbib.elx.http.HttpAction b/elx-http/src/main/resources/META-INF/services/org.xbib.elx.http.HttpAction new file mode 100644 index 0000000..4d35ec6 --- /dev/null +++ b/elx-http/src/main/resources/META-INF/services/org.xbib.elx.http.HttpAction @@ -0,0 +1,12 @@ +org.xbib.elx.http.action.admin.cluster.node.info.HttpNodesInfoAction +org.xbib.elx.http.action.admin.cluster.settings.HttpClusterUpdateSettingsAction +org.xbib.elx.http.action.admin.indices.create.HttpCreateIndexAction +org.xbib.elx.http.action.admin.indices.delete.HttpDeleteIndexAction +org.xbib.elx.http.action.admin.indices.refresh.HttpRefreshIndexAction +org.xbib.elx.http.action.admin.indices.settings.put.HttpUpdateSettingsAction +org.xbib.elx.http.action.bulk.HttpBulkAction +org.xbib.elx.http.action.index.HttpIndexAction +org.xbib.elx.http.action.search.HttpSearchAction +org.xbib.elx.http.action.main.HttpMainAction +org.xbib.elx.http.action.get.HttpExistsAction +org.xbib.elx.http.action.get.HttpGetAction diff --git a/http/src/main/resources/extra-security.policy b/elx-http/src/main/resources/extra-security.policy similarity index 100% rename from http/src/main/resources/extra-security.policy rename to elx-http/src/main/resources/extra-security.policy diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/ClientTest.java b/elx-http/src/test/java/org/xbib/elx/http/test/ClientTest.java new file mode 100644 index 0000000..920dd5e --- /dev/null +++ 
b/elx-http/src/test/java/org/xbib/elx/http/test/ClientTest.java @@ -0,0 +1,207 @@ +package org.xbib.elx.http.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.common.Parameters; +import org.xbib.elx.http.ExtendedHttpClient; +import org.xbib.elx.http.ExtendedHttpClientProvider; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +@Ignore +public class ClientTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(ClientTest.class.getSimpleName()); + + private static final Long ACTIONS = 25000L; + + private static final Long MAX_ACTIONS_PER_REQUEST = 1000L; + + @Before + public void startNodes() { + try { + super.startNodes(); + startNode("2"); + } catch (Throwable t) { + logger.error("startNodes failed", t); + } + } + + @Test + public void testSingleDoc() throws 
Exception { + final ExtendedHttpClient client = ClientBuilder.builder() + .provider(ExtendedHttpClientProvider.class) + .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST) + .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(30)) + .build(); + try { + client.newIndex("test"); + client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + assertEquals(1, client.getBulkMetric().getSucceeded().getCount()); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + client.close(); + } + } + + @Test + public void testNewIndex() throws Exception { + final ExtendedHttpClient client = ClientBuilder.builder() + .provider(ExtendedHttpClientProvider.class) + .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5)) + .build(); + client.newIndex("test"); + client.close(); + } + + @Test + public void testMapping() throws Exception { + final ExtendedHttpClient client = ClientBuilder.builder() + .provider(ExtendedHttpClientProvider.class) + .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5)) + .build(); + XContentBuilder builder = jsonBuilder() + .startObject() + .startObject("doc") + .startObject("properties") + .startObject("location") + .field("type", "geo_point") + .endObject() + .endObject() + .endObject() + .endObject(); + client.newIndex("test", Settings.EMPTY, Strings.toString(builder)); + GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test"); + GetMappingsResponse getMappingsResponse = + client.getClient().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet(); + logger.info("mappings={}", getMappingsResponse.getMappings()); + 
assertTrue(getMappingsResponse.getMappings().get("test").containsKey("doc")); + client.close(); + } + + @Test + public void testRandomDocs() throws Exception { + long numactions = ACTIONS; + final ExtendedHttpClient client = ClientBuilder.builder() + .provider(ExtendedHttpClientProvider.class) + .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST) + .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60)) + .build(); + try { + client.newIndex("test"); + for (int i = 0; i < ACTIONS; i++) { + client.index("test", null, false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } catch (Exception e) { + logger.error(e.getMessage(), e); + } finally { + assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount()); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE) + .setQuery(QueryBuilders.matchAllQuery()).setSize(0); + assertEquals(numactions, + searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); + client.close(); + } + } + + @Test + public void testThreadedRandomDocs() throws Exception { + int maxthreads = Runtime.getRuntime().availableProcessors(); + Long maxActionsPerRequest = MAX_ACTIONS_PER_REQUEST; + final Long actions = ACTIONS; + logger.info("maxthreads={} maxactions={} maxloop={}", maxthreads, maxActionsPerRequest, actions); + final ExtendedHttpClient client = ClientBuilder.builder() + .provider(ExtendedHttpClientProvider.class) + .put(Parameters.MAX_CONCURRENT_REQUESTS.name(), maxthreads * 2) + .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), 
maxActionsPerRequest) + .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60)) + .build(); + try { + Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + client.newIndex("test", settings, (String)null) + .startBulk("test", 0, 1000); + logger.info("index created"); + ExecutorService executorService = Executors.newFixedThreadPool(maxthreads); + final CountDownLatch latch = new CountDownLatch(maxthreads); + for (int i = 0; i < maxthreads; i++) { + executorService.execute(() -> { + for (int i1 = 0; i1 < actions; i1++) { + client.index("test", null, false,"{ \"name\" : \"" + randomString(32) + "\"}"); + } + latch.countDown(); + }); + } + logger.info("waiting for latch..."); + if (latch.await(60L, TimeUnit.SECONDS)) { + logger.info("flush..."); + client.flush(); + client.waitForResponses(60L, TimeUnit.SECONDS); + logger.info("got all responses, executor service shutdown..."); + executorService.shutdown(); + executorService.awaitTermination(60L, TimeUnit.SECONDS); + logger.info("pool is shut down"); + } else { + logger.warn("latch timeout"); + } + client.stopBulk("test", 30L, TimeUnit.SECONDS); + assertEquals(maxthreads * actions, client.getBulkMetric().getSucceeded().getCount()); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } catch (Exception e) { + logger.error(e.getMessage(), e); + } finally { + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE) + .setQuery(QueryBuilders.matchAllQuery()).setSize(0); + assertEquals(maxthreads * actions, + searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); + client.close(); + } + } +} diff --git 
a/elx-http/src/test/java/org/xbib/elx/http/test/DuplicateIDTest.java b/elx-http/src/test/java/org/xbib/elx/http/test/DuplicateIDTest.java new file mode 100644 index 0000000..65745ce --- /dev/null +++ b/elx-http/src/test/java/org/xbib/elx/http/test/DuplicateIDTest.java @@ -0,0 +1,64 @@ +package org.xbib.elx.http.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.junit.Ignore; +import org.junit.Test; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.common.Parameters; +import org.xbib.elx.http.ExtendedHttpClient; +import org.xbib.elx.http.ExtendedHttpClientProvider; + +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +@Ignore +public class DuplicateIDTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(DuplicateIDTest.class.getSimpleName()); + + private static final Long MAX_ACTIONS_PER_REQUEST = 1000L; + + private static final Long ACTIONS = 12345L; + + @Test + public void testDuplicateDocIDs() throws Exception { + long numactions = ACTIONS; + final ExtendedHttpClient client = ClientBuilder.builder() + .provider(ExtendedHttpClientProvider.class) + .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST) + .build(); + try { + client.newIndex("test"); + for (int i = 0; i < ACTIONS; i++) { + client.index("test", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), 
SearchAction.INSTANCE) + .setIndices("test") + .setTypes("test") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("hits = {}", hits); + assertTrue(hits < ACTIONS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.close(); + assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount()); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } +} diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/IndexShiftTest.java b/elx-http/src/test/java/org/xbib/elx/http/test/IndexShiftTest.java new file mode 100644 index 0000000..7317978 --- /dev/null +++ b/elx-http/src/test/java/org/xbib/elx/http/test/IndexShiftTest.java @@ -0,0 +1,111 @@ +package org.xbib.elx.http.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.junit.Ignore; +import org.junit.Test; +import org.xbib.elx.api.IndexShiftResult; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.http.ExtendedHttpClient; +import org.xbib.elx.http.ExtendedHttpClientProvider; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +@Ignore +public class IndexShiftTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(IndexShiftTest.class.getSimpleName()); + + @Test + public void testIndexShift() throws Exception { + final 
ExtendedHttpClient client = ClientBuilder.builder() + .provider(ExtendedHttpClientProvider.class) + .build(); + try { + Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + client.newIndex("test1234", settings); + for (int i = 0; i < 1; i++) { + client.index("test1234", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + + IndexShiftResult indexShiftResult = + client.shiftIndex("test", "test1234", Arrays.asList("a", "b", "c")); + + assertTrue(indexShiftResult.getNewAliases().contains("a")); + assertTrue(indexShiftResult.getNewAliases().contains("b")); + assertTrue(indexShiftResult.getNewAliases().contains("c")); + assertTrue(indexShiftResult.getMovedAliases().isEmpty()); + + Map aliases = client.getAliases("test1234"); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("test")); + + String resolved = client.resolveAlias("test"); + aliases = client.getAliases(resolved); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("test")); + + client.newIndex("test5678", settings); + for (int i = 0; i < 1; i++) { + client.index("test5678", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + + indexShiftResult = client.shiftIndex("test", "test5678", Arrays.asList("d", "e", "f"), + (request, index, alias) -> request.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .index(index).alias(alias).filter(QueryBuilders.termQuery("my_key", alias))) + ); + assertTrue(indexShiftResult.getNewAliases().contains("d")); + assertTrue(indexShiftResult.getNewAliases().contains("e")); + 
assertTrue(indexShiftResult.getNewAliases().contains("f")); + assertTrue(indexShiftResult.getMovedAliases().contains("a")); + assertTrue(indexShiftResult.getMovedAliases().contains("b")); + assertTrue(indexShiftResult.getMovedAliases().contains("c")); + + aliases = client.getAliases("test5678"); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("d")); + assertTrue(aliases.containsKey("e")); + assertTrue(aliases.containsKey("f")); + + resolved = client.resolveAlias("test"); + aliases = client.getAliases(resolved); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("d")); + assertTrue(aliases.containsKey("e")); + assertTrue(aliases.containsKey("f")); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.close(); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } +} diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/MockNode.java b/elx-http/src/test/java/org/xbib/elx/http/test/MockNode.java new file mode 100644 index 0000000..fc62993 --- /dev/null +++ b/elx-http/src/test/java/org/xbib/elx/http/test/MockNode.java @@ -0,0 +1,15 @@ +package org.xbib.elx.http.test; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.InternalSettingsPreparer; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; + +import java.util.List; + +public class MockNode extends Node { + + public MockNode(Settings settings, List> classpathPlugins) { + super(InternalSettingsPreparer.prepareEnvironment(settings, null), classpathPlugins); + } +} diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/ReplicaTest.java 
b/elx-http/src/test/java/org/xbib/elx/http/test/ReplicaTest.java new file mode 100644 index 0000000..c9037ca --- /dev/null +++ b/elx-http/src/test/java/org/xbib/elx/http/test/ReplicaTest.java @@ -0,0 +1,151 @@ +package org.xbib.elx.http.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.IndexShardStats; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexingStats; +import org.junit.Ignore; +import org.junit.Test; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.http.ExtendedHttpClient; +import org.xbib.elx.http.ExtendedHttpClientProvider; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +@Ignore +public class ReplicaTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(ReplicaTest.class.getSimpleName()); + + @Test + public void testReplicaLevel() throws Exception { + + // we need nodes for replica levels + startNode("2"); + startNode("3"); + startNode("4"); + + Settings settingsTest1 = Settings.builder() + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 3) + .build(); + + Settings settingsTest2 = Settings.builder() + 
.put("index.number_of_shards", 2) + .put("index.number_of_replicas", 1) + .build(); + + final ExtendedHttpClient client = ClientBuilder.builder() + .provider(ExtendedHttpClientProvider.class) + .build(); + + try { + client.newIndex("test1", settingsTest1, new HashMap<>()) + .newIndex("test2", settingsTest2, new HashMap<>()); + client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS); + for (int i = 0; i < 1234; i++) { + client.index("test1", null, false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + for (int i = 0; i < 1234; i++) { + client.index("test2", null, false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + logger.info("refreshing"); + client.refreshIndex("test1"); + client.refreshIndex("test2"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE) + .setIndices("test1", "test2") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("query total hits={}", hits); + assertEquals(2468, hits); + IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.getClient(), IndicesStatsAction.INSTANCE) + .all(); + IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet(); + for (Map.Entry m : response.getIndices().entrySet()) { + IndexStats indexStats = m.getValue(); + CommonStats commonStats = indexStats.getTotal(); + IndexingStats indexingStats = commonStats.getIndexing(); + IndexingStats.Stats stats = indexingStats.getTotal(); + logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount()); + for (Map.Entry me : indexStats.getIndexShards().entrySet()) { + IndexShardStats indexShardStats = me.getValue(); + CommonStats commonShardStats = indexShardStats.getTotal(); + logger.info("shard {} count = 
{}", me.getKey(), + commonShardStats.getIndexing().getTotal().getIndexCount()); + } + } + try { + client.deleteIndex("test1") + .deleteIndex("test2"); + } catch (Exception e) { + logger.error("delete index failed, ignored. Reason:", e); + } + client.close(); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } + + @Test + public void testUpdateReplicaLevel() throws Exception { + + long numberOfShards = 2; + int replicaLevel = 3; + + // we need 3 nodes for replica level 3 + startNode("2"); + startNode("3"); + + Settings settings = Settings.builder() + .put("index.number_of_shards", numberOfShards) + .put("index.number_of_replicas", 0) + .build(); + + final ExtendedHttpClient client = ClientBuilder.builder() + .provider(ExtendedHttpClientProvider.class) + .build(); + + try { + client.newIndex("replicatest", settings, new HashMap<>()); + client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS); + for (int i = 0; i < 12345; i++) { + client.index("replicatest",null, false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + client.updateReplicaLevel("replicatest", replicaLevel, 30L, TimeUnit.SECONDS); + assertEquals(replicaLevel, client.getReplicaLevel("replicatest")); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.close(); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } + +} diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/SmokeTest.java b/elx-http/src/test/java/org/xbib/elx/http/test/SmokeTest.java new file mode 100644 index 0000000..30dc44a --- /dev/null +++ b/elx-http/src/test/java/org/xbib/elx/http/test/SmokeTest.java 
@@ -0,0 +1,71 @@ +package org.xbib.elx.http.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.settings.Settings; +import org.junit.Ignore; +import org.junit.Test; +import org.xbib.elx.api.IndexDefinition; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.http.ExtendedHttpClient; +import org.xbib.elx.http.ExtendedHttpClientProvider; + +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +@Ignore +public class SmokeTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(SmokeTest.class.getSimpleName()); + + @Test + public void smokeTest() throws Exception { + final ExtendedHttpClient client = ClientBuilder.builder() + .provider(ExtendedHttpClientProvider.class) + .build(); + try { + client.newIndex("test"); + client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest + client.flush(); + client.waitForResponses(30, TimeUnit.SECONDS); + + assertEquals(getClusterName(), client.getClusterName()); + + client.checkMapping("test"); + + client.update("test", "1", "{ \"name\" : \"Another name\"}"); + client.flush(); + + client.waitForRecovery("test", 10L, TimeUnit.SECONDS); + + client.delete("test", "1"); + client.deleteIndex("test"); + + IndexDefinition indexDefinition = client.buildIndexDefinitionFromSettings("test", Settings.builder() + .build()); + assertEquals(0, indexDefinition.getReplicaLevel()); + client.newIndex(indexDefinition); + client.index(indexDefinition.getFullIndexName(), "1", true, "{ \"name\" : \"Hello World\"}"); + client.flush(); + client.updateReplicaLevel(indexDefinition, 2); + + int replica = client.getReplicaLevel(indexDefinition); + assertEquals(2, replica); + + client.deleteIndex(indexDefinition); + assertEquals(0, 
client.getBulkMetric().getFailed().getCount()); + assertEquals(4, client.getBulkMetric().getSucceeded().getCount()); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.close(); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } +} diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/TestBase.java b/elx-http/src/test/java/org/xbib/elx/http/test/TestBase.java new file mode 100644 index 0000000..d15a279 --- /dev/null +++ b/elx-http/src/test/java/org/xbib/elx/http/test/TestBase.java @@ -0,0 +1,198 @@ +package org.xbib.elx.http.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.client.support.AbstractClient; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.transport.netty4.Netty4Plugin; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; 
+import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; + +public class TestBase { + + private static final Logger logger = LogManager.getLogger("test"); + + private static final Random random = new Random(); + + private static final char[] numbersAndLetters = ("0123456789abcdefghijklmnopqrstuvwxyz").toCharArray(); + + private Map nodes = new HashMap<>(); + + private Map clients = new HashMap<>(); + + private AtomicInteger counter = new AtomicInteger(); + + private String cluster; + + private String host; + + private int port; + + @Before + public void startNodes() { + try { + logger.info("starting"); + this.cluster = "test-helper-cluster-" + counter.incrementAndGet(); + startNode("1"); + findNodeAddress(); + try { + ClusterHealthResponse healthResponse = client("1").execute(ClusterHealthAction.INSTANCE, + new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN) + .timeout(TimeValue.timeValueSeconds(30))).actionGet(); + if (healthResponse != null && healthResponse.isTimedOut()) { + throw new IOException("cluster state is " + healthResponse.getStatus().name() + + ", from here on, everything will fail!"); + } + } catch (ElasticsearchTimeoutException e) { + throw new IOException("cluster does not respond to health request, cowardly refusing to continue"); + } + } catch (Throwable t) { + logger.error("startNodes failed", t); + } + } + + @After + public void stopNodes() { + try { + closeNodes(); + } catch (Exception e) { + logger.error("can not close nodes", e); + } finally { + try { + deleteFiles(); + logger.info("data files wiped"); + Thread.sleep(2000L); // let OS commit changes + } catch (IOException e) { + logger.error(e.getMessage(), e); + } catch (InterruptedException e) { + // ignore + } + } + } + + protected 
Settings getTransportSettings() { + return Settings.builder() + .put("host", host) + .put("port", port) + .put("cluster.name", cluster) + .put("path.home", getHome()) + .build(); + } + + protected Settings getNodeSettings() { + return Settings.builder() + .put("cluster.name", cluster) + .put("discovery.zen.minimum_master_nodes", "1") + .put("transport.type", Netty4Plugin.NETTY_TRANSPORT_NAME) + .put("node.max_local_storage_nodes", 10) // allow many nodes to initialize here + .put("path.home", getHome()) + .build(); + } + + protected static String getHome() { + return System.getProperty("path.home", System.getProperty("user.dir")); + } + + protected void startNode(String id) throws NodeValidationException { + buildNode(id).start(); + } + + protected AbstractClient client(String id) { + return clients.get(id); + } + + protected String getClusterName() { + return cluster; + } + + protected String randomString(int len) { + final char[] buf = new char[len]; + final int n = numbersAndLetters.length - 1; + for (int i = 0; i < buf.length; i++) { + buf[i] = numbersAndLetters[random.nextInt(n)]; + } + return new String(buf); + } + + private void closeNodes() throws IOException { + logger.info("closing all clients"); + for (AbstractClient client : clients.values()) { + client.close(); + } + clients.clear(); + logger.info("closing all nodes"); + for (Node node : nodes.values()) { + if (node != null) { + node.close(); + } + } + nodes.clear(); + logger.info("all nodes closed"); + } + + private void findNodeAddress() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().transport(true); + NodesInfoResponse response = client("1").admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + TransportAddress address= response.getNodes().iterator().next().getTransport().getAddress() + .publishAddress(); + host = address.address().getHostName(); + port = address.address().getPort(); + } + + private Node buildNode(String id) { + Settings nodeSettings = Settings.builder() + 
.put(getNodeSettings()) + .put("node.name", id) + .build(); + List> plugins = Arrays.asList(CommonAnalysisPlugin.class, Netty4Plugin.class); + Node node = new MockNode(nodeSettings, plugins); + AbstractClient client = (AbstractClient) node.client(); + nodes.put(id, node); + clients.put(id, client); + return node; + } + + private static void deleteFiles() throws IOException { + Path directory = Paths.get(getHome() + "/data"); + Files.walkFileTree(directory, new SimpleFileVisitor<>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + Files.delete(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + }); + } +} diff --git a/elx-http/src/test/java/org/xbib/elx/http/test/package-info.java b/elx-http/src/test/java/org/xbib/elx/http/test/package-info.java new file mode 100644 index 0000000..2bb05c9 --- /dev/null +++ b/elx-http/src/test/java/org/xbib/elx/http/test/package-info.java @@ -0,0 +1,4 @@ +/** + * + */ +package org.xbib.elx.http.test; diff --git a/node/src/test/resources/log4j2.xml b/elx-http/src/test/resources/log4j2.xml similarity index 72% rename from node/src/test/resources/log4j2.xml rename to elx-http/src/test/resources/log4j2.xml index b175dfc..6c323f8 100644 --- a/node/src/test/resources/log4j2.xml +++ b/elx-http/src/test/resources/log4j2.xml @@ -2,11 +2,11 @@ - + - + diff --git a/elx-node/build.gradle b/elx-node/build.gradle new file mode 100644 index 0000000..6f6191c --- /dev/null +++ b/elx-node/build.gradle @@ -0,0 +1,5 @@ +dependencies { + compile project(':elx-common') + compile "org.xbib.elasticsearch:transport-netty4:${rootProject.property('elasticsearch-server.version')}" + testCompile "org.xbib.elasticsearch:elasticsearch-analysis-common:${rootProject.property('elasticsearch-server.version')}" +} diff --git 
a/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClient.java b/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClient.java new file mode 100644 index 0000000..d6e4963 --- /dev/null +++ b/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClient.java @@ -0,0 +1,71 @@ +package org.xbib.elx.node; + +import io.netty.util.Version; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.env.Environment; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.transport.netty4.Netty4Utils; +import org.xbib.elx.common.AbstractExtendedClient; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; + +public class ExtendedNodeClient extends AbstractExtendedClient { + + private static final Logger logger = LogManager.getLogger(ExtendedNodeClient.class.getName()); + + private Node node; + + @Override + protected ElasticsearchClient createClient(Settings settings) throws IOException { + if (settings == null) { + return null; + } + String version = System.getProperty("os.name") + + " " + System.getProperty("java.vm.name") + + " " + System.getProperty("java.vm.vendor") + + " " + System.getProperty("java.runtime.version") + + " " + System.getProperty("java.vm.version"); + Settings effectiveSettings = Settings.builder().put(settings) + .put("node.client", true) + .put("node.master", false) + .put("node.data", false) + .build(); + XContentBuilder builder = XContentFactory.jsonBuilder(); + effectiveSettings.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("flat_settings", "true"))); + 
logger.info("creating node client on {} with effective settings {}", + version, Strings.toString(builder)); + Collection> plugins = Collections.emptyList(); + this.node = new BulkNode(new Environment(effectiveSettings, null), plugins); + try { + node.start(); + } catch (Exception e) { + throw new IOException(e); + } + return node.client(); + } + + @Override + protected void closeClient() throws IOException { + if (node != null) { + logger.debug("closing node client"); + node.close(); + } + } + + private static class BulkNode extends Node { + + BulkNode(Environment env, Collection> classpathPlugins) { + super(env, classpathPlugins); + } + } +} diff --git a/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClientProvider.java b/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClientProvider.java new file mode 100644 index 0000000..46a4e9a --- /dev/null +++ b/elx-node/src/main/java/org/xbib/elx/node/ExtendedNodeClientProvider.java @@ -0,0 +1,10 @@ +package org.xbib.elx.node; + +import org.xbib.elx.api.ExtendedClientProvider; + +public class ExtendedNodeClientProvider implements ExtendedClientProvider { + @Override + public ExtendedNodeClient getExtendedClient() { + return new ExtendedNodeClient(); + } +} diff --git a/elx-node/src/main/java/org/xbib/elx/node/package-info.java b/elx-node/src/main/java/org/xbib/elx/node/package-info.java new file mode 100644 index 0000000..1216a48 --- /dev/null +++ b/elx-node/src/main/java/org/xbib/elx/node/package-info.java @@ -0,0 +1,4 @@ +/** + * + */ +package org.xbib.elx.node; diff --git a/elx-node/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider b/elx-node/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider new file mode 100644 index 0000000..372aaad --- /dev/null +++ b/elx-node/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider @@ -0,0 +1 @@ +org.xbib.elx.node.ExtendedNodeClientProvider \ No newline at end of file diff --git 
a/elx-node/src/test/java/org/xbib/elx/node/test/ClientTest.java b/elx-node/src/test/java/org/xbib/elx/node/test/ClientTest.java new file mode 100644 index 0000000..8e14e21 --- /dev/null +++ b/elx-node/src/test/java/org/xbib/elx/node/test/ClientTest.java @@ -0,0 +1,205 @@ +package org.xbib.elx.node.test; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.junit.Before; +import org.junit.Test; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.common.Parameters; +import org.xbib.elx.node.ExtendedNodeClient; +import org.xbib.elx.node.ExtendedNodeClientProvider; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class ClientTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(ClientTest.class.getSimpleName()); + + private static final Long ACTIONS = 25000L; + + private static final Long MAX_ACTIONS_PER_REQUEST = 1000L; + + @Before + public void startNodes() { + try { + 
super.startNodes(); + startNode("2"); + } catch (Throwable t) { + logger.error("startNodes failed", t); + } + } + + @Test + public void testSingleDoc() throws Exception { + final ExtendedNodeClient client = ClientBuilder.builder(client("1")) + .provider(ExtendedNodeClientProvider.class) + .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST) + .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(30)) + .build(); + try { + client.newIndex("test"); + client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + assertEquals(1, client.getBulkMetric().getSucceeded().getCount()); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + client.close(); + } + } + + @Test + public void testNewIndex() throws Exception { + final ExtendedNodeClient client = ClientBuilder.builder(client("1")) + .provider(ExtendedNodeClientProvider.class) + .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5)) + .build(); + client.newIndex("test"); + client.close(); + } + + @Test + public void testMapping() throws Exception { + final ExtendedNodeClient client = ClientBuilder.builder(client("1")) + .provider(ExtendedNodeClientProvider.class) + .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5)) + .build(); + XContentBuilder builder = jsonBuilder() + .startObject() + .startObject("doc") + .startObject("properties") + .startObject("location") + .field("type", "geo_point") + .endObject() + .endObject() + .endObject() + .endObject(); + client.newIndex("test", Settings.EMPTY, Strings.toString(builder)); + GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test"); + 
GetMappingsResponse getMappingsResponse = + client.getClient().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet(); + logger.info("mappings={}", getMappingsResponse.getMappings()); + assertTrue(getMappingsResponse.getMappings().get("test").containsKey("doc")); + client.close(); + } + + @Test + public void testRandomDocs() throws Exception { + long numactions = ACTIONS; + final ExtendedNodeClient client = ClientBuilder.builder(client("1")) + .provider(ExtendedNodeClientProvider.class) + .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST) + .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60)) + .build(); + try { + client.newIndex("test"); + for (int i = 0; i < ACTIONS; i++) { + client.index("test", null, false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } catch (Exception e) { + logger.error(e.getMessage(), e); + } finally { + assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount()); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE) + .setQuery(QueryBuilders.matchAllQuery()).setSize(0); + assertEquals(numactions, + searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); + client.close(); + } + } + + @Test + public void testThreadedRandomDocs() throws Exception { + int maxthreads = Runtime.getRuntime().availableProcessors(); + Long maxActionsPerRequest = MAX_ACTIONS_PER_REQUEST; + final Long actions = ACTIONS; + logger.info("maxthreads={} maxactions={} maxloop={}", maxthreads, maxActionsPerRequest, actions); + final 
ExtendedNodeClient client = ClientBuilder.builder(client("1")) + .provider(ExtendedNodeClientProvider.class) + .put(Parameters.MAX_CONCURRENT_REQUESTS.name(), maxthreads * 2) + .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), maxActionsPerRequest) + .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60)) + .build(); + try { + Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + client.newIndex("test", settings, (String)null) + .startBulk("test", 0, 1000); + logger.info("index created"); + ExecutorService executorService = Executors.newFixedThreadPool(maxthreads); + final CountDownLatch latch = new CountDownLatch(maxthreads); + for (int i = 0; i < maxthreads; i++) { + executorService.execute(() -> { + for (int i1 = 0; i1 < actions; i1++) { + client.index("test", null, false,"{ \"name\" : \"" + randomString(32) + "\"}"); + } + latch.countDown(); + }); + } + logger.info("waiting for latch..."); + if (latch.await(60L, TimeUnit.SECONDS)) { + logger.info("flush..."); + client.flush(); + client.waitForResponses(60L, TimeUnit.SECONDS); + logger.info("got all responses, executor service shutdown..."); + executorService.shutdown(); + executorService.awaitTermination(60L, TimeUnit.SECONDS); + logger.info("pool is shut down"); + } else { + logger.warn("latch timeout"); + } + client.stopBulk("test", 30L, TimeUnit.SECONDS); + assertEquals(maxthreads * actions, client.getBulkMetric().getSucceeded().getCount()); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } catch (Exception e) { + logger.error(e.getMessage(), e); + } finally { + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), 
SearchAction.INSTANCE) + .setQuery(QueryBuilders.matchAllQuery()).setSize(0); + assertEquals(maxthreads * actions, + searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); + client.close(); + } + } +} diff --git a/elx-node/src/test/java/org/xbib/elx/node/test/DuplicateIDTest.java b/elx-node/src/test/java/org/xbib/elx/node/test/DuplicateIDTest.java new file mode 100644 index 0000000..43d74fa --- /dev/null +++ b/elx-node/src/test/java/org/xbib/elx/node/test/DuplicateIDTest.java @@ -0,0 +1,60 @@ +package org.xbib.elx.node.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.junit.Test; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.common.Parameters; +import org.xbib.elx.node.ExtendedNodeClient; +import org.xbib.elx.node.ExtendedNodeClientProvider; + +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.junit.Assert.*; + +public class DuplicateIDTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(DuplicateIDTest.class.getSimpleName()); + + private static final Long MAX_ACTIONS_PER_REQUEST = 1000L; + + private static final Long ACTIONS = 12345L; + + @Test + public void testDuplicateDocIDs() throws Exception { + long numactions = ACTIONS; + final ExtendedNodeClient client = ClientBuilder.builder(client("1")) + .provider(ExtendedNodeClientProvider.class) + .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST) + .build(); + try { + client.newIndex("test"); + for (int i = 0; i < ACTIONS; i++) { + client.index("test", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + 
client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE) + .setIndices("test") + .setTypes("test") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("hits = {}", hits); + assertTrue(hits < ACTIONS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.close(); + assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount()); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } +} diff --git a/elx-node/src/test/java/org/xbib/elx/node/test/IndexShiftTest.java b/elx-node/src/test/java/org/xbib/elx/node/test/IndexShiftTest.java new file mode 100644 index 0000000..48cb3e8 --- /dev/null +++ b/elx-node/src/test/java/org/xbib/elx/node/test/IndexShiftTest.java @@ -0,0 +1,109 @@ +package org.xbib.elx.node.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.junit.Test; +import org.xbib.elx.api.IndexShiftResult; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.node.ExtendedNodeClient; +import org.xbib.elx.node.ExtendedNodeClientProvider; + +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +public class IndexShiftTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(IndexShiftTest.class.getSimpleName()); + + @Test + public 
void testIndexShift() throws Exception { + final ExtendedNodeClient client = ClientBuilder.builder(client("1")) + .provider(ExtendedNodeClientProvider.class) + .build(); + try { + Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + client.newIndex("test1234", settings); + for (int i = 0; i < 1; i++) { + client.index("test1234", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + + IndexShiftResult indexShiftResult = + client.shiftIndex("test", "test1234", Arrays.asList("a", "b", "c")); + + assertTrue(indexShiftResult.getNewAliases().contains("a")); + assertTrue(indexShiftResult.getNewAliases().contains("b")); + assertTrue(indexShiftResult.getNewAliases().contains("c")); + assertTrue(indexShiftResult.getMovedAliases().isEmpty()); + + Map aliases = client.getAliases("test1234"); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("test")); + + String resolved = client.resolveAlias("test"); + aliases = client.getAliases(resolved); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("test")); + + client.newIndex("test5678", settings); + for (int i = 0; i < 1; i++) { + client.index("test5678", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + + indexShiftResult = client.shiftIndex("test", "test5678", Arrays.asList("d", "e", "f"), + (request, index, alias) -> request.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .index(index).alias(alias).filter(QueryBuilders.termQuery("my_key", alias))) + ); + assertTrue(indexShiftResult.getNewAliases().contains("d")); + 
assertTrue(indexShiftResult.getNewAliases().contains("e")); + assertTrue(indexShiftResult.getNewAliases().contains("f")); + assertTrue(indexShiftResult.getMovedAliases().contains("a")); + assertTrue(indexShiftResult.getMovedAliases().contains("b")); + assertTrue(indexShiftResult.getMovedAliases().contains("c")); + + aliases = client.getAliases("test5678"); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("d")); + assertTrue(aliases.containsKey("e")); + assertTrue(aliases.containsKey("f")); + + resolved = client.resolveAlias("test"); + aliases = client.getAliases(resolved); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("d")); + assertTrue(aliases.containsKey("e")); + assertTrue(aliases.containsKey("f")); + + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.close(); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } +} diff --git a/elx-node/src/test/java/org/xbib/elx/node/test/MockNode.java b/elx-node/src/test/java/org/xbib/elx/node/test/MockNode.java new file mode 100644 index 0000000..0d0568a --- /dev/null +++ b/elx-node/src/test/java/org/xbib/elx/node/test/MockNode.java @@ -0,0 +1,15 @@ +package org.xbib.elx.node.test; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.InternalSettingsPreparer; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; + +import java.util.List; + +public class MockNode extends Node { + + public MockNode(Settings settings, List> classpathPlugins) { + super(InternalSettingsPreparer.prepareEnvironment(settings, null), classpathPlugins); + } +} diff --git 
package org.xbib.elx.node.test;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexingStats;
import org.junit.Ignore;
import org.junit.Test;
import org.xbib.elx.common.ClientBuilder;
import org.xbib.elx.node.ExtendedNodeClient;
import org.xbib.elx.node.ExtendedNodeClientProvider;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

/**
 * Tests index replica handling of the extended node client: indexing against
 * indices with a non-zero replica level, and raising the replica level of an
 * existing index at runtime.
 *
 * <p>NOTE(review): the class is {@code @Ignore}d — presumably because it spins
 * up four local nodes, which is expensive; confirm before re-enabling.</p>
 */
@Ignore
public class ReplicaTest extends TestBase {

    private static final Logger logger = LogManager.getLogger(ReplicaTest.class.getSimpleName());

    /**
     * Indexes documents into two indices with replica levels 3 and 1 and then
     * verifies the total hit count and per-shard indexing statistics.
     *
     * @throws Exception if node startup or client operations fail
     */
    @Test
    public void testReplicaLevel() throws Exception {

        // we need nodes for replica levels
        startNode("2");
        startNode("3");
        startNode("4");

        Settings settingsTest1 = Settings.builder()
                .put("index.number_of_shards", 2)
                .put("index.number_of_replicas", 3)
                .build();

        Settings settingsTest2 = Settings.builder()
                .put("index.number_of_shards", 2)
                .put("index.number_of_replicas", 1)
                .build();

        final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
                .provider(ExtendedNodeClientProvider.class)
                .build();

        try {
            client.newIndex("test1", settingsTest1, new HashMap<>())
                    .newIndex("test2", settingsTest2, new HashMap<>());
            client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS);
            for (int i = 0; i < 1234; i++) {
                client.index("test1", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
            }
            for (int i = 0; i < 1234; i++) {
                client.index("test2", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
            }
            client.flush();
            client.waitForResponses(30L, TimeUnit.SECONDS);
        } catch (NoNodeAvailableException e) {
            logger.warn("skipping, no node available");
        } finally {
            logger.info("refreshing");
            client.refreshIndex("test1");
            client.refreshIndex("test2");
            SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
                    .setIndices("test1", "test2")
                    .setQuery(matchAllQuery());
            long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits();
            logger.info("query total hits={}", hits);
            // 1234 docs in each of the two indices
            assertEquals(2468, hits);
            IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.getClient(), IndicesStatsAction.INSTANCE)
                    .all();
            IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet();
            // parameterized entry types restored (raw Map.Entry lost its type arguments)
            for (Map.Entry<String, IndexStats> m : response.getIndices().entrySet()) {
                IndexStats indexStats = m.getValue();
                CommonStats commonStats = indexStats.getTotal();
                IndexingStats indexingStats = commonStats.getIndexing();
                IndexingStats.Stats stats = indexingStats.getTotal();
                logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount());
                for (Map.Entry<Integer, IndexShardStats> me : indexStats.getIndexShards().entrySet()) {
                    IndexShardStats indexShardStats = me.getValue();
                    CommonStats commonShardStats = indexShardStats.getTotal();
                    logger.info("shard {} count = {}", me.getKey(),
                            commonShardStats.getIndexing().getTotal().getIndexCount());
                }
            }
            try {
                client.deleteIndex("test1")
                        .deleteIndex("test2");
            } catch (Exception e) {
                logger.error("delete index failed, ignored. Reason:", e);
            }
            client.close();
            if (client.getBulkController().getLastBulkError() != null) {
                logger.error("error", client.getBulkController().getLastBulkError());
            }
            assertNull(client.getBulkController().getLastBulkError());
        }
    }

    /**
     * Creates an index without replicas, indexes documents, then raises the
     * replica level to 3 and verifies the cluster reports the new level.
     *
     * @throws Exception if node startup or client operations fail
     */
    @Test
    public void testUpdateReplicaLevel() throws Exception {

        long numberOfShards = 2;
        int replicaLevel = 3;

        // we need 3 nodes for replica level 3
        startNode("2");
        startNode("3");

        Settings settings = Settings.builder()
                .put("index.number_of_shards", numberOfShards)
                .put("index.number_of_replicas", 0)
                .build();

        final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
                .provider(ExtendedNodeClientProvider.class)
                .build();

        try {
            client.newIndex("replicatest", settings, new HashMap<>());
            client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS);
            for (int i = 0; i < 12345; i++) {
                client.index("replicatest", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
            }
            client.flush();
            client.waitForResponses(30L, TimeUnit.SECONDS);
            client.updateReplicaLevel("replicatest", replicaLevel, 30L, TimeUnit.SECONDS);
            assertEquals(replicaLevel, client.getReplicaLevel("replicatest"));
        } catch (NoNodeAvailableException e) {
            logger.warn("skipping, no node available");
        } finally {
            client.close();
            if (client.getBulkController().getLastBulkError() != null) {
                logger.error("error", client.getBulkController().getLastBulkError());
            }
            assertNull(client.getBulkController().getLastBulkError());
        }
    }

}
package org.xbib.elx.node.test;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.settings.Settings;
import org.junit.Test;
import org.xbib.elx.common.ClientBuilder;
import org.xbib.elx.api.IndexDefinition;
import org.xbib.elx.node.ExtendedNodeClient;
import org.xbib.elx.node.ExtendedNodeClientProvider;

import java.util.concurrent.TimeUnit;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

/**
 * End-to-end smoke test for the extended node client: index, update, delete
 * a single document, then repeat the cycle through an {@link IndexDefinition}
 * and verify replica level handling plus the bulk metrics.
 */
public class SmokeTest extends TestBase {

    private static final Logger logger = LogManager.getLogger(SmokeTest.class.getSimpleName());

    @Test
    public void smokeTest() throws Exception {
        final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
                .provider(ExtendedNodeClientProvider.class)
                .build();
        try {
            // plain index name round trip: create, ingest one doc, update, delete
            client.newIndex("test");
            client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest
            client.flush();
            client.waitForResponses(30, TimeUnit.SECONDS);

            assertEquals(getClusterName(), client.getClusterName());

            client.checkMapping("test");

            client.update("test", "1", "{ \"name\" : \"Another name\"}");
            client.flush();

            client.waitForRecovery("test", 10L, TimeUnit.SECONDS);

            client.delete("test", "1");
            client.deleteIndex("test");

            // same cycle again, but driven by an IndexDefinition built from empty settings
            IndexDefinition definition = client.buildIndexDefinitionFromSettings("test", Settings.builder()
                    .build());
            assertEquals(0, definition.getReplicaLevel());
            client.newIndex(definition);
            client.index(definition.getFullIndexName(), "1", true, "{ \"name\" : \"Hello World\"}");
            client.flush();
            client.updateReplicaLevel(definition, 2);

            assertEquals(2, client.getReplicaLevel(definition));

            client.deleteIndex(definition);
            // two index ops plus update plus delete = 4 successful bulk actions
            assertEquals(0, client.getBulkMetric().getFailed().getCount());
            assertEquals(4, client.getBulkMetric().getSucceeded().getCount());
        } catch (NoNodeAvailableException e) {
            logger.warn("skipping, no node available");
        } finally {
            client.close();
            if (client.getBulkController().getLastBulkError() != null) {
                logger.error("error", client.getBulkController().getLastBulkError());
            }
            assertNull(client.getBulkController().getLastBulkError());
        }
    }
}
package org.xbib.elx.node.test;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
import org.elasticsearch.client.support.AbstractClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.netty4.Netty4Plugin;
import org.junit.After;
import org.junit.Before;

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Base class for node-client tests: starts an in-process Elasticsearch test
 * cluster before each test, exposes per-node clients, and wipes the data
 * directory afterwards.
 */
public class TestBase {

    private static final Logger logger = LogManager.getLogger("test");

    private static final Random random = new Random();

    private static final char[] numbersAndLetters = ("0123456789abcdefghijklmnopqrstuvwxyz").toCharArray();

    // node id -> started node (generic type arguments restored; they were lost as raw types)
    private Map<String, Node> nodes = new HashMap<>();

    // node id -> client bound to that node
    private Map<String, AbstractClient> clients = new HashMap<>();

    // makes each test run use a distinct cluster name
    private AtomicInteger counter = new AtomicInteger();

    private String cluster;

    private String host;

    private int port;

    /**
     * Starts node "1", waits for a GREEN cluster, and records the publish
     * host/port. Failures are logged, not rethrown, so the test method itself
     * reports the problem.
     */
    @Before
    public void startNodes() {
        try {
            logger.info("starting");
            this.cluster = "test-cluster-" + counter.incrementAndGet();
            startNode("1");
            findNodeAddress();
            try {
                ClusterHealthResponse healthResponse = client("1").execute(ClusterHealthAction.INSTANCE,
                        new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN)
                                .timeout(TimeValue.timeValueSeconds(30))).actionGet();
                if (healthResponse != null && healthResponse.isTimedOut()) {
                    throw new IOException("cluster state is " + healthResponse.getStatus().name()
                            + ", from here on, everything will fail!");
                }
            } catch (ElasticsearchTimeoutException e) {
                throw new IOException("cluster does not respond to health request, cowardly refusing to continue");
            }
            ClusterStateRequestBuilder clusterStateRequestBuilder =
                    new ClusterStateRequestBuilder(client("1"), ClusterStateAction.INSTANCE).all();
            ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet();
            logger.info("cluster name = {}", clusterStateResponse.getClusterName().value());
            logger.info("host = {} port = {}", host, port);
        } catch (Throwable t) {
            logger.error(t.getMessage(), t);
        }
    }

    /**
     * Closes clients and nodes, then deletes the data directory so the next
     * test starts from a clean slate.
     */
    @After
    public void stopNodes() {
        try {
            closeNodes();
        } catch (Exception e) {
            logger.error("can not close nodes", e);
        } finally {
            try {
                deleteFiles();
                logger.info("data files wiped");
                Thread.sleep(2000L); // let OS commit changes
            } catch (IOException e) {
                logger.error(e.getMessage(), e);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status
            }
        }
    }

    protected Settings getTransportSettings() {
        return Settings.builder()
                .put("host", host)
                .put("port", port)
                .put("cluster.name", cluster)
                .put("path.home", getHome())
                .build();
    }

    protected Settings getNodeSettings() {
        return Settings.builder()
                .put("cluster.name", cluster)
                .put("discovery.zen.minimum_master_nodes", "1")
                .put("transport.type", Netty4Plugin.NETTY_TRANSPORT_NAME)
                .put("node.max_local_storage_nodes", 10) // allow many nodes to initialize here
                .put("path.home", getHome())
                .build();
    }

    protected static String getHome() {
        return System.getProperty("path.home", System.getProperty("user.dir"));
    }

    protected void startNode(String id) throws NodeValidationException {
        buildNode(id).start();
    }

    protected AbstractClient client(String id) {
        return clients.get(id);
    }

    protected String getClusterName() {
        return cluster;
    }

    /**
     * Returns a random string of the given length drawn from [0-9a-z].
     * NOTE(review): uses {@code length - 1} as the bound, so the last alphabet
     * character ('z') is never produced — kept as-is to preserve behavior.
     */
    protected String randomString(int len) {
        final char[] buf = new char[len];
        final int n = numbersAndLetters.length - 1;
        for (int i = 0; i < buf.length; i++) {
            buf[i] = numbersAndLetters[random.nextInt(n)];
        }
        return new String(buf);
    }

    private void closeNodes() throws IOException {
        logger.info("closing all clients");
        for (AbstractClient client : clients.values()) {
            client.close();
        }
        clients.clear();
        logger.info("closing all nodes");
        for (Node node : nodes.values()) {
            if (node != null) {
                node.close();
            }
        }
        nodes.clear();
        logger.info("all nodes closed");
    }

    private void findNodeAddress() {
        NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().transport(true);
        NodesInfoResponse response = client("1").admin().cluster().nodesInfo(nodesInfoRequest).actionGet();
        TransportAddress address = response.getNodes().iterator().next().getTransport().getAddress()
                .publishAddress();
        host = address.address().getHostName();
        port = address.address().getPort();
    }

    private Node buildNode(String id) {
        Settings nodeSettings = Settings.builder()
                .put(getNodeSettings())
                .put("node.name", id)
                .build();
        // generic type arguments restored (raw "List>" lost them)
        List<Class<? extends Plugin>> plugins = Arrays.asList(CommonAnalysisPlugin.class, Netty4Plugin.class);
        Node node = new MockNode(nodeSettings, plugins);
        AbstractClient client = (AbstractClient) node.client();
        nodes.put(id, node);
        clients.put(id, client);
        return node;
    }

    /**
     * Recursively deletes the data directory under the test home, if present.
     * The existence check prevents a NoSuchFileException when a test never
     * created any data.
     */
    private static void deleteFiles() throws IOException {
        Path directory = Paths.get(getHome() + "/data");
        if (!Files.exists(directory)) {
            return;
        }
        Files.walkFileTree(directory, new SimpleFileVisitor<>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                Files.delete(file);
                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                Files.delete(dir);
                return FileVisitResult.CONTINUE;
            }
        });
    }
}
0000000..6c323f8 --- /dev/null +++ b/elx-node/src/test/resources/log4j2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/elx-transport/build.gradle b/elx-transport/build.gradle new file mode 100644 index 0000000..6f6191c --- /dev/null +++ b/elx-transport/build.gradle @@ -0,0 +1,5 @@ +dependencies { + compile project(':elx-common') + compile "org.xbib.elasticsearch:transport-netty4:${rootProject.property('elasticsearch-server.version')}" + testCompile "org.xbib.elasticsearch:elasticsearch-analysis-common:${rootProject.property('elasticsearch-server.version')}" +} diff --git a/elx-transport/src/main/java/org/xbib/elx/transport/ExtendedTransportClient.java b/elx-transport/src/main/java/org/xbib/elx/transport/ExtendedTransportClient.java new file mode 100644 index 0000000..6eabdf0 --- /dev/null +++ b/elx-transport/src/main/java/org/xbib/elx/transport/ExtendedTransportClient.java @@ -0,0 +1,165 @@ +package org.xbib.elx.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.ToXContent; 
package org.xbib.elx.transport;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.netty4.Netty4Plugin;
import org.xbib.elx.common.AbstractExtendedClient;
import org.xbib.elx.common.util.NetworkUtils;

import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

/**
 * Transport client with additional methods using the BulkProcessor.
 */
public class ExtendedTransportClient extends AbstractExtendedClient {

    private static final Logger logger = LogManager.getLogger(ExtendedTransportClient.class.getName());

    /**
     * Creates the underlying {@link TransportClient} with the effective
     * transport settings derived from the given settings, or returns
     * {@code null} when settings are absent.
     *
     * @param settings the client settings, may be null
     * @return the transport client, or null
     * @throws IOException if settings serialization for logging fails
     */
    @Override
    protected ElasticsearchClient createClient(Settings settings) throws IOException {
        if (settings != null) {
            String systemIdentifier = System.getProperty("os.name")
                    + " " + System.getProperty("java.vm.name")
                    + " " + System.getProperty("java.vm.vendor")
                    + " " + System.getProperty("java.vm.version")
                    + " Elasticsearch " + Version.CURRENT.toString();
            Settings transportClientSettings = getTransportClientSettings(settings);
            XContentBuilder settingsBuilder = XContentFactory.jsonBuilder().startObject();
            XContentBuilder effectiveSettingsBuilder = XContentFactory.jsonBuilder().startObject();
            logger.info("creating transport client on {} with custom settings {} and effective settings {}",
                    systemIdentifier,
                    Strings.toString(settings.toXContent(settingsBuilder, ToXContent.EMPTY_PARAMS).endObject()),
                    Strings.toString(transportClientSettings.toXContent(effectiveSettingsBuilder,
                            ToXContent.EMPTY_PARAMS).endObject()));
            return new MyTransportClient(transportClientSettings, Collections.singletonList(Netty4Plugin.class));
        }
        return null;
    }

    @Override
    protected void closeClient() {
        if (getClient() != null) {
            TransportClient client = (TransportClient) getClient();
            client.close();
            client.threadPool().shutdown();
        }
    }

    /**
     * Initializes the client and auto-connects to the cluster nodes resolved
     * from the {@code host}/{@code port} settings.
     *
     * @param settings the settings
     * @return this client
     * @throws IOException if initialization fails
     */
    @Override
    public ExtendedTransportClient init(Settings settings) throws IOException {
        super.init(settings);
        // additional auto-connect
        try {
            // generic type argument restored (raw "Collection" lost it)
            Collection<TransportAddress> addrs = findAddresses(settings);
            if (!connect(addrs, settings.getAsBoolean("autodiscover", false))) {
                throw new NoNodeAvailableException("no cluster nodes available, check settings "
                        + settings.toString());
            }
        } catch (IOException e) {
            logger.error(e.getMessage(), e);
        }
        return this;
    }

    /**
     * Resolves transport addresses from the {@code host} setting; entries may
     * be {@code host} or {@code host:port}, falling back to the {@code port}
     * setting (default 9300).
     */
    private Collection<TransportAddress> findAddresses(Settings settings) throws IOException {
        final int defaultPort = settings.getAsInt("port", 9300);
        Collection<TransportAddress> addresses = new ArrayList<>();
        for (String hostname : settings.getAsList("host")) {
            String[] splitHost = hostname.split(":", 2);
            if (splitHost.length == 2) {
                try {
                    String host = splitHost[0];
                    InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null);
                    int port = Integer.parseInt(splitHost[1]);
                    TransportAddress address = new TransportAddress(inetAddress, port);
                    addresses.add(address);
                } catch (NumberFormatException e) {
                    // malformed port: skip this entry but keep resolving the rest
                    logger.warn(e.getMessage(), e);
                }
            }
            if (splitHost.length == 1) {
                String host = splitHost[0];
                InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null);
                TransportAddress address = new TransportAddress(inetAddress, defaultPort);
                addresses.add(address);
            }
        }
        logger.info("configured addresses = {}", addresses);
        return addresses;
    }

    /**
     * Adds the given addresses to the transport client and optionally
     * auto-discovers all other cluster nodes from the cluster state.
     *
     * @return true if at least one node is connected
     */
    private boolean connect(Collection<TransportAddress> addresses, boolean autodiscover) {
        if (getClient() == null) {
            throw new IllegalStateException("no client present");
        }
        logger.debug("trying to connect to {}", addresses);
        TransportClient transportClient = (TransportClient) getClient();
        for (TransportAddress address : addresses) {
            transportClient.addTransportAddresses(address);
        }
        List<DiscoveryNode> nodes = transportClient.connectedNodes();
        logger.info("connected to nodes = {}", nodes);
        if (nodes != null && !nodes.isEmpty()) {
            if (autodiscover) {
                logger.debug("trying to auto-discover all nodes...");
                ClusterStateRequestBuilder clusterStateRequestBuilder =
                        new ClusterStateRequestBuilder(getClient(), ClusterStateAction.INSTANCE);
                ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet();
                DiscoveryNodes discoveryNodes = clusterStateResponse.getState().getNodes();
                addDiscoveryNodes(transportClient, discoveryNodes);
                logger.info("after auto-discovery: connected to {}", transportClient.connectedNodes());
            }
            return true;
        }
        return false;
    }

    private Settings getTransportClientSettings(Settings settings) {
        return Settings.builder()
                .put(ClusterName.CLUSTER_NAME_SETTING.getKey(),
                        settings.get(ClusterName.CLUSTER_NAME_SETTING.getKey()))
                .put(EsExecutors.PROCESSORS_SETTING.getKey(),
                        settings.get(EsExecutors.PROCESSORS_SETTING.getKey(),
                                String.valueOf(Runtime.getRuntime().availableProcessors())))
                .put(NetworkModule.TRANSPORT_TYPE_KEY,
                        Netty4Plugin.NETTY_TRANSPORT_NAME)
                .build();
    }

    private void addDiscoveryNodes(TransportClient transportClient, DiscoveryNodes discoveryNodes) {
        for (DiscoveryNode discoveryNode : discoveryNodes) {
            transportClient.addTransportAddress(discoveryNode.getAddress());
        }
    }

    /**
     * Minimal concrete {@link TransportClient}; static because it needs no
     * reference to the enclosing instance.
     */
    static class MyTransportClient extends TransportClient {

        MyTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
            super(settings, plugins);
        }
    }
}
ExtendedTransportClient getExtendedClient() { + return new ExtendedTransportClient(); + } +} diff --git a/elx-transport/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider b/elx-transport/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider new file mode 100644 index 0000000..640e2f9 --- /dev/null +++ b/elx-transport/src/main/resources/META-INF/services/org.xbib.elx.api.ExtendedClientProvider @@ -0,0 +1 @@ +org.xbib.elx.transport.ExtendedTransportClientProvider \ No newline at end of file diff --git a/elx-transport/src/test/java/org/xbib/elx/transport/test/ClientTest.java b/elx-transport/src/test/java/org/xbib/elx/transport/test/ClientTest.java new file mode 100644 index 0000000..99d2415 --- /dev/null +++ b/elx-transport/src/test/java/org/xbib/elx/transport/test/ClientTest.java @@ -0,0 +1,210 @@ +package org.xbib.elx.transport.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.junit.Before; +import org.junit.Test; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.common.Parameters; +import org.xbib.elx.transport.ExtendedTransportClient; +import org.xbib.elx.transport.ExtendedTransportClientProvider; + +import java.util.concurrent.CountDownLatch; +import 
package org.xbib.elx.transport.test;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.junit.Before;
import org.junit.Test;
import org.xbib.elx.common.ClientBuilder;
import org.xbib.elx.common.Parameters;
import org.xbib.elx.transport.ExtendedTransportClient;
import org.xbib.elx.transport.ExtendedTransportClientProvider;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

/**
 * Exercises the extended transport client: single-document ingest, index and
 * mapping creation, bulk indexing of random documents, and multi-threaded
 * bulk indexing.
 */
public class ClientTest extends TestBase {

    private static final Logger logger = LogManager.getLogger(ClientTest.class.getSimpleName());

    private static final Long ACTIONS = 25000L;

    private static final Long MAX_ACTIONS_PER_REQUEST = 1000L;

    /** Starts the base cluster and one additional node. */
    @Before
    public void startNodes() {
        try {
            super.startNodes();
            startNode("2");
        } catch (Throwable t) {
            logger.error("startNodes failed", t);
        }
    }

    @Test
    public void testSingleDoc() throws Exception {
        final ExtendedTransportClient client = ClientBuilder.builder()
                .provider(ExtendedTransportClientProvider.class)
                .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST)
                .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(30))
                .put(getTransportSettings())
                .build();
        try {
            client.newIndex("test");
            client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest
            client.flush();
            client.waitForResponses(30L, TimeUnit.SECONDS);
        } catch (NoNodeAvailableException e) {
            logger.warn("skipping, no node available");
        } finally {
            assertEquals(1, client.getBulkMetric().getSucceeded().getCount());
            if (client.getBulkController().getLastBulkError() != null) {
                logger.error("error", client.getBulkController().getLastBulkError());
            }
            assertNull(client.getBulkController().getLastBulkError());
            client.close();
        }
    }

    @Test
    public void testNewIndex() throws Exception {
        final ExtendedTransportClient client = ClientBuilder.builder()
                .provider(ExtendedTransportClientProvider.class)
                .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5))
                .put(getTransportSettings())
                .build();
        client.newIndex("test");
        client.close();
    }

    @Test
    public void testMapping() throws Exception {
        final ExtendedTransportClient client = ClientBuilder.builder()
                .provider(ExtendedTransportClientProvider.class)
                .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5))
                .put(getTransportSettings())
                .build();
        // minimal mapping with a single geo_point property
        XContentBuilder mapping = jsonBuilder()
                .startObject()
                .startObject("doc")
                .startObject("properties")
                .startObject("location")
                .field("type", "geo_point")
                .endObject()
                .endObject()
                .endObject()
                .endObject();
        client.newIndex("test", Settings.EMPTY, Strings.toString(mapping));
        GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test");
        GetMappingsResponse getMappingsResponse =
                client.getClient().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet();
        logger.info("mappings={}", getMappingsResponse.getMappings());
        assertTrue(getMappingsResponse.getMappings().get("test").containsKey("doc"));
        client.close();
    }

    @Test
    public void testRandomDocs() throws Exception {
        long expected = ACTIONS;
        final ExtendedTransportClient client = ClientBuilder.builder()
                .provider(ExtendedTransportClientProvider.class)
                .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST)
                .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60))
                .put(getTransportSettings())
                .build();
        try {
            client.newIndex("test");
            for (int i = 0; i < ACTIONS; i++) {
                client.index("test", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
            }
            client.flush();
            client.waitForResponses(30L, TimeUnit.SECONDS);
        } catch (NoNodeAvailableException e) {
            logger.warn("skipping, no node available");
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        } finally {
            // every bulk action must have succeeded and the index must hold them all
            assertEquals(expected, client.getBulkMetric().getSucceeded().getCount());
            if (client.getBulkController().getLastBulkError() != null) {
                logger.error("error", client.getBulkController().getLastBulkError());
            }
            assertNull(client.getBulkController().getLastBulkError());
            client.refreshIndex("test");
            SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
                    .setQuery(QueryBuilders.matchAllQuery()).setSize(0);
            assertEquals(expected,
                    searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
            client.close();
        }
    }

    @Test
    public void testThreadedRandomDocs() throws Exception {
        int maxthreads = Runtime.getRuntime().availableProcessors();
        Long maxActionsPerRequest = MAX_ACTIONS_PER_REQUEST;
        final Long actions = ACTIONS;
        logger.info("maxthreads={} maxactions={} maxloop={}", maxthreads, maxActionsPerRequest, actions);
        final ExtendedTransportClient client = ClientBuilder.builder()
                .provider(ExtendedTransportClientProvider.class)
                .put(Parameters.MAX_CONCURRENT_REQUESTS.name(), maxthreads * 2)
                .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), maxActionsPerRequest)
                .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60))
                .put(getTransportSettings())
                .build();
        try {
            Settings settings = Settings.builder()
                    .put("index.number_of_shards", 1)
                    .put("index.number_of_replicas", 0)
                    .build();
            client.newIndex("test", settings, (String)null)
                    .startBulk("test", 0, 1000);
            logger.info("index created");
            ExecutorService executorService = Executors.newFixedThreadPool(maxthreads);
            final CountDownLatch latch = new CountDownLatch(maxthreads);
            // each worker indexes the full batch of random documents
            for (int i = 0; i < maxthreads; i++) {
                executorService.execute(() -> {
                    for (int i1 = 0; i1 < actions; i1++) {
                        client.index("test", null, false,"{ \"name\" : \"" + randomString(32) + "\"}");
                    }
                    latch.countDown();
                });
            }
            logger.info("waiting for latch...");
            if (latch.await(60L, TimeUnit.SECONDS)) {
                logger.info("flush...");
                client.flush();
                client.waitForResponses(60L, TimeUnit.SECONDS);
                logger.info("got all responses, executor service shutdown...");
                executorService.shutdown();
                executorService.awaitTermination(60L, TimeUnit.SECONDS);
                logger.info("pool is shut down");
            } else {
                logger.warn("latch timeout");
            }
            client.stopBulk("test", 30L, TimeUnit.SECONDS);
            assertEquals(maxthreads * actions, client.getBulkMetric().getSucceeded().getCount());
        } catch (NoNodeAvailableException e) {
            logger.warn("skipping, no node available");
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        } finally {
            if (client.getBulkController().getLastBulkError() != null) {
                logger.error("error", client.getBulkController().getLastBulkError());
            }
            assertNull(client.getBulkController().getLastBulkError());
            client.refreshIndex("test");
            SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
                    .setQuery(QueryBuilders.matchAllQuery()).setSize(0);
            assertEquals(maxthreads * actions,
                    searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
            client.close();
        }
    }
}
static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; + +public class DuplicateIDTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(DuplicateIDTest.class.getSimpleName()); + + private static final Long MAX_ACTIONS_PER_REQUEST = 1000L; + + private static final Long ACTIONS = 12345L; + + @Test + public void testDuplicateDocIDs() throws Exception { + long numactions = ACTIONS; + final ExtendedTransportClient client = ClientBuilder.builder() + .provider(ExtendedTransportClientProvider.class) + .put(Parameters.MAX_ACTIONS_PER_REQUEST.name(), MAX_ACTIONS_PER_REQUEST) + .put(getTransportSettings()) + .build(); + try { + client.newIndex("test"); + for (int i = 0; i < ACTIONS; i++) { + client.index("test", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + client.refreshIndex("test"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE) + .setIndices("test") + .setTypes("test") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("hits = {}", hits); + assertTrue(hits < ACTIONS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.close(); + assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount()); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } +} diff --git a/elx-transport/src/test/java/org/xbib/elx/transport/test/IndexShiftTest.java b/elx-transport/src/test/java/org/xbib/elx/transport/test/IndexShiftTest.java new file mode 100644 index 0000000..e49f809 --- /dev/null +++ 
b/elx-transport/src/test/java/org/xbib/elx/transport/test/IndexShiftTest.java @@ -0,0 +1,113 @@ +package org.xbib.elx.transport.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.junit.Ignore; +import org.junit.Test; +import org.xbib.elx.api.IndexShiftResult; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.transport.ExtendedTransportClient; +import org.xbib.elx.transport.ExtendedTransportClientProvider; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +@Ignore +public class IndexShiftTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(IndexShiftTest.class.getSimpleName()); + + @Test + public void testIndexShift() throws Exception { + final ExtendedTransportClient client = ClientBuilder.builder(client("1")) + .provider(ExtendedTransportClientProvider.class) + .put(getTransportSettings()) + .build(); + try { + Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + client.newIndex("test1234", settings); + for (int i = 0; i < 1; i++) { + client.index("test1234", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + + IndexShiftResult indexShiftResult = + client.shiftIndex("test", "test1234", Arrays.asList("a", "b", "c")); + + assertTrue(indexShiftResult.getNewAliases().contains("a")); + assertTrue(indexShiftResult.getNewAliases().contains("b")); + assertTrue(indexShiftResult.getNewAliases().contains("c")); 
+ assertTrue(indexShiftResult.getMovedAliases().isEmpty()); + + Map aliases = client.getAliases("test1234"); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("test")); + + String resolved = client.resolveAlias("test"); + aliases = client.getAliases(resolved); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("test")); + + client.newIndex("test5678", settings); + for (int i = 0; i < 1; i++) { + client.index("test5678", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + + indexShiftResult = client.shiftIndex("test", "test5678", Arrays.asList("d", "e", "f"), + (request, index, alias) -> request.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .index(index).alias(alias).filter(QueryBuilders.termQuery("my_key", alias))) + ); + assertTrue(indexShiftResult.getNewAliases().contains("d")); + assertTrue(indexShiftResult.getNewAliases().contains("e")); + assertTrue(indexShiftResult.getNewAliases().contains("f")); + assertTrue(indexShiftResult.getMovedAliases().contains("a")); + assertTrue(indexShiftResult.getMovedAliases().contains("b")); + assertTrue(indexShiftResult.getMovedAliases().contains("c")); + + aliases = client.getAliases("test5678"); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("d")); + assertTrue(aliases.containsKey("e")); + assertTrue(aliases.containsKey("f")); + + resolved = client.resolveAlias("test"); + aliases = client.getAliases(resolved); + assertTrue(aliases.containsKey("a")); + assertTrue(aliases.containsKey("b")); + assertTrue(aliases.containsKey("c")); + assertTrue(aliases.containsKey("d")); + assertTrue(aliases.containsKey("e")); + 
assertTrue(aliases.containsKey("f")); + + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.close(); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } +} diff --git a/elx-transport/src/test/java/org/xbib/elx/transport/test/MockNode.java b/elx-transport/src/test/java/org/xbib/elx/transport/test/MockNode.java new file mode 100644 index 0000000..f586691 --- /dev/null +++ b/elx-transport/src/test/java/org/xbib/elx/transport/test/MockNode.java @@ -0,0 +1,15 @@ +package org.xbib.elx.transport.test; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.InternalSettingsPreparer; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; + +import java.util.List; + +public class MockNode extends Node { + + public MockNode(Settings settings, List> classpathPlugins) { + super(InternalSettingsPreparer.prepareEnvironment(settings, null), classpathPlugins); + } +} diff --git a/elx-transport/src/test/java/org/xbib/elx/transport/test/ReplicaTest.java b/elx-transport/src/test/java/org/xbib/elx/transport/test/ReplicaTest.java new file mode 100644 index 0000000..42f62ed --- /dev/null +++ b/elx-transport/src/test/java/org/xbib/elx/transport/test/ReplicaTest.java @@ -0,0 +1,153 @@ +package org.xbib.elx.transport.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.IndexShardStats; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; +import 
org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexingStats; +import org.junit.Ignore; +import org.junit.Test; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.transport.ExtendedTransportClient; +import org.xbib.elx.transport.ExtendedTransportClientProvider; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +@Ignore +public class ReplicaTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(ReplicaTest.class.getSimpleName()); + + @Test + public void testReplicaLevel() throws Exception { + + // we need nodes for replica levels + startNode("2"); + startNode("3"); + startNode("4"); + + Settings settingsTest1 = Settings.builder() + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 3) + .build(); + + Settings settingsTest2 = Settings.builder() + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 1) + .build(); + + final ExtendedTransportClient client = ClientBuilder.builder() + .provider(ExtendedTransportClientProvider.class) + .put(getTransportSettings()) + .build(); + + try { + client.newIndex("test1", settingsTest1, new HashMap<>()) + .newIndex("test2", settingsTest2, new HashMap<>()); + client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS); + for (int i = 0; i < 1234; i++) { + client.index("test1", null, false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + for (int i = 0; i < 1234; i++) { + client.index("test2", null, false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + 
client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + logger.info("refreshing"); + client.refreshIndex("test1"); + client.refreshIndex("test2"); + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE) + .setIndices("test1", "test2") + .setQuery(matchAllQuery()); + long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); + logger.info("query total hits={}", hits); + assertEquals(2468, hits); + IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.getClient(), IndicesStatsAction.INSTANCE) + .all(); + IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet(); + for (Map.Entry m : response.getIndices().entrySet()) { + IndexStats indexStats = m.getValue(); + CommonStats commonStats = indexStats.getTotal(); + IndexingStats indexingStats = commonStats.getIndexing(); + IndexingStats.Stats stats = indexingStats.getTotal(); + logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount()); + for (Map.Entry me : indexStats.getIndexShards().entrySet()) { + IndexShardStats indexShardStats = me.getValue(); + CommonStats commonShardStats = indexShardStats.getTotal(); + logger.info("shard {} count = {}", me.getKey(), + commonShardStats.getIndexing().getTotal().getIndexCount()); + } + } + try { + client.deleteIndex("test1") + .deleteIndex("test2"); + } catch (Exception e) { + logger.error("delete index failed, ignored. 
Reason:", e); + } + client.close(); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } + + @Test + public void testUpdateReplicaLevel() throws Exception { + + long numberOfShards = 2; + int replicaLevel = 3; + + // we need 3 nodes for replica level 3 + startNode("2"); + startNode("3"); + + Settings settings = Settings.builder() + .put("index.number_of_shards", numberOfShards) + .put("index.number_of_replicas", 0) + .build(); + + final ExtendedTransportClient client = ClientBuilder.builder() + .provider(ExtendedTransportClientProvider.class) + .put(getTransportSettings()) + .build(); + + try { + client.newIndex("replicatest", settings, new HashMap<>()); + client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS); + for (int i = 0; i < 12345; i++) { + client.index("replicatest",null, false, "{ \"name\" : \"" + randomString(32) + "\"}"); + } + client.flush(); + client.waitForResponses(30L, TimeUnit.SECONDS); + client.updateReplicaLevel("replicatest", replicaLevel, 30L, TimeUnit.SECONDS); + assertEquals(replicaLevel, client.getReplicaLevel("replicatest")); + } catch (NoNodeAvailableException e) { + logger.warn("skipping, no node available"); + } finally { + client.close(); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } + +} diff --git a/elx-transport/src/test/java/org/xbib/elx/transport/test/SmokeTest.java b/elx-transport/src/test/java/org/xbib/elx/transport/test/SmokeTest.java new file mode 100644 index 0000000..e531632 --- /dev/null +++ b/elx-transport/src/test/java/org/xbib/elx/transport/test/SmokeTest.java @@ -0,0 +1,71 @@ +package org.xbib.elx.transport.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import 
org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.common.settings.Settings; +import org.junit.Test; +import org.xbib.elx.api.IndexDefinition; +import org.xbib.elx.common.ClientBuilder; +import org.xbib.elx.transport.ExtendedTransportClient; +import org.xbib.elx.transport.ExtendedTransportClientProvider; + +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +public class SmokeTest extends TestBase { + + private static final Logger logger = LogManager.getLogger(SmokeTest.class.getSimpleName()); + + @Test + public void smokeTest() throws Exception { + + final ExtendedTransportClient client = ClientBuilder.builder() + .provider(ExtendedTransportClientProvider.class) + .put(getTransportSettings()) + .build(); + try { + client.newIndex("test"); + client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest + client.flush(); + client.waitForResponses(30, TimeUnit.SECONDS); + + assertEquals(getClusterName(), client.getClusterName()); + + client.checkMapping("test"); + + client.update("test", "1", "{ \"name\" : \"Another name\"}"); + client.flush(); + + client.waitForRecovery("test", 10L, TimeUnit.SECONDS); + + client.delete("test", "1"); + client.deleteIndex("test"); + + IndexDefinition indexDefinition = client.buildIndexDefinitionFromSettings("test", Settings.builder() + .build()); + assertEquals(0, indexDefinition.getReplicaLevel()); + client.newIndex(indexDefinition); + client.index(indexDefinition.getFullIndexName(), "1", true, "{ \"name\" : \"Hello World\"}"); + client.flush(); + client.updateReplicaLevel(indexDefinition, 2); + + int replica = client.getReplicaLevel(indexDefinition); + assertEquals(2, replica); + + client.deleteIndex(indexDefinition); + assertEquals(0, client.getBulkMetric().getFailed().getCount()); + assertEquals(4, client.getBulkMetric().getSucceeded().getCount()); + } catch (NoNodeAvailableException e) { + 
logger.warn("skipping, no node available"); + } finally { + client.close(); + if (client.getBulkController().getLastBulkError() != null) { + logger.error("error", client.getBulkController().getLastBulkError()); + } + assertNull(client.getBulkController().getLastBulkError()); + } + } +} diff --git a/elx-transport/src/test/java/org/xbib/elx/transport/test/TestBase.java b/elx-transport/src/test/java/org/xbib/elx/transport/test/TestBase.java new file mode 100644 index 0000000..546b921 --- /dev/null +++ b/elx-transport/src/test/java/org/xbib/elx/transport/test/TestBase.java @@ -0,0 +1,206 @@ +package org.xbib.elx.transport.test; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.client.support.AbstractClient; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.transport.netty4.Netty4Plugin; +import org.junit.After; +import org.junit.Before; + +import 
java.io.IOException; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; + +public class TestBase { + + private static final Logger logger = LogManager.getLogger("test"); + + private static final Random random = new Random(); + + private static final char[] numbersAndLetters = ("0123456789abcdefghijklmnopqrstuvwxyz").toCharArray(); + + private Map nodes = new HashMap<>(); + + private Map clients = new HashMap<>(); + + private AtomicInteger counter = new AtomicInteger(); + + private String cluster; + + private String host; + + private int port; + + @Before + public void startNodes() { + try { + logger.info("starting"); + this.cluster = "test-cluster-" + counter.incrementAndGet(); + startNode("1"); + findNodeAddress(); + try { + ClusterHealthResponse healthResponse = client("1").execute(ClusterHealthAction.INSTANCE, + new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN) + .timeout(TimeValue.timeValueSeconds(30))).actionGet(); + if (healthResponse != null && healthResponse.isTimedOut()) { + throw new IOException("cluster state is " + healthResponse.getStatus().name() + + ", from here on, everything will fail!"); + } + } catch (ElasticsearchTimeoutException e) { + throw new IOException("cluster does not respond to health request, cowardly refusing to continue"); + } + ClusterStateRequestBuilder clusterStateRequestBuilder = + new ClusterStateRequestBuilder(client("1"), ClusterStateAction.INSTANCE).all(); + ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet(); + logger.info("cluster name = {}", clusterStateResponse.getClusterName().value()); + logger.info("host = {} port = {}", 
host, port); + } catch (Throwable t) { + logger.error(t.getMessage(), t); + } + } + + @After + public void stopNodes() { + try { + closeNodes(); + } catch (Exception e) { + logger.error("can not close nodes", e); + } finally { + try { + deleteFiles(); + logger.info("data files wiped"); + Thread.sleep(2000L); // let OS commit changes + } catch (IOException e) { + logger.error(e.getMessage(), e); + } catch (InterruptedException e) { + // ignore + } + } + } + + protected Settings getTransportSettings() { + return Settings.builder() + .put("cluster.name", cluster) + .put("path.home", getHome()) + .put("host", host) + .put("port", port) + .build(); + } + + protected Settings getNodeSettings() { + return Settings.builder() + .put("cluster.name", cluster) + .put("discovery.zen.minimum_master_nodes", "1") + .put("transport.type", Netty4Plugin.NETTY_TRANSPORT_NAME) + .put("node.max_local_storage_nodes", 10) // allow many nodes to initialize here + .put("path.home", getHome()) + .build(); + } + + protected static String getHome() { + return System.getProperty("path.home", System.getProperty("user.dir")); + } + + protected void startNode(String id) throws NodeValidationException { + buildNode(id).start(); + } + + protected AbstractClient client(String id) { + return clients.get(id); + } + + protected String getClusterName() { + return cluster; + } + + protected String randomString(int len) { + final char[] buf = new char[len]; + final int n = numbersAndLetters.length - 1; + for (int i = 0; i < buf.length; i++) { + buf[i] = numbersAndLetters[random.nextInt(n)]; + } + return new String(buf); + } + + private void closeNodes() throws IOException { + logger.info("closing all clients"); + for (AbstractClient client : clients.values()) { + client.close(); + } + clients.clear(); + logger.info("closing all nodes"); + for (Node node : nodes.values()) { + if (node != null) { + node.close(); + } + } + nodes.clear(); + logger.info("all nodes closed"); + } + + private void 
findNodeAddress() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().transport(true); + NodesInfoResponse response = client("1").admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + TransportAddress address= response.getNodes().iterator().next().getTransport().getAddress() + .publishAddress(); + host = address.address().getHostName(); + port = address.address().getPort(); + } + + private Node buildNode(String id) { + Settings nodeSettings = Settings.builder() + .put(getNodeSettings()) + .put("node.name", id) + .build(); + List> plugins = Arrays.asList(CommonAnalysisPlugin.class, Netty4Plugin.class); + Node node = new MockNode(nodeSettings, plugins); + AbstractClient client = (AbstractClient) node.client(); + nodes.put(id, node); + clients.put(id, client); + return node; + } + + private static void deleteFiles() throws IOException { + Path directory = Paths.get(getHome() + "/data"); + Files.walkFileTree(directory, new SimpleFileVisitor<>() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + Files.delete(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + }); + } +} diff --git a/elx-transport/src/test/resources/log4j2.xml b/elx-transport/src/test/resources/log4j2.xml new file mode 100644 index 0000000..6c323f8 --- /dev/null +++ b/elx-transport/src/test/resources/log4j2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/gradle.properties b/gradle.properties index d3e6671..e29c682 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,30 +1,17 @@ -group = org.xbib.elasticsearch -name = elasticsearch-client +group = org.xbib +name = elx version = 6.3.2.0 profile = default release = 0 -elasticsearch.version = 6.3.2 -lucene.version = 7.3.1 -tcnative.version = 2.0.15.Final -alpnagent.version = 
2.0.7 -netty.version = 4.1.33.Final -netty-http.version = 4.1.33.0 -xbib-metrics.version = 1.1.0 - -elasticsearch-libs.version = 6.3.2.1 -elasticsearch-server.version = 6.3.2.1 -elasticsearch-client.version = 6.3.2.1 -elasticsearch-devkit.version = 6.3.2.4 -spatial4j.version = 0.7 -jts.version = 1.15.1 -jna.version = 4.5.2 +elasticsearch-server.version = 6.3.2.2 log4j.version = 2.11.1 -checkstyle.version = 8.13 +xbib-metrics.version = 1.1.0 +xbib-netty-http.version = 4.1.33.0 # test junit.version = 4.12 wagon.version = 3.0.0 asciidoclet.version = 1.6.0.0 -org.gradle.warning.mode=all +org.gradle.warning.mode = all diff --git a/gradle/ext.gradle b/gradle/ext.gradle deleted file mode 100644 index e69de29..0000000 diff --git a/gradle/publish.gradle b/gradle/publish.gradle index c05a223..8675487 100644 --- a/gradle/publish.gradle +++ b/gradle/publish.gradle @@ -1,12 +1,19 @@ +ext { + description = 'Extensions for Elasticsearch clients (node and transport)' + scmUrl = 'https://github.com/jprante/elx' + scmConnection = 'scm:git:git://github.com/jprante/elx.git' + scmDeveloperConnection = 'scm:git:git://github.com/jprante/elx.git' +} -task xbibUpload(type: Upload) { +task xbibUpload(type: Upload, dependsOn: build) { + group = 'publish' configuration = configurations.archives uploadDescriptor = true repositories { if (project.hasProperty("xbibUsername")) { mavenDeployer { configuration = configurations.wagon - repository(url: 'sftp://xbib.org/repository') { + repository(url: uri(project.property('xbibUrl'))) { authentication(userName: xbibUsername, privateKey: xbibPrivateKey) } } @@ -14,7 +21,8 @@ task xbibUpload(type: Upload) { } } -task sonaTypeUpload(type: Upload) { +task sonaTypeUpload(type: Upload, dependsOn: build) { + group = 'publish' configuration = configurations.archives uploadDescriptor = true repositories { @@ -34,7 +42,7 @@ task sonaTypeUpload(type: Upload) { name project.name description description packaging 'jar' - inceptionYear '2012' + inceptionYear 
'2019' url scmUrl organization { name 'xbib' @@ -42,7 +50,7 @@ task sonaTypeUpload(type: Upload) { } developers { developer { - id user + id 'xbib' name 'Jörg Prante' email 'joergprante@gmail.com' url 'https://github.com/jprante' @@ -64,3 +72,7 @@ task sonaTypeUpload(type: Upload) { } } } + +nexusStaging { + packageGroup = "org.xbib" +} diff --git a/gradle/publish.gradle~ b/gradle/publish.gradle~ deleted file mode 100644 index e04b20b..0000000 --- a/gradle/publish.gradle~ +++ /dev/null @@ -1,104 +0,0 @@ - -task xbibUpload(type: Upload) { - configuration = configurations.archives - uploadDescriptor = true - repositories { - if (project.hasProperty("xbibUsername")) { - mavenDeployer { - configuration = configurations.wagon - repository(url: 'scpexe://xbib.org/repository') { - authentication(userName: xbibUsername, privateKey: xbibPrivateKey) - } - } - } - } -} - -task sonaTypeUpload(type: Upload) { - configuration = configurations.archives - uploadDescriptor = true - repositories { - if (project.hasProperty('ossrhUsername')) { - mavenDeployer { - beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) } - repository(url: 'https://oss.sonatype.org/service/local/staging/deploy/maven2') { - authentication(userName: ossrhUsername, password: ossrhPassword) - } - snapshotRepository(url: 'https://oss.sonatype.org/content/repositories/snapshots') { - authentication(userName: ossrhUsername, password: ossrhPassword) - } - pom.project { - name name - description description - packaging 'jar' - inceptionYear '2012' - url scmUrl - organization { - name 'xbib' - url 'http://xbib.org' - } - developers { - developer { - id user - name 'Jörg Prante' - email 'joergprante@gmail.com' - url 'https://github.com/jprante' - } - } - scm { - url scmUrl - connection scmConnection - developerConnection scmDeveloperConnection - } - licenses { - license { - name 'The Apache License, Version 2.0' - url 'http://www.apache.org/licenses/LICENSE-2.0.txt' - } - } - } - } - } - } -} 
- - -task hbzUpload(type: Upload) { - configuration = configurations.archives - uploadDescriptor = true - repositories { - if (project.hasProperty('hbzUserName')) { - mavenDeployer { - configuration = configurations.wagon - beforeDeployment { MavenDeployment deployment -> - signing.signPom(deployment) - } - repository(url: uri(hbzUrl)) { - authentication(userName: hbzUserName, privateKey: hbzPrivateKey) - } - pom.project { - developers { - developer { - id 'jprante' - name 'Jörg Prante' - email 'joergprante@gmail.com' - url 'https://github.com/jprante' - } - } - scm { - url 'https://github.com/xbib/elasticsearch-webapp-libraryservice' - connection 'scm:git:git://github.com/xbib/elasticsaerch-webapp-libraryservice.git' - developerConnection 'scm:git:git://github.com/xbib/elasticsaerch-webapp-libraryservice.git' - } - inceptionYear '2016' - licenses { - license { - name 'The Apache License, Version 2.0' - url 'http://www.apache.org/licenses/LICENSE-2.0.txt' - } - } - } - } - } - } -} diff --git a/gradle/sonarqube.gradle b/gradle/sonarqube.gradle deleted file mode 100644 index d759e4c..0000000 --- a/gradle/sonarqube.gradle +++ /dev/null @@ -1,30 +0,0 @@ -tasks.withType(FindBugs) { - ignoreFailures = true - reports { - xml.enabled = false - html.enabled = true - } -} -tasks.withType(Pmd) { - ignoreFailures = true - reports { - xml.enabled = true - html.enabled = true - } -} -tasks.withType(Checkstyle) { - ignoreFailures = true - reports { - xml.enabled = true - html.enabled = true - } -} - -sonarqube { - properties { - property "sonar.projectName", "${project.group} ${project.name}" - property "sonar.sourceEncoding", "UTF-8" - property "sonar.scm.provider", "git" - property "sonar.junit.reportsPath", "build/test-results/test/" - } -} diff --git a/http/build.gradle b/http/build.gradle deleted file mode 100644 index a9a5f98..0000000 --- a/http/build.gradle +++ /dev/null @@ -1,66 +0,0 @@ -buildscript { - repositories { - jcenter() - maven { - url 
'http://xbib.org/repository' - } - } - dependencies { - classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4" - } -} - -apply plugin: 'org.xbib.gradle.plugin.elasticsearch.build' - -configurations { - main - tests -} - -dependencies { - compile project(':common') - compile "org.xbib:netty-http-client:${project.property('netty-http.version')}" - compile "org.xbib.elasticsearch:transport-netty4:${project.property('elasticsearch-server.version')}" - testCompile "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" - testRuntime "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" -} - -jar { - baseName "${rootProject.name}-common" -} - -/* -task testJar(type: Jar, dependsOn: testClasses) { - baseName = "${project.archivesBaseName}-tests" - from sourceSets.test.output -} -*/ - -artifacts { - main jar - tests testJar - archives sourcesJar, javadocJar -} - -test { - enabled = true - include '**/SimpleTest.*' - testLogging { - showStandardStreams = true - exceptionFormat = 'full' - } -} - -randomizedTest { - enabled = false -} - -esTest { - enabled = true - dependsOn jar, testJar - // test with the jars, not the classes, for security manager - // classpath = files(configurations.testRuntime) + configurations.main.artifacts.files + configurations.tests.artifacts.files - systemProperty 'tests.security.manager', 'true' - // some extra security policy for our code - systemProperty 'tests.security.policy', '/extra-security.policy' -} diff --git a/http/config/checkstyle/checkstyle.xml b/http/config/checkstyle/checkstyle.xml deleted file mode 100644 index 7af8d6d..0000000 --- a/http/config/checkstyle/checkstyle.xml +++ /dev/null @@ -1,323 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/http/src/docs/asciidoc/css/foundation.css b/http/src/docs/asciidoc/css/foundation.css deleted file mode 100644 index 27be611..0000000 --- a/http/src/docs/asciidoc/css/foundation.css +++ /dev/null @@ -1,684 +0,0 @@ -/*! normalize.css v2.1.2 | MIT License | git.io/normalize */ -/* ========================================================================== HTML5 display definitions ========================================================================== */ -/** Correct `block` display not defined in IE 8/9. */ -article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; } - -/** Correct `inline-block` display not defined in IE 8/9. */ -audio, canvas, video { display: inline-block; } - -/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */ -audio:not([controls]) { display: none; height: 0; } - -/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */ -[hidden], template { display: none; } - -script { display: none !important; } - -/* ========================================================================== Base ========================================================================== */ -/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */ -html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ } - -/** Remove default margin. 
*/ -body { margin: 0; } - -/* ========================================================================== Links ========================================================================== */ -/** Remove the gray background color from active links in IE 10. */ -a { background: transparent; } - -/** Address `outline` inconsistency between Chrome and other browsers. */ -a:focus { outline: thin dotted; } - -/** Improve readability when focused and also mouse hovered in all browsers. */ -a:active, a:hover { outline: 0; } - -/* ========================================================================== Typography ========================================================================== */ -/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */ -h1 { font-size: 2em; margin: 0.67em 0; } - -/** Address styling not present in IE 8/9, Safari 5, and Chrome. */ -abbr[title] { border-bottom: 1px dotted; } - -/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ -b, strong { font-weight: bold; } - -/** Address styling not present in Safari 5 and Chrome. */ -dfn { font-style: italic; } - -/** Address differences between Firefox and other browsers. */ -hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; } - -/** Address styling not present in IE 8/9. */ -mark { background: #ff0; color: #000; } - -/** Correct font family set oddly in Safari 5 and Chrome. */ -code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; } - -/** Improve readability of pre-formatted text in all browsers. */ -pre { white-space: pre-wrap; } - -/** Set consistent quote types. */ -q { quotes: "\201C" "\201D" "\2018" "\2019"; } - -/** Address inconsistent and variable font size in all browsers. */ -small { font-size: 80%; } - -/** Prevent `sub` and `sup` affecting `line-height` in all browsers. 
*/ -sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } - -sup { top: -0.5em; } - -sub { bottom: -0.25em; } - -/* ========================================================================== Embedded content ========================================================================== */ -/** Remove border when inside `a` element in IE 8/9. */ -img { border: 0; } - -/** Correct overflow displayed oddly in IE 9. */ -svg:not(:root) { overflow: hidden; } - -/* ========================================================================== Figures ========================================================================== */ -/** Address margin not present in IE 8/9 and Safari 5. */ -figure { margin: 0; } - -/* ========================================================================== Forms ========================================================================== */ -/** Define consistent border, margin, and padding. */ -fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } - -/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */ -legend { border: 0; /* 1 */ padding: 0; /* 2 */ } - -/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ -button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ } - -/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ -button, input { line-height: normal; } - -/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. 
*/ -button, select { text-transform: none; } - -/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */ -button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ } - -/** Re-set default cursor for disabled elements. */ -button[disabled], html input[disabled] { cursor: default; } - -/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ -input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } - -/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */ -input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; } - -/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ -input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } - -/** Remove inner padding and border in Firefox 4+. */ -button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } - -/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */ -textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ } - -/* ========================================================================== Tables ========================================================================== */ -/** Remove most spacing between table cells. 
*/ -table { border-collapse: collapse; border-spacing: 0; } - -meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; } - -meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; } - -meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; } - -*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } - -html, body { font-size: 100%; } - -body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; } - -a:hover { cursor: pointer; } - -img, object, embed { max-width: 100%; height: auto; } - -object, embed { height: 100%; } - -img { -ms-interpolation-mode: bicubic; } - -#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; } - -.left { float: left !important; } - -.right { float: right !important; } - -.text-left { text-align: left !important; } - -.text-right { text-align: right !important; } - -.text-center { text-align: center !important; } - -.text-justify { text-align: justify !important; } - -.hide { display: none; } - -.antialiased { -webkit-font-smoothing: antialiased; } - -img { display: inline-block; vertical-align: middle; } - -textarea { height: auto; min-height: 50px; } - -select { width: 100%; } - -object, svg { display: inline-block; vertical-align: middle; } - -.center { margin-left: auto; margin-right: auto; } - -.spread { width: 100%; } - -p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; } - -.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, 
.stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; } - -/* Typography resets */ -div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; } - -/* Default Link Styles */ -a { color: #2ba6cb; text-decoration: none; line-height: inherit; } -a:hover, a:focus { color: #2795b6; } -a img { border: none; } - -/* Default paragraph styles */ -p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } -p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } - -/* Default header styles */ -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; } -h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; } - -h1 { font-size: 2.125em; } - -h2 { font-size: 1.6875em; } - -h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.375em; } - -h4 { font-size: 1.125em; } - -h5 { font-size: 1.125em; } - -h6 { font-size: 1em; } - -hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; } - -/* Helpful Typography Defaults */ -em, i { font-style: italic; line-height: inherit; } - -strong, b { font-weight: bold; line-height: inherit; } - -small { font-size: 60%; line-height: inherit; } - -code { 
font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; } - -/* Lists */ -ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } - -ul, ol { margin-left: 1.5em; } -ul.no-bullet, ol.no-bullet { margin-left: 1.5em; } - -/* Unordered Lists */ -ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ } -ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; } -ul.square { list-style-type: square; } -ul.circle { list-style-type: circle; } -ul.disc { list-style-type: disc; } -ul.no-bullet { list-style: none; } - -/* Ordered Lists */ -ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } - -/* Definition Lists */ -dl dt { margin-bottom: 0.3125em; font-weight: bold; } -dl dd { margin-bottom: 1.25em; } - -/* Abbreviations */ -abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; } - -abbr { text-transform: none; } - -/* Blockquotes */ -blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; } -blockquote cite { display: block; font-size: 0.8125em; color: #555555; } -blockquote cite:before { content: "\2014 \0020"; } -blockquote cite a, blockquote cite a:visited { color: #555555; } - -blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; } - -/* Microformats */ -.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; } -.vcard li { margin: 0; display: block; } -.vcard .fn { font-weight: bold; font-size: 0.9375em; } - -.vevent .summary { font-weight: bold; } -.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; } - -@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } - h1 { font-size: 
2.75em; } - h2 { font-size: 2.3125em; } - h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } - h4 { font-size: 1.4375em; } } -/* Tables */ -table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; } -table thead, table tfoot { background: whitesmoke; font-weight: bold; } -table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; } -table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; } -table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; } -table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; } - -body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; } - -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } - -.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; } -.clearfix:after, .float-group:after { clear: both; } - -*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; } -*:not(pre) > code.nobreak { word-wrap: normal; } -*:not(pre) > code.nowrap { white-space: nowrap; } - -pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; } - -em em { font-style: normal; } - -strong strong { font-weight: normal; } - -.keyseq { color: #555555; } - -kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; 
margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } - -.keyseq kbd:first-child { margin-left: 0; } - -.keyseq kbd:last-child { margin-right: 0; } - -.menuseq, .menu { color: #090909; } - -b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; } - -b.button:before { content: "["; padding: 0 3px 0 2px; } - -b.button:after { content: "]"; padding: 0 2px 0 3px; } - -#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } -#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; } -#header:after, #content:after, #footnotes:after, #footer:after { clear: both; } - -#content { margin-top: 1.25em; } - -#content:before { content: none; } - -#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; } -#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; } -#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; } -#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; } -#header .details span:first-child { margin-left: -0.125em; } -#header .details span.email a { color: #6f6f6f; } -#header .details br { display: none; } -#header .details br + span:before { content: "\00a0\2013\00a0"; } -#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; } -#header .details br + span#revremark:before { content: "\00a0|\00a0"; } -#header 
#revnumber { text-transform: capitalize; } -#header #revnumber:after { content: "\00a0"; } - -#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } - -#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; } -#toc > ul { margin-left: 0.125em; } -#toc ul.sectlevel0 > li > a { font-style: italic; } -#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } -#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; } -#toc li { line-height: 1.3334; margin-top: 0.3334em; } -#toc a { text-decoration: none; } -#toc a:active { text-decoration: underline; } - -#toctitle { color: #6f6f6f; font-size: 1.2em; } - -@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } - body.toc2 { padding-left: 15em; padding-right: 0; } - #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } - #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } - #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } - #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } - #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } - body.toc2.toc-right { padding-left: 0; padding-right: 15em; } - body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } } -@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } - #toc.toc2 { width: 20em; } - #toc.toc2 #toctitle { font-size: 1.375em; } - #toc.toc2 > ul { font-size: 0.95em; } - #toc.toc2 ul ul { padding-left: 1.25em; } - body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } 
-#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } -#content #toc > :first-child { margin-top: 0; } -#content #toc > :last-child { margin-bottom: 0; } - -#footer { max-width: 100%; background-color: #222222; padding: 1.25em; } - -#footer-text { color: #dddddd; line-height: 1.44; } - -.sect1 { padding-bottom: 0.625em; } - -@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } } -.sect1 + .sect1 { border-top: 1px solid #dddddd; } - -#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; } -#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } -#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } -#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; } -#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, 
.sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; } - -.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } - -.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; } - -table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; } - -.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; } - -table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; } - -.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } -.admonitionblock > table td.icon { text-align: center; width: 80px; } -.admonitionblock > table td.icon img { max-width: initial; } -.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; } -.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; } -.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } - -.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; } -.exampleblock > .content > :first-child { margin-top: 0; } -.exampleblock > .content > :last-child { margin-bottom: 0; } - -.sidebarblock { border-style: solid; border-width: 1px; border-color: 
#d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } -.sidebarblock > :first-child { margin-top: 0; } -.sidebarblock > :last-child { margin-bottom: 0; } -.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; } - -.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } - -.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; } -.sidebarblock .literalblock pre, .sidebarblock .listingblock pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; } - -.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; } -.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; } -@media only screen and (min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } } -@media only screen and (min-width: 1280px) { .literalblock pre, 
.literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } } - -.literalblock.output pre { color: #eeeeee; background-color: black; } - -.listingblock pre.highlightjs { padding: 0; } -.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; } - -.listingblock > .content { position: relative; } - -.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; } - -.listingblock:hover code[data-lang]:before { display: block; } - -.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; } - -.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; } - -table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } - -table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; } - -table.pyhltable td.code { padding-left: .75em; padding-right: 0; } - -pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; } - -pre.pygments .lineno { display: inline-block; margin-right: .25em; } - -table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; } - -.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } -.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; } -.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } -.quoteblock blockquote { margin: 0; padding: 0; border: 0; } -.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px 
rgba(0, 0, 0, 0.1); } -.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; } -.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; } -.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; } -.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; } -.quoteblock .quoteblock blockquote:before { display: none; } - -.verseblock { margin: 0 1em 1.25em 1em; } -.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; } -.verseblock pre strong { font-weight: 400; } -.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } - -.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; } -.quoteblock .attribution br, .verseblock .attribution br { display: none; } -.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; } - -.quoteblock.abstract { margin: 0 0 1.25em 0; display: block; } -.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; } -.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; } - -table.tableblock { max-width: 100%; border-collapse: separate; } -table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; } - -table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; } - -table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; } - -table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; } - -table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; } - -table.grid-all * > tr > 
.tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; } - -table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; } - -table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; } - -table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; } - -table.frame-all { border-width: 1px; } - -table.frame-sides { border-width: 0 1px; } - -table.frame-topbot { border-width: 1px 0; } - -th.halign-left, td.halign-left { text-align: left; } - -th.halign-right, td.halign-right { text-align: right; } - -th.halign-center, td.halign-center { text-align: center; } - -th.valign-top, td.valign-top { vertical-align: top; } - -th.valign-bottom, td.valign-bottom { vertical-align: bottom; } - -th.valign-middle, td.valign-middle { vertical-align: middle; } - -table thead th, table tfoot th { font-weight: bold; } - -tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; } - -tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; } - -p.tableblock > code:only-child { background: none; padding: 0; } - -p.tableblock { font-size: 1em; } - -td > div.verse { white-space: pre; } - -ol { margin-left: 1.75em; } - -ul li ol { margin-left: 1.5em; } - -dl dd { margin-left: 1.125em; } - -dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } - -ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } - -ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; } - -ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; 
} - -ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; } - -ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; } - -ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; } -ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; } -ul.inline > li > * { display: block; } - -.unstyled dl dt { font-weight: normal; font-style: normal; } - -ol.arabic { list-style-type: decimal; } - -ol.decimal { list-style-type: decimal-leading-zero; } - -ol.loweralpha { list-style-type: lower-alpha; } - -ol.upperalpha { list-style-type: upper-alpha; } - -ol.lowerroman { list-style-type: lower-roman; } - -ol.upperroman { list-style-type: upper-roman; } - -ol.lowergreek { list-style-type: lower-greek; } - -.hdlist > table, .colist > table { border: 0; background: none; } -.hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; } - -td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } - -td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } - -.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } - -.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; } -.colist > table tr > td:first-of-type img { max-width: initial; } -.colist > table tr > td:last-of-type { padding: 0.25em 0; } - -.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; } - -.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; } -.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; } -.imageblock > .title { margin-bottom: 0; } -.imageblock.thumb, .imageblock.th { border-width: 6px; } -.imageblock.thumb > .title, 
.imageblock.th > .title { padding: 0 0.125em; } - -.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } -.image.left { margin-right: 0.625em; } -.image.right { margin-left: 0.625em; } - -a.image { text-decoration: none; display: inline-block; } -a.image object { pointer-events: none; } - -sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } -sup.footnote a, sup.footnoteref a { text-decoration: none; } -sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } - -#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } -#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; } -#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; } -#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; } -#footnotes .footnote:last-of-type { margin-bottom: 0; } -#content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } - -.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; } -.gist .file-data > table td.line-data { width: 99%; } - -div.unbreakable { page-break-inside: avoid; } - -.big { font-size: larger; } - -.small { font-size: smaller; } - -.underline { text-decoration: underline; } - -.overline { text-decoration: overline; } - -.line-through { text-decoration: line-through; } - -.aqua { color: #00bfbf; } - -.aqua-background { background-color: #00fafa; } - -.black { color: black; } - -.black-background { background-color: black; } - -.blue { color: #0000bf; } - -.blue-background { background-color: #0000fa; } - -.fuchsia { color: #bf00bf; } - -.fuchsia-background { background-color: #fa00fa; } - -.gray { color: #606060; } - -.gray-background { background-color: #7d7d7d; } - -.green { color: #006000; } 
- -.green-background { background-color: #007d00; } - -.lime { color: #00bf00; } - -.lime-background { background-color: #00fa00; } - -.maroon { color: #600000; } - -.maroon-background { background-color: #7d0000; } - -.navy { color: #000060; } - -.navy-background { background-color: #00007d; } - -.olive { color: #606000; } - -.olive-background { background-color: #7d7d00; } - -.purple { color: #600060; } - -.purple-background { background-color: #7d007d; } - -.red { color: #bf0000; } - -.red-background { background-color: #fa0000; } - -.silver { color: #909090; } - -.silver-background { background-color: #bcbcbc; } - -.teal { color: #006060; } - -.teal-background { background-color: #007d7d; } - -.white { color: #bfbfbf; } - -.white-background { background-color: #fafafa; } - -.yellow { color: #bfbf00; } - -.yellow-background { background-color: #fafa00; } - -span.icon > .fa { cursor: default; } - -.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } -.admonitionblock td.icon .icon-note:before { content: "\f05a"; color: #207c98; } -.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } -.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; } -.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; } -.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; } - -.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } -.conum[data-value] * { color: #fff !important; } -.conum[data-value] + b { display: none; } -.conum[data-value]:after { content: attr(data-value); } -pre 
.conum[data-value] { position: relative; top: -0.125em; } - -b.conum * { color: inherit !important; } - -.conum:not([data-value]):empty { display: none; } - -.literalblock pre, .listingblock pre { background: #eeeeee; } diff --git a/http/src/docs/asciidoclet/overview.adoc b/http/src/docs/asciidoclet/overview.adoc deleted file mode 100644 index 7947331..0000000 --- a/http/src/docs/asciidoclet/overview.adoc +++ /dev/null @@ -1,4 +0,0 @@ -= Elasticsearch Java client -Jörg Prante -Version 5.4.0.0 - diff --git a/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods b/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods deleted file mode 100644 index 18f7ab4..0000000 --- a/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods +++ /dev/null @@ -1 +0,0 @@ -org.xbib.elasticsearch.client.http.HttpClient \ No newline at end of file diff --git a/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.http.HttpAction b/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.http.HttpAction deleted file mode 100644 index cce80e6..0000000 --- a/http/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.http.HttpAction +++ /dev/null @@ -1,11 +0,0 @@ -org.elasticsearch.action.admin.cluster.node.info.HttpNodesInfoAction -org.elasticsearch.action.admin.cluster.settings.HttpClusterUpdateSettingsAction -org.elasticsearch.action.admin.indices.create.HttpCreateIndexAction -org.elasticsearch.action.admin.indices.refresh.HttpRefreshIndexAction -org.elasticsearch.action.admin.indices.settings.put.HttpUpdateSettingsAction -org.elasticsearch.action.bulk.HttpBulkAction -org.elasticsearch.action.index.HttpIndexAction -org.elasticsearch.action.search.HttpSearchAction -org.elasticsearch.action.main.HttpMainAction -org.elasticsearch.action.get.HttpExistsAction -org.elasticsearch.action.get.HttpGetAction diff --git 
a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientAliasTests.java b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientAliasTests.java deleted file mode 100644 index f683183..0000000 --- a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientAliasTests.java +++ /dev/null @@ -1,108 +0,0 @@ -package org.xbib.elasticsearch.client.http; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.testframework.ESSingleNodeTestCase; -import org.elasticsearch.transport.Netty4Plugin; -import org.junit.Before; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.IndexAliasAdder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -public class HttpClientAliasTests extends ESSingleNodeTestCase { - - private static final Logger logger = LogManager.getLogger(HttpClientAliasTests.class.getName()); - - private TransportAddress httpAddress; - - @Before - public void fetchTransportAddress() { - NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); - httpAddress = 
nodeInfo.getHttp().getAddress().publishAddress(); - } - - @Override - protected Collection> getPlugins() { - return Collections.singletonList(Netty4Plugin.class); - } - - @Override - public Settings nodeSettings() { - return Settings.builder() - .put(super.nodeSettings()) - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) - .put(NetworkModule.HTTP_TYPE_DEFAULT_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) - .build(); - } - - private String findHttpAddress() { - return "http://" + httpAddress.address().getHostName() + ":" + httpAddress.address().getPort(); - } - - public void testIndexAlias() throws Exception { - final HttpClient client = ClientBuilder.builder() - .put("urls", findHttpAddress()) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(HttpClient.class); - try { - client.newIndex("test1234"); - for (int i = 0; i < 1; i++) { - client.index("test1234", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.refreshIndex("test1234"); - - List simpleAliases = Arrays.asList("a", "b", "c"); - client.switchAliases("test", "test1234", simpleAliases); - - client.newIndex("test5678"); - for (int i = 0; i < 1; i++) { - client.index("test5678", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.refreshIndex("test5678"); - - simpleAliases = Arrays.asList("d", "e", "f"); - client.switchAliases("test", "test5678", simpleAliases, new IndexAliasAdder() { - @Override - public void addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias) { - builder.addAlias(index, alias, QueryBuilders.termQuery("my_key", alias)); - } - }); - Map aliases = client.getIndexFilters("test5678"); - logger.info("aliases of index test5678 = {}", aliases); - - aliases = client.getAliasFilters("test"); - 
logger.info("aliases of alias test = {}", aliases); - - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.waitForResponses("30s"); - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } -} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientDuplicateIDTests.java b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientDuplicateIDTests.java deleted file mode 100644 index e44d004..0000000 --- a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientDuplicateIDTests.java +++ /dev/null @@ -1,101 +0,0 @@ -package org.xbib.elasticsearch.client.http; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.testframework.ESSingleNodeTestCase; -import org.elasticsearch.transport.Netty4Plugin; -import org.junit.Before; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.util.Collection; -import java.util.Collections; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -public class HttpClientDuplicateIDTests extends ESSingleNodeTestCase { 
- - private static final Logger logger = LogManager.getLogger(HttpClientDuplicateIDTests.class.getName()); - - private static final long MAX_ACTIONS = 10L; - - private static final long NUM_ACTIONS = 12345L; - - private TransportAddress httpAddress; - - @Before - public void fetchTransportAddress() { - NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); - httpAddress = nodeInfo.getHttp().getAddress().publishAddress(); - } - - @Override - protected Collection> getPlugins() { - return Collections.singletonList(Netty4Plugin.class); - } - - @Override - public Settings nodeSettings() { - return Settings.builder() - .put(super.nodeSettings()) - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) - .put(NetworkModule.HTTP_TYPE_DEFAULT_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) - .build(); - } - - private String findHttpAddress() { - return "http://" + httpAddress.address().getHostName() + ":" + httpAddress.address().getPort(); - } - - public void testDuplicateDocIDs() throws Exception { - final HttpClient client = ClientBuilder.builder() - //.put(ClientBuilder.MAX_CONCURRENT_REQUESTS, 2) // avoid EsRejectedExecutionException - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put("urls", findHttpAddress()) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(HttpClient.class); - try { - client.newIndex("test"); - for (int i = 0; i < NUM_ACTIONS; i++) { - client.index("test", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - client.refreshIndex("test"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test") - .setTypes("test") - .setQuery(QueryBuilders.matchAllQuery()); - long hits = 
searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); - logger.info("hits = {}", hits); - assertTrue(hits < NUM_ACTIONS); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - logger.info("numactions = {}, submitted = {}, succeeded= {}, failed = {}", NUM_ACTIONS, - client.getMetric().getSubmitted().getCount(), - client.getMetric().getSucceeded().getCount(), - client.getMetric().getFailed().getCount()); - assertEquals(NUM_ACTIONS, client.getMetric().getSubmitted().getCount()); - assertEquals(NUM_ACTIONS, client.getMetric().getSucceeded().getCount()); - } - } -} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientReplicaTests.java b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientReplicaTests.java deleted file mode 100644 index 32069be..0000000 --- a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientReplicaTests.java +++ /dev/null @@ -1,141 +0,0 @@ -package org.xbib.elasticsearch.client.http; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.indices.stats.CommonStats; -import org.elasticsearch.action.admin.indices.stats.IndexShardStats; -import org.elasticsearch.action.admin.indices.stats.IndexStats; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import 
org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.shard.IndexingStats; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.testframework.ESIntegTestCase; -import org.elasticsearch.transport.Netty4Plugin; -import org.junit.Before; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.util.Collection; -import java.util.Collections; -import java.util.Map; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) -public class HttpClientReplicaTests extends ESIntegTestCase { - - private static final Logger logger = LogManager.getLogger(HttpClientReplicaTests.class.getName()); - - private TransportAddress httpAddress; - - @Before - public void fetchTransportAddress() { - NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); - httpAddress = nodeInfo.getHttp().getAddress().publishAddress(); - } - - @Override - protected Collection> nodePlugins() { - return Collections.singletonList(Netty4Plugin.class); - } - - @Override - public Settings nodeSettings(int nodeNumber) { - return Settings.builder() - .put(super.nodeSettings(nodeNumber)) - .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) - .put(NetworkModule.HTTP_TYPE_DEFAULT_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) - .build(); - } - - private String findHttpAddress() { - return "http://" + 
httpAddress.address().getHostName() + ":" + httpAddress.address().getPort(); - } - - public void testReplicaLevel() throws Exception { - - Settings settingsTest1 = Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 2) - .build(); - - Settings settingsTest2 = Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 1) - .build(); - - final HttpClient client = ClientBuilder.builder() - .put("urls", findHttpAddress()) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(HttpClient.class); - - try { - client.newIndex("test1", settingsTest1, null) - .newIndex("test2", settingsTest2, null); - client.waitForCluster("GREEN", "30s"); - for (int i = 0; i < 1234; i++) { - client.index("test1", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - for (int i = 0; i < 1234; i++) { - client.index("test2", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("60s"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - logger.info("refreshing"); - client.refreshIndex("test1"); - client.refreshIndex("test2"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test1", "test2") - .setQuery(QueryBuilders.matchAllQuery()); - long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); - logger.info("query total hits={}", hits); - assertEquals(2468, hits); - IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(), - IndicesStatsAction.INSTANCE) - .all(); - IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet(); - for (Map.Entry m : response.getIndices().entrySet()) { - IndexStats indexStats = m.getValue(); - CommonStats commonStats = indexStats.getTotal(); - 
IndexingStats indexingStats = commonStats.getIndexing(); - IndexingStats.Stats stats = indexingStats.getTotal(); - logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount()); - for (Map.Entry me : indexStats.getIndexShards().entrySet()) { - IndexShardStats indexShardStats = me.getValue(); - CommonStats commonShardStats = indexShardStats.getTotal(); - logger.info("shard {} count = {}", me.getKey(), - commonShardStats.getIndexing().getTotal().getIndexCount()); - } - } - try { - client.deleteIndex("test1") - .deleteIndex("test2"); - } catch (Exception e) { - logger.error("delete index failed, ignored. Reason:", e); - } - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } - -} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientTests.java b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientTests.java deleted file mode 100644 index 6672e3c..0000000 --- a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientTests.java +++ /dev/null @@ -1,204 +0,0 @@ -package org.xbib.elasticsearch.client.http; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.testframework.ESSingleNodeTestCase; -import org.elasticsearch.transport.Netty4Plugin; -import org.junit.Before; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.util.Collection; -import 
java.util.Collections; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -public class HttpClientTests extends ESSingleNodeTestCase { - - private static final Logger logger = LogManager.getLogger(HttpClientTests.class.getName()); - - private static final Long MAX_ACTIONS = 10L; - - private static final Long NUM_ACTIONS = 1234L; - - private TransportAddress httpAddress; - - @Before - public void fetchTransportAddress() { - NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); - httpAddress = nodeInfo.getHttp().getAddress().publishAddress(); - } - - @Override - protected Collection> getPlugins() { - return Collections.singletonList(Netty4Plugin.class); - } - - @Override - public Settings nodeSettings() { - return Settings.builder() - .put(super.nodeSettings()) - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) - .put(NetworkModule.HTTP_TYPE_DEFAULT_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) - .build(); - } - - private String findHttpAddress() { - return "http://" + httpAddress.address().getHostName() + ":" + httpAddress.address().getPort(); - } - - public void testNewIndex() throws Exception { - final HttpClient client = ClientBuilder.builder() - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) - .put("urls", findHttpAddress()) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(HttpClient.class); - client.newIndex("test"); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - - /*public void testMapping() throws Exception { - final HttpClient client = ClientBuilder.builder() - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) - .put("urls", findHttpAddress()) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - 
.getClient(HttpClient.class); - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .startObject("test") - .startObject("properties") - .startObject("location") - .field("type", "geo_point") - .endObject() - .endObject() - .endObject() - .endObject(); - client.mapping("test", builder.string()); - client.newIndex("test"); - GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test"); - GetMappingsResponse getMappingsResponse = - client.client().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet(); - logger.info("mappings={}", getMappingsResponse.getMappings()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - - public void testSingleDoc() throws Exception { - final HttpClient client = ClientBuilder.builder() - .put("urls", findHttpAddress()) - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(30)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(HttpClient.class); - client.newIndex("test"); - client.index("test", "test", "1", false,"{ \"name\" : \"Hello World\"}"); // single doc ingest - client.flushIngest(); - client.waitForResponses(TimeValue.timeValueSeconds(30)); - assertEquals(1, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - - public void testRandomDocs() throws Exception { - long numactions = NUM_ACTIONS; - final HttpClient client = ClientBuilder.builder() - .put("urls", findHttpAddress()) - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(HttpClient.class); - try { - 
client.newIndex("test"); - for (int i = 0; i < NUM_ACTIONS; i++) { - client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses(TimeValue.timeValueSeconds(30)); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - logger.info("assuring {} == {}", numactions, client.getMetric().getSucceeded().getCount()); - assertEquals(numactions, client.getMetric().getSucceeded().getCount()); - assertFalse(client.hasThrowable()); - client.shutdown(); - } - } - - public void testThreadedRandomDocs() throws Exception { - int maxthreads = Runtime.getRuntime().availableProcessors(); - Long maxactions = MAX_ACTIONS; - final Long maxloop = NUM_ACTIONS; - logger.info("max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop); - final HttpClient client = ClientBuilder.builder() - .put("urls", findHttpAddress()) - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions) - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))// disable auto flush for this test - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(HttpClient.class); - try { - client.newIndex("test").startBulk("test", 30 * 1000, 1000); - ExecutorService executorService = Executors.newFixedThreadPool(maxthreads); - final CountDownLatch latch = new CountDownLatch(maxthreads); - for (int i = 0; i < maxthreads; i++) { - executorService.execute(() -> { - for (int i1 = 0; i1 < maxloop; i1++) { - client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - latch.countDown(); - }); - } - logger.info("waiting for max 30 seconds..."); - latch.await(30, TimeUnit.SECONDS); - logger.info("flush..."); - client.flushIngest(); - client.waitForResponses(TimeValue.timeValueSeconds(30)); - logger.info("got all responses, executor 
service shutdown..."); - executorService.shutdown(); - logger.info("executor service is shut down"); - client.stopBulk("test"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - logger.info("assuring {} == {}", maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); - assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.refreshIndex("test"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test") - .setQuery(QueryBuilders.matchAllQuery()) - .setSize(0); - assertEquals(maxthreads * maxloop, - searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); - client.shutdown(); - } - }*/ -} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientUpdateReplicaLevelTests.java b/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientUpdateReplicaLevelTests.java deleted file mode 100644 index d023203..0000000 --- a/http/src/test/java/org/xbib/elasticsearch/client/http/HttpClientUpdateReplicaLevelTests.java +++ /dev/null @@ -1,97 +0,0 @@ -package org.xbib.elasticsearch.client.http; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.plugins.Plugin; -import 
org.elasticsearch.testframework.ESIntegTestCase; -import org.elasticsearch.transport.Netty4Plugin; -import org.junit.Before; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.util.Collection; -import java.util.Collections; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) -public class HttpClientUpdateReplicaLevelTests extends ESIntegTestCase { - - private static final Logger logger = LogManager.getLogger(HttpClientUpdateReplicaLevelTests.class.getName()); - - private TransportAddress httpAddress; - - @Before - public void fetchTransportAddress() { - NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); - httpAddress = nodeInfo.getHttp().getAddress().publishAddress(); - } - - @Override - protected Collection> nodePlugins() { - return Collections.singletonList(Netty4Plugin.class); - } - - @Override - public Settings nodeSettings(int nodeNumber) { - return Settings.builder() - .put(super.nodeSettings(nodeNumber)) - .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) - .put(NetworkModule.HTTP_TYPE_DEFAULT_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) - .build(); - } - - private String findHttpAddress() { - return "http://" + httpAddress.address().getHostName() + ":" + httpAddress.address().getPort(); - } - - public void testUpdateReplicaLevel() throws Exception { - - int numberOfShards = 1; - int replicaLevel = 2; - - int shardsAfterReplica; - - Settings settings = Settings.builder() - .put("index.number_of_shards", numberOfShards) - .put("index.number_of_replicas", 0) - .build(); - - final HttpClient client = ClientBuilder.builder() - .put("urls", 
findHttpAddress()) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(client(), HttpClient.class); - - try { - client.newIndex("replicatest", settings, null); - client.waitForCluster("GREEN", "30s"); - for (int i = 0; i < 12345; i++) { - client.index("replicatest", "replicatest", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - shardsAfterReplica = client.updateReplicaLevel("replicatest", replicaLevel); - assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1)); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } - -} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/IndexCreationTest.java b/http/src/test/java/org/xbib/elasticsearch/client/http/IndexCreationTest.java deleted file mode 100644 index 436b7c3..0000000 --- a/http/src/test/java/org/xbib/elasticsearch/client/http/IndexCreationTest.java +++ /dev/null @@ -1,52 +0,0 @@ -package org.xbib.elasticsearch.client.http; - -import org.junit.Test; -import org.xbib.elasticsearch.client.ClientBuilder; - -import java.util.logging.ConsoleHandler; -import java.util.logging.Handler; -import java.util.logging.Level; -import java.util.logging.LogManager; -import java.util.logging.Logger; -import java.util.logging.SimpleFormatter; - -public class IndexCreationTest { - - private static final Logger logger = Logger.getLogger(IndexCreationTest.class.getName()); - - static { - //System.setProperty("io.netty.leakDetection.level", "paranoid"); - System.setProperty("io.netty.noKeySetOptimization", Boolean.toString(true)); - System.setProperty("log4j2.disable.jmx", Boolean.toString(true)); - - // enforce massive logging to the console - 
System.setProperty("java.util.logging.SimpleFormatter.format", - "%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS.%1$tL %4$-7s [%3$s] %5$s %6$s%n"); - LogManager.getLogManager().reset(); - Logger rootLogger = LogManager.getLogManager().getLogger(""); - Handler handler = new ConsoleHandler(); - handler.setFormatter(new SimpleFormatter()); - rootLogger.addHandler(handler); - rootLogger.setLevel(Level.ALL); - for (Handler h : rootLogger.getHandlers()) { - handler.setFormatter(new SimpleFormatter()); - h.setLevel(Level.ALL); - } - } - - @Test - public void testNewIndex() throws Exception { - HttpClient client = ClientBuilder.builder() - //.put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) - .put("urls", "http://localhost:9200") - //.setMetric(new SimpleBulkMetric()) - //.setControl(new SimpleBulkControl()) - .getClient(HttpClient.class); - try { - client.newIndex("demo"); - Thread.sleep(3000L); - } finally { - client.shutdown(); - } - } -} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/TestRunnerThreadsFilter.java b/http/src/test/java/org/xbib/elasticsearch/client/http/TestRunnerThreadsFilter.java deleted file mode 100644 index 15e845e..0000000 --- a/http/src/test/java/org/xbib/elasticsearch/client/http/TestRunnerThreadsFilter.java +++ /dev/null @@ -1,11 +0,0 @@ -package org.xbib.elasticsearch.client.http; - -import com.carrotsearch.randomizedtesting.ThreadFilter; - -public class TestRunnerThreadsFilter implements ThreadFilter { - - @Override - public boolean reject(Thread thread) { - return thread.getName().startsWith("ObjectCleanerThread"); - } -} diff --git a/http/src/test/java/org/xbib/elasticsearch/client/http/package-info.java b/http/src/test/java/org/xbib/elasticsearch/client/http/package-info.java deleted file mode 100644 index aea79de..0000000 --- a/http/src/test/java/org/xbib/elasticsearch/client/http/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** - * Classes for testing Elasticsearch node client extras. 
- */ -package org.xbib.elasticsearch.client.http; diff --git a/node/build.gradle b/node/build.gradle deleted file mode 100644 index 9f401d8..0000000 --- a/node/build.gradle +++ /dev/null @@ -1,65 +0,0 @@ -buildscript { - repositories { - jcenter() - maven { - url 'http://xbib.org/repository' - } - } - dependencies { - classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4" - } -} - -apply plugin: 'org.xbib.gradle.plugin.elasticsearch.build' - -configurations { - main - tests -} - -dependencies { - compile project(':common') - testCompile "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" - testRuntime "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" -} - -jar { - baseName "${rootProject.name}-node" -} - -/* -task testJar(type: Jar, dependsOn: testClasses) { - baseName = "${project.archivesBaseName}-tests" - from sourceSets.test.output -} -*/ - -artifacts { - main jar - tests testJar - archives sourcesJar, javadocJar -} - -test { - enabled = false - jvmArgs "-javaagent:" + configurations.alpnagent.asPath - systemProperty 'path.home', projectDir.absolutePath - testLogging { - showStandardStreams = true - exceptionFormat = 'full' - } -} - -randomizedTest { - enabled = false -} - - -esTest { - // test with the jars, not the classes, for security manager - // classpath = files(configurations.testRuntime) + configurations.main.artifacts.files + configurations.tests.artifacts.files - systemProperty 'tests.security.manager', 'true' - // maybe we like some extra security policy for our code - systemProperty 'tests.security.policy', '/extra-security.policy' -} -esTest.dependsOn jar, testJar diff --git a/node/config/checkstyle/checkstyle.xml b/node/config/checkstyle/checkstyle.xml deleted file mode 100644 index 52fe33c..0000000 --- a/node/config/checkstyle/checkstyle.xml +++ /dev/null @@ -1,323 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/node/src/docs/asciidoc/css/foundation.css b/node/src/docs/asciidoc/css/foundation.css deleted file mode 100644 index 27be611..0000000 --- a/node/src/docs/asciidoc/css/foundation.css +++ /dev/null @@ -1,684 +0,0 @@ -/*! normalize.css v2.1.2 | MIT License | git.io/normalize */ -/* ========================================================================== HTML5 display definitions ========================================================================== */ -/** Correct `block` display not defined in IE 8/9. */ -article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; } - -/** Correct `inline-block` display not defined in IE 8/9. */ -audio, canvas, video { display: inline-block; } - -/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */ -audio:not([controls]) { display: none; height: 0; } - -/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */ -[hidden], template { display: none; } - -script { display: none !important; } - -/* ========================================================================== Base ========================================================================== */ -/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */ -html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ } - -/** Remove default margin. 
*/ -body { margin: 0; } - -/* ========================================================================== Links ========================================================================== */ -/** Remove the gray background color from active links in IE 10. */ -a { background: transparent; } - -/** Address `outline` inconsistency between Chrome and other browsers. */ -a:focus { outline: thin dotted; } - -/** Improve readability when focused and also mouse hovered in all browsers. */ -a:active, a:hover { outline: 0; } - -/* ========================================================================== Typography ========================================================================== */ -/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */ -h1 { font-size: 2em; margin: 0.67em 0; } - -/** Address styling not present in IE 8/9, Safari 5, and Chrome. */ -abbr[title] { border-bottom: 1px dotted; } - -/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ -b, strong { font-weight: bold; } - -/** Address styling not present in Safari 5 and Chrome. */ -dfn { font-style: italic; } - -/** Address differences between Firefox and other browsers. */ -hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; } - -/** Address styling not present in IE 8/9. */ -mark { background: #ff0; color: #000; } - -/** Correct font family set oddly in Safari 5 and Chrome. */ -code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; } - -/** Improve readability of pre-formatted text in all browsers. */ -pre { white-space: pre-wrap; } - -/** Set consistent quote types. */ -q { quotes: "\201C" "\201D" "\2018" "\2019"; } - -/** Address inconsistent and variable font size in all browsers. */ -small { font-size: 80%; } - -/** Prevent `sub` and `sup` affecting `line-height` in all browsers. 
*/ -sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } - -sup { top: -0.5em; } - -sub { bottom: -0.25em; } - -/* ========================================================================== Embedded content ========================================================================== */ -/** Remove border when inside `a` element in IE 8/9. */ -img { border: 0; } - -/** Correct overflow displayed oddly in IE 9. */ -svg:not(:root) { overflow: hidden; } - -/* ========================================================================== Figures ========================================================================== */ -/** Address margin not present in IE 8/9 and Safari 5. */ -figure { margin: 0; } - -/* ========================================================================== Forms ========================================================================== */ -/** Define consistent border, margin, and padding. */ -fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } - -/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */ -legend { border: 0; /* 1 */ padding: 0; /* 2 */ } - -/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ -button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ } - -/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ -button, input { line-height: normal; } - -/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. 
*/ -button, select { text-transform: none; } - -/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */ -button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ } - -/** Re-set default cursor for disabled elements. */ -button[disabled], html input[disabled] { cursor: default; } - -/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ -input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } - -/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */ -input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; } - -/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ -input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } - -/** Remove inner padding and border in Firefox 4+. */ -button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } - -/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */ -textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ } - -/* ========================================================================== Tables ========================================================================== */ -/** Remove most spacing between table cells. 
*/ -table { border-collapse: collapse; border-spacing: 0; } - -meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; } - -meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; } - -meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; } - -*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } - -html, body { font-size: 100%; } - -body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; } - -a:hover { cursor: pointer; } - -img, object, embed { max-width: 100%; height: auto; } - -object, embed { height: 100%; } - -img { -ms-interpolation-mode: bicubic; } - -#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; } - -.left { float: left !important; } - -.right { float: right !important; } - -.text-left { text-align: left !important; } - -.text-right { text-align: right !important; } - -.text-center { text-align: center !important; } - -.text-justify { text-align: justify !important; } - -.hide { display: none; } - -.antialiased { -webkit-font-smoothing: antialiased; } - -img { display: inline-block; vertical-align: middle; } - -textarea { height: auto; min-height: 50px; } - -select { width: 100%; } - -object, svg { display: inline-block; vertical-align: middle; } - -.center { margin-left: auto; margin-right: auto; } - -.spread { width: 100%; } - -p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; } - -.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, 
.stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; } - -/* Typography resets */ -div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; } - -/* Default Link Styles */ -a { color: #2ba6cb; text-decoration: none; line-height: inherit; } -a:hover, a:focus { color: #2795b6; } -a img { border: none; } - -/* Default paragraph styles */ -p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } -p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } - -/* Default header styles */ -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; } -h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; } - -h1 { font-size: 2.125em; } - -h2 { font-size: 1.6875em; } - -h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.375em; } - -h4 { font-size: 1.125em; } - -h5 { font-size: 1.125em; } - -h6 { font-size: 1em; } - -hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; } - -/* Helpful Typography Defaults */ -em, i { font-style: italic; line-height: inherit; } - -strong, b { font-weight: bold; line-height: inherit; } - -small { font-size: 60%; line-height: inherit; } - -code { 
font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; } - -/* Lists */ -ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } - -ul, ol { margin-left: 1.5em; } -ul.no-bullet, ol.no-bullet { margin-left: 1.5em; } - -/* Unordered Lists */ -ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ } -ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; } -ul.square { list-style-type: square; } -ul.circle { list-style-type: circle; } -ul.disc { list-style-type: disc; } -ul.no-bullet { list-style: none; } - -/* Ordered Lists */ -ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } - -/* Definition Lists */ -dl dt { margin-bottom: 0.3125em; font-weight: bold; } -dl dd { margin-bottom: 1.25em; } - -/* Abbreviations */ -abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; } - -abbr { text-transform: none; } - -/* Blockquotes */ -blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; } -blockquote cite { display: block; font-size: 0.8125em; color: #555555; } -blockquote cite:before { content: "\2014 \0020"; } -blockquote cite a, blockquote cite a:visited { color: #555555; } - -blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; } - -/* Microformats */ -.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; } -.vcard li { margin: 0; display: block; } -.vcard .fn { font-weight: bold; font-size: 0.9375em; } - -.vevent .summary { font-weight: bold; } -.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; } - -@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } - h1 { font-size: 
2.75em; } - h2 { font-size: 2.3125em; } - h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } - h4 { font-size: 1.4375em; } } -/* Tables */ -table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; } -table thead, table tfoot { background: whitesmoke; font-weight: bold; } -table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; } -table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; } -table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; } -table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; } - -body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; } - -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } - -.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; } -.clearfix:after, .float-group:after { clear: both; } - -*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; } -*:not(pre) > code.nobreak { word-wrap: normal; } -*:not(pre) > code.nowrap { white-space: nowrap; } - -pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; } - -em em { font-style: normal; } - -strong strong { font-weight: normal; } - -.keyseq { color: #555555; } - -kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; 
margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } - -.keyseq kbd:first-child { margin-left: 0; } - -.keyseq kbd:last-child { margin-right: 0; } - -.menuseq, .menu { color: #090909; } - -b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; } - -b.button:before { content: "["; padding: 0 3px 0 2px; } - -b.button:after { content: "]"; padding: 0 2px 0 3px; } - -#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } -#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; } -#header:after, #content:after, #footnotes:after, #footer:after { clear: both; } - -#content { margin-top: 1.25em; } - -#content:before { content: none; } - -#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; } -#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; } -#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; } -#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; } -#header .details span:first-child { margin-left: -0.125em; } -#header .details span.email a { color: #6f6f6f; } -#header .details br { display: none; } -#header .details br + span:before { content: "\00a0\2013\00a0"; } -#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; } -#header .details br + span#revremark:before { content: "\00a0|\00a0"; } -#header 
#revnumber { text-transform: capitalize; } -#header #revnumber:after { content: "\00a0"; } - -#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } - -#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; } -#toc > ul { margin-left: 0.125em; } -#toc ul.sectlevel0 > li > a { font-style: italic; } -#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } -#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; } -#toc li { line-height: 1.3334; margin-top: 0.3334em; } -#toc a { text-decoration: none; } -#toc a:active { text-decoration: underline; } - -#toctitle { color: #6f6f6f; font-size: 1.2em; } - -@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } - body.toc2 { padding-left: 15em; padding-right: 0; } - #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } - #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } - #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } - #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } - #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } - body.toc2.toc-right { padding-left: 0; padding-right: 15em; } - body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } } -@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } - #toc.toc2 { width: 20em; } - #toc.toc2 #toctitle { font-size: 1.375em; } - #toc.toc2 > ul { font-size: 0.95em; } - #toc.toc2 ul ul { padding-left: 1.25em; } - body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } 
-#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } -#content #toc > :first-child { margin-top: 0; } -#content #toc > :last-child { margin-bottom: 0; } - -#footer { max-width: 100%; background-color: #222222; padding: 1.25em; } - -#footer-text { color: #dddddd; line-height: 1.44; } - -.sect1 { padding-bottom: 0.625em; } - -@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } } -.sect1 + .sect1 { border-top: 1px solid #dddddd; } - -#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; } -#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } -#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } -#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; } -#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, 
.sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; } - -.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } - -.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; } - -table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; } - -.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; } - -table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; } - -.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } -.admonitionblock > table td.icon { text-align: center; width: 80px; } -.admonitionblock > table td.icon img { max-width: initial; } -.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; } -.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; } -.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } - -.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; } -.exampleblock > .content > :first-child { margin-top: 0; } -.exampleblock > .content > :last-child { margin-bottom: 0; } - -.sidebarblock { border-style: solid; border-width: 1px; border-color: 
#d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } -.sidebarblock > :first-child { margin-top: 0; } -.sidebarblock > :last-child { margin-bottom: 0; } -.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; } - -.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } - -.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; } -.sidebarblock .literalblock pre, .sidebarblock .listingblock pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; } - -.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; } -.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; } -@media only screen and (min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } } -@media only screen and (min-width: 1280px) { .literalblock pre, 
.literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } } - -.literalblock.output pre { color: #eeeeee; background-color: black; } - -.listingblock pre.highlightjs { padding: 0; } -.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; } - -.listingblock > .content { position: relative; } - -.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; } - -.listingblock:hover code[data-lang]:before { display: block; } - -.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; } - -.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; } - -table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } - -table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; } - -table.pyhltable td.code { padding-left: .75em; padding-right: 0; } - -pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; } - -pre.pygments .lineno { display: inline-block; margin-right: .25em; } - -table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; } - -.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } -.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; } -.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } -.quoteblock blockquote { margin: 0; padding: 0; border: 0; } -.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px 
rgba(0, 0, 0, 0.1); } -.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; } -.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; } -.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; } -.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; } -.quoteblock .quoteblock blockquote:before { display: none; } - -.verseblock { margin: 0 1em 1.25em 1em; } -.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; } -.verseblock pre strong { font-weight: 400; } -.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } - -.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; } -.quoteblock .attribution br, .verseblock .attribution br { display: none; } -.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; } - -.quoteblock.abstract { margin: 0 0 1.25em 0; display: block; } -.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; } -.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; } - -table.tableblock { max-width: 100%; border-collapse: separate; } -table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; } - -table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; } - -table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; } - -table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; } - -table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; } - -table.grid-all * > tr > 
.tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; } - -table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; } - -table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; } - -table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; } - -table.frame-all { border-width: 1px; } - -table.frame-sides { border-width: 0 1px; } - -table.frame-topbot { border-width: 1px 0; } - -th.halign-left, td.halign-left { text-align: left; } - -th.halign-right, td.halign-right { text-align: right; } - -th.halign-center, td.halign-center { text-align: center; } - -th.valign-top, td.valign-top { vertical-align: top; } - -th.valign-bottom, td.valign-bottom { vertical-align: bottom; } - -th.valign-middle, td.valign-middle { vertical-align: middle; } - -table thead th, table tfoot th { font-weight: bold; } - -tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; } - -tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; } - -p.tableblock > code:only-child { background: none; padding: 0; } - -p.tableblock { font-size: 1em; } - -td > div.verse { white-space: pre; } - -ol { margin-left: 1.75em; } - -ul li ol { margin-left: 1.5em; } - -dl dd { margin-left: 1.125em; } - -dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } - -ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } - -ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; } - -ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; 
} - -ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; } - -ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; } - -ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; } -ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; } -ul.inline > li > * { display: block; } - -.unstyled dl dt { font-weight: normal; font-style: normal; } - -ol.arabic { list-style-type: decimal; } - -ol.decimal { list-style-type: decimal-leading-zero; } - -ol.loweralpha { list-style-type: lower-alpha; } - -ol.upperalpha { list-style-type: upper-alpha; } - -ol.lowerroman { list-style-type: lower-roman; } - -ol.upperroman { list-style-type: upper-roman; } - -ol.lowergreek { list-style-type: lower-greek; } - -.hdlist > table, .colist > table { border: 0; background: none; } -.hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; } - -td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } - -td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } - -.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } - -.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; } -.colist > table tr > td:first-of-type img { max-width: initial; } -.colist > table tr > td:last-of-type { padding: 0.25em 0; } - -.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; } - -.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; } -.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; } -.imageblock > .title { margin-bottom: 0; } -.imageblock.thumb, .imageblock.th { border-width: 6px; } -.imageblock.thumb > .title, 
.imageblock.th > .title { padding: 0 0.125em; } - -.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } -.image.left { margin-right: 0.625em; } -.image.right { margin-left: 0.625em; } - -a.image { text-decoration: none; display: inline-block; } -a.image object { pointer-events: none; } - -sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } -sup.footnote a, sup.footnoteref a { text-decoration: none; } -sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } - -#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } -#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; } -#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; } -#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; } -#footnotes .footnote:last-of-type { margin-bottom: 0; } -#content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } - -.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; } -.gist .file-data > table td.line-data { width: 99%; } - -div.unbreakable { page-break-inside: avoid; } - -.big { font-size: larger; } - -.small { font-size: smaller; } - -.underline { text-decoration: underline; } - -.overline { text-decoration: overline; } - -.line-through { text-decoration: line-through; } - -.aqua { color: #00bfbf; } - -.aqua-background { background-color: #00fafa; } - -.black { color: black; } - -.black-background { background-color: black; } - -.blue { color: #0000bf; } - -.blue-background { background-color: #0000fa; } - -.fuchsia { color: #bf00bf; } - -.fuchsia-background { background-color: #fa00fa; } - -.gray { color: #606060; } - -.gray-background { background-color: #7d7d7d; } - -.green { color: #006000; } 
- -.green-background { background-color: #007d00; } - -.lime { color: #00bf00; } - -.lime-background { background-color: #00fa00; } - -.maroon { color: #600000; } - -.maroon-background { background-color: #7d0000; } - -.navy { color: #000060; } - -.navy-background { background-color: #00007d; } - -.olive { color: #606000; } - -.olive-background { background-color: #7d7d00; } - -.purple { color: #600060; } - -.purple-background { background-color: #7d007d; } - -.red { color: #bf0000; } - -.red-background { background-color: #fa0000; } - -.silver { color: #909090; } - -.silver-background { background-color: #bcbcbc; } - -.teal { color: #006060; } - -.teal-background { background-color: #007d7d; } - -.white { color: #bfbfbf; } - -.white-background { background-color: #fafafa; } - -.yellow { color: #bfbf00; } - -.yellow-background { background-color: #fafa00; } - -span.icon > .fa { cursor: default; } - -.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } -.admonitionblock td.icon .icon-note:before { content: "\f05a"; color: #207c98; } -.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } -.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; } -.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; } -.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; } - -.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } -.conum[data-value] * { color: #fff !important; } -.conum[data-value] + b { display: none; } -.conum[data-value]:after { content: attr(data-value); } -pre 
.conum[data-value] { position: relative; top: -0.125em; } - -b.conum * { color: inherit !important; } - -.conum:not([data-value]):empty { display: none; } - -.literalblock pre, .listingblock pre { background: #eeeeee; } diff --git a/node/src/docs/asciidoclet/overview.adoc b/node/src/docs/asciidoclet/overview.adoc deleted file mode 100644 index 7947331..0000000 --- a/node/src/docs/asciidoclet/overview.adoc +++ /dev/null @@ -1,4 +0,0 @@ -= Elasticsearch Java client -Jörg Prante -Version 5.4.0.0 - diff --git a/node/src/main/java/org/xbib/elasticsearch/client/node/NodeBulkClient.java b/node/src/main/java/org/xbib/elasticsearch/client/node/NodeBulkClient.java deleted file mode 100644 index 103c1be..0000000 --- a/node/src/main/java/org/xbib/elasticsearch/client/node/NodeBulkClient.java +++ /dev/null @@ -1,79 +0,0 @@ -package org.xbib.elasticsearch.client.node; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.Node; -import org.elasticsearch.node.NodeValidationException; -import org.elasticsearch.plugins.Plugin; -import org.xbib.elasticsearch.client.AbstractClient; -import org.xbib.elasticsearch.client.BulkControl; -import org.xbib.elasticsearch.client.BulkMetric; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; - -/** - * - */ -public class NodeBulkClient extends AbstractClient { - - private static final Logger logger = LogManager.getLogger(NodeBulkClient.class.getName()); - - private Node node; - - public NodeBulkClient init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control) { - super.init(client, settings, metric, control); - return this; - } - - @Override - protected ElasticsearchClient createClient(Settings settings) throws IOException { - if (settings != null) { - String 
version = System.getProperty("os.name") - + " " + System.getProperty("java.vm.name") - + " " + System.getProperty("java.vm.vendor") - + " " + System.getProperty("java.runtime.version") - + " " + System.getProperty("java.vm.version"); - Settings effectiveSettings = Settings.builder().put(settings) - .put("node.client", true) - .put("node.master", false) - .put("node.data", false) - .build(); - logger.info("creating node client on {} with effective settings {}", - version, effectiveSettings.toString()); - Collection> plugins = Collections.emptyList(); - this.node = new BulkNode(new Environment(effectiveSettings, null), plugins); - try { - node.start(); - } catch (NodeValidationException e) { - throw new IOException(e); - } - return node.client(); - } - return null; - } - - @Override - public synchronized void shutdown() throws IOException { - super.shutdown(); - try { - if (node != null) { - logger.debug("closing node..."); - node.close(); - } - } catch (Exception e) { - logger.error(e.getMessage(), e); - } - } - - private static class BulkNode extends Node { - - BulkNode(Environment env, Collection> classpathPlugins) { - super(env, classpathPlugins); - } - } -} diff --git a/node/src/main/java/org/xbib/elasticsearch/client/node/package-info.java b/node/src/main/java/org/xbib/elasticsearch/client/node/package-info.java deleted file mode 100644 index 08795e8..0000000 --- a/node/src/main/java/org/xbib/elasticsearch/client/node/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** - * Classes for Elasticsearch node client extras. 
- */ -package org.xbib.elasticsearch.client.node; diff --git a/node/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods b/node/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods deleted file mode 100644 index 631ddb7..0000000 --- a/node/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods +++ /dev/null @@ -1 +0,0 @@ -org.xbib.elasticsearch.client.node.NodeBulkClient \ No newline at end of file diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientDuplicateIDTests.java b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientDuplicateIDTests.java deleted file mode 100644 index aceb7ac..0000000 --- a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientDuplicateIDTests.java +++ /dev/null @@ -1,63 +0,0 @@ -package org.xbib.elasticsearch.client.node; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.testframework.ESSingleNodeTestCase; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -public class NodeBulkClientDuplicateIDTests extends ESSingleNodeTestCase { - - private static final Logger logger = LogManager.getLogger(NodeBulkClientDuplicateIDTests.class.getName()); - - private static final long MAX_ACTIONS = 100L; - - private static final long NUM_ACTIONS = 12345L; - - public void testDuplicateDocIDs() throws Exception { - final 
NodeBulkClient client = ClientBuilder.builder() - //.put(ClientBuilder.MAX_CONCURRENT_REQUESTS, 2) // avoid EsRejectedExecutionException - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(client(), NodeBulkClient.class); - try { - client.newIndex("test"); - for (int i = 0; i < NUM_ACTIONS; i++) { - client.index("test", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - client.refreshIndex("test"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test") - .setTypes("test") - .setQuery(matchAllQuery()); - long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); - logger.info("hits = {}", hits); - assertTrue(hits < NUM_ACTIONS); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - logger.info("numactions = {}, submitted = {}, succeeded= {}, failed = {}", NUM_ACTIONS, - client.getMetric().getSubmitted().getCount(), - client.getMetric().getSucceeded().getCount(), - client.getMetric().getFailed().getCount()); - assertEquals(NUM_ACTIONS, client.getMetric().getSubmitted().getCount()); - assertEquals(NUM_ACTIONS, client.getMetric().getSucceeded().getCount()); - } - } -} diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientIndexAliasTests.java b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientIndexAliasTests.java deleted file mode 100644 index 603c34a..0000000 --- a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientIndexAliasTests.java +++ /dev/null @@ -1,71 +0,0 @@ -package org.xbib.elasticsearch.client.node; - -import 
com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.testframework.ESSingleNodeTestCase; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.IndexAliasAdder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.util.Arrays; -import java.util.List; -import java.util.Map; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -public class NodeBulkClientIndexAliasTests extends ESSingleNodeTestCase { - - private static final Logger logger = LogManager.getLogger(NodeBulkClientIndexAliasTests.class.getName()); - - public void testIndexAlias() throws Exception { - final NodeBulkClient client = ClientBuilder.builder() - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(client(), NodeBulkClient.class); - try { - client.newIndex("test1234"); - for (int i = 0; i < 1; i++) { - client.index("test1234", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.refreshIndex("test1234"); - - List simpleAliases = Arrays.asList("a", "b", "c"); - client.switchAliases("test", "test1234", simpleAliases); - - client.newIndex("test5678"); - for (int i = 0; i < 1; i++) { - client.index("test5678", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.refreshIndex("test5678"); - - simpleAliases = Arrays.asList("d", "e", "f"); - client.switchAliases("test", "test5678", simpleAliases, new IndexAliasAdder() { - @Override - public void 
addIndexAlias(IndicesAliasesRequestBuilder builder, String index, String alias) { - builder.addAlias(index, alias, QueryBuilders.termQuery("my_key", alias)); - } - }); - Map aliases = client.getIndexFilters("test5678"); - logger.info("aliases of index test5678 = {}", aliases); - - aliases = client.getAliasFilters("test"); - logger.info("aliases of alias test = {}", aliases); - - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.waitForResponses("30s"); - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } -} diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientReplicaTests.java b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientReplicaTests.java deleted file mode 100644 index 3f167d2..0000000 --- a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientReplicaTests.java +++ /dev/null @@ -1,103 +0,0 @@ -package org.xbib.elasticsearch.client.node; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.indices.stats.CommonStats; -import org.elasticsearch.action.admin.indices.stats.IndexShardStats; -import org.elasticsearch.action.admin.indices.stats.IndexStats; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.query.QueryBuilders; -import 
org.elasticsearch.index.shard.IndexingStats; -import org.elasticsearch.testframework.ESIntegTestCase; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.util.Map; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) -public class NodeBulkClientReplicaTests extends ESIntegTestCase { - - private static final Logger logger = LogManager.getLogger(NodeBulkClientReplicaTests.class.getName()); - - public void testReplicaLevel() throws Exception { - - Settings settingsTest1 = Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 2) - .build(); - - Settings settingsTest2 = Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 1) - .build(); - - final NodeBulkClient client = ClientBuilder.builder() - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(client(), NodeBulkClient.class); - - try { - client.newIndex("test1", settingsTest1, null) - .newIndex("test2", settingsTest2, null); - client.waitForCluster("GREEN", "30s"); - for (int i = 0; i < 1234; i++) { - client.index("test1", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - for (int i = 0; i < 1234; i++) { - client.index("test2", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("60s"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - logger.info("refreshing"); - client.refreshIndex("test1"); - client.refreshIndex("test2"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test1", "test2") - .setQuery(QueryBuilders.matchAllQuery()); - 
long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); - logger.info("query total hits={}", hits); - assertEquals(2468, hits); - IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(), - IndicesStatsAction.INSTANCE) - .all(); - IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet(); - for (Map.Entry m : response.getIndices().entrySet()) { - IndexStats indexStats = m.getValue(); - CommonStats commonStats = indexStats.getTotal(); - IndexingStats indexingStats = commonStats.getIndexing(); - IndexingStats.Stats stats = indexingStats.getTotal(); - logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount()); - for (Map.Entry me : indexStats.getIndexShards().entrySet()) { - IndexShardStats indexShardStats = me.getValue(); - CommonStats commonShardStats = indexShardStats.getTotal(); - logger.info("shard {} count = {}", me.getKey(), - commonShardStats.getIndexing().getTotal().getIndexCount()); - } - } - try { - client.deleteIndex("test1") - .deleteIndex("test2"); - } catch (Exception e) { - logger.error("delete index failed, ignored. 
Reason:", e); - } - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } - -} diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientTests.java b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientTests.java deleted file mode 100644 index 801016c..0000000 --- a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientTests.java +++ /dev/null @@ -1,177 +0,0 @@ -package org.xbib.elasticsearch.client.node; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.testframework.ESSingleNodeTestCase; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -public class NodeBulkClientTests extends ESSingleNodeTestCase { - - private static final Logger logger = 
LogManager.getLogger(NodeBulkClientTests.class.getName()); - - private static final Long MAX_ACTIONS = 10L; - - private static final Long NUM_ACTIONS = 1234L; - - public void testNewIndexNodeClient() throws Exception { - final NodeBulkClient client = ClientBuilder.builder() - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(client(), NodeBulkClient.class); - client.newIndex("test"); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - - public void testBulkNodeClientMapping() throws Exception { - final NodeBulkClient client = ClientBuilder.builder() - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(client(), NodeBulkClient.class); - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .startObject("test") - .startObject("properties") - .startObject("location") - .field("type", "geo_point") - .endObject() - .endObject() - .endObject() - .endObject(); - client.mapping("test", Strings.toString(builder)); - client.newIndex("test"); - GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test"); - GetMappingsResponse getMappingsResponse = - client.client().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet(); - logger.info("mappings={}", getMappingsResponse.getMappings()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - - public void testBulkNodeClientSingleDoc() throws Exception { - final NodeBulkClient client = ClientBuilder.builder() - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(30)) - .setMetric(new SimpleBulkMetric()) - 
.setControl(new SimpleBulkControl()) - .getClient(client(), NodeBulkClient.class); - client.newIndex("test"); - client.index("test", "test", "1", false, "{ \"name\" : \"Hello World\"}"); // single doc ingest - client.flushIngest(); - client.waitForResponses("30s"); - assertEquals(1, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - - public void testBulkNodeClientRandomDocs() throws Exception { - long numactions = NUM_ACTIONS; - final NodeBulkClient client = ClientBuilder.builder() - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(client(), NodeBulkClient.class); - try { - client.newIndex("test"); - for (int i = 0; i < NUM_ACTIONS; i++) { - client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - logger.info("assuring {} == {}", numactions, client.getMetric().getSucceeded().getCount()); - assertEquals(numactions, client.getMetric().getSucceeded().getCount()); - assertFalse(client.hasThrowable()); - client.shutdown(); - } - } - - public void testBulkNodeClientThreadedRandomDocs() throws Exception { - int maxthreads = Runtime.getRuntime().availableProcessors(); - Long maxactions = MAX_ACTIONS; - final Long maxloop = NUM_ACTIONS; - logger.info("NodeClient max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop); - final NodeBulkClient client = ClientBuilder.builder() - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions) - .put(ClientBuilder.FLUSH_INTERVAL, 
TimeValue.timeValueSeconds(60))// disable auto flush for this test - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(client(), NodeBulkClient.class); - try { - client.newIndex("test").startBulk("test", 30 * 1000, 1000); - ExecutorService executorService = Executors.newFixedThreadPool(maxthreads); - final CountDownLatch latch = new CountDownLatch(maxthreads); - for (int i = 0; i < maxthreads; i++) { - executorService.execute(() -> { - for (int i1 = 0; i1 < maxloop; i1++) { - client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - latch.countDown(); - }); - } - logger.info("waiting for max 30 seconds..."); - latch.await(30, TimeUnit.SECONDS); - logger.info("flush..."); - client.flushIngest(); - client.waitForResponses("30s"); - logger.info("got all responses, executor service shutdown..."); - executorService.shutdown(); - logger.info("executor service is shut down"); - client.stopBulk("test"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - logger.info("assuring {} == {}", maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); - assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.refreshIndex("test"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test") - .setQuery(QueryBuilders.matchAllQuery()) - .setSize(0); - assertEquals(maxthreads * maxloop, - searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); - client.shutdown(); - } - } -} diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientUpdateReplicaLevelTests.java b/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientUpdateReplicaLevelTests.java deleted file mode 100644 
index 58c1f4e..0000000 --- a/node/src/test/java/org/xbib/elasticsearch/client/node/NodeBulkClientUpdateReplicaLevelTests.java +++ /dev/null @@ -1,56 +0,0 @@ -package org.xbib.elasticsearch.client.node; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.testframework.ESIntegTestCase; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) -public class NodeBulkClientUpdateReplicaLevelTests extends ESIntegTestCase { - - private static final Logger logger = LogManager.getLogger(NodeBulkClientUpdateReplicaLevelTests.class.getName()); - - public void testUpdateReplicaLevel() throws Exception { - - int numberOfShards = 1; - int replicaLevel = 2; - - int shardsAfterReplica; - - Settings settings = Settings.builder() - .put("index.number_of_shards", numberOfShards) - .put("index.number_of_replicas", 0) - .build(); - - final NodeBulkClient client = ClientBuilder.builder() - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(client(), NodeBulkClient.class); - - try { - client.newIndex("replicatest", settings, null); - client.waitForCluster("GREEN", "30s"); - for (int i = 0; i < 12345; i++) { - client.index("replicatest", "replicatest", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - shardsAfterReplica = client.updateReplicaLevel("replicatest", replicaLevel); - assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1)); 
- } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } -} diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/TestRunnerThreadsFilter.java b/node/src/test/java/org/xbib/elasticsearch/client/node/TestRunnerThreadsFilter.java deleted file mode 100644 index 6d0252d..0000000 --- a/node/src/test/java/org/xbib/elasticsearch/client/node/TestRunnerThreadsFilter.java +++ /dev/null @@ -1,11 +0,0 @@ -package org.xbib.elasticsearch.client.node; - -import com.carrotsearch.randomizedtesting.ThreadFilter; - -public class TestRunnerThreadsFilter implements ThreadFilter { - - @Override - public boolean reject(Thread thread) { - return thread.getName().startsWith("ObjectCleanerThread"); - } -} diff --git a/node/src/test/java/org/xbib/elasticsearch/client/node/package-info.java b/node/src/test/java/org/xbib/elasticsearch/client/node/package-info.java deleted file mode 100644 index f0ef244..0000000 --- a/node/src/test/java/org/xbib/elasticsearch/client/node/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** - * Classes for testing Elasticsearch node client extras. 
- */ -package org.xbib.elasticsearch.client.node; diff --git a/node/src/test/resources/org/xbib/elasticsearch/client/node/settings.json b/node/src/test/resources/org/xbib/elasticsearch/client/node/settings.json deleted file mode 100644 index 86f5118..0000000 --- a/node/src/test/resources/org/xbib/elasticsearch/client/node/settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "index.analysis.analyzer.default.type" : "keyword" -} diff --git a/settings.gradle b/settings.gradle index cc4c55d..57d6828 100644 --- a/settings.gradle +++ b/settings.gradle @@ -1,5 +1,5 @@ -include 'api' -include 'common' -include 'node' -include 'transport' -include 'http' +include 'elx-api' +include 'elx-common' +include 'elx-node' +include 'elx-transport' +include 'elx-http' diff --git a/transport/build.gradle b/transport/build.gradle deleted file mode 100644 index 2d2eb74..0000000 --- a/transport/build.gradle +++ /dev/null @@ -1,66 +0,0 @@ -buildscript { - repositories { - jcenter() - maven { - url 'http://xbib.org/repository' - } - } - dependencies { - classpath "org.xbib.elasticsearch:gradle-plugin-elasticsearch-build:6.3.2.4" - } -} - -apply plugin: 'org.xbib.gradle.plugin.elasticsearch.build' - -configurations { - main - tests -} - -dependencies { - compile project(':common') - compile "org.xbib.elasticsearch:transport-netty4:${project.property('elasticsearch-server.version')}" - testCompile "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" - testRuntime "org.xbib.elasticsearch:elasticsearch-test-framework:${project.property('elasticsearch-devkit.version')}" -} - -jar { - baseName "${rootProject.name}-transport" -} - -/* -task testJar(type: Jar, dependsOn: testClasses) { - baseName = "${project.archivesBaseName}-tests" - from sourceSets.test.output -} -*/ - -artifacts { - main jar - tests testJar - archives sourcesJar, javadocJar -} - -esTest { - enabled = true - // test with the jars, not the classes, for security manager - 
//classpath = files(configurations.testRuntime) + configurations.main.artifacts.files + configurations.tests.artifacts.files - systemProperty 'tests.security.manager', 'true' - // maybe we like some extra security policy for our code - systemProperty 'tests.security.policy', '/extra-security.policy' -} -esTest.dependsOn jar, testJar - -randomizedTest { - enabled = false -} - -test { - enabled = false - //jvmArgs "-javaagent:" + configurations.alpnagent.asPath - systemProperty 'path.home', projectDir.absolutePath - testLogging { - showStandardStreams = true - exceptionFormat = 'full' - } -} diff --git a/transport/config/checkstyle/checkstyle.xml b/transport/config/checkstyle/checkstyle.xml deleted file mode 100644 index 52fe33c..0000000 --- a/transport/config/checkstyle/checkstyle.xml +++ /dev/null @@ -1,323 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/transport/src/docs/asciidoc/css/foundation.css b/transport/src/docs/asciidoc/css/foundation.css deleted file mode 100644 index 27be611..0000000 --- a/transport/src/docs/asciidoc/css/foundation.css +++ /dev/null @@ -1,684 +0,0 @@ -/*! normalize.css v2.1.2 | MIT License | git.io/normalize */ -/* ========================================================================== HTML5 display definitions ========================================================================== */ -/** Correct `block` display not defined in IE 8/9. 
*/ -article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { display: block; } - -/** Correct `inline-block` display not defined in IE 8/9. */ -audio, canvas, video { display: inline-block; } - -/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */ -audio:not([controls]) { display: none; height: 0; } - -/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */ -[hidden], template { display: none; } - -script { display: none !important; } - -/* ========================================================================== Base ========================================================================== */ -/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */ -html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ } - -/** Remove default margin. */ -body { margin: 0; } - -/* ========================================================================== Links ========================================================================== */ -/** Remove the gray background color from active links in IE 10. */ -a { background: transparent; } - -/** Address `outline` inconsistency between Chrome and other browsers. */ -a:focus { outline: thin dotted; } - -/** Improve readability when focused and also mouse hovered in all browsers. */ -a:active, a:hover { outline: 0; } - -/* ========================================================================== Typography ========================================================================== */ -/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */ -h1 { font-size: 2em; margin: 0.67em 0; } - -/** Address styling not present in IE 8/9, Safari 5, and Chrome. 
*/ -abbr[title] { border-bottom: 1px dotted; } - -/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ -b, strong { font-weight: bold; } - -/** Address styling not present in Safari 5 and Chrome. */ -dfn { font-style: italic; } - -/** Address differences between Firefox and other browsers. */ -hr { -moz-box-sizing: content-box; box-sizing: content-box; height: 0; } - -/** Address styling not present in IE 8/9. */ -mark { background: #ff0; color: #000; } - -/** Correct font family set oddly in Safari 5 and Chrome. */ -code, kbd, pre, samp { font-family: monospace, serif; font-size: 1em; } - -/** Improve readability of pre-formatted text in all browsers. */ -pre { white-space: pre-wrap; } - -/** Set consistent quote types. */ -q { quotes: "\201C" "\201D" "\2018" "\2019"; } - -/** Address inconsistent and variable font size in all browsers. */ -small { font-size: 80%; } - -/** Prevent `sub` and `sup` affecting `line-height` in all browsers. */ -sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } - -sup { top: -0.5em; } - -sub { bottom: -0.25em; } - -/* ========================================================================== Embedded content ========================================================================== */ -/** Remove border when inside `a` element in IE 8/9. */ -img { border: 0; } - -/** Correct overflow displayed oddly in IE 9. */ -svg:not(:root) { overflow: hidden; } - -/* ========================================================================== Figures ========================================================================== */ -/** Address margin not present in IE 8/9 and Safari 5. */ -figure { margin: 0; } - -/* ========================================================================== Forms ========================================================================== */ -/** Define consistent border, margin, and padding. 
*/ -fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } - -/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */ -legend { border: 0; /* 1 */ padding: 0; /* 2 */ } - -/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ -button, input, select, textarea { font-family: inherit; /* 1 */ font-size: 100%; /* 2 */ margin: 0; /* 3 */ } - -/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ -button, input { line-height: normal; } - -/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */ -button, select { text-transform: none; } - -/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. */ -button, html input[type="button"], input[type="reset"], input[type="submit"] { -webkit-appearance: button; /* 2 */ cursor: pointer; /* 3 */ } - -/** Re-set default cursor for disabled elements. */ -button[disabled], html input[disabled] { cursor: default; } - -/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ -input[type="checkbox"], input[type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } - -/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). 
*/ -input[type="search"] { -webkit-appearance: textfield; /* 1 */ -moz-box-sizing: content-box; -webkit-box-sizing: content-box; /* 2 */ box-sizing: content-box; } - -/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ -input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; } - -/** Remove inner padding and border in Firefox 4+. */ -button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } - -/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */ -textarea { overflow: auto; /* 1 */ vertical-align: top; /* 2 */ } - -/* ========================================================================== Tables ========================================================================== */ -/** Remove most spacing between table cells. */ -table { border-collapse: collapse; border-spacing: 0; } - -meta.foundation-mq-small { font-family: "only screen and (min-width: 768px)"; width: 768px; } - -meta.foundation-mq-medium { font-family: "only screen and (min-width:1280px)"; width: 1280px; } - -meta.foundation-mq-large { font-family: "only screen and (min-width:1440px)"; width: 1440px; } - -*, *:before, *:after { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } - -html, body { font-size: 100%; } - -body { background: white; color: #222222; padding: 0; margin: 0; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; font-weight: normal; font-style: normal; line-height: 1; position: relative; cursor: auto; } - -a:hover { cursor: pointer; } - -img, object, embed { max-width: 100%; height: auto; } - -object, embed { height: 100%; } - -img { -ms-interpolation-mode: bicubic; } - -#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { max-width: none !important; } - -.left { float: left !important; } - 
-.right { float: right !important; } - -.text-left { text-align: left !important; } - -.text-right { text-align: right !important; } - -.text-center { text-align: center !important; } - -.text-justify { text-align: justify !important; } - -.hide { display: none; } - -.antialiased { -webkit-font-smoothing: antialiased; } - -img { display: inline-block; vertical-align: middle; } - -textarea { height: auto; min-height: 50px; } - -select { width: 100%; } - -object, svg { display: inline-block; vertical-align: middle; } - -.center { margin-left: auto; margin-right: auto; } - -.spread { width: 100%; } - -p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { font-size: 1.21875em; line-height: 1.6; } - -.subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.4; color: #6f6f6f; font-weight: 300; margin-top: 0.2em; margin-bottom: 0.5em; } - -/* Typography resets */ -div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; direction: ltr; } - -/* Default Link Styles */ -a { color: #2ba6cb; text-decoration: none; line-height: inherit; } -a:hover, a:focus { color: #2795b6; } -a img { border: none; } - -/* Default paragraph styles */ -p { font-family: inherit; font-weight: normal; font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } -p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } - -/* Default header styles */ -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Helvetica Neue", "Helvetica", 
Helvetica, Arial, sans-serif; font-weight: bold; font-style: normal; color: #222222; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.2125em; } -h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #6f6f6f; line-height: 0; } - -h1 { font-size: 2.125em; } - -h2 { font-size: 1.6875em; } - -h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.375em; } - -h4 { font-size: 1.125em; } - -h5 { font-size: 1.125em; } - -h6 { font-size: 1em; } - -hr { border: solid #dddddd; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; height: 0; } - -/* Helpful Typography Defaults */ -em, i { font-style: italic; line-height: inherit; } - -strong, b { font-weight: bold; line-height: inherit; } - -small { font-size: 60%; line-height: inherit; } - -code { font-family: Consolas, "Liberation Mono", Courier, monospace; font-weight: bold; color: #7f0a0c; } - -/* Lists */ -ul, ol, dl { font-size: 1em; line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } - -ul, ol { margin-left: 1.5em; } -ul.no-bullet, ol.no-bullet { margin-left: 1.5em; } - -/* Unordered Lists */ -ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; font-size: 1em; /* Override nested font-size change */ } -ul.square li ul, ul.circle li ul, ul.disc li ul { list-style: inherit; } -ul.square { list-style-type: square; } -ul.circle { list-style-type: circle; } -ul.disc { list-style-type: disc; } -ul.no-bullet { list-style: none; } - -/* Ordered Lists */ -ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } - -/* Definition Lists */ -dl dt { margin-bottom: 0.3125em; font-weight: bold; } -dl dd { margin-bottom: 1.25em; } - -/* Abbreviations */ -abbr, acronym { text-transform: uppercase; font-size: 90%; color: #222222; border-bottom: 1px dotted #dddddd; cursor: help; } - -abbr { text-transform: none; } - -/* Blockquotes */ 
-blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #dddddd; } -blockquote cite { display: block; font-size: 0.8125em; color: #555555; } -blockquote cite:before { content: "\2014 \0020"; } -blockquote cite a, blockquote cite a:visited { color: #555555; } - -blockquote, blockquote p { line-height: 1.6; color: #6f6f6f; } - -/* Microformats */ -.vcard { display: inline-block; margin: 0 0 1.25em 0; border: 1px solid #dddddd; padding: 0.625em 0.75em; } -.vcard li { margin: 0; display: block; } -.vcard .fn { font-weight: bold; font-size: 0.9375em; } - -.vevent .summary { font-weight: bold; } -.vevent abbr { cursor: auto; text-decoration: none; font-weight: bold; border: none; padding: 0 0.0625em; } - -@media only screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } - h1 { font-size: 2.75em; } - h2 { font-size: 2.3125em; } - h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } - h4 { font-size: 1.4375em; } } -/* Tables */ -table { background: white; margin-bottom: 1.25em; border: solid 1px #dddddd; } -table thead, table tfoot { background: whitesmoke; font-weight: bold; } -table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: #222222; text-align: left; } -table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: #222222; } -table tr.even, table tr.alt, table tr:nth-of-type(even) { background: #f9f9f9; } -table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { display: table-cell; line-height: 1.4; } - -body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; tab-size: 4; } - -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.4; } - -.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { content: " "; display: table; } 
-.clearfix:after, .float-group:after { clear: both; } - -*:not(pre) > code { font-size: inherit; font-style: normal !important; letter-spacing: 0; padding: 0; line-height: inherit; word-wrap: break-word; } -*:not(pre) > code.nobreak { word-wrap: normal; } -*:not(pre) > code.nowrap { white-space: nowrap; } - -pre, pre > code { line-height: 1.4; color: black; font-family: monospace, serif; font-weight: normal; } - -em em { font-style: normal; } - -strong strong { font-weight: normal; } - -.keyseq { color: #555555; } - -kbd { font-family: Consolas, "Liberation Mono", Courier, monospace; display: inline-block; color: #222222; font-size: 0.65em; line-height: 1.45; background-color: #f7f7f7; border: 1px solid #ccc; -webkit-border-radius: 3px; border-radius: 3px; -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em white inset; margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } - -.keyseq kbd:first-child { margin-left: 0; } - -.keyseq kbd:last-child { margin-right: 0; } - -.menuseq, .menu { color: #090909; } - -b.button:before, b.button:after { position: relative; top: -1px; font-weight: normal; } - -b.button:before { content: "["; padding: 0 3px 0 2px; } - -b.button:after { content: "]"; padding: 0 2px 0 3px; } - -#header, #content, #footnotes, #footer { width: 100%; margin-left: auto; margin-right: auto; margin-top: 0; margin-bottom: 0; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } -#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { content: " "; display: table; } -#header:after, #content:after, #footnotes:after, #footer:after { clear: both; } - -#content { margin-top: 1.25em; } - -#content:before { content: none; } - -#header > h1:first-child { color: black; margin-top: 2.25rem; margin-bottom: 0; 
} -#header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddd; } -#header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddd; padding-bottom: 8px; } -#header .details { border-bottom: 1px solid #dddddd; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: #555555; display: -ms-flexbox; display: -webkit-flex; display: flex; -ms-flex-flow: row wrap; -webkit-flex-flow: row wrap; flex-flow: row wrap; } -#header .details span:first-child { margin-left: -0.125em; } -#header .details span.email a { color: #6f6f6f; } -#header .details br { display: none; } -#header .details br + span:before { content: "\00a0\2013\00a0"; } -#header .details br + span.author:before { content: "\00a0\22c5\00a0"; color: #6f6f6f; } -#header .details br + span#revremark:before { content: "\00a0|\00a0"; } -#header #revnumber { text-transform: capitalize; } -#header #revnumber:after { content: "\00a0"; } - -#content > h1:first-child:not([class]) { color: black; border-bottom: 1px solid #dddddd; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } - -#toc { border-bottom: 1px solid #dddddd; padding-bottom: 0.5em; } -#toc > ul { margin-left: 0.125em; } -#toc ul.sectlevel0 > li > a { font-style: italic; } -#toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } -#toc ul { font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; list-style-type: none; } -#toc li { line-height: 1.3334; margin-top: 0.3334em; } -#toc a { text-decoration: none; } -#toc a:active { text-decoration: underline; } - -#toctitle { color: #6f6f6f; font-size: 1.2em; } - -@media only screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } - body.toc2 { padding-left: 15em; padding-right: 0; } - #toc.toc2 { margin-top: 0 !important; background-color: #f2f2f2; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #dddddd; border-top-width: 0 !important; 
border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } - #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } - #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } - #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } - #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } - body.toc2.toc-right { padding-left: 0; padding-right: 15em; } - body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #dddddd; left: auto; right: 0; } } -@media only screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } - #toc.toc2 { width: 20em; } - #toc.toc2 #toctitle { font-size: 1.375em; } - #toc.toc2 > ul { font-size: 0.95em; } - #toc.toc2 ul ul { padding-left: 1.25em; } - body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } -#content #toc { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } -#content #toc > :first-child { margin-top: 0; } -#content #toc > :last-child { margin-bottom: 0; } - -#footer { max-width: 100%; background-color: #222222; padding: 1.25em; } - -#footer-text { color: #dddddd; line-height: 1.44; } - -.sect1 { padding-bottom: 0.625em; } - -@media only screen and (min-width: 768px) { .sect1 { padding-bottom: 1.25em; } } -.sect1 + .sect1 { border-top: 1px solid #dddddd; } - -#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: normal; } -#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > 
a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } -#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } -#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #222222; text-decoration: none; } -#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { color: #151515; } - -.audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } - -.admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; } - -table.tableblock > caption.title { white-space: nowrap; overflow: visible; max-width: 0; } - -.paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { color: black; } - -table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { font-size: inherit; } - -.admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } -.admonitionblock 
> table td.icon { text-align: center; width: 80px; } -.admonitionblock > table td.icon img { max-width: initial; } -.admonitionblock > table td.icon .title { font-weight: bold; font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; text-transform: uppercase; } -.admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddd; color: #555555; } -.admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } - -.exampleblock > .content { border-style: solid; border-width: 1px; border-color: #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: white; -webkit-border-radius: 0; border-radius: 0; } -.exampleblock > .content > :first-child { margin-top: 0; } -.exampleblock > .content > :last-child { margin-bottom: 0; } - -.sidebarblock { border-style: solid; border-width: 1px; border-color: #d9d9d9; margin-bottom: 1.25em; padding: 1.25em; background: #f2f2f2; -webkit-border-radius: 0; border-radius: 0; } -.sidebarblock > :first-child { margin-top: 0; } -.sidebarblock > :last-child { margin-bottom: 0; } -.sidebarblock > .content > .title { color: #6f6f6f; margin-top: 0; } - -.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } - -.literalblock pre, .listingblock pre:not(.highlight), .listingblock pre[class="highlight"], .listingblock pre[class^="highlight "], .listingblock pre.CodeRay, .listingblock pre.prettyprint { background: #eeeeee; } -.sidebarblock .literalblock pre, .sidebarblock .listingblock 
pre:not(.highlight), .sidebarblock .listingblock pre[class="highlight"], .sidebarblock .listingblock pre[class^="highlight "], .sidebarblock .listingblock pre.CodeRay, .sidebarblock .listingblock pre.prettyprint { background: #f2f1f1; } - -.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { border: 1px solid #cccccc; -webkit-border-radius: 0; border-radius: 0; word-wrap: break-word; padding: 0.8em 0.8em 0.65em 0.8em; font-size: 0.8125em; } -.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { overflow-x: auto; white-space: pre; word-wrap: normal; } -@media only screen and (min-width: 768px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 0.90625em; } } -@media only screen and (min-width: 1280px) { .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { font-size: 1em; } } - -.literalblock.output pre { color: #eeeeee; background-color: black; } - -.listingblock pre.highlightjs { padding: 0; } -.listingblock pre.highlightjs > code { padding: 0.8em 0.8em 0.65em 0.8em; -webkit-border-radius: 0; border-radius: 0; } - -.listingblock > .content { position: relative; } - -.listingblock code[data-lang]:before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: #999; } - -.listingblock:hover code[data-lang]:before { display: block; } - -.listingblock.terminal pre .command:before { content: attr(data-prompt); padding-right: 0.5em; color: #999; } - -.listingblock.terminal pre .command:not([data-prompt]):before { content: "$"; } - -table.pyhltable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } - -table.pyhltable td { vertical-align: top; padding-top: 0; padding-bottom: 0; line-height: 1.4; } - -table.pyhltable td.code { padding-left: .75em; 
padding-right: 0; } - -pre.pygments .lineno, table.pyhltable td:not(.code) { color: #999; padding-left: 0; padding-right: .5em; border-right: 1px solid #dddddd; } - -pre.pygments .lineno { display: inline-block; margin-right: .25em; } - -table.pyhltable .linenodiv { background: none !important; padding-right: 0 !important; } - -.quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } -.quoteblock > .title { margin-left: -1.5em; margin-bottom: 0.75em; } -.quoteblock blockquote, .quoteblock blockquote p { color: #6f6f6f; font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } -.quoteblock blockquote { margin: 0; padding: 0; border: 0; } -.quoteblock blockquote:before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #6f6f6f; text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); } -.quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; } -.quoteblock .attribution { margin-top: 0.5em; margin-right: 0.5ex; text-align: right; } -.quoteblock .quoteblock { margin-left: 0; margin-right: 0; padding: 0.5em 0; border-left: 3px solid #555555; } -.quoteblock .quoteblock blockquote { padding: 0 0 0 0.75em; } -.quoteblock .quoteblock blockquote:before { display: none; } - -.verseblock { margin: 0 1em 1.25em 1em; } -.verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans; font-size: 1.15rem; color: #6f6f6f; font-weight: 300; text-rendering: optimizeLegibility; } -.verseblock pre strong { font-weight: 400; } -.verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } - -.quoteblock .attribution, .verseblock .attribution { font-size: 0.8125em; line-height: 1.45; font-style: italic; } -.quoteblock .attribution br, .verseblock .attribution br { display: none; } -.quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: #555555; } - -.quoteblock.abstract { margin: 0 0 1.25em 
0; display: block; } -.quoteblock.abstract blockquote, .quoteblock.abstract blockquote p { text-align: left; word-spacing: 0; } -.quoteblock.abstract blockquote:before, .quoteblock.abstract blockquote p:first-of-type:before { display: none; } - -table.tableblock { max-width: 100%; border-collapse: separate; } -table.tableblock td > .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { margin-bottom: 0; } - -table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dddddd; } - -table.grid-all th.tableblock, table.grid-all td.tableblock { border-width: 0 1px 1px 0; } - -table.grid-all tfoot > tr > th.tableblock, table.grid-all tfoot > tr > td.tableblock { border-width: 1px 1px 0 0; } - -table.grid-cols th.tableblock, table.grid-cols td.tableblock { border-width: 0 1px 0 0; } - -table.grid-all * > tr > .tableblock:last-child, table.grid-cols * > tr > .tableblock:last-child { border-right-width: 0; } - -table.grid-rows th.tableblock, table.grid-rows td.tableblock { border-width: 0 0 1px 0; } - -table.grid-all tbody > tr:last-child > th.tableblock, table.grid-all tbody > tr:last-child > td.tableblock, table.grid-all thead:last-child > tr > th.tableblock, table.grid-rows tbody > tr:last-child > th.tableblock, table.grid-rows tbody > tr:last-child > td.tableblock, table.grid-rows thead:last-child > tr > th.tableblock { border-bottom-width: 0; } - -table.grid-rows tfoot > tr > th.tableblock, table.grid-rows tfoot > tr > td.tableblock { border-width: 1px 0 0 0; } - -table.frame-all { border-width: 1px; } - -table.frame-sides { border-width: 0 1px; } - -table.frame-topbot { border-width: 1px 0; } - -th.halign-left, td.halign-left { text-align: left; } - -th.halign-right, td.halign-right { text-align: right; } - -th.halign-center, td.halign-center { text-align: center; } - -th.valign-top, td.valign-top { vertical-align: top; } - -th.valign-bottom, td.valign-bottom { vertical-align: bottom; } - 
-th.valign-middle, td.valign-middle { vertical-align: middle; } - -table thead th, table tfoot th { font-weight: bold; } - -tbody tr th { display: table-cell; line-height: 1.4; background: whitesmoke; } - -tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: #222222; font-weight: bold; } - -p.tableblock > code:only-child { background: none; padding: 0; } - -p.tableblock { font-size: 1em; } - -td > div.verse { white-space: pre; } - -ol { margin-left: 1.75em; } - -ul li ol { margin-left: 1.5em; } - -dl dd { margin-left: 1.125em; } - -dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } - -ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } - -ul.unstyled, ol.unnumbered, ul.checklist, ul.none { list-style-type: none; } - -ul.unstyled, ol.unnumbered, ul.checklist { margin-left: 0.625em; } - -ul.checklist li > p:first-child > .fa-square-o:first-child, ul.checklist li > p:first-child > .fa-check-square-o:first-child { width: 1em; font-size: 0.85em; } - -ul.checklist li > p:first-child > input[type="checkbox"]:first-child { width: 1em; position: relative; top: 1px; } - -ul.inline { margin: 0 auto 0.625em auto; margin-left: -1.375em; margin-right: 0; padding: 0; list-style: none; overflow: hidden; } -ul.inline > li { list-style: none; float: left; margin-left: 1.375em; display: block; } -ul.inline > li > * { display: block; } - -.unstyled dl dt { font-weight: normal; font-style: normal; } - -ol.arabic { list-style-type: decimal; } - -ol.decimal { list-style-type: decimal-leading-zero; } - -ol.loweralpha { list-style-type: lower-alpha; } - -ol.upperalpha { list-style-type: upper-alpha; } - -ol.lowerroman { list-style-type: lower-roman; } - -ol.upperroman { list-style-type: upper-roman; } - -ol.lowergreek { list-style-type: lower-greek; } - -.hdlist > table, .colist > table { border: 0; background: none; } -.hdlist > table > tbody > tr, .colist > table > tbody > tr { 
background: none; } - -td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } - -td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } - -.literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } - -.colist > table tr > td:first-of-type { padding: 0 0.75em; line-height: 1; } -.colist > table tr > td:first-of-type img { max-width: initial; } -.colist > table tr > td:last-of-type { padding: 0.25em 0; } - -.thumb, .th { line-height: 0; display: inline-block; border: solid 4px white; -webkit-box-shadow: 0 0 0 1px #dddddd; box-shadow: 0 0 0 1px #dddddd; } - -.imageblock.left, .imageblock[style*="float: left"] { margin: 0.25em 0.625em 1.25em 0; } -.imageblock.right, .imageblock[style*="float: right"] { margin: 0.25em 0 1.25em 0.625em; } -.imageblock > .title { margin-bottom: 0; } -.imageblock.thumb, .imageblock.th { border-width: 6px; } -.imageblock.thumb > .title, .imageblock.th > .title { padding: 0 0.125em; } - -.image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } -.image.left { margin-right: 0.625em; } -.image.right { margin-left: 0.625em; } - -a.image { text-decoration: none; display: inline-block; } -a.image object { pointer-events: none; } - -sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } -sup.footnote a, sup.footnoteref a { text-decoration: none; } -sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } - -#footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } -#footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em 0; border-width: 1px 0 0 0; } -#footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; text-indent: -1.05em; margin-bottom: 0.2em; } -#footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; } -#footnotes .footnote:last-of-type { margin-bottom: 0; } -#content 
#footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } - -.gist .file-data > table { border: 0; background: #fff; width: 100%; margin-bottom: 0; } -.gist .file-data > table td.line-data { width: 99%; } - -div.unbreakable { page-break-inside: avoid; } - -.big { font-size: larger; } - -.small { font-size: smaller; } - -.underline { text-decoration: underline; } - -.overline { text-decoration: overline; } - -.line-through { text-decoration: line-through; } - -.aqua { color: #00bfbf; } - -.aqua-background { background-color: #00fafa; } - -.black { color: black; } - -.black-background { background-color: black; } - -.blue { color: #0000bf; } - -.blue-background { background-color: #0000fa; } - -.fuchsia { color: #bf00bf; } - -.fuchsia-background { background-color: #fa00fa; } - -.gray { color: #606060; } - -.gray-background { background-color: #7d7d7d; } - -.green { color: #006000; } - -.green-background { background-color: #007d00; } - -.lime { color: #00bf00; } - -.lime-background { background-color: #00fa00; } - -.maroon { color: #600000; } - -.maroon-background { background-color: #7d0000; } - -.navy { color: #000060; } - -.navy-background { background-color: #00007d; } - -.olive { color: #606000; } - -.olive-background { background-color: #7d7d00; } - -.purple { color: #600060; } - -.purple-background { background-color: #7d007d; } - -.red { color: #bf0000; } - -.red-background { background-color: #fa0000; } - -.silver { color: #909090; } - -.silver-background { background-color: #bcbcbc; } - -.teal { color: #006060; } - -.teal-background { background-color: #007d7d; } - -.white { color: #bfbfbf; } - -.white-background { background-color: #fafafa; } - -.yellow { color: #bfbf00; } - -.yellow-background { background-color: #fafa00; } - -span.icon > .fa { cursor: default; } - -.admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } -.admonitionblock td.icon .icon-note:before { 
content: "\f05a"; color: #207c98; } -.admonitionblock td.icon .icon-tip:before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } -.admonitionblock td.icon .icon-warning:before { content: "\f071"; color: #bf6900; } -.admonitionblock td.icon .icon-caution:before { content: "\f06d"; color: #bf3400; } -.admonitionblock td.icon .icon-important:before { content: "\f06a"; color: #bf0000; } - -.conum[data-value] { display: inline-block; color: #fff !important; background-color: #222222; -webkit-border-radius: 100px; border-radius: 100px; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } -.conum[data-value] * { color: #fff !important; } -.conum[data-value] + b { display: none; } -.conum[data-value]:after { content: attr(data-value); } -pre .conum[data-value] { position: relative; top: -0.125em; } - -b.conum * { color: inherit !important; } - -.conum:not([data-value]):empty { display: none; } - -.literalblock pre, .listingblock pre { background: #eeeeee; } diff --git a/transport/src/docs/asciidoclet/overview.adoc b/transport/src/docs/asciidoclet/overview.adoc deleted file mode 100644 index 7947331..0000000 --- a/transport/src/docs/asciidoclet/overview.adoc +++ /dev/null @@ -1,4 +0,0 @@ -= Elasticsearch Java client -Jörg Prante -Version 5.4.0.0 - diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufBytesReference.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufBytesReference.java deleted file mode 100644 index e0b96d6..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufBytesReference.java +++ /dev/null @@ -1,74 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import io.netty.buffer.ByteBuf; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.bytes.BytesReference; -import 
org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; -import java.io.OutputStream; -import java.nio.charset.StandardCharsets; - -final class ByteBufBytesReference extends BytesReference { - - private final ByteBuf buffer; - private final int length; - private final int offset; - - ByteBufBytesReference(ByteBuf buffer, int length) { - this.buffer = buffer; - this.length = length; - this.offset = buffer.readerIndex(); - assert length <= buffer.readableBytes() : "length[" + length +"] > " + buffer.readableBytes(); - } - - @Override - public byte get(int index) { - return buffer.getByte(offset + index); - } - - @Override - public int length() { - return length; - } - - @Override - public BytesReference slice(int from, int length) { - return new ByteBufBytesReference(buffer.slice(offset + from, length), length); - } - - @Override - public StreamInput streamInput() { - return new ByteBufStreamInput(buffer.duplicate(), length); - } - - @Override - public void writeTo(OutputStream os) throws IOException { - buffer.getBytes(offset, os, length); - } - - ByteBuf toByteBuf() { - return buffer.duplicate(); - } - - @Override - public String utf8ToString() { - return buffer.toString(offset, length, StandardCharsets.UTF_8); - } - - @Override - public BytesRef toBytesRef() { - if (buffer.hasArray()) { - return new BytesRef(buffer.array(), buffer.arrayOffset() + offset, length); - } - final byte[] copy = new byte[length]; - buffer.getBytes(offset, copy); - return new BytesRef(copy); - } - - @Override - public long ramBytesUsed() { - return buffer.capacity(); - } - -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufStreamInput.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufStreamInput.java deleted file mode 100644 index 1dadaea..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ByteBufStreamInput.java +++ /dev/null @@ -1,131 +0,0 @@ -package 
org.xbib.elasticsearch.client.transport; - -import io.netty.buffer.ByteBuf; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.EOFException; -import java.io.IOException; - -/** - * A Netty {@link io.netty.buffer.ByteBuf} based {@link org.elasticsearch.common.io.stream.StreamInput}. - */ -class ByteBufStreamInput extends StreamInput { - - private final ByteBuf buffer; - private final int endIndex; - - ByteBufStreamInput(ByteBuf buffer, int length) { - if (length > buffer.readableBytes()) { - throw new IndexOutOfBoundsException(); - } - this.buffer = buffer; - int startIndex = buffer.readerIndex(); - endIndex = startIndex + length; - buffer.markReaderIndex(); - } - - @Override - public BytesReference readBytesReference(int length) throws IOException { - // NOTE: It is unsafe to share a reference of the internal structure, so we - // use the default implementation which will copy the bytes. It is unsafe because - // a netty ByteBuf might be pooled which requires a manual release to prevent - // memory leaks. - return super.readBytesReference(length); - } - - @Override - public BytesRef readBytesRef(int length) throws IOException { - // NOTE: It is unsafe to share a reference of the internal structure, so we - // use the default implementation which will copy the bytes. It is unsafe because - // a netty ByteBuf might be pooled which requires a manual release to prevent - // memory leaks. 
- return super.readBytesRef(length); - } - - @Override - public int available() throws IOException { - return endIndex - buffer.readerIndex(); - } - - @Override - protected void ensureCanReadBytes(int length) throws EOFException { - int bytesAvailable = endIndex - buffer.readerIndex(); - if (bytesAvailable < length) { - throw new EOFException("tried to read: " + length + " bytes but only " + bytesAvailable + " remaining"); - } - } - - @Override - public void mark(int readlimit) { - buffer.markReaderIndex(); - } - - @Override - public boolean markSupported() { - return true; - } - - @Override - public int read() throws IOException { - if (available() == 0) { - return -1; - } - return buffer.readByte() & 0xff; - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - if (len == 0) { - return 0; - } - int available = available(); - if (available == 0) { - return -1; - } - - len = Math.min(available, len); - buffer.readBytes(b, off, len); - return len; - } - - @Override - public void reset() throws IOException { - buffer.resetReaderIndex(); - } - - @Override - public long skip(long n) throws IOException { - if (n > Integer.MAX_VALUE) { - return skipBytes(Integer.MAX_VALUE); - } else { - return skipBytes((int) n); - } - } - - public int skipBytes(int n) throws IOException { - int nBytes = Math.min(available(), n); - buffer.skipBytes(nBytes); - return nBytes; - } - - - @Override - public byte readByte() throws IOException { - return buffer.readByte(); - } - - @Override - public void readBytes(byte[] b, int offset, int len) throws IOException { - int read = read(b, offset, len); - if (read < len) { - throw new IndexOutOfBoundsException(); - } - } - - @Override - public void close() throws IOException { - // nothing to do here - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/CompressibleBytesOutputStream.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/CompressibleBytesOutputStream.java 
deleted file mode 100644 index 3318068..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/CompressibleBytesOutputStream.java +++ /dev/null @@ -1,87 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.io.stream.BytesStream; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.zip.DeflaterOutputStream; - -/** - * This class exists to provide a stream with optional compression. This is useful as using compression - * requires that the underlying {@link DeflaterOutputStream} be closed to write EOS bytes. However, the - * {@link BytesStream} should not be closed yet, as we have not used the bytes. This class handles these - * intricacies. - * - * {@link CompressibleBytesOutputStream#materializeBytes()} should be called when all the bytes have been - * written to this stream. If compression is enabled, the proper EOS bytes will be written at that point. - * The underlying {@link BytesReference} will be returned. - * - * {@link CompressibleBytesOutputStream#close()} should be called when the bytes are no longer needed and - * can be safely released. 
- */ -final class CompressibleBytesOutputStream extends StreamOutput { - - private final StreamOutput stream; - private final BytesStream bytesStreamOutput; - private final boolean shouldCompress; - - CompressibleBytesOutputStream(BytesStream bytesStreamOutput, boolean shouldCompress) throws IOException { - this.bytesStreamOutput = bytesStreamOutput; - this.shouldCompress = shouldCompress; - if (shouldCompress) { - this.stream = CompressorFactory.COMPRESSOR.streamOutput(Streams.flushOnCloseStream(bytesStreamOutput)); - } else { - this.stream = bytesStreamOutput; - } - } - - /** - * This method ensures that compression is complete and returns the underlying bytes. - * - * @return bytes underlying the stream - * @throws IOException if an exception occurs when writing or flushing - */ - BytesReference materializeBytes() throws IOException { - // If we are using compression the stream needs to be closed to ensure that EOS marker bytes are written. - // The actual ReleasableBytesStreamOutput will not be closed yet as it is wrapped in flushOnCloseStream when - // passed to the deflater stream. 
- if (shouldCompress) { - stream.close(); - } - - return bytesStreamOutput.bytes(); - } - - @Override - public void writeByte(byte b) throws IOException { - stream.write(b); - } - - @Override - public void writeBytes(byte[] b, int offset, int length) throws IOException { - stream.writeBytes(b, offset, length); - } - - @Override - public void flush() throws IOException { - stream.flush(); - } - - @Override - public void close() throws IOException { - if (stream == bytesStreamOutput) { - IOUtils.close(stream); - } else { - IOUtils.close(stream, bytesStreamOutput); - } - } - - @Override - public void reset() { - throw new UnsupportedOperationException(); - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ConnectionProfile.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ConnectionProfile.java deleted file mode 100644 index ed36792..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ConnectionProfile.java +++ /dev/null @@ -1,209 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.transport.TransportRequestOptions; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * A connection profile describes how many connection are established to specific node for each of the available request types. - * ({@link org.elasticsearch.transport.TransportRequestOptions.Type}). This allows to tailor a connection towards a specific usage. - */ -public final class ConnectionProfile { - - /** - * Builds a connection profile that is dedicated to a single channel type. 
Use this - * when opening single use connections - */ - public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType, - @Nullable TimeValue connectTimeout, - @Nullable TimeValue handshakeTimeout) { - Builder builder = new Builder(); - builder.addConnections(1, channelType); - final EnumSet otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class); - otherTypes.remove(channelType); - builder.addConnections(0, otherTypes.stream().toArray(TransportRequestOptions.Type[]::new)); - if (connectTimeout != null) { - builder.setConnectTimeout(connectTimeout); - } - if (handshakeTimeout != null) { - builder.setHandshakeTimeout(handshakeTimeout); - } - return builder.build(); - } - - private final List handles; - private final int numConnections; - private final TimeValue connectTimeout; - private final TimeValue handshakeTimeout; - - private ConnectionProfile(List handles, int numConnections, TimeValue connectTimeout, TimeValue handshakeTimeout) - { - this.handles = handles; - this.numConnections = numConnections; - this.connectTimeout = connectTimeout; - this.handshakeTimeout = handshakeTimeout; - } - - /** - * A builder to build a new {@link ConnectionProfile} - */ - public static class Builder { - private final List handles = new ArrayList<>(); - private final Set addedTypes = EnumSet.noneOf(TransportRequestOptions.Type.class); - private int offset = 0; - private TimeValue connectTimeout; - private TimeValue handshakeTimeout; - - /** create an empty builder */ - public Builder() { - } - - /** copy constructor, using another profile as a base */ - public Builder(ConnectionProfile source) { - handles.addAll(source.getHandles()); - offset = source.getNumConnections(); - handles.forEach(th -> addedTypes.addAll(th.types)); - connectTimeout = source.getConnectTimeout(); - handshakeTimeout = source.getHandshakeTimeout(); - } - /** - * Sets a connect timeout for this connection profile - */ - public void setConnectTimeout(TimeValue 
connectTimeout) { - if (connectTimeout.millis() < 0) { - throw new IllegalArgumentException("connectTimeout must be non-negative but was: " + connectTimeout); - } - this.connectTimeout = connectTimeout; - } - - /** - * Sets a handshake timeout for this connection profile - */ - public void setHandshakeTimeout(TimeValue handshakeTimeout) { - if (handshakeTimeout.millis() < 0) { - throw new IllegalArgumentException("handshakeTimeout must be non-negative but was: " + handshakeTimeout); - } - this.handshakeTimeout = handshakeTimeout; - } - - /** - * Adds a number of connections for one or more types. Each type can only be added once. - * @param numConnections the number of connections to use in the pool for the given connection types - * @param types a set of types that should share the given number of connections - */ - public void addConnections(int numConnections, TransportRequestOptions.Type... types) { - if (types == null || types.length == 0) { - throw new IllegalArgumentException("types must not be null"); - } - for (TransportRequestOptions.Type type : types) { - if (addedTypes.contains(type)) { - throw new IllegalArgumentException("type [" + type + "] is already registered"); - } - } - addedTypes.addAll(Arrays.asList(types)); - handles.add(new ConnectionTypeHandle(offset, numConnections, EnumSet.copyOf(Arrays.asList(types)))); - offset += numConnections; - } - - /** - * Creates a new {@link ConnectionProfile} based on the added connections. 
- * @throws IllegalStateException if any of the {@link org.elasticsearch.transport.TransportRequestOptions.Type} enum is missing - */ - public ConnectionProfile build() { - EnumSet types = EnumSet.allOf(TransportRequestOptions.Type.class); - types.removeAll(addedTypes); - if (types.isEmpty() == false) { - throw new IllegalStateException("not all types are added for this connection profile - missing types: " + types); - } - return new ConnectionProfile(Collections.unmodifiableList(handles), offset, connectTimeout, handshakeTimeout); - } - - } - - /** - * Returns the connect timeout or null if no explicit timeout is set on this profile. - */ - public TimeValue getConnectTimeout() { - return connectTimeout; - } - - /** - * Returns the handshake timeout or null if no explicit timeout is set on this profile. - */ - public TimeValue getHandshakeTimeout() { - return handshakeTimeout; - } - - /** - * Returns the total number of connections for this profile - */ - public int getNumConnections() { - return numConnections; - } - - /** - * Returns the number of connections per type for this profile. This might return a count that is shared with other types such - * that the sum of all connections per type might be higher than {@link #getNumConnections()}. For instance if - * {@link org.elasticsearch.transport.TransportRequestOptions.Type#BULK} shares connections with - * {@link org.elasticsearch.transport.TransportRequestOptions.Type#REG} they will return both the same number of connections from - * this method but the connections are not distinct. 
- */ - public int getNumConnectionsPerType(TransportRequestOptions.Type type) { - for (ConnectionTypeHandle handle : handles) { - if (handle.getTypes().contains(type)) { - return handle.length; - } - } - throw new AssertionError("no handle found for type: " + type); - } - - /** - * Returns the type handles for this connection profile - */ - List getHandles() { - return Collections.unmodifiableList(handles); - } - - /** - * Connection type handle encapsulates the logic which connection - */ - static final class ConnectionTypeHandle { - public final int length; - public final int offset; - private final Set types; - private final AtomicInteger counter = new AtomicInteger(); - - private ConnectionTypeHandle(int offset, int length, Set types) { - this.length = length; - this.offset = offset; - this.types = types; - } - - /** - * Returns one of the channels out configured for this handle. The channel is selected in a round-robin - * fashion. - */ - T getChannel(List channels) { - if (length == 0) { - throw new IllegalStateException("can't select channel size is 0 for types: " + types); - } - assert channels.size() >= offset + length : "illegal size: " + channels.size() + " expected >= " + (offset + length); - return channels.get(offset + Math.floorMod(counter.incrementAndGet(), length)); - } - - /** - * Returns all types for this handle - */ - Set getTypes() { - return types; - } - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ESLoggingHandler.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/ESLoggingHandler.java deleted file mode 100644 index 5ed6ef6..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/ESLoggingHandler.java +++ /dev/null @@ -1,108 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.logging.LogLevel; -import io.netty.handler.logging.LoggingHandler; -import 
org.elasticsearch.Version; -import org.elasticsearch.common.compress.Compressor; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.transport.TcpHeader; -import org.elasticsearch.transport.TransportStatus; - -import java.io.IOException; - -final class ESLoggingHandler extends LoggingHandler { - - ESLoggingHandler() { - super(LogLevel.TRACE); - } - - @Override - protected String format(final ChannelHandlerContext ctx, final String eventName, final Object arg) { - if (arg instanceof ByteBuf) { - try { - return format(ctx, eventName, (ByteBuf) arg); - } catch (final Exception e) { - // we really do not want to allow a bug in the formatting handling to escape - logger.trace("an exception occurred formatting a trace message", e); - // we are going to let this be formatted via the default formatting - return super.format(ctx, eventName, arg); - } - } else { - return super.format(ctx, eventName, arg); - } - } - - private static final int MESSAGE_LENGTH_OFFSET = TcpHeader.MARKER_BYTES_SIZE; - private static final int REQUEST_ID_OFFSET = MESSAGE_LENGTH_OFFSET + TcpHeader.MESSAGE_LENGTH_SIZE; - private static final int STATUS_OFFSET = REQUEST_ID_OFFSET + TcpHeader.REQUEST_ID_SIZE; - private static final int VERSION_ID_OFFSET = STATUS_OFFSET + TcpHeader.STATUS_SIZE; - private static final int ACTION_OFFSET = VERSION_ID_OFFSET + TcpHeader.VERSION_ID_SIZE; - - private String format(final ChannelHandlerContext ctx, final String eventName, final ByteBuf arg) throws IOException { - final int readableBytes = arg.readableBytes(); - if (readableBytes == 0) { - return super.format(ctx, eventName, arg); - } else if (readableBytes >= 2) { - final StringBuilder sb = new StringBuilder(); - sb.append(ctx.channel().toString()); - final int offset = arg.readerIndex(); - // this might be 
an ES message, check the header - if (arg.getByte(offset) == (byte) 'E' && arg.getByte(offset + 1) == (byte) 'S') { - if (readableBytes == TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE) { - final int length = arg.getInt(offset + MESSAGE_LENGTH_OFFSET); - if (length == TcpTransport.PING_DATA_SIZE) { - sb.append(" [ping]").append(' ').append(eventName).append(": ").append(readableBytes).append('B'); - return sb.toString(); - } - } - else if (readableBytes >= TcpHeader.HEADER_SIZE) { - // we are going to try to decode this as an ES message - final int length = arg.getInt(offset + MESSAGE_LENGTH_OFFSET); - final long requestId = arg.getLong(offset + REQUEST_ID_OFFSET); - final byte status = arg.getByte(offset + STATUS_OFFSET); - final boolean isRequest = TransportStatus.isRequest(status); - final String type = isRequest ? "request" : "response"; - final String version = Version.fromId(arg.getInt(offset + VERSION_ID_OFFSET)).toString(); - sb.append(" [length: ").append(length); - sb.append(", request id: ").append(requestId); - sb.append(", type: ").append(type); - sb.append(", version: ").append(version); - if (isRequest) { - // it looks like an ES request, try to decode the action - final int remaining = readableBytes - ACTION_OFFSET; - final ByteBuf slice = arg.slice(offset + ACTION_OFFSET, remaining); - // the stream might be compressed - try (StreamInput in = in(status, slice, remaining)) { - // the first bytes in the message is the context headers - try (ThreadContext context = new ThreadContext(Settings.EMPTY)) { - context.readHeaders(in); - } - // now we can decode the action name - sb.append(", action: ").append(in.readString()); - } - } - sb.append(']'); - sb.append(' ').append(eventName).append(": ").append(readableBytes).append('B'); - return sb.toString(); - } - } - } - // we could not decode this as an ES message, use the default formatting - return super.format(ctx, eventName, arg); - } - - private StreamInput in(final Byte status, final 
ByteBuf slice, final int remaining) throws IOException { - final ByteBufStreamInput in = new ByteBufStreamInput(slice, remaining); - if (TransportStatus.isCompress(status)) { - final Compressor compressor = CompressorFactory.compressor(Netty4Utils.toBytesReference(slice)); - return compressor.streamInput(in); - } else { - return in; - } - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/MockTransportBulkClient.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/MockTransportBulkClient.java deleted file mode 100644 index 38c689d..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/MockTransportBulkClient.java +++ /dev/null @@ -1,149 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.settings.Settings; -import org.xbib.elasticsearch.client.BulkControl; -import org.xbib.elasticsearch.client.BulkMetric; - -import java.io.IOException; -import java.util.Map; - -/** - * Mock client, it does not perform actions on a cluster. - * Useful for testing or dry runs. 
- */ -public class MockTransportBulkClient extends TransportBulkClient { - - @Override - public ElasticsearchClient client() { - return null; - } - - @Override - public MockTransportBulkClient init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control) { - return this; - } - - @Override - public MockTransportBulkClient maxActionsPerRequest(int maxActions) { - return this; - } - - @Override - public MockTransportBulkClient maxConcurrentRequests(int maxConcurrentRequests) { - return this; - } - - @Override - public MockTransportBulkClient maxVolumePerRequest(String maxVolumePerRequest) { - return this; - } - - @Override - public MockTransportBulkClient flushIngestInterval(String interval) { - return this; - } - - @Override - public MockTransportBulkClient index(String index, String type, String id, boolean create, String source) { - return this; - } - - @Override - public MockTransportBulkClient delete(String index, String type, String id) { - return this; - } - - @Override - public MockTransportBulkClient update(String index, String type, String id, String source) { - return this; - } - - @Override - public MockTransportBulkClient indexRequest(IndexRequest indexRequest) { - return this; - } - - @Override - public MockTransportBulkClient deleteRequest(DeleteRequest deleteRequest) { - return this; - } - - @Override - public MockTransportBulkClient updateRequest(UpdateRequest updateRequest) { - return this; - } - - @Override - public MockTransportBulkClient flushIngest() { - return this; - } - - @Override - public MockTransportBulkClient waitForResponses(String timeValue) throws InterruptedException { - return this; - } - - @Override - public MockTransportBulkClient startBulk(String index, long startRefreshInterval, long stopRefreshIterval) { - return this; - } - - @Override - public MockTransportBulkClient stopBulk(String index) { - return this; - } - - @Override - public MockTransportBulkClient deleteIndex(String index) { - return 
this; - } - - @Override - public MockTransportBulkClient newIndex(String index) { - return this; - } - - @Override - public MockTransportBulkClient newMapping(String index, String type, Map mapping) { - return this; - } - - @Override - public void putMapping(String index) { - // mockup method - } - - @Override - public void refreshIndex(String index) { - // mockup method - } - - @Override - public void flushIndex(String index) { - // mockup method - } - - @Override - public void waitForCluster(String healthColor, String timeValue) throws IOException { - // mockup method - } - - @Override - public int waitForRecovery(String index) throws IOException { - return -1; - } - - @Override - public int updateReplicaLevel(String index, int level) throws IOException { - return -1; - } - - @Override - public void shutdown() { - // mockup method - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4InternalESLogger.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4InternalESLogger.java deleted file mode 100644 index 33429bf..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4InternalESLogger.java +++ /dev/null @@ -1,168 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import io.netty.util.internal.logging.AbstractInternalLogger; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.SuppressLoggerChecks; -import org.elasticsearch.common.logging.Loggers; - -@SuppressLoggerChecks(reason = "safely delegates to logger") -class Netty4InternalESLogger extends AbstractInternalLogger { - - private final Logger logger; - - Netty4InternalESLogger(final String name) { - super(name); - this.logger = Loggers.getLogger(name); - } - - @Override - public boolean isTraceEnabled() { - return logger.isTraceEnabled(); - } - - @Override - public void trace(String msg) { - logger.trace(msg); - } - - @Override - public void trace(String format, Object arg) { - 
logger.trace(format, arg); - } - - @Override - public void trace(String format, Object argA, Object argB) { - logger.trace(format, argA, argB); - } - - @Override - public void trace(String format, Object... arguments) { - logger.trace(format, arguments); - } - - @Override - public void trace(String msg, Throwable t) { - logger.trace(msg, t); - } - - @Override - public boolean isDebugEnabled() { - return logger.isDebugEnabled(); - } - - @Override - public void debug(String msg) { - logger.debug(msg); - } - - @Override - public void debug(String format, Object arg) { - logger.debug(format, arg); - } - - @Override - public void debug(String format, Object argA, Object argB) { - logger.debug(format, argA, argB); - } - - @Override - public void debug(String format, Object... arguments) { - logger.debug(format, arguments); - } - - @Override - public void debug(String msg, Throwable t) { - logger.debug(msg, t); - } - - @Override - public boolean isInfoEnabled() { - return logger.isInfoEnabled(); - } - - @Override - public void info(String msg) { - logger.info(msg); - } - - @Override - public void info(String format, Object arg) { - logger.info(format, arg); - } - - @Override - public void info(String format, Object argA, Object argB) { - logger.info(format, argA, argB); - } - - @Override - public void info(String format, Object... arguments) { - logger.info(format, arguments); - } - - @Override - public void info(String msg, Throwable t) { - logger.info(msg, t); - } - - @Override - public boolean isWarnEnabled() { - return logger.isWarnEnabled(); - } - - @Override - public void warn(String msg) { - logger.warn(msg); - } - - @Override - public void warn(String format, Object arg) { - logger.warn(format, arg); - } - - @Override - public void warn(String format, Object... 
arguments) { - logger.warn(format, arguments); - } - - @Override - public void warn(String format, Object argA, Object argB) { - logger.warn(format, argA, argB); - } - - @Override - public void warn(String msg, Throwable t) { - logger.warn(msg, t); - } - - @Override - public boolean isErrorEnabled() { - return logger.isErrorEnabled(); - } - - @Override - public void error(String msg) { - logger.error(msg); - } - - @Override - public void error(String format, Object arg) { - logger.error(format, arg); - } - - @Override - public void error(String format, Object argA, Object argB) { - logger.error(format, argA, argB); - } - - @Override - public void error(String format, Object... arguments) { - logger.error(format, arguments); - } - - @Override - public void error(String msg, Throwable t) { - logger.error(msg, t); - } - -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4MessageChannelHandler.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4MessageChannelHandler.java deleted file mode 100644 index 4126944..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4MessageChannelHandler.java +++ /dev/null @@ -1,58 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.Channel; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.util.Attribute; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.transport.TcpHeader; -import org.elasticsearch.transport.Transports; - -import java.net.InetSocketAddress; - -/** - * A handler (must be the last one!) that does size based frame decoding and forwards the actual message - * to the relevant action. 
- */ -final class Netty4MessageChannelHandler extends ChannelDuplexHandler { - - private final Netty4Transport transport; - private final String profileName; - - Netty4MessageChannelHandler(Netty4Transport transport, String profileName) { - this.transport = transport; - this.profileName = profileName; - } - - @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - Transports.assertTransportThread(); - if (!(msg instanceof ByteBuf)) { - ctx.fireChannelRead(msg); - return; - } - final ByteBuf buffer = (ByteBuf) msg; - final int remainingMessageSize = buffer.getInt(buffer.readerIndex() - TcpHeader.MESSAGE_LENGTH_SIZE); - final int expectedReaderIndex = buffer.readerIndex() + remainingMessageSize; - try { - Channel channel = ctx.channel(); - InetSocketAddress remoteAddress = (InetSocketAddress) channel.remoteAddress(); - // netty always copies a buffer, either in NioWorker in its read handler, where it copies to a fresh - // buffer, or in the cumulative buffer, which is cleaned each time so it could be bigger than the actual size - BytesReference reference = Netty4Utils.toBytesReference(buffer, remainingMessageSize); - Attribute channelAttribute = channel.attr(Netty4Transport.CHANNEL_KEY); - transport.messageReceived(reference, channelAttribute.get(), profileName, remoteAddress, remainingMessageSize); - } finally { - // Set the expected position of the buffer, no matter what happened - buffer.readerIndex(expectedReaderIndex); - } - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - Netty4Utils.maybeDie(cause); - transport.exceptionCaught(ctx, cause); - } - -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Plugin.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Plugin.java deleted file mode 100644 index cb0b450..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Plugin.java 
+++ /dev/null @@ -1,77 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.http.HttpServerTransport; -import org.elasticsearch.http.netty4.Netty4HttpServerTransport; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.function.Supplier; - -public class Netty4Plugin extends Plugin implements NetworkPlugin { - - static { - Netty4Utils.setup(); - } - - public static final String NETTY_TRANSPORT_NAME = "netty4"; - public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4"; - - @Override - public List> getSettings() { - return Arrays.asList( - Netty4HttpServerTransport.SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, - Netty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT, - Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, - Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN, - Netty4HttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX, - Netty4Transport.WORKER_COUNT, - Netty4Transport.NETTY_RECEIVE_PREDICTOR_SIZE, - Netty4Transport.NETTY_RECEIVE_PREDICTOR_MIN, - Netty4Transport.NETTY_RECEIVE_PREDICTOR_MAX, - Netty4Transport.NETTY_BOSS_COUNT - ); - } - - @Override - public Settings additionalSettings() { - return Settings.builder() - // here we set the netty4 transport and http transport as the default. This is a set once setting - // ie. 
if another plugin does that as well the server will fail - only one default network can exist! - .put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), NETTY_HTTP_TRANSPORT_NAME) - .put(NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.getKey(), NETTY_TRANSPORT_NAME) - .build(); - } - - @Override - public Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, - PageCacheRecycler pageCacheRecycler, - CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService) { - return Collections.singletonMap(NETTY_TRANSPORT_NAME, () -> new Netty4Transport(settings, threadPool, networkService, bigArrays, - namedWriteableRegistry, circuitBreakerService)); - } - - @Override - public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, - CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, - NamedXContentRegistry xContentRegistry, - NetworkService networkService, - HttpServerTransport.Dispatcher dispatcher) { - return Collections.singletonMap(NETTY_HTTP_TRANSPORT_NAME, - () -> new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher)); - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4SizeHeaderFrameDecoder.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4SizeHeaderFrameDecoder.java deleted file mode 100644 index bf18d0f..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4SizeHeaderFrameDecoder.java +++ /dev/null @@ -1,30 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.ByteToMessageDecoder; -import io.netty.handler.codec.TooLongFrameException; -import org.elasticsearch.transport.TcpHeader; - -import java.util.List; - -final class 
Netty4SizeHeaderFrameDecoder extends ByteToMessageDecoder { - - @Override - protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { - try { - boolean continueProcessing = TcpTransport.validateMessageHeader(Netty4Utils.toBytesReference(in)); - final ByteBuf message = in.skipBytes(TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE); - if (!continueProcessing) return; - out.add(message); - } catch (IllegalArgumentException ex) { - throw new TooLongFrameException(ex); - } catch (IllegalStateException ex) { - /* decode will be called until the ByteBuf is fully consumed; when it is fully - * consumed, transport#validateMessageHeader will throw an IllegalStateException which - * is okay, it means we have finished consuming the ByteBuf and we can get out - */ - } - } - -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Transport.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Transport.java deleted file mode 100644 index 6e7df8b..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Transport.java +++ /dev/null @@ -1,339 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import io.netty.bootstrap.Bootstrap; -import io.netty.bootstrap.ServerBootstrap; -import io.netty.channel.AdaptiveRecvByteBufAllocator; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelOption; -import io.netty.channel.FixedRecvByteBufAllocator; -import io.netty.channel.RecvByteBufAllocator; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.nio.NioServerSocketChannel; -import io.netty.channel.socket.nio.NioSocketChannel; -import io.netty.util.AttributeKey; -import io.netty.util.concurrent.Future; -import 
org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportRequestOptions; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.common.settings.Setting.byteSizeSetting; -import static org.elasticsearch.common.settings.Setting.intSetting; -import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; -import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; - -/** - * There are 4 types of connections per node, low/med/high/ping. Low if for batch oriented APIs (like recovery or - * batch) with high payload that will cause regular request. (like search or single index) to take - * longer. 
Med is for the typical search / single doc index. And High for things like cluster state. Ping is reserved for - * sending out ping requests to other nodes. - */ -public class Netty4Transport extends TcpTransport { - - static { - Netty4Utils.setup(); - } - - public static final Setting WORKER_COUNT = - new Setting<>("transport.netty.worker_count", - (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), - (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope); - - public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( - "transport.netty.receive_predictor_size", new ByteSizeValue(64, ByteSizeUnit.KB), Property.NodeScope); - public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = - byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); - public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = - byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); - public static final Setting NETTY_BOSS_COUNT = - intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope); - - - protected final RecvByteBufAllocator recvByteBufAllocator; - protected final int workerCount; - protected final ByteSizeValue receivePredictorMin; - protected final ByteSizeValue receivePredictorMax; - protected volatile Bootstrap bootstrap; - protected final Map serverBootstraps = newConcurrentMap(); - - public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, - NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { - super("netty", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); - Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings)); - this.workerCount = WORKER_COUNT.get(settings); - - // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default 
values in netty..., we can use higher ones for us, even fixed one - this.receivePredictorMin = NETTY_RECEIVE_PREDICTOR_MIN.get(settings); - this.receivePredictorMax = NETTY_RECEIVE_PREDICTOR_MAX.get(settings); - if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) { - recvByteBufAllocator = new FixedRecvByteBufAllocator((int) receivePredictorMax.getBytes()); - } else { - recvByteBufAllocator = new AdaptiveRecvByteBufAllocator((int) receivePredictorMin.getBytes(), - (int) receivePredictorMin.getBytes(), (int) receivePredictorMax.getBytes()); - } - } - - @Override - protected void doStart() { - boolean success = false; - try { - bootstrap = createBootstrap(); - if (NetworkService.NETWORK_SERVER.get(settings)) { - for (ProfileSettings profileSettings : profileSettings) { - createServerBootstrap(profileSettings); - bindServer(profileSettings); - } - } - super.doStart(); - success = true; - } finally { - if (success == false) { - doStop(); - } - } - } - - private Bootstrap createBootstrap() { - final Bootstrap bootstrap = new Bootstrap(); - bootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX))); - bootstrap.channel(NioSocketChannel.class); - - bootstrap.handler(getClientChannelInitializer()); - - bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.toIntExact(defaultConnectionProfile.getConnectTimeout().millis())); - bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings)); - bootstrap.option(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings)); - - final ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.get(settings); - if (tcpSendBufferSize.getBytes() > 0) { - bootstrap.option(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes())); - } - - final ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings); - if (tcpReceiveBufferSize.getBytes() > 0) { - bootstrap.option(ChannelOption.SO_RCVBUF, 
Math.toIntExact(tcpReceiveBufferSize.getBytes())); - } - - bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); - - final boolean reuseAddress = TCP_REUSE_ADDRESS.get(settings); - bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress); - - bootstrap.validate(); - - return bootstrap; - } - - private void createServerBootstrap(ProfileSettings profileSettings) { - String name = profileSettings.profileName; - if (logger.isDebugEnabled()) { - logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], " - + "connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]", - name, workerCount, profileSettings.portOrRange, profileSettings.bindHosts, profileSettings.publishHosts, compress, - defaultConnectionProfile.getConnectTimeout(), - defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY), - defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK), - defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.REG), - defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE), - defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.PING), - receivePredictorMin, receivePredictorMax); - } - - - final ThreadFactory workerFactory = daemonThreadFactory(this.settings, TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX, name); - - final ServerBootstrap serverBootstrap = new ServerBootstrap(); - - serverBootstrap.group(new NioEventLoopGroup(workerCount, workerFactory)); - serverBootstrap.channel(NioServerSocketChannel.class); - - serverBootstrap.childHandler(getServerChannelInitializer(name)); - - serverBootstrap.childOption(ChannelOption.TCP_NODELAY, profileSettings.tcpNoDelay); - serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, profileSettings.tcpKeepAlive); - - if (profileSettings.sendBufferSize.getBytes() != -1) { - 
serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(profileSettings.sendBufferSize.getBytes())); - } - - if (profileSettings.receiveBufferSize.getBytes() != -1) { - serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(profileSettings.receiveBufferSize.bytesAsInt())); - } - - serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); - serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); - - serverBootstrap.option(ChannelOption.SO_REUSEADDR, profileSettings.reuseAddress); - serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, profileSettings.reuseAddress); - serverBootstrap.validate(); - - serverBootstraps.put(name, serverBootstrap); - } - - protected ChannelHandler getServerChannelInitializer(String name) { - return new ServerChannelInitializer(name); - } - - protected ChannelHandler getClientChannelInitializer() { - return new ClientChannelInitializer(); - } - - static final AttributeKey CHANNEL_KEY = AttributeKey.newInstance("es-channel-client"); - - protected final void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class); - final Throwable t = unwrapped != null ? unwrapped : cause; - Channel channel = ctx.channel(); - onException(channel.attr(CHANNEL_KEY).get(), t instanceof Exception ? 
(Exception) t : new ElasticsearchException(t)); - } - - @Override - protected NettyTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener listener) - throws IOException { - ChannelFuture channelFuture = bootstrap.connect(node.getAddress().address()); - Channel channel = channelFuture.channel(); - if (channel == null) { - Netty4Utils.maybeDie(channelFuture.cause()); - throw new IOException(channelFuture.cause()); - } - addClosedExceptionLogger(channel); - - NettyTcpChannel nettyChannel = new NettyTcpChannel(channel); - channel.attr(CHANNEL_KEY).set(nettyChannel); - - channelFuture.addListener(f -> { - if (f.isSuccess()) { - listener.onResponse(null); - } else { - Throwable cause = f.cause(); - if (cause instanceof Error) { - Netty4Utils.maybeDie(cause); - listener.onFailure(new Exception(cause)); - } else { - listener.onFailure((Exception) cause); - } - } - }); - - return nettyChannel; - } - - @Override - protected NettyTcpChannel bind(String name, InetSocketAddress address) { - Channel channel = serverBootstraps.get(name).bind(address).syncUninterruptibly().channel(); - NettyTcpChannel esChannel = new NettyTcpChannel(channel); - channel.attr(CHANNEL_KEY).set(esChannel); - return esChannel; - } - - ScheduledPing getPing() { - return scheduledPing; - } - - @Override - @SuppressForbidden(reason = "debug") - protected void stopInternal() { - Releasables.close(() -> { - final List>> serverBootstrapCloseFutures = new ArrayList<>(serverBootstraps.size()); - for (final Map.Entry entry : serverBootstraps.entrySet()) { - serverBootstrapCloseFutures.add( - Tuple.tuple(entry.getKey(), entry.getValue().config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS))); - } - for (final Tuple> future : serverBootstrapCloseFutures) { - future.v2().awaitUninterruptibly(); - if (!future.v2().isSuccess()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "Error closing server bootstrap for profile [{}]", future.v1()), future.v2().cause()); 
- } - } - serverBootstraps.clear(); - - if (bootstrap != null) { - bootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly(); - bootstrap = null; - } - }); - } - - protected class ClientChannelInitializer extends ChannelInitializer { - - @Override - protected void initChannel(Channel ch) throws Exception { - ch.pipeline().addLast("logging", new ESLoggingHandler()); - ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); - // using a dot as a prefix means this cannot come from any settings parsed - ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, ".client")); - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - Netty4Utils.maybeDie(cause); - super.exceptionCaught(ctx, cause); - } - } - - protected class ServerChannelInitializer extends ChannelInitializer { - - protected final String name; - - protected ServerChannelInitializer(String name) { - this.name = name; - } - - @Override - protected void initChannel(Channel ch) throws Exception { - addClosedExceptionLogger(ch); - NettyTcpChannel nettyTcpChannel = new NettyTcpChannel(ch); - ch.attr(CHANNEL_KEY).set(nettyTcpChannel); - serverAcceptedChannel(nettyTcpChannel); - ch.pipeline().addLast("logging", new ESLoggingHandler()); - ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); - ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, name)); - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - Netty4Utils.maybeDie(cause); - super.exceptionCaught(ctx, cause); - } - } - - private void addClosedExceptionLogger(Channel channel) { - channel.closeFuture().addListener(f -> { - if (f.isSuccess() == false) { - logger.debug(() -> new ParameterizedMessage("exception while closing channel: {}", channel), f.cause()); - } - }); - } -} diff --git 
a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Utils.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Utils.java deleted file mode 100644 index 355f4c9..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Netty4Utils.java +++ /dev/null @@ -1,164 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.util.NettyRuntime; -import io.netty.util.internal.logging.InternalLogger; -import io.netty.util.internal.logging.InternalLoggerFactory; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefIterator; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.ESLoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - -public class Netty4Utils { - - static { - InternalLoggerFactory.setDefaultFactory(new InternalLoggerFactory() { - - @Override - public InternalLogger newInstance(final String name) { - return new Netty4InternalESLogger(name); - } - - }); - } - - public static void setup() { - - } - - private static AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean(); - - /** - * Set the number of available processors that Netty uses for sizing various resources (e.g., thread pools). 
- * - * @param availableProcessors the number of available processors - * @throws IllegalStateException if available processors was set previously and the specified value does not match the already-set value - */ - public static void setAvailableProcessors(final int availableProcessors) { - // we set this to false in tests to avoid tests that randomly set processors from stepping on each other - final boolean set = Booleans.parseBoolean(System.getProperty("es.set.netty.runtime.available.processors", "true")); - if (!set) { - return; - } - - try { - NettyRuntime.setAvailableProcessors(availableProcessors); - } catch (IllegalStateException e) { - // - } - } - - /** - * Turns the given BytesReference into a ByteBuf. Note: the returned ByteBuf will reference the internal - * pages of the BytesReference. Don't free the bytes of reference before the ByteBuf goes out of scope. - */ - public static ByteBuf toByteBuf(final BytesReference reference) { - if (reference.length() == 0) { - return Unpooled.EMPTY_BUFFER; - } - if (reference instanceof ByteBufBytesReference) { - return ((ByteBufBytesReference) reference).toByteBuf(); - } else { - final BytesRefIterator iterator = reference.iterator(); - // usually we have one, two, or three components from the header, the message, and a buffer - final List buffers = new ArrayList<>(3); - try { - BytesRef slice; - while ((slice = iterator.next()) != null) { - buffers.add(Unpooled.wrappedBuffer(slice.bytes, slice.offset, slice.length)); - } - final CompositeByteBuf composite = Unpooled.compositeBuffer(buffers.size()); - composite.addComponents(true, buffers); - return composite; - } catch (IOException ex) { - throw new AssertionError("no IO happens here", ex); - } - } - } - - /** - * Wraps the given ChannelBuffer with a BytesReference - */ - public static BytesReference toBytesReference(final ByteBuf buffer) { - return toBytesReference(buffer, buffer.readableBytes()); - } - - /** - * Wraps the given ChannelBuffer with a 
BytesReference of a given size - */ - static BytesReference toBytesReference(final ByteBuf buffer, final int size) { - return new ByteBufBytesReference(buffer, size); - } - - public static void closeChannels(final Collection channels) throws IOException { - IOException closingExceptions = null; - final List futures = new ArrayList<>(); - for (final Channel channel : channels) { - try { - if (channel != null && channel.isOpen()) { - futures.add(channel.close()); - } - } catch (Exception e) { - if (closingExceptions == null) { - closingExceptions = new IOException("failed to close channels"); - } - closingExceptions.addSuppressed(e); - } - } - for (final ChannelFuture future : futures) { - future.awaitUninterruptibly(); - } - - if (closingExceptions != null) { - throw closingExceptions; - } - } - - /** - * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be - * caught and bubbles up to the uncaught exception handler. - * - * @param cause the throwable to test - */ - public static void maybeDie(final Throwable cause) { - final Logger logger = ESLoggerFactory.getLogger(Netty4Utils.class); - final Optional maybeError = ExceptionsHelper.maybeError(cause, logger); - if (maybeError.isPresent()) { - /* - * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many - * invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up - * to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap - * the exception so as to not lose the original cause during exit. 
- */ - try { - // try to log the current stack trace - final StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); - final String formatted = Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n")); - logger.error("fatal error on the network layer\n{}", formatted); - } finally { - new Thread( - () -> { - throw maybeError.get(); - }) - .start(); - } - } - } - -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/NettyTcpChannel.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/NettyTcpChannel.java deleted file mode 100644 index b0e5f73..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/NettyTcpChannel.java +++ /dev/null @@ -1,92 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import io.netty.channel.Channel; -import io.netty.channel.ChannelOption; -import io.netty.channel.ChannelPromise; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.transport.TcpChannel; -import org.elasticsearch.transport.TransportException; - -import java.net.InetSocketAddress; -import java.util.concurrent.CompletableFuture; - -public class NettyTcpChannel implements TcpChannel { - - private final Channel channel; - private final CompletableFuture closeContext = new CompletableFuture<>(); - - NettyTcpChannel(Channel channel) { - this.channel = channel; - this.channel.closeFuture().addListener(f -> { - if (f.isSuccess()) { - closeContext.complete(null); - } else { - Throwable cause = f.cause(); - if (cause instanceof Error) { - Netty4Utils.maybeDie(cause); - closeContext.completeExceptionally(cause); - } else { - closeContext.completeExceptionally(cause); - } - } - }); - } - - @Override - public void close() { - channel.close(); - } - - @Override - public void addCloseListener(ActionListener listener) { - closeContext.whenComplete(ActionListener.toBiConsumer(listener)); - } - - 
@Override - public void setSoLinger(int value) { - channel.config().setOption(ChannelOption.SO_LINGER, value); - } - - @Override - public boolean isOpen() { - return channel.isOpen(); - } - - @Override - public InetSocketAddress getLocalAddress() { - return (InetSocketAddress) channel.localAddress(); - } - - @Override - public void sendMessage(BytesReference reference, ActionListener listener) { - ChannelPromise writePromise = channel.newPromise(); - writePromise.addListener(f -> { - if (f.isSuccess()) { - listener.onResponse(null); - } else { - final Throwable cause = f.cause(); - Netty4Utils.maybeDie(cause); - assert cause instanceof Exception; - listener.onFailure((Exception) cause); - } - }); - channel.writeAndFlush(Netty4Utils.toByteBuf(reference), writePromise); - - if (channel.eventLoop().isShutdown()) { - listener.onFailure(new TransportException("Cannot send message, event loop is shutting down.")); - } - } - - public Channel getLowLevelChannel() { - return channel; - } - - @Override - public String toString() { - return "NettyTcpChannel{" + - "localAddress=" + getLocalAddress() + - ", remoteAddress=" + channel.remoteAddress() + - '}'; - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkModule.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkModule.java deleted file mode 100644 index 820353e..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkModule.java +++ /dev/null @@ -1,237 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.elasticsearch.action.support.replication.ReplicationTask; -import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; -import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; -import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; -import 
org.elasticsearch.cluster.routing.allocation.command.AllocationCommand; -import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand; -import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; -import org.elasticsearch.common.CheckedFunction; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.http.HttpServerTransport; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.tasks.RawTaskStatus; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.function.Supplier; - -/** - * A module to handle registering and binding all network related classes. 
- */ -public final class NetworkModule { - - public static final String TRANSPORT_TYPE_KEY = "transport.type"; - public static final String HTTP_TYPE_KEY = "http.type"; - public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default"; - public static final String TRANSPORT_TYPE_DEFAULT_KEY = "transport.type.default"; - - public static final Setting TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_DEFAULT_KEY, - Property.NodeScope); - public static final Setting HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope); - public static final Setting HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope); - public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope); - public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope); - - private final Settings settings; - private final boolean transportClient; - - private static final List namedWriteables = new ArrayList<>(); - private static final List namedXContents = new ArrayList<>(); - - static { - registerAllocationCommand(CancelAllocationCommand::new, CancelAllocationCommand::fromXContent, - CancelAllocationCommand.COMMAND_NAME_FIELD); - registerAllocationCommand(MoveAllocationCommand::new, MoveAllocationCommand::fromXContent, - MoveAllocationCommand.COMMAND_NAME_FIELD); - registerAllocationCommand(AllocateReplicaAllocationCommand::new, AllocateReplicaAllocationCommand::fromXContent, - AllocateReplicaAllocationCommand.COMMAND_NAME_FIELD); - registerAllocationCommand(AllocateEmptyPrimaryAllocationCommand::new, AllocateEmptyPrimaryAllocationCommand::fromXContent, - AllocateEmptyPrimaryAllocationCommand.COMMAND_NAME_FIELD); - registerAllocationCommand(AllocateStalePrimaryAllocationCommand::new, AllocateStalePrimaryAllocationCommand::fromXContent, - AllocateStalePrimaryAllocationCommand.COMMAND_NAME_FIELD); - namedWriteables.add( - new 
NamedWriteableRegistry.Entry(Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new)); - namedWriteables.add( - new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new)); - } - - private final Map> transportFactories = new HashMap<>(); - private final Map> transportHttpFactories = new HashMap<>(); - private final List transportIntercetors = new ArrayList<>(); - - /** - * Creates a network module that custom networking classes can be plugged into. - * @param settings The settings for the node - * @param transportClient True if only transport classes should be allowed to be registered, false otherwise. - */ - public NetworkModule(Settings settings, boolean transportClient, List plugins, ThreadPool threadPool, - BigArrays bigArrays, - PageCacheRecycler pageCacheRecycler, - CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, - NamedXContentRegistry xContentRegistry, - NetworkService networkService, HttpServerTransport.Dispatcher dispatcher) { - this.settings = settings; - this.transportClient = transportClient; - for (NetworkPlugin plugin : plugins) { - if (transportClient == false && HTTP_ENABLED.get(settings)) { - Map> httpTransportFactory = plugin.getHttpTransports(settings, threadPool, bigArrays, - circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, dispatcher); - for (Map.Entry> entry : httpTransportFactory.entrySet()) { - registerHttpTransport(entry.getKey(), entry.getValue()); - } - } - Map> transportFactory = plugin.getTransports(settings, threadPool, bigArrays, pageCacheRecycler, - circuitBreakerService, namedWriteableRegistry, networkService); - for (Map.Entry> entry : transportFactory.entrySet()) { - registerTransport(entry.getKey(), entry.getValue()); - } - List transportInterceptors = plugin.getTransportInterceptors(namedWriteableRegistry, - threadPool.getThreadContext()); - for (TransportInterceptor interceptor : 
transportInterceptors) { - registerTransportInterceptor(interceptor); - } - } - } - - public boolean isTransportClient() { - return transportClient; - } - - /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */ - private void registerTransport(String key, Supplier factory) { - if (transportFactories.putIfAbsent(key, factory) != null) { - throw new IllegalArgumentException("transport for name: " + key + " is already registered"); - } - } - - /** Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}. */ - // TODO: we need another name than "http transport"....so confusing with transportClient... - private void registerHttpTransport(String key, Supplier factory) { - if (transportClient) { - throw new IllegalArgumentException("Cannot register http transport " + key + " for transport client"); - } - if (transportHttpFactories.putIfAbsent(key, factory) != null) { - throw new IllegalArgumentException("transport for name: " + key + " is already registered"); - } - } - - /** - * Register an allocation command. - *

- * This lives here instead of the more aptly named ClusterModule because the Transport client needs these to be registered. - *

- * @param reader the reader to read it from a stream - * @param parser the parser to read it from XContent - * @param commandName the names under which the command should be parsed. The {@link ParseField#getPreferredName()} is special because - * it is the name under which the command's reader is registered. - */ - private static void registerAllocationCommand(Writeable.Reader reader, - CheckedFunction parser, ParseField commandName) { - namedXContents.add(new NamedXContentRegistry.Entry(AllocationCommand.class, commandName, parser)); - namedWriteables.add(new NamedWriteableRegistry.Entry(AllocationCommand.class, commandName.getPreferredName(), reader)); - } - - public static List getNamedWriteables() { - return Collections.unmodifiableList(namedWriteables); - } - - public static List getNamedXContents() { - return Collections.unmodifiableList(namedXContents); - } - - public Supplier getHttpServerTransportSupplier() { - final String name; - if (HTTP_TYPE_SETTING.exists(settings)) { - name = HTTP_TYPE_SETTING.get(settings); - } else { - name = HTTP_DEFAULT_TYPE_SETTING.get(settings); - } - final Supplier factory = transportHttpFactories.get(name); - if (factory == null) { - throw new IllegalStateException("Unsupported http.type [" + name + "]"); - } - return factory; - } - - public boolean isHttpEnabled() { - return transportClient == false && HTTP_ENABLED.get(settings); - } - - public Supplier getTransportSupplier() { - final String name; - if (TRANSPORT_TYPE_SETTING.exists(settings)) { - name = TRANSPORT_TYPE_SETTING.get(settings); - } else { - name = TRANSPORT_DEFAULT_TYPE_SETTING.get(settings); - } - final Supplier factory = transportFactories.get(name); - if (factory == null) { - throw new IllegalStateException("Unsupported transport.type [" + name + "] factories = " + transportFactories); - } - return factory; - } - - /** - * Registers a new {@link TransportInterceptor} - */ - private void registerTransportInterceptor(TransportInterceptor interceptor) { - 
this.transportIntercetors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); - } - - /** - * Returns a composite {@link TransportInterceptor} containing all registered interceptors - * @see #registerTransportInterceptor(TransportInterceptor) - */ - public TransportInterceptor getTransportInterceptor() { - return new CompositeTransportInterceptor(this.transportIntercetors); - } - - static final class CompositeTransportInterceptor implements TransportInterceptor { - final List transportInterceptors; - - private CompositeTransportInterceptor(List transportInterceptors) { - this.transportInterceptors = new ArrayList<>(transportInterceptors); - } - - @Override - public TransportRequestHandler interceptHandler(String action, String executor, - boolean forceExecution, - TransportRequestHandler actualHandler) { - for (TransportInterceptor interceptor : this.transportInterceptors) { - actualHandler = interceptor.interceptHandler(action, executor, forceExecution, actualHandler); - } - return actualHandler; - } - - @Override - public AsyncSender interceptSender(AsyncSender sender) { - for (TransportInterceptor interceptor : this.transportInterceptors) { - sender = interceptor.interceptSender(sender); - } - return sender; - } - } - -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkPlugin.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkPlugin.java deleted file mode 100644 index f566813..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/NetworkPlugin.java +++ /dev/null @@ -1,61 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.function.Supplier; - -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Settings; -import 
org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.http.HttpServerTransport; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.threadpool.ThreadPool; - -/** - * Plugin for extending network and transport related classes - */ -public interface NetworkPlugin { - - /** - * Returns a list of {@link TransportInterceptor} instances that are used to intercept incoming and outgoing - * transport (inter-node) requests. This must not return null - * - * @param namedWriteableRegistry registry of all named writeables registered - * @param threadContext a {@link ThreadContext} of the current nodes or clients {@link ThreadPool} that can be used to set additional - * headers in the interceptors - */ - default List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, - ThreadContext threadContext) { - return Collections.emptyList(); - } - - /** - * Returns a map of {@link Transport} suppliers. - * See {@link org.elasticsearch.common.network.NetworkModule#TRANSPORT_TYPE_KEY} to configure a specific implementation. - */ - default Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, - PageCacheRecycler pageCacheRecycler, - CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService) { - return Collections.emptyMap(); - } - - /** - * Returns a map of {@link HttpServerTransport} suppliers. - * See {@link org.elasticsearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation. 
- */ - default Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, - CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, - NamedXContentRegistry xContentRegistry, - NetworkService networkService, - HttpServerTransport.Dispatcher dispatcher) { - return Collections.emptyMap(); - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterConnection.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterConnection.java deleted file mode 100644 index 2251158..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterConnection.java +++ /dev/null @@ -1,728 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import 
org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.CancellableThreads; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.TransportActionProxy; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponseHandler; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -/** - * Represents a connection to a single remote cluster. In contrast to a local cluster a remote cluster is not joined such that the - * current node is part of the cluster and it won't receive cluster state updates from the remote cluster. Remote clusters are also not - * fully connected with the current node. 
From a connection perspective a local cluster forms a bi-directional star network while in the - * remote case we only connect to a subset of the nodes in the cluster in an uni-directional fashion. - * - * This class also handles the discovery of nodes from the remote cluster. The initial list of seed nodes is only used to discover all nodes - * in the remote cluster and connects to all eligible nodes, for details see {@link RemoteClusterService#REMOTE_NODE_ATTRIBUTE}. - * - * In the case of a disconnection, this class will issue a re-connect task to establish at most - * {@link RemoteClusterService#REMOTE_CONNECTIONS_PER_CLUSTER} until either all eligible nodes are exhausted or the maximum number of - * connections per cluster has been reached. - */ -final class RemoteClusterConnection extends AbstractComponent implements TransportConnectionListener, Closeable { - - private final TransportService transportService; - private final ConnectionProfile remoteProfile; - private final ConnectedNodes connectedNodes; - private final String clusterAlias; - private final int maxNumRemoteConnections; - private final Predicate nodePredicate; - private volatile List seedNodes; - private volatile boolean skipUnavailable; - private final ConnectHandler connectHandler; - private SetOnce remoteClusterName = new SetOnce<>(); - - /** - * Creates a new {@link RemoteClusterConnection} - * @param settings the nodes settings object - * @param clusterAlias the configured alias of the cluster to connect to - * @param seedNodes a list of seed nodes to discover eligible nodes from - * @param transportService the local nodes transport service - * @param maxNumRemoteConnections the maximum number of connections to the remote cluster - * @param nodePredicate a predicate to filter eligible remote nodes to connect to - */ - RemoteClusterConnection(Settings settings, String clusterAlias, List seedNodes, - TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate) { - 
super(settings); - this.transportService = transportService; - this.maxNumRemoteConnections = maxNumRemoteConnections; - this.nodePredicate = nodePredicate; - this.clusterAlias = clusterAlias; - ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); - builder.setConnectTimeout(TcpTransport.TCP_CONNECT_TIMEOUT.get(settings)); - builder.setHandshakeTimeout(TcpTransport.TCP_CONNECT_TIMEOUT.get(settings)); - builder.addConnections(6, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING); // TODO make this configurable? - builder.addConnections(0, // we don't want this to be used for anything else but search - TransportRequestOptions.Type.BULK, - TransportRequestOptions.Type.STATE, - TransportRequestOptions.Type.RECOVERY); - remoteProfile = builder.build(); - connectedNodes = new ConnectedNodes(clusterAlias); - this.seedNodes = Collections.unmodifiableList(seedNodes); - this.skipUnavailable = RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE - .getConcreteSettingForNamespace(clusterAlias).get(settings); - this.connectHandler = new ConnectHandler(); - transportService.addConnectionListener(this); - } - - /** - * Updates the list of seed nodes for this cluster connection - */ - synchronized void updateSeedNodes(List seedNodes, ActionListener connectListener) { - this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes)); - connectHandler.connect(connectListener); - } - - /** - * Updates the skipUnavailable flag that can be dynamically set for each remote cluster - */ - void updateSkipUnavailable(boolean skipUnavailable) { - this.skipUnavailable = skipUnavailable; - } - - @Override - public void onNodeDisconnected(DiscoveryNode node) { - boolean remove = connectedNodes.remove(node); - if (remove && connectedNodes.size() < maxNumRemoteConnections) { - // try to reconnect and fill up the slot of the disconnected node - connectHandler.forceConnect(); - } - } - - /** - * Fetches all shards for the search request from this remote 
connection. This is used to later run the search on the remote end. - */ - public void fetchSearchShards(ClusterSearchShardsRequest searchRequest, - ActionListener listener) { - - final ActionListener searchShardsListener; - final Consumer onConnectFailure; - if (skipUnavailable) { - onConnectFailure = (exception) -> listener.onResponse(ClusterSearchShardsResponse.EMPTY); - searchShardsListener = ActionListener.wrap(listener::onResponse, (e) -> listener.onResponse(ClusterSearchShardsResponse.EMPTY)); - } else { - onConnectFailure = listener::onFailure; - searchShardsListener = listener; - } - // in case we have no connected nodes we try to connect and if we fail we either notify the listener or not depending on - // the skip_unavailable setting - ensureConnected(ActionListener.wrap((x) -> fetchShardsInternal(searchRequest, searchShardsListener), onConnectFailure)); - } - - /** - * Ensures that this cluster is connected. If the cluster is connected this operation - * will invoke the listener immediately. 
- */ - public void ensureConnected(ActionListener voidActionListener) { - if (connectedNodes.size() == 0) { - connectHandler.connect(voidActionListener); - } else { - voidActionListener.onResponse(null); - } - } - - private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest, - final ActionListener listener) { - final DiscoveryNode node = connectedNodes.get(); - transportService.sendRequest(node, ClusterSearchShardsAction.NAME, searchShardsRequest, - new TransportResponseHandler() { - - @Override - public ClusterSearchShardsResponse newInstance() { - return new ClusterSearchShardsResponse(); - } - - @Override - public void handleResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { - listener.onResponse(clusterSearchShardsResponse); - } - - @Override - public void handleException(TransportException e) { - listener.onFailure(e); - } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } - }); - } - - /** - * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link DiscoveryNode} lookup function - * that returns null if the node ID is not found. 
- */ - void collectNodes(ActionListener> listener) { - Runnable runnable = () -> { - final ClusterStateRequest request = new ClusterStateRequest(); - request.clear(); - request.nodes(true); - request.local(true); // run this on the node that gets the request it's as good as any other - final DiscoveryNode node = connectedNodes.get(); - transportService.sendRequest(node, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, - new TransportResponseHandler() { - @Override - public ClusterStateResponse newInstance() { - return new ClusterStateResponse(); - } - - @Override - public void handleResponse(ClusterStateResponse response) { - DiscoveryNodes nodes = response.getState().nodes(); - listener.onResponse(nodes::get); - } - - @Override - public void handleException(TransportException exp) { - listener.onFailure(exp); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - }; - try { - // just in case if we are not connected for some reason we try to connect and if we fail we have to notify the listener - // this will cause some back pressure on the search end and eventually will cause rejections but that's fine - // we can't proceed with a search on a cluster level. - // in the future we might want to just skip the remote nodes in such a case but that can already be implemented on the - // caller end since they provide the listener. - ensureConnected(ActionListener.wrap((x) -> runnable.run(), listener::onFailure)); - } catch (Exception ex) { - listener.onFailure(ex); - } - } - - /** - * Returns a connection to the remote cluster. This connection might be a proxy connection that redirects internally to the - * given node. 
- */ - Transport.Connection getConnection(DiscoveryNode remoteClusterNode) { - DiscoveryNode discoveryNode = connectedNodes.get(); - Transport.Connection connection = transportService.getConnection(discoveryNode); - return new Transport.Connection() { - @Override - public DiscoveryNode getNode() { - return remoteClusterNode; - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { - connection.sendRequest(requestId, TransportActionProxy.getProxyAction(action), - TransportActionProxy.wrapRequest(remoteClusterNode, request), options); - } - - @Override - public void close() throws IOException { - assert false: "proxy connections must not be closed"; - } - - @Override - public Version getVersion() { - return connection.getVersion(); - } - }; - } - - Transport.Connection getConnection() { - DiscoveryNode discoveryNode = connectedNodes.get(); - return transportService.getConnection(discoveryNode); - } - - @Override - public void close() throws IOException { - connectHandler.close(); - } - - public boolean isClosed() { - return connectHandler.isClosed(); - } - - /** - * The connect handler manages node discovery and the actual connect to the remote cluster. - * There is at most one connect job running at any time. If such a connect job is triggered - * while another job is running the provided listeners are queued and batched up until the current running job returns. - * - * The handler has a built-in queue that can hold up to 100 connect attempts and will reject requests once the queue is full. - * In a scenario when a remote cluster becomes unavailable we will queue requests up but if we can't connect quick enough - * we will just reject the connect trigger which will lead to failing searches. 
- */ - private class ConnectHandler implements Closeable { - private final Semaphore running = new Semaphore(1); - private final AtomicBoolean closed = new AtomicBoolean(false); - private final BlockingQueue> queue = new ArrayBlockingQueue<>(100); - private final CancellableThreads cancellableThreads = new CancellableThreads(); - - /** - * Triggers a connect round iff there are pending requests queued up and if there is no - * connect round currently running. - */ - void maybeConnect() { - connect(null); - } - - /** - * Triggers a connect round unless there is one running already. If there is a connect round running, the listener will either - * be queued or rejected and failed. - */ - void connect(ActionListener connectListener) { - connect(connectListener, false); - } - - /** - * Triggers a connect round unless there is one already running. In contrast to {@link #maybeConnect()} will this method also - * trigger a connect round if there is no listener queued up. - */ - void forceConnect() { - connect(null, true); - } - - private void connect(ActionListener connectListener, boolean forceRun) { - final boolean runConnect; - final Collection> toNotify; - synchronized (queue) { - if (connectListener != null && queue.offer(connectListener) == false) { - connectListener.onFailure(new RejectedExecutionException("connect queue is full")); - return; - } - if (forceRun == false && queue.isEmpty()) { - return; - } - runConnect = running.tryAcquire(); - if (runConnect) { - toNotify = new ArrayList<>(); - queue.drainTo(toNotify); - if (closed.get()) { - running.release(); - ActionListener.onFailure(toNotify, new AlreadyClosedException("connect handler is already closed")); - return; - } - } else { - toNotify = Collections.emptyList(); - } - } - if (runConnect) { - forkConnect(toNotify); - } - } - - private void forkConnect(final Collection> toNotify) { - ThreadPool threadPool = transportService.getThreadPool(); - ExecutorService executor = 
threadPool.executor(ThreadPool.Names.MANAGEMENT); - executor.submit(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - synchronized (queue) { - running.release(); - } - try { - ActionListener.onFailure(toNotify, e); - } finally { - maybeConnect(); - } - } - - @Override - protected void doRun() throws Exception { - ActionListener listener = ActionListener.wrap((x) -> { - synchronized (queue) { - running.release(); - } - try { - ActionListener.onResponse(toNotify, x); - } finally { - maybeConnect(); - } - - }, (e) -> { - synchronized (queue) { - running.release(); - } - try { - ActionListener.onFailure(toNotify, e); - } finally { - maybeConnect(); - } - }); - collectRemoteNodes(seedNodes.iterator(), transportService, listener); - } - }); - - } - - void collectRemoteNodes(Iterator seedNodes, - final TransportService transportService, ActionListener listener) { - if (Thread.currentThread().isInterrupted()) { - listener.onFailure(new InterruptedException("remote connect thread got interrupted")); - } - try { - if (seedNodes.hasNext()) { - cancellableThreads.executeIO(() -> { - final DiscoveryNode seedNode = seedNodes.next(); - final DiscoveryNode handshakeNode; - Transport.Connection connection = transportService.openConnection(seedNode, - ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null)); - boolean success = false; - try { - try { - handshakeNode = transportService.handshake(connection, remoteProfile.getHandshakeTimeout().millis(), - (c) -> remoteClusterName.get() == null ? 
true : c.equals(remoteClusterName.get())); - } catch (IllegalStateException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("seed node {} cluster name mismatch expected " + - "cluster name {}", connection.getNode(), remoteClusterName.get()), ex); - throw ex; - } - if (nodePredicate.test(handshakeNode) && connectedNodes.size() < maxNumRemoteConnections) { - transportService.connectToNode(handshakeNode, remoteProfile); - connectedNodes.add(handshakeNode); - } - ClusterStateRequest request = new ClusterStateRequest(); - request.clear(); - request.nodes(true); - // here we pass on the connection since we can only close it once the sendRequest returns otherwise - // due to the async nature (it will return before it's actually sent) this can cause the request to fail - // due to an already closed connection. - ThreadPool threadPool = transportService.getThreadPool(); - ThreadContext threadContext = threadPool.getThreadContext(); - TransportService.ContextRestoreResponseHandler responseHandler = new TransportService - .ContextRestoreResponseHandler<>(threadContext.newRestorableContext(false), - new SniffClusterStateResponseHandler(transportService, connection, listener, seedNodes, - cancellableThreads)); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - // we stash any context here since this is an internal execution and should not leak any - // existing context information. 
- threadContext.markAsSystemContext(); - transportService.sendRequest(connection, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, - responseHandler); - } - success = true; - } finally { - if (success == false) { - connection.close(); - } - } - }); - } else { - listener.onFailure(new IllegalStateException("no seed node left")); - } - } catch (CancellableThreads.ExecutionCancelledException ex) { - listener.onFailure(ex); // we got canceled - fail the listener and step out - } catch (ConnectTransportException | IOException | IllegalStateException ex) { - // ISE if we fail the handshake with an version incompatible node - if (seedNodes.hasNext()) { - logger.debug((Supplier) () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", - clusterAlias), ex); - collectRemoteNodes(seedNodes, transportService, listener); - } else { - listener.onFailure(ex); - } - } - } - - @Override - public void close() throws IOException { - try { - if (closed.compareAndSet(false, true)) { - cancellableThreads.cancel("connect handler is closed"); - running.acquire(); // acquire the semaphore to ensure all connections are closed and all thread joined - running.release(); - maybeConnect(); // now go and notify pending listeners - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - - final boolean isClosed() { - return closed.get(); - } - - /* This class handles the _state response from the remote cluster when sniffing nodes to connect to */ - private class SniffClusterStateResponseHandler implements TransportResponseHandler { - - private final TransportService transportService; - private final Transport.Connection connection; - private final ActionListener listener; - private final Iterator seedNodes; - private final CancellableThreads cancellableThreads; - - SniffClusterStateResponseHandler(TransportService transportService, Transport.Connection connection, - ActionListener listener, Iterator seedNodes, - 
CancellableThreads cancellableThreads) { - this.transportService = transportService; - this.connection = connection; - this.listener = listener; - this.seedNodes = seedNodes; - this.cancellableThreads = cancellableThreads; - } - - @Override - public ClusterStateResponse newInstance() { - return new ClusterStateResponse(); - } - - @Override - public void handleResponse(ClusterStateResponse response) { - assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; - try { - if (remoteClusterName.get() == null) { - assert response.getClusterName().value() != null; - remoteClusterName.set(response.getClusterName()); - } - try (Closeable theConnection = connection) { // the connection is unused - see comment in #collectRemoteNodes - // we have to close this connection before we notify listeners - this is mainly needed for test correctness - // since if we do it afterwards we might fail assertions that check if all high level connections are closed. - // from a code correctness perspective we could also close it afterwards. 
This try/with block will - // maintain the possibly exceptions thrown from within the try block and suppress the ones that are possible thrown - // by closing the connection - cancellableThreads.executeIO(() -> { - DiscoveryNodes nodes = response.getState().nodes(); - Iterable nodesIter = nodes.getNodes()::valuesIt; - for (DiscoveryNode node : nodesIter) { - if (nodePredicate.test(node) && connectedNodes.size() < maxNumRemoteConnections) { - try { - transportService.connectToNode(node, remoteProfile); // noop if node is connected - connectedNodes.add(node); - } catch (ConnectTransportException | IllegalStateException ex) { - // ISE if we fail the handshake with an version incompatible node - // fair enough we can't connect just move on - logger.debug((Supplier) - () -> new ParameterizedMessage("failed to connect to node {}", node), ex); - } - } - } - }); - } - listener.onResponse(null); - } catch (CancellableThreads.ExecutionCancelledException ex) { - listener.onFailure(ex); // we got canceled - fail the listener and step out - } catch (Exception ex) { - logger.warn((Supplier) - () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", - clusterAlias), ex); - collectRemoteNodes(seedNodes, transportService, listener); - } - } - - @Override - public void handleException(TransportException exp) { - assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; - logger.warn((Supplier) - () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), - exp); - try { - IOUtils.closeWhileHandlingException(connection); - } finally { - // once the connection is closed lets try the next node - collectRemoteNodes(seedNodes, transportService, listener); - } - } - - @Override - public String executor() { - return ThreadPool.Names.MANAGEMENT; - } - } - } - - boolean assertNoRunningConnections() { // for testing only - assert connectHandler.running.availablePermits() == 
1; - return true; - } - - boolean isNodeConnected(final DiscoveryNode node) { - return connectedNodes.contains(node); - } - - DiscoveryNode getConnectedNode() { - return connectedNodes.get(); - } - - void addConnectedNode(DiscoveryNode node) { - connectedNodes.add(node); - } - - /** - * Fetches connection info for this connection - */ - public void getConnectionInfo(ActionListener listener) { - final Optional anyNode = connectedNodes.getAny(); - if (anyNode.isPresent() == false) { - // not connected we return immediately - RemoteConnectionInfo remoteConnectionStats = new RemoteConnectionInfo(clusterAlias, - Collections.emptyList(), Collections.emptyList(), maxNumRemoteConnections, 0, - RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable); - listener.onResponse(remoteConnectionStats); - } else { - NodesInfoRequest request = new NodesInfoRequest(); - request.clear(); - request.http(true); - - transportService.sendRequest(anyNode.get(), NodesInfoAction.NAME, request, new TransportResponseHandler() { - @Override - public NodesInfoResponse newInstance() { - return new NodesInfoResponse(); - } - - @Override - public void handleResponse(NodesInfoResponse response) { - Collection httpAddresses = new HashSet<>(); - for (NodeInfo info : response.getNodes()) { - if (connectedNodes.contains(info.getNode()) && info.getHttp() != null) { - httpAddresses.add(info.getHttp().getAddress().publishAddress()); - } - } - - if (httpAddresses.size() < maxNumRemoteConnections) { - // just in case non of the connected nodes have http enabled we get other http enabled nodes instead. - for (NodeInfo info : response.getNodes()) { - if (nodePredicate.test(info.getNode()) && info.getHttp() != null) { - httpAddresses.add(info.getHttp().getAddress().publishAddress()); - } - if (httpAddresses.size() == maxNumRemoteConnections) { - break; // once we have enough return... 
- } - } - } - RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(clusterAlias, - seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList()), new ArrayList<>(httpAddresses), - maxNumRemoteConnections, connectedNodes.size(), - RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable); - listener.onResponse(remoteConnectionInfo); - } - - @Override - public void handleException(TransportException exp) { - listener.onFailure(exp); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - } - - } - - int getNumNodesConnected() { - return connectedNodes.size(); - } - - private static class ConnectedNodes implements Supplier { - - private final Set nodeSet = new HashSet<>(); - private final String clusterAlias; - - private Iterator currentIterator = null; - - private ConnectedNodes(String clusterAlias) { - this.clusterAlias = clusterAlias; - } - - @Override - public synchronized DiscoveryNode get() { - ensureIteratorAvailable(); - if (currentIterator.hasNext()) { - return currentIterator.next(); - } else { - throw new IllegalStateException("No node available for cluster: " + clusterAlias); - } - } - - synchronized boolean remove(DiscoveryNode node) { - final boolean setRemoval = nodeSet.remove(node); - if (setRemoval) { - currentIterator = null; - } - return setRemoval; - } - - synchronized boolean add(DiscoveryNode node) { - final boolean added = nodeSet.add(node); - if (added) { - currentIterator = null; - } - return added; - } - - synchronized int size() { - return nodeSet.size(); - } - - synchronized boolean contains(DiscoveryNode node) { - return nodeSet.contains(node); - } - - synchronized Optional getAny() { - ensureIteratorAvailable(); - if (currentIterator.hasNext()) { - return Optional.of(currentIterator.next()); - } else { - return Optional.empty(); - } - } - - private synchronized void ensureIteratorAvailable() { - if (currentIterator == null) { - 
currentIterator = nodeSet.iterator(); - } else if (currentIterator.hasNext() == false && nodeSet.isEmpty() == false) { - // iterator rollover - currentIterator = nodeSet.iterator(); - } - } - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterService.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterService.java deleted file mode 100644 index aaa0c41..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteClusterService.java +++ /dev/null @@ -1,385 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; -import org.elasticsearch.action.support.GroupedActionListener; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.CountDown; -import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.transport.TransportException; - -import java.io.Closeable; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import 
java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.BiFunction; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -import static org.elasticsearch.common.settings.Setting.boolSetting; - -/** - * Basic service for accessing remote clusters via gateway nodes - */ -public final class RemoteClusterService extends RemoteClusterAware implements Closeable { - - /** - * The maximum number of connections that will be established to a remote cluster. For instance if there is only a single - * seed node, other nodes will be discovered up to the given number of nodes in this setting. The default is 3. - */ - public static final Setting REMOTE_CONNECTIONS_PER_CLUSTER = Setting.intSetting("search.remote.connections_per_cluster", - 3, 1, Setting.Property.NodeScope); - - /** - * The initial connect timeout for remote cluster connections - */ - public static final Setting REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING = - Setting.positiveTimeSetting("search.remote.initial_connect_timeout", TimeValue.timeValueSeconds(30), Setting.Property.NodeScope); - - /** - * The name of a node attribute to select nodes that should be connected to in the remote cluster. - * For instance a node can be configured with node.attr.gateway: true in order to be eligible as a gateway node between - * clusters. In that case search.remote.node.attr: gateway can be used to filter out other nodes in the remote cluster. - * The value of the setting is expected to be a boolean, true for nodes that can become gateways, false otherwise. - */ - public static final Setting REMOTE_NODE_ATTRIBUTE = Setting.simpleString("search.remote.node.attr", - Setting.Property.NodeScope); - - /** - * If true connecting to remote clusters is supported on this node. 
If false this node will not establish - * connections to any remote clusters configured. Search requests executed against this node (where this node is the coordinating node) - * will fail if remote cluster syntax is used as an index pattern. The default is true - */ - public static final Setting ENABLE_REMOTE_CLUSTERS = Setting.boolSetting("search.remote.connect", true, - Setting.Property.NodeScope); - - public static final Setting.AffixSetting REMOTE_CLUSTER_SKIP_UNAVAILABLE = - Setting.affixKeySetting("search.remote.", "skip_unavailable", - key -> boolSetting(key, false, Setting.Property.NodeScope, Setting.Property.Dynamic), REMOTE_CLUSTERS_SEEDS); - - private final TransportService transportService; - private final int numRemoteConnections; - private volatile Map remoteClusters = Collections.emptyMap(); - - RemoteClusterService(Settings settings, TransportService transportService) { - super(settings); - this.transportService = transportService; - numRemoteConnections = REMOTE_CONNECTIONS_PER_CLUSTER.get(settings); - } - - /** - * This method updates the list of remote clusters. 
It's intended to be used as an update consumer on the settings infrastructure - * @param seeds a cluster alias to discovery node mapping representing the remote clusters seeds nodes - * @param connectionListener a listener invoked once every configured cluster has been connected to - */ - private synchronized void updateRemoteClusters(Map> seeds, ActionListener connectionListener) { - if (seeds.containsKey(LOCAL_CLUSTER_GROUP_KEY)) { - throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); - } - Map remoteClusters = new HashMap<>(); - if (seeds.isEmpty()) { - connectionListener.onResponse(null); - } else { - CountDown countDown = new CountDown(seeds.size()); - Predicate nodePredicate = (node) -> Version.CURRENT.isCompatible(node.getVersion()); - if (REMOTE_NODE_ATTRIBUTE.exists(settings)) { - // nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for - // cross cluster search - String attribute = REMOTE_NODE_ATTRIBUTE.get(settings); - nodePredicate = nodePredicate.and((node) -> Booleans.parseBoolean(node.getAttributes().getOrDefault(attribute, "false"))); - } - remoteClusters.putAll(this.remoteClusters); - for (Map.Entry> entry : seeds.entrySet()) { - RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey()); - if (entry.getValue().isEmpty()) { // with no seed nodes we just remove the connection - try { - IOUtils.close(remote); - } catch (IOException e) { - logger.warn("failed to close remote cluster connections for cluster: " + entry.getKey(), e); - } - remoteClusters.remove(entry.getKey()); - continue; - } - - if (remote == null) { // this is a new cluster we have to add a new representation - remote = new RemoteClusterConnection(settings, entry.getKey(), entry.getValue(), transportService, numRemoteConnections, - nodePredicate); - remoteClusters.put(entry.getKey(), remote); - } - - // now update the seed nodes no matter if it's new or already existing - 
RemoteClusterConnection finalRemote = remote; - remote.updateSeedNodes(entry.getValue(), ActionListener.wrap( - response -> { - if (countDown.countDown()) { - connectionListener.onResponse(response); - } - }, - exception -> { - if (countDown.fastForward()) { - connectionListener.onFailure(exception); - } - if (finalRemote.isClosed() == false) { - logger.warn("failed to update seed list for cluster: " + entry.getKey(), exception); - } - })); - } - } - this.remoteClusters = Collections.unmodifiableMap(remoteClusters); - } - - /** - * Returns true if at least one remote cluster is configured - */ - public boolean isCrossClusterSearchEnabled() { - return remoteClusters.isEmpty() == false; - } - - boolean isRemoteNodeConnected(final String remoteCluster, final DiscoveryNode node) { - return remoteClusters.get(remoteCluster).isNodeConnected(node); - } - - public Map groupIndices(IndicesOptions indicesOptions, String[] indices, Predicate indexExists) { - Map originalIndicesMap = new HashMap<>(); - if (isCrossClusterSearchEnabled()) { - final Map> groupedIndices = groupClusterIndices(indices, indexExists); - for (Map.Entry> entry : groupedIndices.entrySet()) { - String clusterAlias = entry.getKey(); - List originalIndices = entry.getValue(); - originalIndicesMap.put(clusterAlias, - new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), indicesOptions)); - } - if (originalIndicesMap.containsKey(LOCAL_CLUSTER_GROUP_KEY) == false) { - originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions)); - } - } else { - originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(indices, indicesOptions)); - } - return originalIndicesMap; - } - - /** - * Returns true iff the given cluster is configured as a remote cluster. 
Otherwise false - */ - boolean isRemoteClusterRegistered(String clusterName) { - return remoteClusters.containsKey(clusterName); - } - - public void collectSearchShards(IndicesOptions indicesOptions, String preference, String routing, - Map remoteIndicesByCluster, - ActionListener> listener) { - final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); - final Map searchShardsResponses = new ConcurrentHashMap<>(); - final AtomicReference transportException = new AtomicReference<>(); - for (Map.Entry entry : remoteIndicesByCluster.entrySet()) { - final String clusterName = entry.getKey(); - RemoteClusterConnection remoteClusterConnection = remoteClusters.get(clusterName); - if (remoteClusterConnection == null) { - throw new IllegalArgumentException("no such remote cluster: " + clusterName); - } - final String[] indices = entry.getValue().indices(); - ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices) - .indicesOptions(indicesOptions).local(true).preference(preference) - .routing(routing); - remoteClusterConnection.fetchSearchShards(searchShardsRequest, - new ActionListener() { - @Override - public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { - searchShardsResponses.put(clusterName, clusterSearchShardsResponse); - if (responsesCountDown.countDown()) { - TransportException exception = transportException.get(); - if (exception == null) { - listener.onResponse(searchShardsResponses); - } else { - listener.onFailure(transportException.get()); - } - } - } - - @Override - public void onFailure(Exception e) { - TransportException exception = new TransportException("unable to communicate with remote cluster [" + - clusterName + "]", e); - if (transportException.compareAndSet(null, exception) == false) { - exception = transportException.accumulateAndGet(exception, (previous, current) -> { - current.addSuppressed(previous); - return current; - }); - } - if 
(responsesCountDown.countDown()) { - listener.onFailure(exception); - } - } - }); - } - } - - /** - * Returns a connection to the given node on the given remote cluster - * @throws IllegalArgumentException if the remote cluster is unknown - */ - public Transport.Connection getConnection(DiscoveryNode node, String cluster) { - RemoteClusterConnection connection = remoteClusters.get(cluster); - if (connection == null) { - throw new IllegalArgumentException("no such remote cluster: " + cluster); - } - return connection.getConnection(node); - } - - /** - * Ensures that the given cluster alias is connected. If the cluster is connected this operation - * will invoke the listener immediately. - */ - public void ensureConnected(String clusterAlias, ActionListener listener) { - RemoteClusterConnection remoteClusterConnection = remoteClusters.get(clusterAlias); - if (remoteClusterConnection == null) { - throw new IllegalArgumentException("no such remote cluster: " + clusterAlias); - } - remoteClusterConnection.ensureConnected(listener); - } - - public Transport.Connection getConnection(String cluster) { - RemoteClusterConnection connection = remoteClusters.get(cluster); - if (connection == null) { - throw new IllegalArgumentException("no such remote cluster: " + cluster); - } - return connection.getConnection(); - } - - @Override - protected Set getRemoteClusterNames() { - return this.remoteClusters.keySet(); - } - - @Override - public void listenForUpdates(ClusterSettings clusterSettings) { - super.listenForUpdates(clusterSettings); - clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, - (clusterAlias, value) -> {}); - } - - synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) { - RemoteClusterConnection remote = this.remoteClusters.get(clusterAlias); - if (remote != null) { - remote.updateSkipUnavailable(skipUnavailable); - } - } - - protected void updateRemoteCluster(String clusterAlias, 
List addresses) { - updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {})); - } - - void updateRemoteCluster( - final String clusterAlias, - final List addresses, - final ActionListener connectionListener) { - final List nodes = addresses.stream().map(address -> { - final TransportAddress transportAddress = new TransportAddress(address); - final String id = clusterAlias + "#" + transportAddress.toString(); - final Version version = Version.CURRENT.minimumCompatibilityVersion(); - return new DiscoveryNode(id, transportAddress, version); - }).collect(Collectors.toList()); - updateRemoteClusters(Collections.singletonMap(clusterAlias, nodes), connectionListener); - } - - /** - * Connects to all remote clusters in a blocking fashion. This should be called on node startup to establish an initial connection - * to all configured seed nodes. - */ - void initializeRemoteClusters() { - final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); - final PlainActionFuture future = new PlainActionFuture<>(); - Map> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings); - updateRemoteClusters(seeds, future); - try { - future.get(timeValue.millis(), TimeUnit.MILLISECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (TimeoutException ex) { - logger.warn("failed to connect to remote clusters within {}", timeValue.toString()); - } catch (Exception e) { - throw new IllegalStateException("failed to connect to remote clusters", e); - } - } - - @Override - public void close() throws IOException { - IOUtils.close(remoteClusters.values()); - } - - public void getRemoteConnectionInfos(ActionListener> listener) { - final Map remoteClusters = this.remoteClusters; - if (remoteClusters.isEmpty()) { - listener.onResponse(Collections.emptyList()); - } else { - final GroupedActionListener actionListener = new GroupedActionListener<>(listener, - remoteClusters.size(), 
Collections.emptyList()); - for (RemoteClusterConnection connection : remoteClusters.values()) { - connection.getConnectionInfo(actionListener); - } - } - } - - /** - * Collects all nodes of the given clusters and returns / passes a (clusterAlias, nodeId) to {@link DiscoveryNode} - * function on success. - */ - public void collectNodes(Set clusters, ActionListener> listener) { - Map remoteClusters = this.remoteClusters; - for (String cluster : clusters) { - if (remoteClusters.containsKey(cluster) == false) { - listener.onFailure(new IllegalArgumentException("no such remote cluster: [" + cluster + "]")); - return; - } - } - - final Map> clusterMap = new HashMap<>(); - CountDown countDown = new CountDown(clusters.size()); - Function nullFunction = s -> null; - for (final String cluster : clusters) { - RemoteClusterConnection connection = remoteClusters.get(cluster); - connection.collectNodes(new ActionListener>() { - @Override - public void onResponse(Function nodeLookup) { - synchronized (clusterMap) { - clusterMap.put(cluster, nodeLookup); - } - if (countDown.countDown()) { - listener.onResponse((clusterAlias, nodeId) - -> clusterMap.getOrDefault(clusterAlias, nullFunction).apply(nodeId)); - } - } - - @Override - public void onFailure(Exception e) { - if (countDown.fastForward()) { // we need to check if it's true since we could have multiple failures - listener.onFailure(e); - } - } - }); - } - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteConnectionInfo.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteConnectionInfo.java deleted file mode 100644 index 35c9759..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/RemoteConnectionInfo.java +++ /dev/null @@ -1,112 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; 
-import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.List; -import java.util.Objects; - -/** - * This class encapsulates all remote cluster information to be rendered on - * _remote/info requests. - */ -public final class RemoteConnectionInfo implements ToXContentFragment, Writeable { - final List seedNodes; - final List httpAddresses; - final int connectionsPerCluster; - final TimeValue initialConnectionTimeout; - final int numNodesConnected; - final String clusterAlias; - final boolean skipUnavailable; - - RemoteConnectionInfo(String clusterAlias, List seedNodes, - List httpAddresses, - int connectionsPerCluster, int numNodesConnected, - TimeValue initialConnectionTimeout, boolean skipUnavailable) { - this.clusterAlias = clusterAlias; - this.seedNodes = seedNodes; - this.httpAddresses = httpAddresses; - this.connectionsPerCluster = connectionsPerCluster; - this.numNodesConnected = numNodesConnected; - this.initialConnectionTimeout = initialConnectionTimeout; - this.skipUnavailable = skipUnavailable; - } - - public RemoteConnectionInfo(StreamInput input) throws IOException { - seedNodes = input.readList(TransportAddress::new); - httpAddresses = input.readList(TransportAddress::new); - connectionsPerCluster = input.readVInt(); - initialConnectionTimeout = input.readTimeValue(); - numNodesConnected = input.readVInt(); - clusterAlias = input.readString(); - if (input.getVersion().onOrAfter(Version.V_6_1_0)) { - skipUnavailable = input.readBoolean(); - } else { - skipUnavailable = false; - } - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(clusterAlias); - { - builder.startArray("seeds"); - for 
(TransportAddress addr : seedNodes) { - builder.value(addr.toString()); - } - builder.endArray(); - builder.startArray("http_addresses"); - for (TransportAddress addr : httpAddresses) { - builder.value(addr.toString()); - } - builder.endArray(); - builder.field("connected", numNodesConnected > 0); - builder.field("num_nodes_connected", numNodesConnected); - builder.field("max_connections_per_cluster", connectionsPerCluster); - builder.field("initial_connect_timeout", initialConnectionTimeout); - builder.field("skip_unavailable", skipUnavailable); - } - builder.endObject(); - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeList(seedNodes); - out.writeList(httpAddresses); - out.writeVInt(connectionsPerCluster); - out.writeTimeValue(initialConnectionTimeout); - out.writeVInt(numNodesConnected); - out.writeString(clusterAlias); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(skipUnavailable); - } - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - RemoteConnectionInfo that = (RemoteConnectionInfo) o; - return connectionsPerCluster == that.connectionsPerCluster && - numNodesConnected == that.numNodesConnected && - Objects.equals(seedNodes, that.seedNodes) && - Objects.equals(httpAddresses, that.httpAddresses) && - Objects.equals(initialConnectionTimeout, that.initialConnectionTimeout) && - Objects.equals(clusterAlias, that.clusterAlias) && - skipUnavailable == that.skipUnavailable; - } - - @Override - public int hashCode() { - return Objects.hash(seedNodes, httpAddresses, connectionsPerCluster, initialConnectionTimeout, - numNodesConnected, clusterAlias, skipUnavailable); - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransport.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransport.java deleted file mode 100644 index 
0a237e7..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransport.java +++ /dev/null @@ -1,1808 +0,0 @@ - -package org.xbib.elasticsearch.client.transport; - -import com.carrotsearch.hppc.IntHashSet; -import com.carrotsearch.hppc.IntSet; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.NotifyOnceListener; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.CheckedBiConsumer; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.CompositeBytesReference; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.common.compress.Compressor; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.compress.NotCompressedException; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.common.metrics.MeanMetric; -import 
org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.network.NetworkUtils; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.PortsRange; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.common.util.concurrent.KeyedLock; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.monitor.jvm.JvmInfo; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.BindTransportException; -import org.elasticsearch.transport.BytesTransportRequest; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.NodeNotConnectedException; -import org.elasticsearch.transport.RemoteTransportException; -import org.elasticsearch.transport.RequestHandlerRegistry; -import org.elasticsearch.transport.ResponseHandlerFailureTransportException; -import org.elasticsearch.transport.TcpChannel; -import org.elasticsearch.transport.TcpHeader; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportMessage; -import org.elasticsearch.transport.TransportRequest; -import 
org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportResponseOptions; -import org.elasticsearch.transport.TransportSerializationException; -import org.elasticsearch.transport.TransportStats; -import org.elasticsearch.transport.Transports; - -import java.io.Closeable; -import java.io.IOException; -import java.io.StreamCorruptedException; -import java.io.UncheckedIOException; -import java.net.BindException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.nio.channels.CancelledKeyException; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.EnumMap; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import static java.util.Collections.emptyList; -import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.common.settings.Setting.affixKeySetting; -import static org.elasticsearch.common.settings.Setting.boolSetting; -import static org.elasticsearch.common.settings.Setting.intSetting; -import static 
org.elasticsearch.common.settings.Setting.listSetting; -import static org.elasticsearch.common.settings.Setting.timeSetting; -import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException; -import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException; -import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; - -public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport { - - public static final String TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX = "transport_server_worker"; - public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss"; - - public static final Setting> HOST = - listSetting("transport.host", emptyList(), Function.identity(), Setting.Property.NodeScope); - public static final Setting> BIND_HOST = - listSetting("transport.bind_host", HOST, Function.identity(), Setting.Property.NodeScope); - public static final Setting> PUBLISH_HOST = - listSetting("transport.publish_host", HOST, Function.identity(), Setting.Property.NodeScope); - public static final Setting PORT = - new Setting<>("transport.tcp.port", "9300-9400", Function.identity(), Setting.Property.NodeScope); - public static final Setting PUBLISH_PORT = - intSetting("transport.publish_port", -1, -1, Setting.Property.NodeScope); - public static final String DEFAULT_PROFILE = "default"; - // the scheduled internal ping interval setting, defaults to disabled (-1) - public static final Setting PING_SCHEDULE = - timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), Setting.Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_RECOVERY = - intSetting("transport.connections_per_node.recovery", 2, 1, Setting.Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_BULK = - intSetting("transport.connections_per_node.bulk", 3, 1, Setting.Property.NodeScope); - public static final Setting 
CONNECTIONS_PER_NODE_REG = - intSetting("transport.connections_per_node.reg", 6, 1, Setting.Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_STATE = - intSetting("transport.connections_per_node.state", 1, 1, Setting.Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_PING = - intSetting("transport.connections_per_node.ping", 1, 1, Setting.Property.NodeScope); - public static final Setting TCP_CONNECT_TIMEOUT = - timeSetting("transport.tcp.connect_timeout", NetworkService.TCP_CONNECT_TIMEOUT, Setting.Property.NodeScope); - public static final Setting TCP_NO_DELAY = - boolSetting("transport.tcp_no_delay", NetworkService.TCP_NO_DELAY, Setting.Property.NodeScope); - public static final Setting TCP_KEEP_ALIVE = - boolSetting("transport.tcp.keep_alive", NetworkService.TCP_KEEP_ALIVE, Setting.Property.NodeScope); - public static final Setting TCP_REUSE_ADDRESS = - boolSetting("transport.tcp.reuse_address", NetworkService.TCP_REUSE_ADDRESS, Setting.Property.NodeScope); - public static final Setting TCP_SEND_BUFFER_SIZE = - Setting.byteSizeSetting("transport.tcp.send_buffer_size", NetworkService.TCP_SEND_BUFFER_SIZE, - Setting.Property.NodeScope); - public static final Setting TCP_RECEIVE_BUFFER_SIZE = - Setting.byteSizeSetting("transport.tcp.receive_buffer_size", NetworkService.TCP_RECEIVE_BUFFER_SIZE, - Setting.Property.NodeScope); - - - public static final Setting.AffixSetting TCP_NO_DELAY_PROFILE = affixKeySetting("transport.profiles.", "tcp_no_delay", - key -> boolSetting(key, TcpTransport.TCP_NO_DELAY, Setting.Property.NodeScope)); - public static final Setting.AffixSetting TCP_KEEP_ALIVE_PROFILE = affixKeySetting("transport.profiles.", "tcp_keep_alive", - key -> boolSetting(key, TcpTransport.TCP_KEEP_ALIVE, Setting.Property.NodeScope)); - public static final Setting.AffixSetting TCP_REUSE_ADDRESS_PROFILE = affixKeySetting("transport.profiles.", "reuse_address", - key -> boolSetting(key, TcpTransport.TCP_REUSE_ADDRESS, 
Setting.Property.NodeScope)); - public static final Setting.AffixSetting TCP_SEND_BUFFER_SIZE_PROFILE = affixKeySetting("transport.profiles.", - "send_buffer_size", key -> Setting.byteSizeSetting(key, TcpTransport.TCP_SEND_BUFFER_SIZE, Setting.Property.NodeScope)); - public static final Setting.AffixSetting TCP_RECEIVE_BUFFER_SIZE_PROFILE = affixKeySetting("transport.profiles.", - "receive_buffer_size", key -> Setting.byteSizeSetting(key, TcpTransport.TCP_RECEIVE_BUFFER_SIZE, Setting.Property.NodeScope)); - - public static final Setting.AffixSetting> BIND_HOST_PROFILE = affixKeySetting("transport.profiles.", "bind_host", - key -> listSetting(key, BIND_HOST, Function.identity(), Setting.Property.NodeScope)); - public static final Setting.AffixSetting> PUBLISH_HOST_PROFILE = affixKeySetting("transport.profiles.", "publish_host", - key -> listSetting(key, PUBLISH_HOST, Function.identity(), Setting.Property.NodeScope)); - public static final Setting.AffixSetting PORT_PROFILE = affixKeySetting("transport.profiles.", "port", - key -> new Setting<>(key, PORT, Function.identity(), Setting.Property.NodeScope)); - public static final Setting.AffixSetting PUBLISH_PORT_PROFILE = affixKeySetting("transport.profiles.", "publish_port", - key -> intSetting(key, -1, -1, Setting.Property.NodeScope)); - - private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); - public static final int PING_DATA_SIZE = -1; - private final CircuitBreakerService circuitBreakerService; - // package visibility for tests - protected final ScheduledPing scheduledPing; - private final TimeValue pingSchedule; - protected final ThreadPool threadPool; - private final BigArrays bigArrays; - protected final NetworkService networkService; - protected final Set profileSettings; - - private volatile TransportService transportService; - - private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); - // node id to actual channel - private final 
ConcurrentMap connectedNodes = newConcurrentMap(); - private final Map> serverChannels = newConcurrentMap(); - private final Set acceptedChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); - - private final KeyedLock connectionLock = new KeyedLock<>(); - private final NamedWriteableRegistry namedWriteableRegistry; - - // this lock is here to make sure we close this transport and disconnect all the client nodes - // connections while no connect operations is going on... (this might help with 100% CPU when stopping the transport?) - private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); - protected final boolean compress; - private volatile BoundTransportAddress boundAddress; - private final String transportName; - protected final ConnectionProfile defaultConnectionProfile; - - private final ConcurrentMap pendingHandshakes = new ConcurrentHashMap<>(); - private final AtomicLong requestIdGenerator = new AtomicLong(); - private final CounterMetric numHandshakes = new CounterMetric(); - private static final String HANDSHAKE_ACTION_NAME = "internal:tcp/handshake"; - - private final MeanMetric readBytesMetric = new MeanMetric(); - private final MeanMetric transmittedBytesMetric = new MeanMetric(); - - public TcpTransport(String transportName, Settings settings, ThreadPool threadPool, BigArrays bigArrays, - CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService) { - super(settings); - this.profileSettings = getProfileSettings(settings); - this.threadPool = threadPool; - this.bigArrays = bigArrays; - this.circuitBreakerService = circuitBreakerService; - this.scheduledPing = new ScheduledPing(); - this.pingSchedule = PING_SCHEDULE.get(settings); - this.namedWriteableRegistry = namedWriteableRegistry; - this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); - this.networkService = networkService; - this.transportName = transportName; - defaultConnectionProfile = 
buildDefaultConnectionProfile(settings); - } - - static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { - int connectionsPerNodeRecovery = CONNECTIONS_PER_NODE_RECOVERY.get(settings); - int connectionsPerNodeBulk = CONNECTIONS_PER_NODE_BULK.get(settings); - int connectionsPerNodeReg = CONNECTIONS_PER_NODE_REG.get(settings); - int connectionsPerNodeState = CONNECTIONS_PER_NODE_STATE.get(settings); - int connectionsPerNodePing = CONNECTIONS_PER_NODE_PING.get(settings); - ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); - builder.setConnectTimeout(TCP_CONNECT_TIMEOUT.get(settings)); - builder.setHandshakeTimeout(TCP_CONNECT_TIMEOUT.get(settings)); - builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); - builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); - // if we are not master eligible we don't need a dedicated channel to publish the state - builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE); - // if we are not a data-node we don't need any dedicated channels for recovery - builder.addConnections(DiscoveryNode.isDataNode(settings) ? connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); - builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG); - return builder.build(); - } - - @Override - protected void doStart() { - if (pingSchedule.millis() > 0) { - threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, scheduledPing); - } - } - - @Override - public CircuitBreaker getInFlightRequestBreaker() { - // We always obtain a fresh breaker to reflect changes to the breaker configuration. 
- return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); - } - - @Override - public void setTransportService(TransportService service) { - if (service.getRequestHandler(HANDSHAKE_ACTION_NAME) != null) { - throw new IllegalStateException(HANDSHAKE_ACTION_NAME + " is a reserved request handler and must not be registered"); - } - this.transportService = service; - } - - private static class HandshakeResponseHandler implements TransportResponseHandler { - final AtomicReference versionRef = new AtomicReference<>(); - final CountDownLatch latch = new CountDownLatch(1); - final AtomicReference exceptionRef = new AtomicReference<>(); - final TcpChannel channel; - - HandshakeResponseHandler(TcpChannel channel) { - this.channel = channel; - } - - @Override - public VersionHandshakeResponse newInstance() { - return new VersionHandshakeResponse(); - } - - @Override - public void handleResponse(VersionHandshakeResponse response) { - final boolean success = versionRef.compareAndSet(null, response.version); - latch.countDown(); - assert success; - } - - @Override - public void handleException(TransportException exp) { - final boolean success = exceptionRef.compareAndSet(null, exp); - latch.countDown(); - assert success; - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - } - - public class ScheduledPing extends AbstractLifecycleRunnable { - - /** - * The magic number (must be lower than 0) for a ping message. This is handled - * specifically in {@link TcpTransport#validateMessageHeader}. 
- */ - private final BytesReference pingHeader; - final CounterMetric successfulPings = new CounterMetric(); - final CounterMetric failedPings = new CounterMetric(); - - public ScheduledPing() { - super(lifecycle, logger); - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.writeByte((byte) 'E'); - out.writeByte((byte) 'S'); - out.writeInt(PING_DATA_SIZE); - pingHeader = out.bytes(); - } catch (IOException e) { - throw new IllegalStateException(e.getMessage(), e); // won't happen - } - } - - @Override - protected void doRunInLifecycle() throws Exception { - for (Map.Entry entry : connectedNodes.entrySet()) { - DiscoveryNode node = entry.getKey(); - NodeChannels channels = entry.getValue(); - for (TcpChannel channel : channels.getChannels()) { - internalSendMessage(channel, pingHeader, new SendMetricListener(pingHeader.length()) { - @Override - protected void innerInnerOnResponse(Void v) { - successfulPings.inc(); - } - - @Override - protected void innerOnFailure(Exception e) { - if (channel.isOpen()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); - failedPings.inc(); - } else { - logger.trace((Supplier) () -> - new ParameterizedMessage("[{}] failed to send ping transport message (channel closed)", node), e); - } - - } - }); - } - } - } - - public long getSuccessfulPings() { - return successfulPings.count(); - } - - public long getFailedPings() { - return failedPings.count(); - } - - @Override - protected void onAfterInLifecycle() { - try { - threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, this); - } catch (EsRejectedExecutionException ex) { - if (ex.isExecutorShutdown()) { - logger.debug("couldn't schedule new ping execution, executor is shutting down", ex); - } else { - throw ex; - } - } - } - - @Override - public void onFailure(Exception e) { - if (lifecycle.stoppedOrClosed()) { - logger.trace("failed to send ping transport message", e); - } else { - 
logger.warn("failed to send ping transport message", e); - } - } - } - - public final class NodeChannels implements Connection { - private final Map typeMapping; - private final List channels; - private final DiscoveryNode node; - private final AtomicBoolean closed = new AtomicBoolean(false); - private final Version version; - - NodeChannels(DiscoveryNode node, List channels, ConnectionProfile connectionProfile, Version handshakeVersion) { - this.node = node; - this.channels = Collections.unmodifiableList(channels); - assert channels.size() == connectionProfile.getNumConnections() : "expected channels size to be == " - + connectionProfile.getNumConnections() + " but was: [" + channels.size() + "]"; - typeMapping = new EnumMap<>(TransportRequestOptions.Type.class); - for (ConnectionProfile.ConnectionTypeHandle handle : connectionProfile.getHandles()) { - for (TransportRequestOptions.Type type : handle.getTypes()) - typeMapping.put(type, handle); - } - version = handshakeVersion; - } - - @Override - public Version getVersion() { - return version; - } - - public List getChannels() { - return channels; - } - - public TcpChannel channel(TransportRequestOptions.Type type) { - ConnectionProfile.ConnectionTypeHandle connectionTypeHandle = typeMapping.get(type); - if (connectionTypeHandle == null) { - throw new IllegalArgumentException("no type channel for [" + type + "]"); - } - return connectionTypeHandle.getChannel(channels); - } - - public boolean allChannelsOpen() { - return channels.stream().allMatch(TcpChannel::isOpen); - } - - @Override - public void close() { - if (closed.compareAndSet(false, true)) { - try { - if (lifecycle.stopped()) { - /* We set SO_LINGER timeout to 0 to ensure that when we shutdown the node we don't - * have a gazillion connections sitting in TIME_WAIT to free up resources quickly. 
- * This is really the only part where we close the connection from the server side - * otherwise the client (node) initiates the TCP closing sequence which doesn't cause - * these issues. Setting this by default from the beginning can have unexpected - * side-effects an should be avoided, our protocol is designed in a way that clients - * close connection which is how it should be*/ - - channels.forEach(c -> { - try { - c.setSoLinger(0); - } catch (IOException e) { - logger.warn(new ParameterizedMessage("unexpected exception when setting SO_LINGER on channel {}", c), e); - } - }); - } - - boolean block = lifecycle.stopped() && Transports.isTransportThread(Thread.currentThread()) == false; - TcpChannel.closeChannels(channels, block); - } finally { - transportService.onConnectionClosed(this); - } - } - } - - @Override - public DiscoveryNode getNode() { - return this.node; - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { - if (closed.get()) { - throw new NodeNotConnectedException(node, "connection already closed"); - } - TcpChannel channel = channel(options.type()); - sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), (byte) 0); - } - - boolean isClosed() { - return closed.get(); - } - } - - @Override - public boolean nodeConnected(DiscoveryNode node) { - return connectedNodes.containsKey(node); - } - - @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { - connectionProfile = resolveConnectionProfile(connectionProfile); - if (node == null) { - throw new ConnectTransportException(null, "can't connect to a null node"); - } - closeLock.readLock().lock(); // ensure we don't open connections while we are closing - try { - ensureOpen(); - try (Releasable ignored = 
connectionLock.acquire(node.getId())) { - NodeChannels nodeChannels = connectedNodes.get(node); - if (nodeChannels != null) { - return; - } - boolean success = false; - try { - nodeChannels = openConnection(node, connectionProfile); - connectionValidator.accept(nodeChannels, connectionProfile); - // we acquire a connection lock, so no way there is an existing connection - connectedNodes.put(node, nodeChannels); - if (logger.isDebugEnabled()) { - logger.debug("connected to node [{}]", node); - } - try { - transportService.onNodeConnected(node); - } finally { - if (nodeChannels.isClosed()) { - // we got closed concurrently due to a disconnect or some other event on the channel. - // the close callback will close the NodeChannel instance first and then try to remove - // the connection from the connected nodes. It will NOT acquire the connectionLock for - // the node to prevent any blocking calls on network threads. Yet, we still establish a happens - // before relationship to the connectedNodes.put since we check if we can remove the - // (DiscoveryNode, NodeChannels) tuple from the map after we closed. 
Here we check if it's closed an if so we - // try to remove it first either way one of the two wins even if the callback has run before we even added the - // tuple to the map since in that case we remove it here again - if (connectedNodes.remove(node, nodeChannels)) { - transportService.onNodeDisconnected(node); - } - throw new NodeNotConnectedException(node, "connection concurrently closed"); - } - } - success = true; - } catch (ConnectTransportException e) { - throw e; - } catch (Exception e) { - throw new ConnectTransportException(node, "general node connection failure", e); - } finally { - if (success == false) { // close the connection if there is a failure - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "failed to connect to [{}], cleaning dangling connections", node)); - IOUtils.closeWhileHandlingException(nodeChannels); - } - } - } - } finally { - closeLock.readLock().unlock(); - } - } - - /** - * takes a {@link ConnectionProfile} that have been passed as a parameter to the public methods - * and resolves it to a fully specified (i.e., no nulls) profile - */ - static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionProfile connectionProfile, - ConnectionProfile defaultConnectionProfile) { - Objects.requireNonNull(defaultConnectionProfile); - if (connectionProfile == null) { - return defaultConnectionProfile; - } else if (connectionProfile.getConnectTimeout() != null && connectionProfile.getHandshakeTimeout() != null) { - return connectionProfile; - } else { - ConnectionProfile.Builder builder = new ConnectionProfile.Builder(connectionProfile); - if (connectionProfile.getConnectTimeout() == null) { - builder.setConnectTimeout(defaultConnectionProfile.getConnectTimeout()); - } - if (connectionProfile.getHandshakeTimeout() == null) { - builder.setHandshakeTimeout(defaultConnectionProfile.getHandshakeTimeout()); - } - return builder.build(); - } - } - - protected ConnectionProfile resolveConnectionProfile(ConnectionProfile 
connectionProfile) { - return resolveConnectionProfile(connectionProfile, defaultConnectionProfile); - } - - @Override - public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { - if (node == null) { - throw new ConnectTransportException(null, "can't open connection to a null node"); - } - boolean success = false; - NodeChannels nodeChannels = null; - connectionProfile = resolveConnectionProfile(connectionProfile); - closeLock.readLock().lock(); // ensure we don't open connections while we are closing - try { - ensureOpen(); - try { - int numConnections = connectionProfile.getNumConnections(); - assert numConnections > 0 : "A connection profile must be configured with at least one connection"; - List channels = new ArrayList<>(numConnections); - List> connectionFutures = new ArrayList<>(numConnections); - for (int i = 0; i < numConnections; ++i) { - try { - PlainActionFuture connectFuture = PlainActionFuture.newFuture(); - connectionFutures.add(connectFuture); - TcpChannel channel = initiateChannel(node, connectionProfile.getConnectTimeout(), connectFuture); - logger.trace(() -> new ParameterizedMessage("Tcp transport client channel opened: {}", channel)); - channels.add(channel); - } catch (Exception e) { - // If there was an exception when attempting to instantiate the raw channels, we close all of the channels - TcpChannel.closeChannels(channels, false); - throw e; - } - } - - // If we make it past the block above, we successfully instantiated all of the channels - try { - TcpChannel.awaitConnected(node, connectionFutures, connectionProfile.getConnectTimeout()); - } catch (Exception ex) { - TcpChannel.closeChannels(channels, false); - throw ex; - } - - // If we make it past the block above, we have successfully established connections for all of the channels - final TcpChannel handshakeChannel = channels.get(0); // one channel is guaranteed by the connection profile - 
handshakeChannel.addCloseListener(ActionListener.wrap(() -> cancelHandshakeForChannel(handshakeChannel))); - Version version; - try { - version = executeHandshake(node, handshakeChannel, connectionProfile.getHandshakeTimeout()); - } catch (Exception ex) { - TcpChannel.closeChannels(channels, false); - throw ex; - } - - // If we make it past the block above, we have successfully completed the handshake and the connection is now open. - // At this point we should construct the connection, notify the transport service, and attach close listeners to the - // underlying channels. - nodeChannels = new NodeChannels(node, channels, connectionProfile, version); - transportService.onConnectionOpened(nodeChannels); - final NodeChannels finalNodeChannels = nodeChannels; - final AtomicBoolean runOnce = new AtomicBoolean(false); - Consumer onClose = c -> { - assert c.isOpen() == false : "channel is still open when onClose is called"; - // we only need to disconnect from the nodes once since all other channels - // will also try to run this we protect it from running multiple times. - if (runOnce.compareAndSet(false, true)) { - disconnectFromNodeCloseAndNotify(node, finalNodeChannels); - } - }; - - nodeChannels.channels.forEach(ch -> ch.addCloseListener(ActionListener.wrap(() -> onClose.accept(ch)))); - - if (nodeChannels.allChannelsOpen() == false) { - throw new ConnectTransportException(node, "a channel closed while connecting"); - } - success = true; - return nodeChannels; - } catch (ConnectTransportException e) { - throw e; - } catch (Exception e) { - // ConnectTransportExceptions are handled specifically on the caller end - we wrap the actual exception to ensure - // only relevant exceptions are logged on the caller end.. 
this is the same as in connectToNode - throw new ConnectTransportException(node, "general node connection failure", e); - } finally { - if (success == false) { - IOUtils.closeWhileHandlingException(nodeChannels); - } - } - } finally { - closeLock.readLock().unlock(); - } - } - - private void disconnectFromNodeCloseAndNotify(DiscoveryNode node, NodeChannels nodeChannels) { - assert nodeChannels != null : "nodeChannels must not be null"; - try { - IOUtils.closeWhileHandlingException(nodeChannels); - } finally { - if (closeLock.readLock().tryLock()) { - try { - if (connectedNodes.remove(node, nodeChannels)) { - transportService.onNodeDisconnected(node); - } - } finally { - closeLock.readLock().unlock(); - } - } - } - } - - @Override - public NodeChannels getConnection(DiscoveryNode node) { - NodeChannels nodeChannels = connectedNodes.get(node); - if (nodeChannels == null) { - throw new NodeNotConnectedException(node, "Node not connected"); - } - return nodeChannels; - } - - @Override - public void disconnectFromNode(DiscoveryNode node) { - closeLock.readLock().lock(); - NodeChannels nodeChannels = null; - try (Releasable ignored = connectionLock.acquire(node.getId())) { - nodeChannels = connectedNodes.remove(node); - } finally { - closeLock.readLock().unlock(); - if (nodeChannels != null) { // if we found it and removed it we close and notify - IOUtils.closeWhileHandlingException(nodeChannels, () -> transportService.onNodeDisconnected(node)); - } - } - } - - protected Version getCurrentVersion() { - // this is just for tests to mock stuff like the nodes version - tests can override this internally - return Version.CURRENT; - } - - @Override - public BoundTransportAddress boundAddress() { - return this.boundAddress; - } - - @Override - public Map profileBoundAddresses() { - return unmodifiableMap(new HashMap<>(profileBoundAddresses)); - } - - @Override - public List getLocalAddresses() { - List local = new ArrayList<>(); - local.add("127.0.0.1"); - // check if v6 is 
supported, if so, v4 will also work via mapped addresses. - if (NetworkUtils.SUPPORTS_V6) { - local.add("[::1]"); // may get ports appended! - } - return local; - } - - protected void bindServer(ProfileSettings profileSettings) { - // Bind and start to accept incoming connections. - InetAddress hostAddresses[]; - List profileBindHosts = profileSettings.bindHosts; - try { - hostAddresses = networkService.resolveBindHostAddresses(profileBindHosts.toArray(Strings.EMPTY_ARRAY)); - } catch (IOException e) { - throw new BindTransportException("Failed to resolve host " + profileBindHosts, e); - } - if (logger.isDebugEnabled()) { - String[] addresses = new String[hostAddresses.length]; - for (int i = 0; i < hostAddresses.length; i++) { - addresses[i] = NetworkAddress.format(hostAddresses[i]); - } - logger.debug("binding server bootstrap to: {}", (Object) addresses); - } - - assert hostAddresses.length > 0; - - List boundAddresses = new ArrayList<>(); - for (InetAddress hostAddress : hostAddresses) { - boundAddresses.add(bindToPort(profileSettings.profileName, hostAddress, profileSettings.portOrRange)); - } - - final BoundTransportAddress boundTransportAddress = createBoundTransportAddress(profileSettings, boundAddresses); - - if (profileSettings.isDefaultProfile) { - this.boundAddress = boundTransportAddress; - } else { - profileBoundAddresses.put(profileSettings.profileName, boundTransportAddress); - } - } - - protected InetSocketAddress bindToPort(final String name, final InetAddress hostAddress, String port) { - PortsRange portsRange = new PortsRange(port); - final AtomicReference lastException = new AtomicReference<>(); - final AtomicReference boundSocket = new AtomicReference<>(); - boolean success = portsRange.iterate(portNumber -> { - try { - TcpChannel channel = bind(name, new InetSocketAddress(hostAddress, portNumber)); - synchronized (serverChannels) { - List list = serverChannels.get(name); - if (list == null) { - list = new ArrayList<>(); - 
serverChannels.put(name, list); - } - list.add(channel); - boundSocket.set(channel.getLocalAddress()); - } - } catch (Exception e) { - lastException.set(e); - return false; - } - return true; - }); - if (!success) { - throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get()); - } - - if (logger.isDebugEnabled()) { - logger.debug("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get())); - } - - return boundSocket.get(); - } - - private BoundTransportAddress createBoundTransportAddress(ProfileSettings profileSettings, - List boundAddresses) { - String[] boundAddressesHostStrings = new String[boundAddresses.size()]; - TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()]; - for (int i = 0; i < boundAddresses.size(); i++) { - InetSocketAddress boundAddress = boundAddresses.get(i); - boundAddressesHostStrings[i] = boundAddress.getHostString(); - transportBoundAddresses[i] = new TransportAddress(boundAddress); - } - - List publishHosts = profileSettings.publishHosts; - if (profileSettings.isDefaultProfile == false && publishHosts.isEmpty()) { - publishHosts = Arrays.asList(boundAddressesHostStrings); - } - if (publishHosts.isEmpty()) { - publishHosts = NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings); - } - - final InetAddress publishInetAddress; - try { - publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts.toArray(Strings.EMPTY_ARRAY)); - } catch (Exception e) { - throw new BindTransportException("Failed to resolve publish address", e); - } - - final int publishPort = resolvePublishPort(profileSettings, boundAddresses, publishInetAddress); - final TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); - return new BoundTransportAddress(transportBoundAddresses, publishAddress); - } - - // package private for tests - public static int resolvePublishPort(ProfileSettings 
profileSettings, List boundAddresses, - InetAddress publishInetAddress) { - int publishPort = profileSettings.publishPort; - - // if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress - if (publishPort < 0) { - for (InetSocketAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final IntSet ports = new IntHashSet(); - for (InetSocketAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next().value; - } - } - - if (publishPort < 0) { - String profileExplanation = profileSettings.isDefaultProfile ? "" : " for profile " + profileSettings.profileName; - throw new BindTransportException("Failed to auto-resolve publish port" + profileExplanation + ", multiple bound addresses " + - boundAddresses + " with distinct ports and none of them matched the publish address (" + publishInetAddress + "). " + - "Please specify a unique port by setting " + PORT.getKey() + " or " + - PUBLISH_PORT.getKey()); - } - return publishPort; - } - - @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { - return parse(address, settings.get("transport.profiles.default.port", PORT.get(settings)), perAddressLimit); - } - - // this code is a take on guava's HostAndPort, like a HostAndPortRange - - // pattern for validating ipv6 bracket addresses. 
- // not perfect, but PortsRange should take care of any port range validation, not a regex - private static final Pattern BRACKET_PATTERN = Pattern.compile("^\\[(.*:.*)\\](?::([\\d\\-]*))?$"); - - /** parse a hostname+port range spec into its equivalent addresses */ - static TransportAddress[] parse(String hostPortString, String defaultPortRange, int perAddressLimit) throws UnknownHostException { - Objects.requireNonNull(hostPortString); - String host; - String portString = null; - - if (hostPortString.startsWith("[")) { - // Parse a bracketed host, typically an IPv6 literal. - Matcher matcher = BRACKET_PATTERN.matcher(hostPortString); - if (!matcher.matches()) { - throw new IllegalArgumentException("Invalid bracketed host/port range: " + hostPortString); - } - host = matcher.group(1); - portString = matcher.group(2); // could be null - } else { - int colonPos = hostPortString.indexOf(':'); - if (colonPos >= 0 && hostPortString.indexOf(':', colonPos + 1) == -1) { - // Exactly 1 colon. Split into host:port. - host = hostPortString.substring(0, colonPos); - portString = hostPortString.substring(colonPos + 1); - } else { - // 0 or 2+ colons. Bare hostname or IPv6 literal. 
- host = hostPortString; - // 2+ colons and not bracketed: exception - if (colonPos >= 0) { - throw new IllegalArgumentException("IPv6 addresses must be bracketed: " + hostPortString); - } - } - } - - // if port isn't specified, fill with the default - if (portString == null || portString.isEmpty()) { - portString = defaultPortRange; - } - - // generate address for each port in the range - Set addresses = new HashSet<>(Arrays.asList(InetAddress.getAllByName(host))); - List transportAddresses = new ArrayList<>(); - int[] ports = new PortsRange(portString).ports(); - int limit = Math.min(ports.length, perAddressLimit); - for (int i = 0; i < limit; i++) { - for (InetAddress address : addresses) { - transportAddresses.add(new TransportAddress(address, ports[i])); - } - } - return transportAddresses.toArray(new TransportAddress[transportAddresses.size()]); - } - - @Override - protected final void doClose() { - } - - @Override - protected final void doStop() { - final CountDownLatch latch = new CountDownLatch(1); - // make sure we run it on another thread than a possible IO handler thread - threadPool.generic().execute(() -> { - closeLock.writeLock().lock(); - try { - // first stop to accept any incoming connections so nobody can connect to this transport - for (Map.Entry> entry : serverChannels.entrySet()) { - String profile = entry.getKey(); - List channels = entry.getValue(); - ActionListener closeFailLogger = ActionListener.wrap(c -> {}, - e -> logger.warn(() -> new ParameterizedMessage("Error closing serverChannel for profile [{}]", profile), e)); - channels.forEach(c -> c.addCloseListener(closeFailLogger)); - TcpChannel.closeChannels(channels, true); - } - serverChannels.clear(); - - // close all of the incoming channels. The closeChannels method takes a list so we must convert the set. 
- TcpChannel.closeChannels(new ArrayList<>(acceptedChannels), true); - acceptedChannels.clear(); - - - // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close - // all instances and then clear them maps - Iterator> iterator = connectedNodes.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry next = iterator.next(); - try { - IOUtils.closeWhileHandlingException(next.getValue()); - transportService.onNodeDisconnected(next.getKey()); - } finally { - iterator.remove(); - } - } - stopInternal(); - } finally { - closeLock.writeLock().unlock(); - latch.countDown(); - } - }); - - try { - latch.await(30, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // ignore - } - } - - protected void onException(TcpChannel channel, Exception e) { - if (!lifecycle.started()) { - // just close and ignore - we are already stopped and just need to make sure we release all resources - TcpChannel.closeChannel(channel, false); - return; - } - - if (isCloseConnectionException(e)) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "close connection exception caught on transport layer [{}], disconnecting from relevant node", - channel), - e); - // close the channel, which will cause a node to be disconnected if relevant - TcpChannel.closeChannel(channel, false); - } else if (isConnectException(e)) { - logger.trace((Supplier) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e); - // close the channel as safe measure, which will cause a node to be disconnected if relevant - TcpChannel.closeChannel(channel, false); - } else if (e instanceof BindException) { - logger.trace((Supplier) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e); - // close the channel as safe measure, which will cause a node to be disconnected if relevant - TcpChannel.closeChannel(channel, false); - } else 
if (e instanceof CancelledKeyException) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", - channel), - e); - // close the channel as safe measure, which will cause a node to be disconnected if relevant - TcpChannel.closeChannel(channel, false); - } else if (e instanceof TcpTransport.HttpOnTransportException) { - // in case we are able to return data, serialize the exception content and sent it back to the client - if (channel.isOpen()) { - BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)); - final SendMetricListener closeChannel = new SendMetricListener(message.length()) { - @Override - protected void innerInnerOnResponse(Void v) { - TcpChannel.closeChannel(channel, false); - } - - @Override - protected void innerOnFailure(Exception e) { - logger.debug("failed to send message to httpOnTransport channel", e); - TcpChannel.closeChannel(channel, false); - } - }; - internalSendMessage(channel, message, closeChannel); - } - } else { - logger.warn( - (Supplier) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); - // close the channel, which will cause a node to be disconnected if relevant - TcpChannel.closeChannel(channel, false); - } - } - - protected void serverAcceptedChannel(TcpChannel channel) { - boolean addedOnThisCall = acceptedChannels.add(channel); - assert addedOnThisCall : "Channel should only be added to accept channel set once"; - channel.addCloseListener(ActionListener.wrap(() -> acceptedChannels.remove(channel))); - logger.trace(() -> new ParameterizedMessage("Tcp transport channel accepted: {}", channel)); - } - - /** - * Binds to the given {@link InetSocketAddress} - * - * @param name the profile name - * @param address the address to bind to - */ - protected abstract TcpChannel bind(String name, InetSocketAddress address) throws IOException; - - /** - * 
Initiate a single tcp socket channel to a node. Implementations do not have to observe the connectTimeout. - * It is provided for synchronous connection implementations. - * - * @param node the node - * @param connectTimeout the connection timeout - * @param connectListener listener to be called when connection complete - * @return the pending connection - * @throws IOException if an I/O exception occurs while opening the channel - */ - protected abstract TcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) - throws IOException; - - /** - * Called to tear down internal resources - */ - protected void stopInternal() { - } - - public boolean canCompress(TransportRequest request) { - return compress && (!(request instanceof BytesTransportRequest)); - } - - private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action, - final TransportRequest request, TransportRequestOptions options, Version channelVersion, - byte status) throws IOException, - TransportException { - if (compress) { - options = TransportRequestOptions.builder(options).withCompress(true).build(); - } - - // only compress if asked and the request is not bytes. 
Otherwise only - // the header part is compressed, and the "body" can't be extracted as compressed - final boolean compressMessage = options.compress() && canCompress(request); - - status = TransportStatus.setRequest(status); - ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); - final CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bStream, compressMessage); - boolean addedReleaseListener = false; - try { - if (compressMessage) { - status = TransportStatus.setCompress(status); - } - - // we pick the smallest of the 2, to support both backward and forward compatibility - // note, this is the only place we need to do this, since from here on, we use the serialized version - // as the version to use also when the node receiving this request will send the response with - Version version = Version.min(getCurrentVersion(), channelVersion); - - stream.setVersion(version); - threadPool.getThreadContext().writeTo(stream); - stream.writeString(action); - BytesReference message = buildMessage(requestId, status, node.getVersion(), request, stream); - final TransportRequestOptions finalOptions = options; - // this might be called in a different thread - SendListener onRequestSent = new SendListener(channel, stream, - () -> transportService.onRequestSent(node, requestId, action, request, finalOptions), message.length()); - internalSendMessage(channel, message, onRequestSent); - addedReleaseListener = true; - } finally { - if (!addedReleaseListener) { - IOUtils.close(stream); - } - } - } - - /** - * sends a message to the given channel, using the given callbacks. 
- */ - private void internalSendMessage(TcpChannel channel, BytesReference message, SendMetricListener listener) { - try { - channel.sendMessage(message, listener); - } catch (Exception ex) { - // call listener to ensure that any resources are released - listener.onFailure(ex); - onException(channel, ex); - } - } - - /** - * Sends back an error response to the caller via the given channel - * - * @param nodeVersion the caller node version - * @param channel the channel to send the response to - * @param error the error to return - * @param requestId the request ID this response replies to - * @param action the action this response replies to - */ - public void sendErrorResponse(Version nodeVersion, TcpChannel channel, final Exception error, final long requestId, - final String action) throws IOException { - try (BytesStreamOutput stream = new BytesStreamOutput()) { - stream.setVersion(nodeVersion); - RemoteTransportException tx = new RemoteTransportException( - nodeName(), new TransportAddress(channel.getLocalAddress()), action, error); - threadPool.getThreadContext().writeTo(stream); - stream.writeException(tx); - byte status = 0; - status = TransportStatus.setResponse(status); - status = TransportStatus.setError(status); - final BytesReference bytes = stream.bytes(); - final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); - CompositeBytesReference message = new CompositeBytesReference(header, bytes); - SendListener onResponseSent = new SendListener(channel, null, - () -> transportService.onResponseSent(requestId, action, error), message.length()); - internalSendMessage(channel, message, onResponseSent); - } - } - - /** - * Sends the response to the given channel. This method should be used to send {@link TransportResponse} objects back to the caller. 
- * - * @see #sendErrorResponse(Version, TcpChannel, Exception, long, String) for sending back errors to the caller - */ - public void sendResponse(Version nodeVersion, TcpChannel channel, final TransportResponse response, final long requestId, - final String action, TransportResponseOptions options) throws IOException { - sendResponse(nodeVersion, channel, response, requestId, action, options, (byte) 0); - } - - private void sendResponse(Version nodeVersion, TcpChannel channel, final TransportResponse response, final long requestId, - final String action, TransportResponseOptions options, byte status) throws IOException { - if (compress) { - options = TransportResponseOptions.builder(options).withCompress(true).build(); - } - status = TransportStatus.setResponse(status); // TODO share some code with sendRequest - ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); - CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bStream, options.compress()); - boolean addedReleaseListener = false; - try { - if (options.compress()) { - status = TransportStatus.setCompress(status); - } - threadPool.getThreadContext().writeTo(stream); - stream.setVersion(nodeVersion); - BytesReference message = buildMessage(requestId, status, nodeVersion, response, stream); - - final TransportResponseOptions finalOptions = options; - // this might be called in a different thread - SendListener listener = new SendListener(channel, stream, - () -> transportService.onResponseSent(requestId, action, response, finalOptions), message.length()); - internalSendMessage(channel, message, listener); - addedReleaseListener = true; - } finally { - if (!addedReleaseListener) { - IOUtils.close(stream); - } - } - } - - /** - * Writes the Tcp message header into a bytes reference. 
- * - * @param requestId the request ID - * @param status the request status - * @param protocolVersion the protocol version used to serialize the data in the message - * @param length the payload length in bytes - * @see TcpHeader - */ - final BytesReference buildHeader(long requestId, byte status, Version protocolVersion, int length) throws IOException { - try (BytesStreamOutput headerOutput = new BytesStreamOutput(TcpHeader.HEADER_SIZE)) { - headerOutput.setVersion(protocolVersion); - TcpHeader.writeHeader(headerOutput, requestId, status, protocolVersion, length); - final BytesReference bytes = headerOutput.bytes(); - assert bytes.length() == TcpHeader.HEADER_SIZE : "header size mismatch expected: " + TcpHeader.HEADER_SIZE + " but was: " - + bytes.length(); - return bytes; - } - } - - /** - * Serializes the given message into a bytes representation - */ - private BytesReference buildMessage(long requestId, byte status, Version nodeVersion, TransportMessage message, - CompressibleBytesOutputStream stream) throws IOException { - final BytesReference zeroCopyBuffer; - if (message instanceof BytesTransportRequest) { // what a shitty optimization - we should use a direct send method instead - BytesTransportRequest bRequest = (BytesTransportRequest) message; - assert nodeVersion.equals(bRequest.version()); - bRequest.writeThin(stream); - zeroCopyBuffer = bRequest.bytes(); - } else { - message.writeTo(stream); - zeroCopyBuffer = BytesArray.EMPTY; - } - // we have to call materializeBytes() here before accessing the bytes. A CompressibleBytesOutputStream - // might be implementing compression. And materializeBytes() ensures that some marker bytes (EOS marker) - // are written. Otherwise we barf on the decompressing end when we read past EOF on purpose in the - // #validateRequest method. this might be a problem in deflate after all but it's important to write - // the marker bytes. 
- final BytesReference messageBody = stream.materializeBytes(); - final BytesReference header = buildHeader(requestId, status, stream.getVersion(), messageBody.length() + zeroCopyBuffer.length()); - return new CompositeBytesReference(header, messageBody, zeroCopyBuffer); - } - - /** - * Validates the first N bytes of the message header and returns false if the message is - * a ping message and has no payload ie. isn't a real user level message. - * - * @throws IllegalStateException if the message is too short, less than the header or less that the header plus the message size - * @throws HttpOnTransportException if the message has no valid header and appears to be a HTTP message - * @throws IllegalArgumentException if the message is greater that the maximum allowed frame size. This is dependent on the available - * memory. - */ - public static boolean validateMessageHeader(BytesReference buffer) throws IOException { - final int sizeHeaderLength = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; - if (buffer.length() < sizeHeaderLength) { - throw new IllegalStateException("message size must be >= to the header size"); - } - int offset = 0; - if (buffer.get(offset) != 'E' || buffer.get(offset + 1) != 'S') { - // special handling for what is probably HTTP - if (bufferStartsWith(buffer, offset, "GET ") || - bufferStartsWith(buffer, offset, "POST ") || - bufferStartsWith(buffer, offset, "PUT ") || - bufferStartsWith(buffer, offset, "HEAD ") || - bufferStartsWith(buffer, offset, "DELETE ") || - bufferStartsWith(buffer, offset, "OPTIONS ") || - bufferStartsWith(buffer, offset, "PATCH ") || - bufferStartsWith(buffer, offset, "TRACE ")) { - - throw new HttpOnTransportException("This is not a HTTP port"); - } - - // we have 6 readable bytes, show 4 (should be enough) - throw new StreamCorruptedException("invalid internal transport message format, got (" - + Integer.toHexString(buffer.get(offset) & 0xFF) + "," - + Integer.toHexString(buffer.get(offset + 1) & 0xFF) 
+ "," - + Integer.toHexString(buffer.get(offset + 2) & 0xFF) + "," - + Integer.toHexString(buffer.get(offset + 3) & 0xFF) + ")"); - } - - final int dataLen; - try (StreamInput input = buffer.streamInput()) { - input.skip(TcpHeader.MARKER_BYTES_SIZE); - dataLen = input.readInt(); - if (dataLen == PING_DATA_SIZE) { - // discard the messages we read and continue, this is achieved by skipping the bytes - // and returning null - return false; - } - } - - if (dataLen <= 0) { - throw new StreamCorruptedException("invalid data length: " + dataLen); - } - // safety against too large frames being sent - if (dataLen > NINETY_PER_HEAP_SIZE) { - throw new IllegalArgumentException("transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" - + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); - } - - if (buffer.length() < dataLen + sizeHeaderLength) { - throw new IllegalStateException("buffer must be >= to the message size but wasn't"); - } - return true; - } - - private static boolean bufferStartsWith(BytesReference buffer, int offset, String method) { - char[] chars = method.toCharArray(); - for (int i = 0; i < chars.length; i++) { - if (buffer.get(offset + i) != chars[i]) { - return false; - } - } - - return true; - } - - /** - * A helper exception to mark an incoming connection as potentially being HTTP - * so an appropriate error code can be returned - */ - public static class HttpOnTransportException extends ElasticsearchException { - - public HttpOnTransportException(String msg) { - super(msg); - } - - @Override - public RestStatus status() { - return RestStatus.BAD_REQUEST; - } - - public HttpOnTransportException(StreamInput in) throws IOException { - super(in); - } - } - - /** - * This method handles the message receive part for both request and responses - */ - public final void messageReceived(BytesReference reference, TcpChannel channel, String profileName, - InetSocketAddress remoteAddress, int messageLengthBytes) throws IOException { - final 
int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; - readBytesMetric.inc(totalMessageSize); - // we have additional bytes to read, outside of the header - boolean hasMessageBytesToRead = (totalMessageSize - TcpHeader.HEADER_SIZE) > 0; - StreamInput streamIn = reference.streamInput(); - boolean success = false; - try (ThreadContext.StoredContext tCtx = threadPool.getThreadContext().stashContext()) { - long requestId = streamIn.readLong(); - byte status = streamIn.readByte(); - Version version = Version.fromId(streamIn.readInt()); - if (TransportStatus.isCompress(status) && hasMessageBytesToRead && streamIn.available() > 0) { - Compressor compressor; - try { - final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE; - compressor = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)); - } catch (NotCompressedException ex) { - int maxToRead = Math.min(reference.length(), 10); - StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead) - .append("] content bytes out of [").append(reference.length()) - .append("] readable bytes with message size [").append(messageLengthBytes).append("] ").append("] are ["); - for (int i = 0; i < maxToRead; i++) { - sb.append(reference.get(i)).append(","); - } - sb.append("]"); - throw new IllegalStateException(sb.toString()); - } - streamIn = compressor.streamInput(streamIn); - } - final boolean isHandshake = TransportStatus.isHandshake(status); - ensureVersionCompatibility(version, getCurrentVersion(), isHandshake); - streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry); - streamIn.setVersion(version); - threadPool.getThreadContext().readHeaders(streamIn); - if (TransportStatus.isRequest(status)) { - handleRequest(channel, profileName, streamIn, requestId, messageLengthBytes, version, remoteAddress, 
status); - } else { - final TransportResponseHandler handler; - if (isHandshake) { - handler = pendingHandshakes.remove(requestId); - } else { - TransportResponseHandler theHandler = transportService.onResponseReceived(requestId); - if (theHandler == null && TransportStatus.isError(status)) { - handler = pendingHandshakes.remove(requestId); - } else { - handler = theHandler; - } - } - // ignore if its null, the service logs it - if (handler != null) { - if (TransportStatus.isError(status)) { - handlerResponseError(streamIn, handler); - } else { - handleResponse(remoteAddress, streamIn, handler); - } - // Check the entire message has been read - final int nextByte = streamIn.read(); - // calling read() is useful to make sure the message is fully read, even if there is an EOS marker - if (nextByte != -1) { - throw new IllegalStateException("Message not fully read (response) for requestId [" + requestId + "], handler [" - + handler + "], error [" + TransportStatus.isError(status) + "]; resetting"); - } - } - } - success = true; - } finally { - if (success) { - IOUtils.close(streamIn); - } else { - IOUtils.closeWhileHandlingException(streamIn); - } - } - } - - static void ensureVersionCompatibility(Version version, Version currentVersion, boolean isHandshake) { - // for handshakes we are compatible with N-2 since otherwise we can't figure out our initial version - // since we are compatible with N-1 and N+1 so we always send our minCompatVersion as the initial version in the - // handshake. This looks odd but it's required to establish the connection correctly we check for real compatibility - // once the connection is established - final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion; - if (version.isCompatible(compatibilityVersion) == false) { - final Version minCompatibilityVersion = isHandshake ? 
compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion(); - String msg = "Received " + (isHandshake ? "handshake " : "") + "message from unsupported version: ["; - throw new IllegalStateException(msg + version + "] minimal compatible version is: [" + minCompatibilityVersion + "]"); - } - } - - private void handleResponse(InetSocketAddress remoteAddress, final StreamInput stream, final TransportResponseHandler handler) { - final TransportResponse response; - try { - response = handler.read(stream); - response.remoteAddress(new TransportAddress(remoteAddress)); - } catch (Exception e) { - handleException(handler, new TransportSerializationException( - "Failed to deserialize response from handler [" + handler.getClass().getName() + "]", e)); - return; - } - threadPool.executor(handler.executor()).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - handleException(handler, new ResponseHandlerFailureTransportException(e)); - } - - @Override - protected void doRun() throws Exception { - handler.handleResponse(response); - } - }); - - } - - /** - * Executed for a received response error - */ - private void handlerResponseError(StreamInput stream, final TransportResponseHandler handler) { - Exception error; - try { - error = stream.readException(); - } catch (Exception e) { - error = new TransportSerializationException("Failed to deserialize exception response from stream", e); - } - handleException(handler, error); - } - - private void handleException(final TransportResponseHandler handler, Throwable error) { - if (!(error instanceof RemoteTransportException)) { - error = new RemoteTransportException(error.getMessage(), error); - } - final RemoteTransportException rtx = (RemoteTransportException) error; - threadPool.executor(handler.executor()).execute(() -> { - try { - handler.handleException(rtx); - } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to handle exception response 
[{}]", handler), e); - } - }); - } - - protected String handleRequest(TcpChannel channel, String profileName, final StreamInput stream, long requestId, - int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) - throws IOException { - final String action = stream.readString(); - transportService.onRequestReceived(requestId, action); - TransportChannel transportChannel = null; - try { - if (TransportStatus.isHandshake(status)) { - final VersionHandshakeResponse response = new VersionHandshakeResponse(getCurrentVersion()); - sendResponse(version, channel, response, requestId, HANDSHAKE_ACTION_NAME, TransportResponseOptions.EMPTY, - TransportStatus.setHandshake((byte) 0)); - } else { - final RequestHandlerRegistry reg = transportService.getRequestHandler(action); - if (reg == null) { - throw new ActionNotFoundTransportException(action); - } - if (reg.canTripCircuitBreaker()) { - getInFlightRequestBreaker().addEstimateBytesAndMaybeBreak(messageLengthBytes, ""); - } else { - getInFlightRequestBreaker().addWithoutBreaking(messageLengthBytes); - } - transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, profileName, - messageLengthBytes); - final TransportRequest request = reg.newRequest(stream); - request.remoteAddress(new TransportAddress(remoteAddress)); - // in case we throw an exception, i.e. 
when the limit is hit, we don't want to verify - validateRequest(stream, requestId, action); - threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); - } - } catch (Exception e) { - // the circuit breaker tripped - if (transportChannel == null) { - transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, profileName, 0); - } - try { - transportChannel.sendResponse(e); - } catch (IOException inner) { - inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "Failed to send error message back to client for action [{}]", action), inner); - } - } - return action; - } - - // This template method is needed to inject custom error checking logic in tests. - protected void validateRequest(StreamInput stream, long requestId, String action) throws IOException { - final int nextByte = stream.read(); - // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker - if (nextByte != -1) { - throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" + action - + "], available [" + stream.available() + "]; resetting"); - } - } - - class RequestHandler extends AbstractRunnable { - private final RequestHandlerRegistry reg; - private final TransportRequest request; - private final TransportChannel transportChannel; - - RequestHandler(RequestHandlerRegistry reg, TransportRequest request, TransportChannel transportChannel) { - this.reg = reg; - this.request = request; - this.transportChannel = transportChannel; - } - - @SuppressWarnings({"unchecked"}) - @Override - protected void doRun() throws Exception { - reg.processMessageReceived(request, transportChannel); - } - - @Override - public boolean isForceExecution() { - return reg.isForceExecution(); - } - - @Override - public void onFailure(Exception e) { - if (lifecycleState() == Lifecycle.State.STARTED) { - // we can only 
send a response transport is started.... - try { - transportChannel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "Failed to send error message back to client for action [{}]", reg.getAction()), inner); - } - } - } - } - - private static final class VersionHandshakeResponse extends TransportResponse { - private Version version; - - private VersionHandshakeResponse(Version version) { - this.version = version; - } - - private VersionHandshakeResponse() { - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - version = Version.readVersion(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - assert version != null; - Version.writeVersion(version, out); - } - } - - protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) - throws IOException, InterruptedException { - numHandshakes.inc(); - final long requestId = newRequestId(); - final HandshakeResponseHandler handler = new HandshakeResponseHandler(channel); - AtomicReference versionRef = handler.versionRef; - AtomicReference exceptionRef = handler.exceptionRef; - pendingHandshakes.put(requestId, handler); - boolean success = false; - try { - if (channel.isOpen() == false) { - // we have to protect us here since sendRequestToChannel won't barf if the channel is closed. - // it's weird but to change it will cause a lot of impact on the exception handling code all over the codebase. - // yet, if we don't check the state here we might have registered a pending handshake handler but the close - // listener calling #onChannelClosed might have already run and we are waiting on the latch below unitl we time out. 
- throw new IllegalStateException("handshake failed, channel already closed"); - } - // for the request we use the minCompatVersion since we don't know what's the version of the node we talk to - // we also have no payload on the request but the response will contain the actual version of the node we talk - // to as the payload. - final Version minCompatVersion = getCurrentVersion().minimumCompatibilityVersion(); - sendRequestToChannel(node, channel, requestId, HANDSHAKE_ACTION_NAME, TransportRequest.Empty.INSTANCE, - TransportRequestOptions.EMPTY, minCompatVersion, TransportStatus.setHandshake((byte) 0)); - if (handler.latch.await(timeout.millis(), TimeUnit.MILLISECONDS) == false) { - throw new ConnectTransportException(node, "handshake_timeout[" + timeout + "]"); - } - success = true; - if (exceptionRef.get() != null) { - throw new IllegalStateException("handshake failed", exceptionRef.get()); - } else { - Version version = versionRef.get(); - if (getCurrentVersion().isCompatible(version) == false) { - throw new IllegalStateException("Received message from unsupported version: [" + version - + "] minimal compatible version is: [" + getCurrentVersion().minimumCompatibilityVersion() + "]"); - } - return version; - } - } finally { - final TransportResponseHandler removedHandler = pendingHandshakes.remove(requestId); - // in the case of a timeout or an exception on the send part the handshake has not been removed yet. - // but the timeout is tricky since it's basically a race condition so we only assert on the success case. 
- assert success && removedHandler == null || success == false : "handler for requestId [" + requestId + "] is not been removed"; - } - } - - final int getNumPendingHandshakes() { // for testing - return pendingHandshakes.size(); - } - - final long getNumHandshakes() { - return numHandshakes.count(); // for testing - } - - @Override - public long newRequestId() { - return requestIdGenerator.incrementAndGet(); - } - - /** - * Called once the channel is closed for instance due to a disconnect or a closed socket etc. - */ - private void cancelHandshakeForChannel(TcpChannel channel) { - final Optional first = pendingHandshakes.entrySet().stream() - .filter((entry) -> entry.getValue().channel == channel).map(Map.Entry::getKey).findFirst(); - if (first.isPresent()) { - final Long requestId = first.get(); - final HandshakeResponseHandler handler = pendingHandshakes.remove(requestId); - if (handler != null) { - // there might be a race removing this or this method might be called twice concurrently depending on how - // the channel is closed ie. due to connection reset or broken pipes - handler.handleException(new TransportException("connection reset")); - } - } - } - - /** - * Ensures this transport is still started / open - * - * @throws IllegalStateException if the transport is not started / open - */ - protected final void ensureOpen() { - if (lifecycle.started() == false) { - throw new IllegalStateException("transport has been stopped"); - } - } - - /** - * This listener increments the transmitted bytes metric on success. 
- */ - private abstract class SendMetricListener extends NotifyOnceListener { - private final long messageSize; - - private SendMetricListener(long messageSize) { - this.messageSize = messageSize; - } - - @Override - protected final void innerOnResponse(Void object) { - transmittedBytesMetric.inc(messageSize); - innerInnerOnResponse(object); - } - - protected abstract void innerInnerOnResponse(Void object); - } - - private final class SendListener extends SendMetricListener { - private final TcpChannel channel; - private final Closeable optionalCloseable; - private final Runnable transportAdaptorCallback; - - private SendListener(TcpChannel channel, Closeable optionalCloseable, Runnable transportAdaptorCallback, long messageLength) { - super(messageLength); - this.channel = channel; - this.optionalCloseable = optionalCloseable; - this.transportAdaptorCallback = transportAdaptorCallback; - } - - @Override - protected void innerInnerOnResponse(Void v) { - closeAndCallback(null); - } - - @Override - protected void innerOnFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("send message failed [channel: {}]", channel), e); - closeAndCallback(e); - } - - private void closeAndCallback(final Exception e) { - try { - IOUtils.close(optionalCloseable, transportAdaptorCallback::run); - } catch (final IOException inner) { - if (e != null) { - inner.addSuppressed(e); - } - throw new UncheckedIOException(inner); - } - } - } - - @Override - public final TransportStats getStats() { - return new TransportStats(acceptedChannels.size(), readBytesMetric.count(), readBytesMetric.sum(), transmittedBytesMetric.count(), - transmittedBytesMetric.sum()); - } - - /** - * Returns all profile settings for the given settings object - */ - public static Set getProfileSettings(Settings settings) { - HashSet profiles = new HashSet<>(); - boolean isDefaultSet = false; - for (String profile : settings.getGroups("transport.profiles.", true).keySet()) { - profiles.add(new 
ProfileSettings(settings, profile)); - if (DEFAULT_PROFILE.equals(profile)) { - isDefaultSet = true; - } - } - if (isDefaultSet == false) { - profiles.add(new ProfileSettings(settings, DEFAULT_PROFILE)); - } - return Collections.unmodifiableSet(profiles); - } - - /** - * Representation of a transport profile settings for a transport.profiles.$profilename.* - */ - public static final class ProfileSettings { - public final String profileName; - public final boolean tcpNoDelay; - public final boolean tcpKeepAlive; - public final boolean reuseAddress; - public final ByteSizeValue sendBufferSize; - public final ByteSizeValue receiveBufferSize; - public final List bindHosts; - public final List publishHosts; - public final String portOrRange; - public final int publishPort; - public final boolean isDefaultProfile; - - public ProfileSettings(Settings settings, String profileName) { - this.profileName = profileName; - isDefaultProfile = DEFAULT_PROFILE.equals(profileName); - tcpKeepAlive = TCP_KEEP_ALIVE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - tcpNoDelay = TCP_NO_DELAY_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - reuseAddress = TCP_REUSE_ADDRESS_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - sendBufferSize = TCP_SEND_BUFFER_SIZE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - receiveBufferSize = TCP_RECEIVE_BUFFER_SIZE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - List profileBindHosts = BIND_HOST_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - bindHosts = (profileBindHosts.isEmpty() ? 
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(settings) - : profileBindHosts); - publishHosts = PUBLISH_HOST_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - Setting concretePort = PORT_PROFILE.getConcreteSettingForNamespace(profileName); - if (concretePort.exists(settings) == false && isDefaultProfile == false) { - throw new IllegalStateException("profile [" + profileName + "] has no port configured"); - } - portOrRange = PORT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - publishPort = isDefaultProfile ? PUBLISH_PORT.get(settings) : - PUBLISH_PORT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - } - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransportChannel.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransportChannel.java deleted file mode 100644 index b0e4695..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TcpTransportChannel.java +++ /dev/null @@ -1,90 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.transport.TcpChannel; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseOptions; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; - -public final class TcpTransportChannel implements TransportChannel { - private final TcpTransport transport; - private final Version version; - private final String action; - private final long requestId; - private final String profileName; - private final long reservedBytes; - private final AtomicBoolean released = new AtomicBoolean(); - private final String channelType; - private final TcpChannel channel; - - TcpTransportChannel(TcpTransport transport, TcpChannel channel, String channelType, String action, - long requestId, Version version, String 
profileName, long reservedBytes) { - this.version = version; - this.channel = channel; - this.transport = transport; - this.action = action; - this.requestId = requestId; - this.profileName = profileName; - this.reservedBytes = reservedBytes; - this.channelType = channelType; - } - - @Override - public String getProfileName() { - return profileName; - } - - @Override - public void sendResponse(TransportResponse response) throws IOException { - sendResponse(response, TransportResponseOptions.EMPTY); - } - - @Override - public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException { - try { - transport.sendResponse(version, channel, response, requestId, action, options); - } finally { - release(false); - } - } - - @Override - public void sendResponse(Exception exception) throws IOException { - try { - transport.sendErrorResponse(version, channel, exception, requestId, action); - } finally { - release(true); - } - } - - private Exception releaseBy; - - private void release(boolean isExceptionResponse) { - if (released.compareAndSet(false, true)) { - assert (releaseBy = new Exception()) != null; // easier to debug if it's already closed - transport.getInFlightRequestBreaker().addWithoutBreaking(-reservedBytes); - } else if (isExceptionResponse == false) { - // only fail if we are not sending an error - we might send the error triggered by the previous - // sendResponse call - throw new IllegalStateException("reserved bytes are already released", releaseBy); - } - } - - @Override - public String getChannelType() { - return channelType; - } - - @Override - public Version getVersion() { - return version; - } - - public TcpChannel getChannel() { - return channel; - } -} - diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Transport.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/Transport.java deleted file mode 100644 index 95ca1e2..0000000 --- 
a/transport/src/main/java/org/xbib/elasticsearch/client/transport/Transport.java +++ /dev/null @@ -1,116 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.CheckedBiConsumer; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.breaker.NoopCircuitBreaker; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportStats; - -import java.io.Closeable; -import java.io.IOException; -import java.net.UnknownHostException; -import java.util.List; -import java.util.Map; - -public interface Transport extends LifecycleComponent { - - Setting TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, Property.NodeScope); - - void setTransportService(TransportService service); - - /** - * The address the transport is bound on. - */ - BoundTransportAddress boundAddress(); - - /** - * Further profile bound addresses - * @return null iff profiles are unsupported, otherwise a map with name of profile and its bound transport address - */ - Map profileBoundAddresses(); - - /** - * Returns an address from its string representation. - */ - TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException; - - /** - * Returns true if the node is connected. 
- */ - boolean nodeConnected(DiscoveryNode node); - - /** - * Connects to a node with the given connection profile. If the node is already connected this method has no effect. - * Once a successful is established, it can be validated before being exposed. - */ - void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) throws ConnectTransportException; - - /** - * Disconnected from the given node, if not connected, will do nothing. - */ - void disconnectFromNode(DiscoveryNode node); - - List getLocalAddresses(); - - default CircuitBreaker getInFlightRequestBreaker() { - return new NoopCircuitBreaker("in-flight-noop"); - } - - /** - * Returns a new request ID to use when sending a message via {@link Connection#sendRequest(long, String, - * TransportRequest, TransportRequestOptions)} - */ - long newRequestId(); - - Connection getConnection(DiscoveryNode node); - - /** - * Opens a new connection to the given node and returns it. In contrast to - * {@link #connectToNode(DiscoveryNode, ConnectionProfile, CheckedBiConsumer)} the returned connection is not managed by - * the transport implementation. This connection must be closed once it's not needed anymore. - * This connection type can be used to execute a handshake between two nodes before the node will be published via - * {@link #connectToNode(DiscoveryNode, ConnectionProfile, CheckedBiConsumer)}. - */ - Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException; - - TransportStats getStats(); - - /** - * A unidirectional connection to a {@link DiscoveryNode} - */ - interface Connection extends Closeable { - /** - * The node this connection is associated with - */ - DiscoveryNode getNode(); - - void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws - IOException, TransportException; - - /** - * Returns the version of the node this connection was established with. 
- */ - default Version getVersion() { - return getNode().getVersion(); - } - - /** - * Returns a key that this connection can be cached on. Delegating subclasses must delegate method call to - * the original connection. - */ - default Object getCacheKey() { - return this; - } - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportBulkClient.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportBulkClient.java deleted file mode 100644 index a516429..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportBulkClient.java +++ /dev/null @@ -1,129 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.xbib.elasticsearch.client.AbstractClient; -import org.xbib.elasticsearch.client.BulkControl; -import org.xbib.elasticsearch.client.BulkMetric; -import org.xbib.elasticsearch.client.NetworkUtils; - -import java.io.IOException; -import java.net.InetAddress; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -/** - * Transport client with additional methods for bulk processing. 
- */ -public class TransportBulkClient extends AbstractClient { - - private static final Logger logger = LogManager.getLogger(TransportBulkClient.class.getName()); - - public TransportBulkClient init(ElasticsearchClient client, Settings settings, BulkMetric metric, BulkControl control) { - super.init(client, settings, metric, control); - // auto-connect here - try { - Collection addrs = findAddresses(settings); - if (!connect(addrs, settings.getAsBoolean("autodiscover", false))) { - throw new NoNodeAvailableException("no cluster nodes available, check settings " - + settings.toString()); - } - } catch (IOException e) { - logger.error(e.getMessage(), e); - } - return this; - } - - protected ElasticsearchClient createClient(Settings settings) { - if (settings != null) { - String version = System.getProperty("os.name") - + " " + System.getProperty("java.vm.name") - + " " + System.getProperty("java.vm.vendor") - + " " + System.getProperty("java.runtime.version") - + " " + System.getProperty("java.vm.version"); - logger.info("creating transport client on {} with effective settings {}", - version, settings.toString()); - return new TransportClient(Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), settings.get(ClusterName.CLUSTER_NAME_SETTING.getKey())) - .put(EsExecutors.PROCESSORS_SETTING.getKey(), settings.get(EsExecutors.PROCESSORS_SETTING.getKey())) - .put("client.transport.ignore_cluster_name", true) - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) - .build(), Collections.singletonList(Netty4Plugin.class)); - } - return null; - } - - @Override - public synchronized void shutdown() throws IOException { - super.shutdown(); - logger.info("shutting down..."); - if (client() != null) { - TransportClient client = (TransportClient) client(); - client.close(); - client.threadPool().shutdown(); - } - logger.info("shutting down completed"); - } - - private Collection findAddresses(Settings settings) throws IOException { - List 
hostnames = settings.getAsList("host", Collections.singletonList("localhost")); - int port = settings.getAsInt("port", 9300); - Collection addresses = new ArrayList<>(); - for (String hostname : hostnames) { - String[] splitHost = hostname.split(":", 2); - if (splitHost.length == 2) { - String host = splitHost[0]; - InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null); - try { - port = Integer.parseInt(splitHost[1]); - } catch (Exception e) { - logger.warn(e.getMessage(), e); - } - addresses.add(new TransportAddress(inetAddress, port)); - } - if (splitHost.length == 1) { - String host = splitHost[0]; - InetAddress inetAddress = NetworkUtils.resolveInetAddress(host, null); - addresses.add(new TransportAddress(inetAddress, port)); - } - } - return addresses; - } - - private boolean connect(Collection addresses, boolean autodiscover) { - logger.info("trying to connect to {}", addresses); - if (client() == null) { - throw new IllegalStateException("no client?"); - } - TransportClient transportClient = (TransportClient) client(); - transportClient.addTransportAddresses(addresses); - List nodes = transportClient.connectedNodes(); - logger.info("nodes = {}", nodes); - if (nodes != null && !nodes.isEmpty()) { - if (autodiscover) { - logger.info("trying to auto-discover all cluster nodes..."); - ClusterStateRequestBuilder clusterStateRequestBuilder = - new ClusterStateRequestBuilder(client(), ClusterStateAction.INSTANCE); - ClusterStateResponse clusterStateResponse = clusterStateRequestBuilder.execute().actionGet(); - DiscoveryNodes discoveryNodes = clusterStateResponse.getState().getNodes(); - transportClient.addDiscoveryNodes(discoveryNodes); - logger.info("after auto-discovery connected to {}", transportClient.connectedNodes()); - } - return true; - } - return false; - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportClient.java 
b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportClient.java deleted file mode 100644 index f7b79cd..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportClient.java +++ /dev/null @@ -1,507 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; -import static java.util.stream.Collectors.toList; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.Version; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.ActionModule; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.LivenessRequest; -import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.client.support.AbstractClient; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.inject.ModulesBuilder; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Setting; -import 
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.node.InternalSettingsPreparer; -import org.elasticsearch.node.Node; -import org.elasticsearch.plugins.ActionPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.search.SearchModule; -import org.elasticsearch.threadpool.ExecutorBuilder; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.FutureTransportResponseHandler; -import org.elasticsearch.transport.TransportRequestOptions; - -import java.io.Closeable; -import java.io.IOException; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * Simplified transport client, without the node sampling and retrying mode like in the mainline version. - * - * Configurable connect ping interval setting added. 
- */ -public class TransportClient extends AbstractClient { - - private static final Logger logger = LogManager.getLogger(TransportClient.class); - - private static final String CLIENT_TYPE = "transport"; - - private final Injector injector; - - private final long pingTimeout; - - private final ClusterName clusterName; - - private final TransportService transportService; - - private final AtomicInteger tempNodeId = new AtomicInteger(); - - private final AtomicInteger nodeCounter = new AtomicInteger(); - - private final Object mutex = new Object(); - - private volatile List nodes = Collections.emptyList(); - - private volatile List listedNodes = Collections.emptyList(); - - private volatile List filteredNodes = Collections.emptyList(); - - private volatile boolean closed; - - /** - * Creates a new TransportClient with the given settings and plugins. - * @param settings settings - */ - public TransportClient(Settings settings) { - this(buildParams(settings, Settings.EMPTY, Collections.emptyList())); - } - - /** - * Creates a new TransportClient with the given settings and plugins. - * @param settings settings - * @param plugins plugins - */ - public TransportClient(Settings settings, Collection> plugins) { - this(buildParams(settings, Settings.EMPTY, plugins)); - } - - /** - * Creates a new TransportClient with the given settings, defaults and plugins. - * @param settings the client settings - * @param defaultSettings default settings that are merged after the plugins have added it's additional settings. 
- * @param plugins the client plugins - */ - protected TransportClient(Settings settings, Settings defaultSettings, Collection> plugins) { - this(buildParams(settings, defaultSettings, plugins)); - } - - private TransportClient(final Injector injector) { - super(getSettings(injector), getThreadPool(injector)); - this.injector = injector; - this.clusterName = new ClusterName(getSettings(injector).get("cluster.name", "elasticsearch")); - this.transportService = injector.getInstance(TransportService.class); - this.pingTimeout = this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis(); - } - - private static Settings getSettings(Injector injector) { - return injector.getInstance(Settings.class); - } - - private static ThreadPool getThreadPool(Injector injector) { - return injector.getInstance(ThreadPool.class); - } - - /** - * Returns the current registered transport addresses to use. - * - * @return list of transport addresess - */ - public List transportAddresses() { - List list = new ArrayList<>(); - for (DiscoveryNode listedNode : listedNodes) { - list.add(listedNode.getAddress()); - } - return Collections.unmodifiableList(list); - } - - /** - * Returns the current connected transport nodes that this client will use. - * The nodes include all the nodes that are currently alive based on the transport - * addresses provided. - * - * @return list of nodes - */ - public List connectedNodes() { - return this.nodes; - } - - /** - * The list of filtered nodes that were not connected to, for example, due to mismatch in cluster name. - * - * @return list of nodes - */ - public List filteredNodes() { - return this.filteredNodes; - } - - /** - * Returns the listed nodes in the transport client, once added to it. - * - * @return list of nodes - */ - public List listedNodes() { - return this.listedNodes; - } - - /** - * Adds a list of transport addresses that will be used to connect to. 
- * The Node this transport address represents will be used if its possible to connect to it. - * If it is unavailable, it will be automatically connected to once it is up. - * In order to get the list of all the current connected nodes, please see {@link #connectedNodes()}. - * - * @param discoveryNodes nodes - * @return this transport client - */ - public TransportClient addDiscoveryNodes(DiscoveryNodes discoveryNodes) { - Collection addresses = new ArrayList<>(); - for (DiscoveryNode discoveryNode : discoveryNodes) { - addresses.add(discoveryNode.getAddress()); - } - addTransportAddresses(addresses); - return this; - } - - /** - * Adds a list of transport addresses that will be used to connect to. - * The Node this transport address represents will be used if its possible to connect to it. - * If it is unavailable, it will be automatically connected to once it is up. - * In order to get the list of all the current connected nodes, please see {@link #connectedNodes()}. - * - * @param transportAddresses transport addresses - */ - public TransportClient addTransportAddresses(Collection transportAddresses) { - synchronized (mutex) { - if (closed) { - throw new IllegalStateException("transport client is closed, can't add addresses"); - } - Set discoveryNodeList = new HashSet<>(listedNodes); - logger.info("before adding: nodes={} listednodes={} transportAddresses={}", - nodes, listedNodes, transportAddresses); - for (TransportAddress newTransportAddress : transportAddresses) { - boolean found = false; - for (DiscoveryNode discoveryNode : discoveryNodeList) { - logger.debug("checking existing address [{}] against new [{}]", - discoveryNode.getAddress(), newTransportAddress); - if (discoveryNode.getAddress().equals(newTransportAddress)) { // sameHost - found = true; - logger.debug("address [{}] already connected, ignoring", newTransportAddress, discoveryNode); - break; - } - } - if (!found) { - DiscoveryNode node = new DiscoveryNode("#transport#-" + 
tempNodeId.incrementAndGet(), - newTransportAddress, Version.CURRENT.minimumCompatibilityVersion()); - logger.info("adding new address [{}]", node); - discoveryNodeList.add(node); - } - } - listedNodes = Collections.unmodifiableList(new ArrayList<>(discoveryNodeList)); - connect(); - } - return this; - } - - /** - * Removes a transport address from the list of transport addresses that are used to connect to. - * - * @param transportAddress transport address to remove - * @return this transport client - */ - public TransportClient removeTransportAddress(TransportAddress transportAddress) { - synchronized (mutex) { - if (closed) { - throw new IllegalStateException("transport client is closed, can't remove an address"); - } - List builder = new ArrayList<>(); - for (DiscoveryNode otherNode : listedNodes) { - if (!otherNode.getAddress().equals(transportAddress)) { - builder.add(otherNode); - } else { - logger.debug("removing address [{}]", otherNode); - } - } - listedNodes = Collections.unmodifiableList(builder); - } - return this; - } - - @Override - @SuppressWarnings("rawtypes") - public void close() { - synchronized (mutex) { - if (closed) { - return; - } - closed = true; - logger.info("disconnecting from nodes {}", nodes); - for (DiscoveryNode node : nodes) { - transportService.disconnectFromNode(node); - } - nodes = Collections.emptyList(); - logger.info("disconnecting from listed nodes {}", listedNodes); - for (DiscoveryNode listedNode : listedNodes) { - transportService.disconnectFromNode(listedNode); - } - listedNodes = Collections.emptyList(); - } - transportService.close(); - PluginsService pluginsService = injector.getInstance(PluginsService.class); - for (Class guiceService : pluginsService.getGuiceServiceClasses()) { - logger.info("closing plugin service {}", guiceService); - injector.getInstance(guiceService).close(); - } - // closing all plugins - pluginsService.filterPlugins(Plugin.class).forEach(plugin -> { - try { - logger.info("closing plugin {}", 
plugin); - plugin.close(); - } catch (IOException e) { - logger.warn(e.getMessage(), e); - } - }); - try { - ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS); - } catch (Exception e) { - logger.warn(e.getMessage(), e); - } - } - - private void connect() { - Set newNodes = new HashSet<>(); - Set newFilteredNodes = new HashSet<>(); - for (DiscoveryNode listedNode : listedNodes) { - if (!transportService.nodeConnected(listedNode)) { - try { - logger.info("connecting to listed node " + listedNode); - transportService.connectToNode(listedNode); - } catch (Exception e) { - logger.warn("failed to connect to node " + listedNode, e); - continue; - } - } - try { - LivenessResponse livenessResponse = transportService.submitRequest(listedNode, - TransportLivenessAction.NAME, new LivenessRequest(), - TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE) - .withTimeout(pingTimeout).build(), - new FutureTransportResponseHandler() { - @SuppressWarnings("deprecation") - @Override - public LivenessResponse newInstance() { - return new LivenessResponse(); - } - }).txGet(); - if (!clusterName.equals(livenessResponse.getClusterName())) { - logger.warn("node {} not part of the cluster {}, ignoring", listedNode, clusterName); - newFilteredNodes.add(listedNode); - } else if (livenessResponse.getDiscoveryNode() != null) { - DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode(); - newNodes.add(new DiscoveryNode(nodeWithInfo.getName(), nodeWithInfo.getId(), - nodeWithInfo.getEphemeralId(), nodeWithInfo.getHostName(), - nodeWithInfo.getHostAddress(), listedNode.getAddress(), nodeWithInfo.getAttributes(), - nodeWithInfo.getRoles(), nodeWithInfo.getVersion())); - } else { - logger.debug("node {} didn't return any discovery info, temporarily using transport discovery node", - listedNode); - newNodes.add(listedNode); - } - } catch (Exception e) { - logger.warn("failed to get node info for {}, disconnecting", e, listedNode); - 
transportService.disconnectFromNode(listedNode); - } - } - for (Iterator it = newNodes.iterator(); it.hasNext(); ) { - DiscoveryNode node = it.next(); - if (!transportService.nodeConnected(node)) { - try { - logger.debug("connecting to new node [{}]", node); - transportService.connectToNode(node); - } catch (Exception e) { - it.remove(); - logger.warn("failed to connect to new node [" + node + "], removed", e); - } - } - } - this.nodes = Collections.unmodifiableList(new ArrayList<>(newNodes)); - logger.info("connected to nodes: {}", nodes); - this.filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes)); - } - - @Override - @SuppressWarnings({"unchecked", "rawtypes"}) - protected > - void doExecute(Action action, final R request, final ActionListener listener) { - List nodeList = this.nodes; - if (nodeList.isEmpty()) { - throw new NoNodeAvailableException("none of the configured nodes are available: " + this.listedNodes); - } - int index = nodeCounter.incrementAndGet(); - if (index < 0) { - index = 0; - nodeCounter.set(0); - } - DiscoveryNode discoveryNode = nodeList.get(index % nodeList.size()); - // try once and never more - try { - ActionRequestValidationException validationException = request.validate(); - if (validationException != null) { - listener.onFailure(validationException); - return; - } - TransportRequestOptions transportOptions = action.transportOptions(settings); - transportService.sendRequest(discoveryNode, action.name(), request, transportOptions, - new ActionListenerResponseHandler<>(listener, action::newResponse)); - } catch (Exception e) { - listener.onFailure(e); - } - } - - private static Injector buildParams(Settings givenSettings, Settings defaultSettings, - Collection> plugins) { - Settings providedSettings = givenSettings; - if (!Node.NODE_NAME_SETTING.exists(providedSettings)) { - providedSettings = Settings.builder().put(providedSettings) - .put(Node.NODE_NAME_SETTING.getKey(), "_client_") - .build(); - } - final 
PluginsService pluginsService = newPluginService(providedSettings, plugins); - final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).build(); - final List resourcesToClose = new ArrayList<>(); - final ThreadPool threadPool = new ThreadPool(settings); - resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); - final NetworkService networkService = new NetworkService(Collections.emptyList()); - try { - final List> additionalSettings = new ArrayList<>(pluginsService.getPluginSettings()); - final List additionalSettingsFilter = new ArrayList<>(pluginsService.getPluginSettingsFilter()); - for (final ExecutorBuilder builder : threadPool.builders()) { - additionalSettings.addAll(builder.getRegisteredSettings()); - } - SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter); - SearchModule searchModule = new SearchModule(settings, true, - pluginsService.filterPlugins(SearchPlugin.class)); - List entries = new ArrayList<>(); - entries.addAll(NetworkModule.getNamedWriteables()); - entries.addAll(searchModule.getNamedWriteables()); - entries.addAll(pluginsService.filterPlugins(Plugin.class).stream() - .flatMap(p -> p.getNamedWriteables().stream()) - .collect(toList())); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries); - NamedXContentRegistry xContentRegistry = new NamedXContentRegistry(Stream.of( - searchModule.getNamedXContents().stream(), - pluginsService.filterPlugins(Plugin.class).stream() - .flatMap(p -> p.getNamedXContent().stream()) - ).flatMap(Function.identity()).collect(toList())); - ModulesBuilder modules = new ModulesBuilder(); - // plugin modules must be added here, before others or we can get crazy injection errors - for (Module pluginModule : pluginsService.createGuiceModules()) { - modules.add(pluginModule); - } - modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool)); - ActionModule 
actionModule = new ActionModule(true, settings, null, - settingsModule.getIndexScopedSettings(), - settingsModule.getClusterSettings(), - settingsModule.getSettingsFilter(), - threadPool, - pluginsService.filterPlugins(ActionPlugin.class), null, null, null); - modules.add(actionModule); - CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(), - settingsModule.getClusterSettings()); - PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings); - BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService); - resourcesToClose.add(circuitBreakerService); - resourcesToClose.add(bigArrays); - modules.add(settingsModule); - NetworkModule networkModule = new NetworkModule(settings, true, - pluginsService.filterPlugins(NetworkPlugin.class), threadPool, - bigArrays, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, - xContentRegistry, networkService, null); - final Transport transport = networkModule.getTransportSupplier().get(); - final TransportService transportService = new TransportService(settings, transport, threadPool, - networkModule.getTransportInterceptor(), - boundTransportAddress -> DiscoveryNode.createLocal(settings, new TransportAddress(TransportAddress.META_ADDRESS, 0), - UUIDs.randomBase64UUID()), null, Collections.emptySet()); - modules.add((b -> { - b.bind(BigArrays.class).toInstance(bigArrays); - b.bind(PluginsService.class).toInstance(pluginsService); - b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService); - b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); - b.bind(Transport.class).toInstance(transport); - b.bind(TransportService.class).toInstance(transportService); - b.bind(NetworkService.class).toInstance(networkService); - })); - Injector injector = modules.createInjector(); - List pluginLifecycleComponents = pluginsService.getGuiceServiceClasses() - 
.stream().map(injector::getInstance).collect(Collectors.toList()); - resourcesToClose.addAll(pluginLifecycleComponents); - transportService.start(); - transportService.acceptIncomingRequests(); - resourcesToClose.clear(); - return injector; - } finally { - IOUtils.closeWhileHandlingException(resourcesToClose); - } - } - - private static TransportAddress dummyAddress(NetworkModule networkModule) { - final TransportAddress address; - try { - address = networkModule.getTransportSupplier().get().addressesFromString("0.0.0.0:0", 1)[0]; - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } - return address; - } - - private static PluginsService newPluginService(final Settings settings, Collection> plugins) { - final Settings.Builder settingsBuilder = Settings.builder() - .put(TcpTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval - .put(NetworkService.NETWORK_SERVER.getKey(), false) - .put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE); - if (!settings.isEmpty()) { - logger.info(settings.toString()); - settingsBuilder.put(InternalSettingsPreparer.prepareSettings(settings)); - } - return new PluginsService(settingsBuilder.build(), null, null, null, plugins); - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportConnectionListener.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportConnectionListener.java deleted file mode 100644 index db349a5..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportConnectionListener.java +++ /dev/null @@ -1,27 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.elasticsearch.cluster.node.DiscoveryNode; - -public interface TransportConnectionListener { - - /** - * Called once a node connection is opened and registered. - */ - default void onNodeConnected(DiscoveryNode node) {} - - /** - * Called once a node connection is closed and unregistered. 
- */ - default void onNodeDisconnected(DiscoveryNode node) {} - - /** - * Called once a node connection is closed. The connection might not have been registered in the - * transport as a shared connection to a specific node - */ - default void onConnectionClosed(Transport.Connection connection) {} - - /** - * Called once a node connection is opened. - */ - default void onConnectionOpened(Transport.Connection connection) {} -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportInterceptor.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportInterceptor.java deleted file mode 100644 index 6235693..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportInterceptor.java +++ /dev/null @@ -1,31 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; - -/** - * This interface allows plugins to intercept requests on both the sender and the receiver side. 
- */ -public interface TransportInterceptor { - - default TransportRequestHandler interceptHandler(String action, String executor, - boolean forceExecution, - TransportRequestHandler actualHandler) { - return actualHandler; - } - - - default AsyncSender interceptSender(AsyncSender sender) { - return sender; - } - - - interface AsyncSender { - void sendRequest(Transport.Connection connection, String action, - TransportRequest request, TransportRequestOptions options, - TransportResponseHandler handler); - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportService.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportService.java deleted file mode 100644 index d035f85..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportService.java +++ /dev/null @@ -1,1224 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import 
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskCancelledException; -import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.FutureTransportResponseHandler; -import org.elasticsearch.transport.NodeDisconnectedException; -import org.elasticsearch.transport.NodeNotConnectedException; -import org.elasticsearch.transport.PlainTransportFuture; -import org.elasticsearch.transport.ReceiveTimeoutTransportException; -import org.elasticsearch.transport.RemoteTransportException; -import org.elasticsearch.transport.RequestHandlerRegistry; -import org.elasticsearch.transport.ResponseHandlerFailureTransportException; -import org.elasticsearch.transport.SendRequestTransportException; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportFuture; -import org.elasticsearch.transport.TransportInfo; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; 
-import org.elasticsearch.transport.TransportResponseOptions; -import org.elasticsearch.transport.TransportStats; - -import java.io.IOException; -import java.net.UnknownHostException; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.function.Supplier; -import java.util.stream.Stream; - -import static java.util.Collections.emptyList; -import static org.elasticsearch.common.settings.Setting.listSetting; - -public class TransportService extends AbstractLifecycleComponent { - - public static final String DIRECT_RESPONSE_PROFILE = ".direct"; - public static final String HANDSHAKE_ACTION_NAME = "internal:transport/handshake"; - - private final CountDownLatch blockIncomingRequestsLatch = new CountDownLatch(1); - protected final Transport transport; - protected final ThreadPool threadPool; - protected final ClusterName clusterName; - protected final TaskManager taskManager; - private final TransportInterceptor.AsyncSender asyncSender; - private final Function localNodeFactory; - private final boolean connectToRemoteCluster; - - volatile Map requestHandlers = Collections.emptyMap(); - final Object requestHandlerMutex = new Object(); - - final ConcurrentMapLong clientHandlers = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); - - final CopyOnWriteArrayList connectionListeners = new CopyOnWriteArrayList<>(); - - private final TransportInterceptor interceptor; - - // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they - // do show up, we can print more descriptive information about them - final 
Map timeoutInfoHandlers = - Collections.synchronizedMap(new LinkedHashMap(100, .75F, true) { - @Override - protected boolean removeEldestEntry(Map.Entry eldest) { - return size() > 100; - } - }); - - public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() {}; - - // tracer log - - public static final Setting> TRACE_LOG_INCLUDE_SETTING = - listSetting("transport.tracer.include", emptyList(), Function.identity(), Property.Dynamic, Property.NodeScope); - public static final Setting> TRACE_LOG_EXCLUDE_SETTING = - listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), - Function.identity(), Property.Dynamic, Property.NodeScope); - - private final Logger tracerLog; - - volatile String[] tracerLogInclude; - volatile String[] tracerLogExclude; - - private final RemoteClusterService remoteClusterService; - - /** if set will call requests sent to this id to shortcut and executed locally */ - volatile DiscoveryNode localNode = null; - private final Transport.Connection localNodeConnection = new Transport.Connection() { - @Override - public DiscoveryNode getNode() { - return localNode; - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { - sendLocalRequest(requestId, action, request, options); - } - - @Override - public void close() throws IOException { - } - }; - - /** - * Build the service. - * - * @param clusterSettings if non null, the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings - * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}. 
- */ - public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor, - Function localNodeFactory, @Nullable ClusterSettings clusterSettings, - Set taskHeaders) { - super(settings); - this.transport = transport; - this.threadPool = threadPool; - this.localNodeFactory = localNodeFactory; - this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - setTracerLogInclude(TRACE_LOG_INCLUDE_SETTING.get(settings)); - setTracerLogExclude(TRACE_LOG_EXCLUDE_SETTING.get(settings)); - tracerLog = Loggers.getLogger(logger, ".tracer"); - taskManager = createTaskManager(settings, threadPool, taskHeaders); - this.interceptor = transportInterceptor; - this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); - this.connectToRemoteCluster = RemoteClusterService.ENABLE_REMOTE_CLUSTERS.get(settings); - remoteClusterService = new RemoteClusterService(settings, this); - if (clusterSettings != null) { - clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude); - clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude); - if (connectToRemoteCluster) { - remoteClusterService.listenForUpdates(clusterSettings); - } - } - } - - public RemoteClusterService getRemoteClusterService() { - return remoteClusterService; - } - - public DiscoveryNode getLocalNode() { - return localNode; - } - - public TaskManager getTaskManager() { - return taskManager; - } - - protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool, Set taskHeaders) { - return new TaskManager(settings, threadPool, taskHeaders); - } - - /** - * The executor service for this transport service. 
- * - * @return the executor service - */ - protected ExecutorService getExecutorService() { - return threadPool.generic(); - } - - void setTracerLogInclude(List tracerLogInclude) { - this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY); - } - - void setTracerLogExclude(List tracerLogExclude) { - this.tracerLogExclude = tracerLogExclude.toArray(Strings.EMPTY_ARRAY); - } - - @Override - protected void doStart() { - transport.setTransportService(this); - transport.start(); - - if (transport.boundAddress() != null && logger.isInfoEnabled()) { - logger.info("{}", transport.boundAddress()); - for (Map.Entry entry : transport.profileBoundAddresses().entrySet()) { - logger.info("profile [{}]: {}", entry.getKey(), entry.getValue()); - } - } - localNode = localNodeFactory.apply(transport.boundAddress()); - registerRequestHandler( - HANDSHAKE_ACTION_NAME, - () -> HandshakeRequest.INSTANCE, - ThreadPool.Names.SAME, - false, false, - (request, channel) -> channel.sendResponse( - new HandshakeResponse(localNode, clusterName, localNode.getVersion()))); - if (connectToRemoteCluster) { - // here we start to connect to the remote clusters - remoteClusterService.initializeRemoteClusters(); - } - } - - @Override - protected void doStop() { - try { - transport.stop(); - } finally { - // in case the transport is not connected to our local node (thus cleaned on node disconnect) - // make sure to clean any leftover on going handles - for (Map.Entry entry : clientHandlers.entrySet()) { - final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey()); - if (holderToNotify != null) { - // callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows - getExecutorService().execute(new AbstractRunnable() { - @Override - public void onRejection(Exception e) { - // if we get rejected during node shutdown we don't wanna bubble it up - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "failed 
to notify response handler on rejection, action: {}", - holderToNotify.action()), - e); - } - @Override - public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "failed to notify response handler on exception, action: {}", - holderToNotify.action()), - e); - } - @Override - public void doRun() { - TransportException ex = new TransportException("transport stopped, action: " + holderToNotify.action()); - holderToNotify.handler().handleException(ex); - } - }); - } - } - } - } - - @Override - protected void doClose() throws IOException { - IOUtils.close(remoteClusterService, transport); - } - - /** - * start accepting incoming requests. - * when the transport layer starts up it will block any incoming requests until - * this method is called - */ - public final void acceptIncomingRequests() { - blockIncomingRequestsLatch.countDown(); - } - - public TransportInfo info() { - BoundTransportAddress boundTransportAddress = boundAddress(); - if (boundTransportAddress == null) { - return null; - } - return new TransportInfo(boundTransportAddress, transport.profileBoundAddresses()); - } - - public TransportStats stats() { - return transport.getStats(); - } - - public BoundTransportAddress boundAddress() { - return transport.boundAddress(); - } - - public List getLocalAddresses() { - return transport.getLocalAddresses(); - } - - /** - * Returns true iff the given node is already connected. 
- */ - public boolean nodeConnected(DiscoveryNode node) { - return isLocalNode(node) || transport.nodeConnected(node); - } - - public void connectToNode(DiscoveryNode node) throws ConnectTransportException { - connectToNode(node, null); - } - - /** - * Connect to the specified node with the given connection profile - * - * @param node the node to connect to - * @param connectionProfile the connection profile to use when connecting to this node - */ - public void connectToNode(final DiscoveryNode node, ConnectionProfile connectionProfile) { - if (isLocalNode(node)) { - return; - } - transport.connectToNode(node, connectionProfile, (newConnection, actualProfile) -> { - // We don't validate cluster names to allow for tribe node connections. - final DiscoveryNode remote = handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true); - // removed for TransportClient - //if (node.equals(remote) == false) { - // throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); - //} - }); - } - - /** - * Establishes and returns a new connection to the given node. The connection is NOT maintained by this service, it's the callers - * responsibility to close the connection once it goes out of scope. - * @param node the node to connect to - * @param profile the connection profile to use - */ - public Transport.Connection openConnection(final DiscoveryNode node, ConnectionProfile profile) throws IOException { - if (isLocalNode(node)) { - return localNodeConnection; - } else { - return transport.openConnection(node, profile); - } - } - - /** - * Executes a high-level handshake using the given connection - * and returns the discovery node of the node the connection - * was established with. The handshake will fail if the cluster - * name on the target node mismatches the local cluster name. 
- * - * @param connection the connection to a specific node - * @param handshakeTimeout handshake timeout - * @return the connected node - * @throws ConnectTransportException if the connection failed - * @throws IllegalStateException if the handshake failed - */ - public DiscoveryNode handshake( - final Transport.Connection connection, - final long handshakeTimeout) throws ConnectTransportException { - return handshake(connection, handshakeTimeout, clusterName::equals); - } - - /** - * Executes a high-level handshake using the given connection - * and returns the discovery node of the node the connection - * was established with. The handshake will fail if the cluster - * name on the target node doesn't match the local cluster name. - * - * @param connection the connection to a specific node - * @param handshakeTimeout handshake timeout - * @param clusterNamePredicate cluster name validation predicate - * @return the connected node - * @throws ConnectTransportException if the connection failed - * @throws IllegalStateException if the handshake failed - */ - public DiscoveryNode handshake( - final Transport.Connection connection, - final long handshakeTimeout, Predicate clusterNamePredicate) throws ConnectTransportException { - final HandshakeResponse response; - final DiscoveryNode node = connection.getNode(); - try { - PlainTransportFuture futureHandler = new PlainTransportFuture<>( - new FutureTransportResponseHandler() { - @Override - public HandshakeResponse newInstance() { - return new HandshakeResponse(); - } - }); - sendRequest(connection, HANDSHAKE_ACTION_NAME, HandshakeRequest.INSTANCE, - TransportRequestOptions.builder().withTimeout(handshakeTimeout).build(), futureHandler); - response = futureHandler.txGet(); - } catch (Exception e) { - throw new IllegalStateException("handshake failed with " + node, e); - } - - if (!clusterNamePredicate.test(response.clusterName)) { - throw new IllegalStateException("handshake failed, mismatched cluster name [" + 
response.clusterName + "] - " + node); - } else if (response.version.isCompatible(localNode.getVersion()) == false) { - throw new IllegalStateException("handshake failed, incompatible version [" + response.version + "] - " + node); - } - logger.info("handshake: success with node {}", response.discoveryNode); - return response.discoveryNode; - } - - static class HandshakeRequest extends TransportRequest { - - public static final HandshakeRequest INSTANCE = new HandshakeRequest(); - - private HandshakeRequest() { - } - - } - - public static class HandshakeResponse extends TransportResponse { - private DiscoveryNode discoveryNode; - private ClusterName clusterName; - private Version version; - - HandshakeResponse() { - } - - public HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, Version version) { - this.discoveryNode = discoveryNode; - this.version = version; - this.clusterName = clusterName; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - discoveryNode = in.readOptionalWriteable(DiscoveryNode::new); - clusterName = new ClusterName(in); - version = Version.readVersion(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalWriteable(discoveryNode); - clusterName.writeTo(out); - Version.writeVersion(version, out); - } - } - - public void disconnectFromNode(DiscoveryNode node) { - if (isLocalNode(node)) { - return; - } - transport.disconnectFromNode(node); - } - - public void addConnectionListener(TransportConnectionListener listener) { - connectionListeners.add(listener); - } - - public void removeConnectionListener(TransportConnectionListener listener) { - connectionListeners.remove(listener); - } - - public TransportFuture submitRequest(DiscoveryNode node, String action, TransportRequest request, - TransportResponseHandler handler) throws TransportException { - return submitRequest(node, action, request, 
TransportRequestOptions.EMPTY, handler); - } - - public TransportFuture submitRequest(DiscoveryNode node, String action, TransportRequest request, - TransportRequestOptions options, - TransportResponseHandler handler) throws TransportException { - PlainTransportFuture futureHandler = new PlainTransportFuture<>(handler); - try { - Transport.Connection connection = getConnection(node); - sendRequest(connection, action, request, options, futureHandler); - } catch (NodeNotConnectedException ex) { - // the caller might not handle this so we invoke the handler - futureHandler.handleException(ex); - } - return futureHandler; - } - - public void sendRequest(final DiscoveryNode node, final String action, - final TransportRequest request, - final TransportResponseHandler handler) { - try { - Transport.Connection connection = getConnection(node); - sendRequest(connection, action, request, TransportRequestOptions.EMPTY, handler); - } catch (NodeNotConnectedException ex) { - // the caller might not handle this so we invoke the handler - handler.handleException(ex); - } - } - - public final void sendRequest(final DiscoveryNode node, final String action, - final TransportRequest request, - final TransportRequestOptions options, - TransportResponseHandler handler) { - try { - Transport.Connection connection = getConnection(node); - sendRequest(connection, action, request, options, handler); - } catch (NodeNotConnectedException ex) { - // the caller might not handle this so we invoke the handler - handler.handleException(ex); - } - } - - public final void sendRequest(final Transport.Connection connection, final String action, - final TransportRequest request, - final TransportRequestOptions options, - TransportResponseHandler handler) { - - asyncSender.sendRequest(connection, action, request, options, handler); - } - - /** - * Returns either a real transport connection or a local node connection if we are using the local node optimization. 
- * @throws NodeNotConnectedException if the given node is not connected - */ - public Transport.Connection getConnection(DiscoveryNode node) { - if (isLocalNode(node)) { - return localNodeConnection; - } else { - return transport.getConnection(node); - } - } - - public final void sendChildRequest(final DiscoveryNode node, final String action, - final TransportRequest request, final Task parentTask, - final TransportRequestOptions options, - final TransportResponseHandler handler) { - try { - Transport.Connection connection = getConnection(node); - sendChildRequest(connection, action, request, parentTask, options, handler); - } catch (NodeNotConnectedException ex) { - // the caller might not handle this so we invoke the handler - handler.handleException(ex); - } - } - - public void sendChildRequest(final Transport.Connection connection, final String action, - final TransportRequest request, final Task parentTask, - final TransportResponseHandler handler) { - sendChildRequest(connection, action, request, parentTask, TransportRequestOptions.EMPTY, handler); - } - - public void sendChildRequest(final Transport.Connection connection, final String action, - final TransportRequest request, final Task parentTask, - final TransportRequestOptions options, - final TransportResponseHandler handler) { - request.setParentTask(localNode.getId(), parentTask.getId()); - try { - sendRequest(connection, action, request, options, handler); - } catch (TaskCancelledException ex) { - // The parent task is already cancelled - just fail the request - handler.handleException(new TransportException(ex)); - } catch (NodeNotConnectedException ex) { - // the caller might not handle this so we invoke the handler - handler.handleException(ex); - } - - } - - private void sendRequestInternal(final Transport.Connection connection, final String action, - final TransportRequest request, - final TransportRequestOptions options, - TransportResponseHandler handler) { - if (connection == null) { - throw 
new IllegalStateException("can't send request to a null connection"); - } - DiscoveryNode node = connection.getNode(); - final long requestId = transport.newRequestId(); - final TimeoutHandler timeoutHandler; - try { - - if (options.timeout() == null) { - timeoutHandler = null; - } else { - timeoutHandler = new TimeoutHandler(requestId); - } - Supplier storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true); - TransportResponseHandler responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler); - clientHandlers.put(requestId, new RequestHolder<>(responseHandler, connection, action, timeoutHandler)); - if (lifecycle.stoppedOrClosed()) { - // if we are not started the exception handling will remove the RequestHolder again and calls the handler to notify - // the caller. It will only notify if the toStop code hasn't done the work yet. - throw new TransportException("TransportService is closed stopped can't send request"); - } - if (timeoutHandler != null) { - assert options.timeout() != null; - timeoutHandler.future = threadPool.schedule(options.timeout(), ThreadPool.Names.GENERIC, timeoutHandler); - } - connection.sendRequest(requestId, action, request, options); // local node optimization happens upstream - } catch (final Exception e) { - // usually happen either because we failed to connect to the node - // or because we failed serializing the message - final RequestHolder holderToNotify = clientHandlers.remove(requestId); - // If holderToNotify == null then handler has already been taken care of. 
- if (holderToNotify != null) { - holderToNotify.cancelTimeout(); - // callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows - final SendRequestTransportException sendRequestException = new SendRequestTransportException(node, action, e); - threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() { - @Override - public void onRejection(Exception e) { - // if we get rejected during node shutdown we don't wanna bubble it up - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "failed to notify response handler on rejection, action: {}", - holderToNotify.action()), - e); - } - @Override - public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "failed to notify response handler on exception, action: {}", - holderToNotify.action()), - e); - } - @Override - protected void doRun() throws Exception { - holderToNotify.handler().handleException(sendRequestException); - } - }); - } else { - logger.debug("Exception while sending request, handler likely already notified due to timeout", e); - } - } - } - - private void sendLocalRequest(long requestId, final String action, final TransportRequest request, TransportRequestOptions options) { - final DirectResponseChannel channel = new DirectResponseChannel(logger, localNode, action, requestId, this, threadPool); - try { - onRequestSent(localNode, requestId, action, request, options); - onRequestReceived(requestId, action); - final RequestHandlerRegistry reg = getRequestHandler(action); - if (reg == null) { - throw new ActionNotFoundTransportException("Action [" + action + "] not found"); - } - final String executor = reg.getExecutor(); - if (ThreadPool.Names.SAME.equals(executor)) { - //noinspection unchecked - reg.processMessageReceived(request, channel); - } else { - threadPool.executor(executor).execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - 
//noinspection unchecked - reg.processMessageReceived(request, channel); - } - - @Override - public boolean isForceExecution() { - return reg.isForceExecution(); - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "failed to notify channel of error message for action [{}]", action), inner); - } - } - }); - } - - } catch (Exception e) { - try { - channel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "failed to notify channel of error message for action [{}]", action), inner); - } - } - } - - private boolean shouldTraceAction(String action) { - if (tracerLogInclude.length > 0) { - if (Regex.simpleMatch(tracerLogInclude, action) == false) { - return false; - } - } - if (tracerLogExclude.length > 0) { - return !Regex.simpleMatch(tracerLogExclude, action); - } - return true; - } - - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { - return transport.addressesFromString(address, perAddressLimit); - } - - /** - * Registers a new request handler - * - * @param action The action the request handler is associated with - * @param requestFactory a callable to be used construct new instances for streaming - * @param executor The executor the request handling will be executed on - * @param handler The handler itself that implements the request handling - */ - public void registerRequestHandler(String action, Supplier requestFactory, - String executor, TransportRequestHandler handler) { - handler = interceptor.interceptHandler(action, executor, false, handler); - RequestHandlerRegistry reg = new RequestHandlerRegistry<>( - action, Streamable.newWriteableReader(requestFactory), taskManager, handler, executor, false, true); - registerRequestHandler(reg); - } - - /** - * 
Registers a new request handler - * - * @param action The action the request handler is associated with - * @param requestReader a callable to be used construct new instances for streaming - * @param executor The executor the request handling will be executed on - * @param handler The handler itself that implements the request handling - */ - public void registerRequestHandler(String action, String executor, - Writeable.Reader requestReader, - TransportRequestHandler handler) { - handler = interceptor.interceptHandler(action, executor, false, handler); - RequestHandlerRegistry reg = new RequestHandlerRegistry<>( - action, requestReader, taskManager, handler, executor, false, true); - registerRequestHandler(reg); - } - - /** - * Registers a new request handler - * - * @param action The action the request handler is associated with - * @param request The request class that will be used to construct new instances for streaming - * @param executor The executor the request handling will be executed on - * @param forceExecution Force execution on the executor queue and never reject it - * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached. 
- * @param handler The handler itself that implements the request handling - */ - public void registerRequestHandler(String action, Supplier request, - String executor, boolean forceExecution, - boolean canTripCircuitBreaker, - TransportRequestHandler handler) { - handler = interceptor.interceptHandler(action, executor, forceExecution, handler); - RequestHandlerRegistry reg = new RequestHandlerRegistry<>( - action, Streamable.newWriteableReader(request), taskManager, handler, executor, forceExecution, canTripCircuitBreaker); - registerRequestHandler(reg); - } - - /** - * Registers a new request handler - * - * @param action The action the request handler is associated with - * @param requestReader The request class that will be used to construct new instances for streaming - * @param executor The executor the request handling will be executed on - * @param forceExecution Force execution on the executor queue and never reject it - * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached. 
- * @param handler The handler itself that implements the request handling - */ - public void registerRequestHandler(String action, - String executor, boolean forceExecution, - boolean canTripCircuitBreaker, - Writeable.Reader requestReader, - TransportRequestHandler handler) { - handler = interceptor.interceptHandler(action, executor, forceExecution, handler); - RequestHandlerRegistry reg = new RequestHandlerRegistry<>( - action, requestReader, taskManager, handler, executor, forceExecution, canTripCircuitBreaker); - registerRequestHandler(reg); - } - - private void registerRequestHandler(RequestHandlerRegistry reg) { - synchronized (requestHandlerMutex) { - if (requestHandlers.containsKey(reg.getAction())) { - throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); - } - requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); - } - } - - /** called by the {@link Transport} implementation once a request has been sent */ - void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, - TransportRequestOptions options) { - if (traceEnabled() && shouldTraceAction(action)) { - traceRequestSent(node, requestId, action, options); - } - } - - protected boolean traceEnabled() { - return tracerLog.isTraceEnabled(); - } - - /** called by the {@link Transport} implementation once a response was sent to calling node */ - void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions options) { - if (traceEnabled() && shouldTraceAction(action)) { - traceResponseSent(requestId, action); - } - } - - /** called by the {@link Transport} implementation after an exception was sent as a response to an incoming request */ - void onResponseSent(long requestId, String action, Exception e) { - if (traceEnabled() && shouldTraceAction(action)) { - traceResponseSent(requestId, action, e); - } - } - - protected 
void traceResponseSent(long requestId, String action, Exception e) { - tracerLog.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); - } - - /** - * called by the {@link Transport} implementation when an incoming request arrives but before - * any parsing of it has happened (with the exception of the requestId and action) - */ - void onRequestReceived(long requestId, String action) { - try { - blockIncomingRequestsLatch.await(); - } catch (InterruptedException e) { - logger.trace("interrupted while waiting for incoming requests block to be removed"); - } - if (traceEnabled() && shouldTraceAction(action)) { - traceReceivedRequest(requestId, action); - } - } - - public RequestHandlerRegistry getRequestHandler(String action) { - return requestHandlers.get(action); - } - - /** - * called by the {@link Transport} implementation when a response or an exception has been received for a previously - * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not - * found. 
- */ - public TransportResponseHandler onResponseReceived(final long requestId) { - RequestHolder holder = clientHandlers.remove(requestId); - - if (holder == null) { - checkForTimeout(requestId); - return null; - } - holder.cancelTimeout(); - if (traceEnabled() && shouldTraceAction(holder.action())) { - traceReceivedResponse(requestId, holder.connection().getNode(), holder.action()); - } - return holder.handler(); - } - - private void checkForTimeout(long requestId) { - // lets see if its in the timeout holder, but sync on mutex to make sure any ongoing timeout handling has finished - final DiscoveryNode sourceNode; - final String action; - assert clientHandlers.get(requestId) == null; - TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId); - if (timeoutInfoHolder != null) { - long time = System.currentTimeMillis(); - logger.warn("Received response for a request that has timed out, sent [{}ms] ago, timed out [{}ms] ago, " + - "action [{}], node [{}], id [{}]", time - timeoutInfoHolder.sentTime(), time - timeoutInfoHolder.timeoutTime(), - timeoutInfoHolder.action(), timeoutInfoHolder.node(), requestId); - action = timeoutInfoHolder.action(); - sourceNode = timeoutInfoHolder.node(); - } else { - logger.warn("Transport response handler not found of id [{}]", requestId); - action = null; - sourceNode = null; - } - // call tracer out of lock - if (traceEnabled() == false) { - return; - } - if (action == null) { - assert sourceNode == null; - traceUnresolvedResponse(requestId); - } else if (shouldTraceAction(action)) { - traceReceivedResponse(requestId, sourceNode, action); - } - } - - void onNodeConnected(final DiscoveryNode node) { - // capture listeners before spawning the background callback so the following pattern won't trigger a call - // connectToNode(); connection is completed successfully - // addConnectionListener(); this listener shouldn't be called - final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); 
- getExecutorService().execute(() -> listenersToNotify.forEach(listener -> listener.onNodeConnected(node))); - } - - void onConnectionOpened(Transport.Connection connection) { - // capture listeners before spawning the background callback so the following pattern won't trigger a call - // connectToNode(); connection is completed successfully - // addConnectionListener(); this listener shouldn't be called - final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); - getExecutorService().execute(() -> listenersToNotify.forEach(listener -> listener.onConnectionOpened(connection))); - } - - public void onNodeDisconnected(final DiscoveryNode node) { - try { - getExecutorService().execute( () -> { - for (final TransportConnectionListener connectionListener : connectionListeners) { - connectionListener.onNodeDisconnected(node); - } - }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Rejected execution on NodeDisconnected", ex); - } - } - - void onConnectionClosed(Transport.Connection connection) { - try { - for (Map.Entry entry : clientHandlers.entrySet()) { - RequestHolder holder = entry.getValue(); - if (holder.connection().getCacheKey().equals(connection.getCacheKey())) { - final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey()); - if (holderToNotify != null) { - // callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows - getExecutorService().execute(() -> holderToNotify.handler().handleException(new NodeDisconnectedException( - connection.getNode(), holderToNotify.action()))); - } - } - } - } catch (EsRejectedExecutionException ex) { - logger.debug("Rejected execution on onConnectionClosed", ex); - } - } - - protected void traceReceivedRequest(long requestId, String action) { - tracerLog.trace("[{}][{}] received request", requestId, action); - } - - protected void traceResponseSent(long requestId, String action) { - 
tracerLog.trace("[{}][{}] sent response", requestId, action); - } - - protected void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String action) { - tracerLog.trace("[{}][{}] received response from [{}]", requestId, action, sourceNode); - } - - protected void traceUnresolvedResponse(long requestId) { - tracerLog.trace("[{}] received response but can't resolve it to a request", requestId); - } - - protected void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { - tracerLog.trace("[{}][{}] sent to [{}] (timeout: [{}])", requestId, action, node, options.timeout()); - } - - class TimeoutHandler implements Runnable { - - private final long requestId; - - private final long sentTime = System.currentTimeMillis(); - - volatile ScheduledFuture future; - - TimeoutHandler(long requestId) { - this.requestId = requestId; - } - - @Override - public void run() { - // we get first to make sure we only add the TimeoutInfoHandler if needed. - final RequestHolder holder = clientHandlers.get(requestId); - if (holder != null) { - // add it to the timeout information holder, in case we are going to get a response later - long timeoutTime = System.currentTimeMillis(); - timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(holder.connection().getNode(), holder.action(), sentTime, - timeoutTime)); - // now that we have the information visible via timeoutInfoHandlers, we try to remove the request id - final RequestHolder removedHolder = clientHandlers.remove(requestId); - if (removedHolder != null) { - assert removedHolder == holder : "two different holder instances for request [" + requestId + "]"; - removedHolder.handler().handleException( - new ReceiveTimeoutTransportException(holder.connection().getNode(), holder.action(), - "request_id [" + requestId + "] timed out after [" + (timeoutTime - sentTime) + "ms]")); - } else { - // response was processed, remove timeout info. 
- timeoutInfoHandlers.remove(requestId); - } - } - } - - /** - * cancels timeout handling. this is a best effort only to avoid running it. remove the requestId from {@link #clientHandlers} - * to make sure this doesn't run. - */ - public void cancel() { - assert clientHandlers.get(requestId) == null : - "cancel must be called after the requestId [" + requestId + "] has been removed from clientHandlers"; - FutureUtils.cancel(future); - } - } - - static class TimeoutInfoHolder { - - private final DiscoveryNode node; - private final String action; - private final long sentTime; - private final long timeoutTime; - - TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) { - this.node = node; - this.action = action; - this.sentTime = sentTime; - this.timeoutTime = timeoutTime; - } - - public DiscoveryNode node() { - return node; - } - - public String action() { - return action; - } - - public long sentTime() { - return sentTime; - } - - public long timeoutTime() { - return timeoutTime; - } - } - - static class RequestHolder { - - private final TransportResponseHandler handler; - - private final Transport.Connection connection; - - private final String action; - - private final TimeoutHandler timeoutHandler; - - RequestHolder(TransportResponseHandler handler, Transport.Connection connection, String action, TimeoutHandler timeoutHandler) { - this.handler = handler; - this.connection = connection; - this.action = action; - this.timeoutHandler = timeoutHandler; - } - - public TransportResponseHandler handler() { - return handler; - } - - public Transport.Connection connection() { - return this.connection; - } - - public String action() { - return this.action; - } - - public void cancelTimeout() { - if (timeoutHandler != null) { - timeoutHandler.cancel(); - } - } - } - - /** - * This handler wrapper ensures that the response thread executes with the correct thread context. Before any of the handle methods - * are invoked we restore the context. 
- */ - public static final class ContextRestoreResponseHandler implements TransportResponseHandler { - - private final TransportResponseHandler delegate; - private final Supplier contextSupplier; - - public ContextRestoreResponseHandler(Supplier contextSupplier, TransportResponseHandler delegate) { - this.delegate = delegate; - this.contextSupplier = contextSupplier; - } - - @Override - public T read(StreamInput in) throws IOException { - return delegate.read(in); - } - - @Override - public void handleResponse(T response) { - try (ThreadContext.StoredContext ignore = contextSupplier.get()) { - delegate.handleResponse(response); - } - } - - @Override - public void handleException(TransportException exp) { - try (ThreadContext.StoredContext ignore = contextSupplier.get()) { - delegate.handleException(exp); - } - } - - @Override - public String executor() { - return delegate.executor(); - } - - @Override - public String toString() { - return getClass().getName() + "/" + delegate.toString(); - } - - } - - static class DirectResponseChannel implements TransportChannel { - final Logger logger; - final DiscoveryNode localNode; - private final String action; - private final long requestId; - final TransportService service; - final ThreadPool threadPool; - - DirectResponseChannel(Logger logger, DiscoveryNode localNode, String action, long requestId, TransportService service, - ThreadPool threadPool) { - this.logger = logger; - this.localNode = localNode; - this.action = action; - this.requestId = requestId; - this.service = service; - this.threadPool = threadPool; - } - - @Override - public String getProfileName() { - return DIRECT_RESPONSE_PROFILE; - } - - @Override - public void sendResponse(TransportResponse response) throws IOException { - sendResponse(response, TransportResponseOptions.EMPTY); - } - - @Override - public void sendResponse(final TransportResponse response, TransportResponseOptions options) throws IOException { - service.onResponseSent(requestId, action, 
response, options); - final TransportResponseHandler handler = service.onResponseReceived(requestId); - // ignore if its null, the service logs it - if (handler != null) { - final String executor = handler.executor(); - if (ThreadPool.Names.SAME.equals(executor)) { - processResponse(handler, response); - } else { - threadPool.executor(executor).execute(() -> processResponse(handler, response)); - } - } - } - - @SuppressWarnings("unchecked") - protected void processResponse(TransportResponseHandler handler, TransportResponse response) { - try { - handler.handleResponse(response); - } catch (Exception e) { - processException(handler, wrapInRemote(new ResponseHandlerFailureTransportException(e))); - } - } - - @Override - public void sendResponse(Exception exception) throws IOException { - service.onResponseSent(requestId, action, exception); - final TransportResponseHandler handler = service.onResponseReceived(requestId); - // ignore if its null, the service logs it - if (handler != null) { - final RemoteTransportException rtx = wrapInRemote(exception); - final String executor = handler.executor(); - if (ThreadPool.Names.SAME.equals(executor)) { - processException(handler, rtx); - } else { - threadPool.executor(handler.executor()).execute(() -> processException(handler, rtx)); - } - } - } - - protected RemoteTransportException wrapInRemote(Exception e) { - if (e instanceof RemoteTransportException) { - return (RemoteTransportException) e; - } - return new RemoteTransportException(localNode.getName(), localNode.getAddress(), action, e); - } - - protected void processException(final TransportResponseHandler handler, final RemoteTransportException rtx) { - try { - handler.handleException(rtx); - } catch (Exception e) { - logger.error( - (Supplier) () -> new ParameterizedMessage( - "failed to handle exception for action [{}], handler [{}]", action, handler), e); - } - } - - @Override - public String getChannelType() { - return "direct"; - } - - @Override - public Version 
getVersion() { - return localNode.getVersion(); - } - } - - /** - * Returns the internal thread pool - */ - public ThreadPool getThreadPool() { - return threadPool; - } - - private boolean isLocalNode(DiscoveryNode discoveryNode) { - return Objects.requireNonNull(discoveryNode, "discovery node must not be null").equals(localNode); - } -} diff --git a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportStatus.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportStatus.java deleted file mode 100644 index ed69ad5..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/TransportStatus.java +++ /dev/null @@ -1,52 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -public final class TransportStatus { - - private static final byte STATUS_REQRES = 1 << 0; - private static final byte STATUS_ERROR = 1 << 1; - private static final byte STATUS_COMPRESS = 1 << 2; - private static final byte STATUS_HANDSHAKE = 1 << 3; - - public static boolean isRequest(byte value) { - return (value & STATUS_REQRES) == 0; - } - - public static byte setRequest(byte value) { - value &= ~STATUS_REQRES; - return value; - } - - public static byte setResponse(byte value) { - value |= STATUS_REQRES; - return value; - } - - public static boolean isError(byte value) { - return (value & STATUS_ERROR) != 0; - } - - public static byte setError(byte value) { - value |= STATUS_ERROR; - return value; - } - - public static boolean isCompress(byte value) { - return (value & STATUS_COMPRESS) != 0; - } - - public static byte setCompress(byte value) { - value |= STATUS_COMPRESS; - return value; - } - - static boolean isHandshake(byte value) { // pkg private since it's only used internally - return (value & STATUS_HANDSHAKE) != 0; - } - - static byte setHandshake(byte value) { // pkg private since it's only used internally - value |= STATUS_HANDSHAKE; - return value; - } - - -} diff --git 
a/transport/src/main/java/org/xbib/elasticsearch/client/transport/package-info.java b/transport/src/main/java/org/xbib/elasticsearch/client/transport/package-info.java deleted file mode 100644 index 50220b5..0000000 --- a/transport/src/main/java/org/xbib/elasticsearch/client/transport/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** - * Classes for Elasticsearch transport client. - */ -package org.xbib.elasticsearch.client.transport; diff --git a/transport/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods b/transport/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods deleted file mode 100644 index c94ea28..0000000 --- a/transport/src/main/resources/META-INF/services/org.xbib.elasticsearch.client.ClientMethods +++ /dev/null @@ -1 +0,0 @@ -org.xbib.elasticsearch.client.transport.TransportBulkClient \ No newline at end of file diff --git a/transport/src/main/resources/extra-security.policy b/transport/src/main/resources/extra-security.policy deleted file mode 100644 index 24db998..0000000 --- a/transport/src/main/resources/extra-security.policy +++ /dev/null @@ -1,15 +0,0 @@ - -grant codeBase "${codebase.netty-common}" { - // for reading the system-wide configuration for the backlog of established sockets - permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; - // netty makes and accepts socket connections - permission java.net.SocketPermission "*", "accept,connect,resolve"; - // 4.1.24 io.netty.util.concurrent.GlobalEventExecutor$2.run(GlobalEventExecutor.java:228) - permission java.lang.RuntimePermission "setContextClassLoader"; -}; - -grant codeBase "${codebase.netty-transport}" { - // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 - // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! 
- permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; -}; diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TestRunnerThreadsFilter.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TestRunnerThreadsFilter.java deleted file mode 100644 index 0ad52e3..0000000 --- a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TestRunnerThreadsFilter.java +++ /dev/null @@ -1,11 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import com.carrotsearch.randomizedtesting.ThreadFilter; - -public class TestRunnerThreadsFilter implements ThreadFilter { - - @Override - public boolean reject(Thread thread) { - return thread.getName().startsWith("ObjectCleanerThread"); - } -} diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientDuplicateIDTests.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientDuplicateIDTests.java deleted file mode 100644 index 7fbb20e..0000000 --- a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientDuplicateIDTests.java +++ /dev/null @@ -1,106 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import 
org.elasticsearch.plugins.Plugin; -import org.elasticsearch.testframework.ESSingleNodeTestCase; -import org.elasticsearch.transport.Netty4Plugin; -import org.junit.Before; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.util.Collection; -import java.util.Collections; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -public class TransportBulkClientDuplicateIDTests extends ESSingleNodeTestCase { - - private static final Logger logger = LogManager.getLogger(TransportBulkClientDuplicateIDTests.class.getName()); - - private static final long MAX_ACTIONS = 100L; - - private static final long NUM_ACTIONS = 12345L; - - private TransportAddress address; - - @Before - public void fetchTransportAddress() { - NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); - address = nodeInfo.getTransport().getAddress().publishAddress(); - } - - @Override - protected Collection> getPlugins() { - return Collections.singletonList(Netty4Plugin.class); - } - - @Override - public Settings nodeSettings() { - return Settings.builder() - .put(super.nodeSettings()) - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "test-cluster") - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) - .build(); - } - - private Settings transportClientSettings() { - return Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "test-cluster") - .put("host", address.address().getHostString() + ":" + address.getPort()) - .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created - .build(); - } - - public void testDuplicateDocIDs() throws Exception { - final TransportBulkClient client = ClientBuilder.builder() - .put(transportClientSettings()) - .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, 2) // avoid EsRejectedExecutionException - 
.put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(TransportBulkClient.class); - try { - client.newIndex("test"); - for (int i = 0; i < NUM_ACTIONS; i++) { - client.index("test", "test", randomAlphaOfLength(1), false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - client.refreshIndex("test"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test") - .setTypes("test") - .setQuery(matchAllQuery()); - long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); - logger.info("hits = {}", hits); - assertTrue(hits < NUM_ACTIONS); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - logger.info("numactions = {}, submitted = {}, succeeded= {}, failed = {}", NUM_ACTIONS, - client.getMetric().getSubmitted().getCount(), - client.getMetric().getSucceeded().getCount(), - client.getMetric().getFailed().getCount()); - assertEquals(NUM_ACTIONS, client.getMetric().getSubmitted().getCount()); - assertEquals(NUM_ACTIONS, client.getMetric().getSucceeded().getCount()); - } - } -} diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientReplicaTests.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientReplicaTests.java deleted file mode 100644 index 60a97ab..0000000 --- a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientReplicaTests.java +++ /dev/null @@ -1,128 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; - -import 
com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.indices.stats.CommonStats; -import org.elasticsearch.action.admin.indices.stats.IndexShardStats; -import org.elasticsearch.action.admin.indices.stats.IndexStats; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.index.shard.IndexingStats; -import org.elasticsearch.testframework.ESIntegTestCase; -import org.junit.Before; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.util.Map; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) -public class TransportBulkClientReplicaTests extends ESIntegTestCase { - - private static final Logger logger = LogManager.getLogger(TransportBulkClientTests.class.getName()); - - private String clusterName; - - private TransportAddress address; - - @Before - public void fetchTransportAddress() { - clusterName = client().admin().cluster().prepareClusterStats().get().getClusterName().value(); - NodeInfo nodeInfo = 
client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); - address = nodeInfo.getTransport().getAddress().publishAddress(); - } - - private Settings ourTransportClientSettings() { - return Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName) - .put("host", address.address().getHostString() + ":" + address.getPort()) - .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created - .build(); - } - - public void testReplicaLevel() throws Exception { - - //ensureStableCluster(4); - - Settings settingsTest1 = Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 2) - .build(); - - Settings settingsTest2 = Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 1) - .build(); - - final TransportBulkClient client = ClientBuilder.builder() - .put(ourTransportClientSettings()) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(TransportBulkClient.class); - try { - client.newIndex("test1", settingsTest1, null) - .newIndex("test2", settingsTest2, null); - client.waitForCluster("GREEN", "30s"); - for (int i = 0; i < 1234; i++) { - client.index("test1", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - for (int i = 0; i < 1234; i++) { - client.index("test2", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("60s"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - logger.info("refreshing"); - client.refreshIndex("test1"); - client.refreshIndex("test2"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test1", "test2") - .setQuery(matchAllQuery()); - long hits = searchRequestBuilder.execute().actionGet().getHits().getTotalHits(); - logger.info("query total hits={}", 
hits); - assertEquals(2468, hits); - IndicesStatsRequestBuilder indicesStatsRequestBuilder = new IndicesStatsRequestBuilder(client.client(), - IndicesStatsAction.INSTANCE).all(); - IndicesStatsResponse response = indicesStatsRequestBuilder.execute().actionGet(); - for (Map.Entry m : response.getIndices().entrySet()) { - IndexStats indexStats = m.getValue(); - CommonStats commonStats = indexStats.getTotal(); - IndexingStats indexingStats = commonStats.getIndexing(); - IndexingStats.Stats stats = indexingStats.getTotal(); - logger.info("index {}: count = {}", m.getKey(), stats.getIndexCount()); - for (Map.Entry me : indexStats.getIndexShards().entrySet()) { - IndexShardStats indexShardStats = me.getValue(); - CommonStats commonShardStats = indexShardStats.getTotal(); - logger.info("shard {} count = {}", me.getKey(), - commonShardStats.getIndexing().getTotal().getIndexCount()); - } - } - try { - client.deleteIndex("test1") - .deleteIndex("test2"); - } catch (Exception e) { - logger.error("delete index failed, ignored. 
Reason:", e); - } - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - } - } -} diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientTests.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientTests.java deleted file mode 100644 index b9e22bb..0000000 --- a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientTests.java +++ /dev/null @@ -1,256 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.testframework.ESSingleNodeTestCase; -import org.elasticsearch.transport.Netty4Plugin; -import org.junit.Before; -import 
org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -public class TransportBulkClientTests extends ESSingleNodeTestCase { - - private static final Logger logger = LogManager.getLogger(TransportBulkClientTests.class.getName()); - - private static final Long MAX_ACTIONS = 10L; - - private static final Long NUM_ACTIONS = 1234L; - - private TransportAddress address; - - @Override - protected Collection> getPlugins() { - return Collections.singletonList(Netty4Plugin.class); - } - - @Override - public Settings nodeSettings() { - return Settings.builder() - .put(super.nodeSettings()) - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "test-cluster") - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) - .build(); - } - - private Settings transportClientSettings() { - return Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "test-cluster") - .put("host", address.address().getHostString() + ":" + address.getPort()) - .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created - .build(); - } - - @Before - public void fetchTransportAddress() { - NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); - address = nodeInfo.getTransport().getAddress().publishAddress(); - } - - public void testBulkTransportClientNewIndex() throws Exception { - logger.info("firing up BulkTransportClient"); - final TransportBulkClient client = ClientBuilder.builder() - .put(transportClientSettings()) 
- .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(TransportBulkClient.class); - try { - logger.info("creating index"); - client.newIndex("test"); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - logger.info("deleting/creating index: start"); - client.deleteIndex("test") - .newIndex("test") - .deleteIndex("test"); - logger.info("deleting/creating index: end"); - } catch (NoNodeAvailableException e) { - logger.error("no node available"); - } finally { - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - } - - public void testBulkTransportClientMapping() throws Exception { - final TransportBulkClient client = ClientBuilder.builder() - .put(transportClientSettings()) - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(TransportBulkClient.class); - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .startObject("test") - .startObject("properties") - .startObject("location") - .field("type", "geo_point") - .endObject() - .endObject() - .endObject() - .endObject(); - client.mapping("test", Strings.toString(builder)); - client.newIndex("test"); - GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices("test"); - GetMappingsResponse getMappingsResponse = - client.client().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet(); - logger.info("mappings={}", getMappingsResponse.getMappings()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - - public void testBulkTransportClientSingleDoc() throws IOException { - logger.info("firing up 
BulkTransportClient"); - final TransportBulkClient client = ClientBuilder.builder() - .put(transportClientSettings()) - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(TransportBulkClient.class); - try { - logger.info("creating index"); - client.newIndex("test"); - logger.info("indexing one doc"); - client.index("test", "test", "1", false, "{ \"name\" : \"Hello World\"}"); // single doc ingest - logger.info("flush"); - client.flushIngest(); - logger.info("wait for responses"); - client.waitForResponses("30s"); - logger.info("waited for responses"); - } catch (InterruptedException e) { - // ignore - } catch (ExecutionException e) { - logger.error(e.getMessage(), e); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - assertEquals(1, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.shutdown(); - } - } - - public void testBulkTransportClientRandomDocs() throws Exception { - long numactions = NUM_ACTIONS; - final TransportBulkClient client = ClientBuilder.builder() - .put(transportClientSettings()) - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, MAX_ACTIONS) - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(TransportBulkClient.class); - try { - client.newIndex("test"); - for (int i = 0; i < NUM_ACTIONS; i++) { - client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - if (client.hasThrowable()) { - logger.error("error", 
client.getThrowable()); - } - logger.info("assuring {} == {}", numactions, client.getMetric().getSucceeded().getCount()); - assertEquals(numactions, client.getMetric().getSucceeded().getCount()); - assertFalse(client.hasThrowable()); - client.shutdown(); - } - } - - public void testBulkTransportClientThreadedRandomDocs() throws Exception { - int maxthreads = Runtime.getRuntime().availableProcessors(); - long maxactions = MAX_ACTIONS; - final long maxloop = NUM_ACTIONS; - logger.info("TransportClient max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop); - final TransportBulkClient client = ClientBuilder.builder() - .put(transportClientSettings()) - .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions) - .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) // = effectively disables autoflush for this test - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(TransportBulkClient.class); - try { - client.newIndex("test").startBulk("test", 30 * 1000, 1000); - ExecutorService executorService = Executors.newFixedThreadPool(maxthreads); - final CountDownLatch latch = new CountDownLatch(maxthreads); - for (int i = 0; i < maxthreads; i++) { - executorService.execute(() -> { - for (int i1 = 0; i1 < maxloop; i1++) { - client.index("test", "test", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - latch.countDown(); - }); - } - logger.info("waiting for max 30 seconds..."); - latch.await(30, TimeUnit.SECONDS); - logger.info("client flush ..."); - client.flushIngest(); - client.waitForResponses("30s"); - logger.info("executor service to be shut down ..."); - executorService.shutdown(); - logger.info("executor service is shut down"); - client.stopBulk("test"); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - logger.info("assuring {} == {}", maxthreads * maxloop, client.getMetric().getSucceeded().getCount()); - assertEquals(maxthreads * 
maxloop, client.getMetric().getSucceeded().getCount()); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - assertFalse(client.hasThrowable()); - client.refreshIndex("test"); - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE) - .setIndices("test") - .setQuery(QueryBuilders.matchAllQuery()) - .setSize(0); - assertEquals(maxthreads * maxloop, - searchRequestBuilder.execute().actionGet().getHits().getTotalHits()); - client.shutdown(); - } - } -} diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientUpdateReplicaLevelTests.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientUpdateReplicaLevelTests.java deleted file mode 100644 index 37fc4be..0000000 --- a/transport/src/test/java/org/xbib/elasticsearch/client/transport/TransportBulkClientUpdateReplicaLevelTests.java +++ /dev/null @@ -1,81 +0,0 @@ -package org.xbib.elasticsearch.client.transport; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.testframework.ESIntegTestCase; -import org.junit.Before; -import org.xbib.elasticsearch.client.ClientBuilder; -import org.xbib.elasticsearch.client.SimpleBulkControl; -import org.xbib.elasticsearch.client.SimpleBulkMetric; - -@ThreadLeakFilters(defaultFilters = true, filters = {TestRunnerThreadsFilter.class}) -@ESIntegTestCase.ClusterScope(scope=ESIntegTestCase.Scope.SUITE, numDataNodes=3) -public class 
TransportBulkClientUpdateReplicaLevelTests extends ESIntegTestCase { - - private static final Logger logger = LogManager.getLogger(TransportBulkClientUpdateReplicaLevelTests.class.getName()); - - private String clusterName; - - private TransportAddress address; - - @Before - public void fetchClusterInfo() { - clusterName = client().admin().cluster().prepareClusterStats().get().getClusterName().value(); - NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); - address = nodeInfo.getTransport().getAddress().publishAddress(); - } - - private Settings ourTransportClientSettings() { - return Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName) - .put("host", address.address().getHostString() + ":" + address.getPort()) - .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created - .build(); - } - - public void testUpdateReplicaLevel() throws Exception { - - //ensureStableCluster(3); - - int shardsAfterReplica; - - Settings settings = Settings.builder() - .put("index.number_of_shards", 2) - .put("index.number_of_replicas", 0) - .build(); - - final TransportBulkClient client = ClientBuilder.builder() - .put(ourTransportClientSettings()) - .setMetric(new SimpleBulkMetric()) - .setControl(new SimpleBulkControl()) - .getClient(TransportBulkClient.class); - - try { - client.newIndex("replicatest", settings, null); - client.waitForCluster("GREEN", "30s"); - for (int i = 0; i < 12345; i++) { - client.index("replicatest", "replicatest", null, false, "{ \"name\" : \"" + randomAlphaOfLength(32) + "\"}"); - } - client.flushIngest(); - client.waitForResponses("30s"); - shardsAfterReplica = client.updateReplicaLevel("replicatest", 3); - assertEquals(shardsAfterReplica, 2 * (3 + 1)); - } catch (NoNodeAvailableException e) { - logger.warn("skipping, no node available"); - } finally { - client.shutdown(); - if (client.hasThrowable()) { - logger.error("error", client.getThrowable()); - } - 
assertFalse(client.hasThrowable()); - } - } - -} diff --git a/transport/src/test/java/org/xbib/elasticsearch/client/transport/package-info.java b/transport/src/test/java/org/xbib/elasticsearch/client/transport/package-info.java deleted file mode 100644 index 3b21564..0000000 --- a/transport/src/test/java/org/xbib/elasticsearch/client/transport/package-info.java +++ /dev/null @@ -1,4 +0,0 @@ -/** - * Classes for testing the transport client. - */ -package org.xbib.elasticsearch.client.transport;