refactored bulk controller, improved API, renamed and cleaned up methods, introduced result classes, removed redundant code and methods
This commit is contained in:
parent 4701447d3c
commit 5184b75b36

39 changed files with 1400 additions and 1090 deletions
@@ -1,21 +0,0 @@
-package org.xbib.elx.api;
-
-import java.util.Map;
-import java.util.Set;
-
-public interface BulkControl {
-
-    void startBulk(String indexName, long startRefreshInterval, long stopRefreshInterval);
-
-    boolean isBulk(String indexName);
-
-    void finishBulk(String indexName);
-
-    Set<String> indices();
-
-    Map<String, Long> getStartBulkRefreshIntervals();
-
-    Map<String, Long> getStopBulkRefreshIntervals();
-
-    String getMaxWaitTime();
-}
elx-api/src/main/java/org/xbib/elx/api/BulkController.java (new file, 36 lines)

@@ -0,0 +1,36 @@
+package org.xbib.elx.api;
+
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.common.settings.Settings;
+
+import java.io.Closeable;
+import java.io.Flushable;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+public interface BulkController extends Closeable, Flushable {
+
+    void init(Settings settings);
+
+    Throwable getLastBulkError();
+
+    void startBulkMode(IndexDefinition indexDefinition) throws IOException;
+
+    void startBulkMode(String indexName, long startRefreshIntervalInSeconds,
+                       long stopRefreshIntervalInSeconds) throws IOException;
+
+    void index(IndexRequest indexRequest);
+
+    void delete(DeleteRequest deleteRequest);
+
+    void update(UpdateRequest updateRequest);
+
+    boolean waitForResponses(long timeout, TimeUnit timeUnit);
+
+    void stopBulkMode(IndexDefinition indexDefinition) throws IOException;
+
+    void stopBulkMode(String index, long timeout, TimeUnit timeUnit) throws IOException;
+
+}
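The new BulkController centralizes the bulk lifecycle that callers previously drove through BulkControl and the client itself. A minimal usage sketch, assuming a fully initialized ExtendedClient whose getBulkController() returns a ready controller; the index name, document source, and timeout values are illustrative only:

    import org.elasticsearch.action.index.IndexRequest;
    import org.xbib.elx.api.BulkController;
    import org.xbib.elx.api.ExtendedClient;

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    public class BulkControllerSketch {

        // drives one bulk round trip through the controller of an already initialized client
        static void indexOneDocument(ExtendedClient client) throws IOException {
            BulkController controller = client.getBulkController();
            // switch the index into bulk mode; interval values are illustrative
            controller.startBulkMode("myindex", 60L, 30L);
            // hand a plain Elasticsearch index request to the controller
            controller.index(new IndexRequest("myindex", "doc", "1").source("{\"name\":\"value\"}"));
            // wait up to 30 seconds for outstanding bulk responses
            if (!controller.waitForResponses(30L, TimeUnit.SECONDS)) {
                throw new IOException("bulk responses timed out", controller.getLastBulkError());
            }
            controller.stopBulkMode("myindex", 30L, TimeUnit.SECONDS);
        }
    }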
@@ -1,9 +1,14 @@
 package org.xbib.elx.api;
 
+import org.elasticsearch.common.settings.Settings;
 import org.xbib.metrics.Count;
 import org.xbib.metrics.Metered;
 
-public interface BulkMetric {
+import java.io.Closeable;
+
+public interface BulkMetric extends Closeable {
+
+    void init(Settings settings);
 
     Metered getTotalIngest();
 
@@ -19,9 +24,9 @@ public interface BulkMetric {
 
     Count getFailed();
 
+    long elapsed();
+
     void start();
 
     void stop();
 
-    long elapsed();
-
 }
elx-api/src/main/java/org/xbib/elx/api/BulkProcessor.java (new file, 64 lines)

@@ -0,0 +1,64 @@
+package org.xbib.elx.api;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+
+import java.io.Closeable;
+import java.io.Flushable;
+import java.util.concurrent.TimeUnit;
+
+public interface BulkProcessor extends Closeable, Flushable {
+
+    BulkProcessor add(ActionRequest<?> request);
+
+    BulkProcessor add(ActionRequest<?> request, Object payload);
+
+    boolean awaitFlush(long timeout, TimeUnit unit) throws InterruptedException;
+
+    boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException;
+
+    interface BulkRequestHandler {
+
+        void execute(BulkRequest bulkRequest, long executionId);
+
+        boolean close(long timeout, TimeUnit unit) throws InterruptedException;
+    }
+
+    /**
+     * A listener for the execution.
+     */
+    public interface Listener {
+
+        /**
+         * Callback before the bulk is executed.
+         *
+         * @param executionId execution ID
+         * @param request request
+         */
+        void beforeBulk(long executionId, BulkRequest request);
+
+        /**
+         * Callback after a successful execution of bulk request.
+         *
+         * @param executionId execution ID
+         * @param request request
+         * @param response response
+         */
+        void afterBulk(long executionId, BulkRequest request, BulkResponse response);
+
+        /**
+         * Callback after a failed execution of bulk request.
+         *
+         * Note that in case an instance of <code>InterruptedException</code> is passed, which means that request
+         * processing has been
+         * cancelled externally, the thread's interruption status has been restored prior to calling this method.
+         *
+         * @param executionId execution ID
+         * @param request request
+         * @param failure failure
+         */
+        void afterBulk(long executionId, BulkRequest request, Throwable failure);
+    }
+}
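Because the library now declares its own BulkProcessor abstraction, callers that previously hooked into the Elasticsearch bulk processor would implement the Listener interface above instead. A minimal sketch of such a listener, assuming logging is the only concern and using only the callback signatures shown in the new file:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.action.bulk.BulkResponse;
    import org.xbib.elx.api.BulkProcessor;

    public class LoggingBulkListener implements BulkProcessor.Listener {

        private static final Logger logger = LogManager.getLogger(LoggingBulkListener.class);

        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            logger.debug("before bulk [{}] with {} actions", executionId, request.numberOfActions());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            logger.debug("after bulk [{}] took {} ms", executionId, response.getTook().millis());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            logger.error("bulk [" + executionId + "] failed", failure);
        }
    }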
@@ -6,15 +6,19 @@ import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 
+import java.io.Closeable;
+import java.io.Flushable;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Interface for extended managing and indexing methods of an Elasticsearch client.
  */
-public interface ExtendedClient {
+public interface ExtendedClient extends Flushable, Closeable {
@@ -30,40 +34,27 @@ public interface ExtendedClient {
      */
     ElasticsearchClient getClient();
 
-    /**
-     * Initiative the extended client, cerates instances and connect to cluster, if required.
-     *
-     * @param settings settings
-     * @return this client
-     * @throws IOException if init fails
-     */
-    ExtendedClient init(Settings settings) throws IOException;
-
-    /**
-     * Set bulk metric.
-     * @param bulkMetric the bulk metric
-     * @return this client
-     */
-    ExtendedClient setBulkMetric(BulkMetric bulkMetric);
-
     /**
      * Get bulk metric.
      * @return the bulk metric
     */
     BulkMetric getBulkMetric();
 
-    /**
-     * Set bulk control.
-     * @param bulkControl the bulk control
-     * @return this
-     */
-    ExtendedClient setBulkControl(BulkControl bulkControl);
-
     /**
      * Get buulk control.
      * @return the bulk control
     */
-    BulkControl getBulkControl();
+    BulkController getBulkController();
+
+    /**
+     * Initiative the extended client, the bulk metric and bulk controller,
+     * creates instances and connect to cluster, if required.
+     *
+     * @param settings settings
+     * @return this client
+     * @throws IOException if init fails
+     */
+    ExtendedClient init(Settings settings) throws IOException;
 
     /**
      * Build index definition from settings.
@@ -256,18 +247,12 @@ public interface ExtendedClient {
      * Stops bulk mode.
      *
      * @param index index
-     * @param maxWaitTime maximum wait time
+     * @param timeout maximum wait time
+     * @param timeUnit time unit for timeout
      * @return this
      * @throws IOException if bulk could not be stopped
     */
-    ExtendedClient stopBulk(String index, String maxWaitTime) throws IOException;
-
-    /**
-     * Flush bulk indexing, move all pending documents to the cluster.
-     *
-     * @return this
-     */
-    ExtendedClient flushIngest();
+    ExtendedClient stopBulk(String index, long timeout, TimeUnit timeUnit) throws IOException;
 
     /**
      * Update replica level.
@@ -284,10 +269,11 @@ public interface ExtendedClient {
      * @param index index
      * @param level the replica level
      * @param maxWaitTime maximum wait time
+     * @param timeUnit time unit
      * @return this
      * @throws IOException if replica setting could not be updated
     */
-    ExtendedClient updateReplicaLevel(String index, int level, String maxWaitTime) throws IOException;
+    ExtendedClient updateReplicaLevel(String index, int level, long maxWaitTime, TimeUnit timeUnit) throws IOException;
 
     /**
      * Get replica level.
@@ -330,43 +316,57 @@ public interface ExtendedClient {
      * Force segment merge of an index.
      * @param index the index
      * @param maxWaitTime maximum wait time
+     * @param timeUnit time unit
      * @return this
     */
-    boolean forceMerge(String index, String maxWaitTime);
+    boolean forceMerge(String index, long maxWaitTime, TimeUnit timeUnit);
 
     /**
      * Wait for all outstanding bulk responses.
      *
-     * @param maxWaitTime maximum wait time
+     * @param timeout maximum wait time
+     * @param timeUnit unit of timeout value
     * @return true if wait succeeded, false if wait timed out
     */
-    boolean waitForResponses(String maxWaitTime);
+    boolean waitForResponses(long timeout, TimeUnit timeUnit);
 
     /**
      * Wait for cluster being healthy.
      *
      * @param healthColor cluster health color to wait for
     * @param maxWaitTime time value
+     * @param timeUnit time unit
     * @return true if wait succeeded, false if wait timed out
     */
-    boolean waitForCluster(String healthColor, String maxWaitTime);
+    boolean waitForCluster(String healthColor, long maxWaitTime, TimeUnit timeUnit);
 
     /**
      * Get current health color.
      *
      * @param maxWaitTime maximum wait time
+     * @param timeUnit time unit
     * @return the cluster health color
     */
-    String getHealthColor(String maxWaitTime);
+    String getHealthColor(long maxWaitTime, TimeUnit timeUnit);
 
     /**
      * Wait for index recovery (after replica change).
      *
      * @param index index
     * @param maxWaitTime maximum wait time
+     * @param timeUnit time unit
     * @return true if wait succeeded, false if wait timed out
     */
-    boolean waitForRecovery(String index, String maxWaitTime);
+    boolean waitForRecovery(String index, long maxWaitTime, TimeUnit timeUnit);
+
+    /**
+     * Update index setting.
+     * @param index the index
+     * @param key the key of the value to be updated
+     * @param value the new value
+     * @throws IOException if update index setting failed
+     */
+    void updateIndexSetting(String index, String key, Object value) throws IOException;
 
     /**
      * Resolve alias.
@@ -385,14 +385,6 @@ public interface ExtendedClient {
     */
     String resolveMostRecentIndex(String alias);
 
-    /**
-     * Get all alias filters.
-     *
-     * @param alias the alias
-     * @return map of alias filters
-     */
-    Map<String, String> getAliasFilters(String alias);
-
     /**
      * Get all index filters.
      * @param index the index
@@ -401,48 +393,49 @@ public interface ExtendedClient {
     Map<String, String> getIndexFilters(String index);
 
     /**
-     * Switch from one index to another.
+     * Shift from one index to another.
      * @param indexDefinition the index definition
-     * @param extraAliases new aliases
+     * @param additionalAliases new aliases
     * @return this
     */
-    ExtendedClient switchIndex(IndexDefinition indexDefinition, List<String> extraAliases);
+    IndexShiftResult shiftIndex(IndexDefinition indexDefinition, List<String> additionalAliases);
 
     /**
-     * Switch from one index to another.
+     * Shift from one index to another.
      * @param indexDefinition the index definition
-     * @param extraAliases new aliases
+     * @param additionalAliases new aliases
     * @param indexAliasAdder method to add aliases
     * @return this
    */
-    ExtendedClient switchIndex(IndexDefinition indexDefinition, List<String> extraAliases, IndexAliasAdder indexAliasAdder);
+    IndexShiftResult shiftIndex(IndexDefinition indexDefinition, List<String> additionalAliases,
+                                IndexAliasAdder indexAliasAdder);
 
     /**
-     * Switch from one index to another.
-     *
+     * Shift from one index to another.
     * @param index the index name
     * @param fullIndexName the index name with timestamp
-     * @param extraAliases a list of names that should be set as index aliases
+     * @param additionalAliases a list of names that should be set as index aliases
     * @return this
    */
-    ExtendedClient switchIndex(String index, String fullIndexName, List<String> extraAliases);
+    IndexShiftResult shiftIndex(String index, String fullIndexName, List<String> additionalAliases);
 
     /**
-     * Switch from one index to another.
-     *
+     * Shift from one index to another.
     * @param index the index name
     * @param fullIndexName the index name with timestamp
-     * @param extraAliases a list of names that should be set as index aliases
+     * @param additionalAliases a list of names that should be set as index aliases
     * @param adder an adder method to create alias term queries
     * @return this
    */
-    ExtendedClient switchIndex(String index, String fullIndexName, List<String> extraAliases, IndexAliasAdder adder);
+    IndexShiftResult shiftIndex(String index, String fullIndexName, List<String> additionalAliases,
+                                IndexAliasAdder adder);
 
     /**
      * Prune index.
     * @param indexDefinition the index definition
+     * @return the index prune result
    */
-    void pruneIndex(IndexDefinition indexDefinition);
+    IndexPruneResult pruneIndex(IndexDefinition indexDefinition);
 
     /**
      * Apply retention policy to prune indices. All indices before delta should be deleted,
@@ -452,8 +445,10 @@ public interface ExtendedClient {
     * @param fullIndexName index name with timestamp
     * @param delta timestamp delta (for index timestamps)
     * @param mintokeep minimum number of indices to keep
+     * @param perform true if pruning should be executed, false if not
+     * @return the index prune result
    */
-    void pruneIndex(String index, String fullIndexName, int delta, int mintokeep);
+    IndexPruneResult pruneIndex(String index, String fullIndexName, int delta, int mintokeep, boolean perform);
 
     /**
      * Find the timestamp of the most recently indexed document in the index.
@@ -470,24 +465,4 @@ public interface ExtendedClient {
     * @return the cluster name
    */
     String getClusterName();
-
-    /**
-     * Returns true is a throwable exists.
-     *
-     * @return true if a Throwable exists
-     */
-    boolean hasThrowable();
-
-    /**
-     * Return last throwable if exists.
-     *
-     * @return last throwable
-     */
-    Throwable getThrowable();
-
-    /**
-     * Shutdown the client.
-     * @throws IOException if shutdown fails
-     */
-    void shutdown() throws IOException;
 }
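The interface changes above replace string durations ("30s"-style values) with explicit long/TimeUnit pairs and fold shutdown() and flushIngest() into the inherited Closeable/Flushable contract. A migration sketch for a caller, assuming an already initialized ExtendedClient; the index name and timeout values are illustrative:

    import org.xbib.elx.api.ExtendedClient;

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    public class TimeoutApiSketch {

        static void drainAndStop(ExtendedClient client) throws IOException {
            // formerly: client.waitForResponses("30s")
            client.waitForResponses(30L, TimeUnit.SECONDS);
            // formerly: client.stopBulk("myindex", "60s")
            client.stopBulk("myindex", 60L, TimeUnit.SECONDS);
            // formerly: client.updateReplicaLevel("myindex", 1, "60s")
            client.updateReplicaLevel("myindex", 1, 60L, TimeUnit.SECONDS);
            // formerly: client.flushIngest() and client.shutdown()
            client.flush();
            client.close();
        }
    }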
@@ -2,148 +2,69 @@ package org.xbib.elx.api;
 
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.util.concurrent.TimeUnit;
 
-public class IndexDefinition {
-
-    private String index;
-
-    private String fullIndexName;
-
-    private String dateTimePattern;
-
-    private URL settingsUrl;
-
-    private URL mappingsUrl;
-
-    private boolean enabled;
-
-    private boolean ignoreErrors;
-
-    private boolean switchAliases;
-
-    private boolean hasForceMerge;
-
-    private int replicaLevel;
-
-    private IndexRetention indexRetention;
-
-    private String maxWaitTime;
-
-    public IndexDefinition setIndex(String index) {
-        this.index = index;
-        return this;
-    }
-
-    public String getIndex() {
-        return index;
-    }
-
-    public IndexDefinition setFullIndexName(String fullIndexName) {
-        this.fullIndexName = fullIndexName;
-        return this;
-    }
-
-    public String getFullIndexName() {
-        return fullIndexName;
-    }
-
-    public IndexDefinition setSettingsUrl(String settingsUrlString) throws MalformedURLException {
-        this.settingsUrl = settingsUrlString != null ? new URL(settingsUrlString) : null;
-        return this;
-    }
-
-    public IndexDefinition setSettingsUrl(URL settingsUrl) {
-        this.settingsUrl = settingsUrl;
-        return this;
-    }
-
-    public URL getSettingsUrl() {
-        return settingsUrl;
-    }
-
-    public IndexDefinition setMappingsUrl(String mappingsUrlString) throws MalformedURLException {
-        this.mappingsUrl = mappingsUrlString != null ? new URL(mappingsUrlString) : null;
-        return this;
-    }
-
-    public IndexDefinition setMappingsUrl(URL mappingsUrl) {
-        this.mappingsUrl = mappingsUrl;
-        return this;
-    }
-
-    public URL getMappingsUrl() {
-        return mappingsUrl;
-    }
-
-    public IndexDefinition setDateTimePattern(String timeWindow) {
-        this.dateTimePattern = timeWindow;
-        return this;
-    }
-
-    public String getDateTimePattern() {
-        return dateTimePattern;
-    }
-
-    public IndexDefinition setEnabled(boolean enabled) {
-        this.enabled = enabled;
-        return this;
-    }
-
-    public boolean isEnabled() {
-        return enabled;
-    }
-
-    public IndexDefinition setIgnoreErrors(boolean ignoreErrors) {
-        this.ignoreErrors = ignoreErrors;
-        return this;
-    }
-
-    public boolean ignoreErrors() {
-        return ignoreErrors;
-    }
-
-    public IndexDefinition setSwitchAliases(boolean switchAliases) {
-        this.switchAliases = switchAliases;
-        return this;
-    }
-
-    public boolean isSwitchAliases() {
-        return switchAliases;
-    }
-
-    public IndexDefinition setForceMerge(boolean hasForceMerge) {
-        this.hasForceMerge = hasForceMerge;
-        return this;
-    }
-
-    public boolean hasForceMerge() {
-        return hasForceMerge;
-    }
-
-    public IndexDefinition setReplicaLevel(int replicaLevel) {
-        this.replicaLevel = replicaLevel;
-        return this;
-    }
-
-    public int getReplicaLevel() {
-        return replicaLevel;
-    }
-
-    public IndexDefinition setRetention(IndexRetention indexRetention) {
-        this.indexRetention = indexRetention;
-        return this;
-    }
-
-    public IndexRetention getRetention() {
-        return indexRetention;
-    }
-
-    public IndexDefinition setMaxWaitTime(String maxWaitTime) {
-        this.maxWaitTime = maxWaitTime;
-        return this;
-    }
-
-    public String getMaxWaitTime() {
-        return maxWaitTime;
-    }
+public interface IndexDefinition {
+
+    IndexDefinition setIndex(String index);
+
+    String getIndex();
+
+    IndexDefinition setFullIndexName(String fullIndexName);
+
+    String getFullIndexName();
+
+    IndexDefinition setSettingsUrl(String settingsUrlString) throws MalformedURLException;
+
+    IndexDefinition setSettingsUrl(URL settingsUrl);
+
+    URL getSettingsUrl();
+
+    IndexDefinition setMappingsUrl(String mappingsUrlString) throws MalformedURLException;
+
+    IndexDefinition setMappingsUrl(URL mappingsUrl);
+
+    URL getMappingsUrl();
+
+    IndexDefinition setDateTimePattern(String timeWindow);
+
+    String getDateTimePattern();
+
+    IndexDefinition setEnabled(boolean enabled);
+
+    boolean isEnabled();
+
+    IndexDefinition setIgnoreErrors(boolean ignoreErrors);
+
+    boolean ignoreErrors();
+
+    IndexDefinition setShift(boolean shift);
+
+    boolean isShiftEnabled();
+
+    IndexDefinition setForceMerge(boolean hasForceMerge);
+
+    boolean hasForceMerge();
+
+    IndexDefinition setReplicaLevel(int replicaLevel);
+
+    int getReplicaLevel();
+
+    IndexDefinition setRetention(IndexRetention indexRetention);
+
+    IndexRetention getRetention();
+
+    IndexDefinition setMaxWaitTime(long maxWaitTime, TimeUnit timeUnit);
+
+    long getMaxWaitTime();
+
+    TimeUnit getMaxWaitTimeUnit();
+
+    IndexDefinition setStartRefreshInterval(long seconds);
+
+    long getStartRefreshInterval();
+
+    IndexDefinition setStopRefreshInterval(long seconds);
+
+    long getStopRefreshInterval();
 }
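IndexDefinition is now an interface with a fluent setter style, refresh intervals in seconds, and an explicit TimeUnit for the maximum wait time. A configuration sketch, assuming the caller obtains a concrete IndexDefinition instance elsewhere; all values shown are illustrative, not defaults of the library:

    import org.xbib.elx.api.IndexDefinition;

    import java.net.MalformedURLException;
    import java.util.concurrent.TimeUnit;

    public class IndexDefinitionSketch {

        // configures a definition fluently; the concrete implementation class is assumed, not shown in this commit
        static IndexDefinition configure(IndexDefinition definition) throws MalformedURLException {
            return definition.setIndex("myindex")
                    .setFullIndexName("myindex20190101")
                    .setSettingsUrl("file:///etc/elx/myindex-settings.json")
                    .setMappingsUrl("file:///etc/elx/myindex-mapping.json")
                    .setDateTimePattern("yyyyMMdd")
                    .setShift(true)
                    .setForceMerge(true)
                    .setReplicaLevel(1)
                    .setStartRefreshInterval(-1L)
                    .setStopRefreshInterval(30L)
                    .setMaxWaitTime(30L, TimeUnit.SECONDS)
                    .setEnabled(true);
        }
    }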
elx-api/src/main/java/org/xbib/elx/api/IndexPruneResult.java (new file, 16 lines)

@@ -0,0 +1,16 @@
+package org.xbib.elx.api;
+
+import java.util.List;
+
+public interface IndexPruneResult {
+
+    enum State { NOTHING_TO_DO, SUCCESS, NONE };
+
+    State getState();
+
+    List<String> getCandidateIndices();
+
+    List<String> getDeletedIndices();
+
+    boolean isAcknowledged();
+}
@@ -1,27 +1,13 @@
 package org.xbib.elx.api;
 
-public class IndexRetention {
+public interface IndexRetention {
 
-    private int timestampDiff;
+    IndexRetention setDelta(int delta);
 
-    private int minToKeep;
+    int getDelta();
 
-    public IndexRetention setDelta(int timestampDiff) {
-        this.timestampDiff = timestampDiff;
-        return this;
-    }
+    IndexRetention setMinToKeep(int minToKeep);
 
-    public int getDelta() {
-        return timestampDiff;
-    }
+    int getMinToKeep();
 
-    public IndexRetention setMinToKeep(int minToKeep) {
-        this.minToKeep = minToKeep;
-        return this;
-    }
-
-    public int getMinToKeep() {
-        return minToKeep;
-    }
-
 }
elx-api/src/main/java/org/xbib/elx/api/IndexShiftResult.java (new file, 10 lines)

@@ -0,0 +1,10 @@
+package org.xbib.elx.api;
+
+import java.util.List;
+
+public interface IndexShiftResult {
+
+    List<String> getMovedAliases();
+
+    List<String> getNewAliases();
+}
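With these result classes, shiftIndex and pruneIndex report what actually happened instead of returning the client itself. A consumption sketch, assuming an initialized ExtendedClient and an IndexDefinition prepared as above:

    import org.xbib.elx.api.ExtendedClient;
    import org.xbib.elx.api.IndexDefinition;
    import org.xbib.elx.api.IndexPruneResult;
    import org.xbib.elx.api.IndexShiftResult;

    import java.util.Collections;

    public class ShiftAndPruneSketch {

        static void shiftAndPrune(ExtendedClient client, IndexDefinition definition) {
            // move aliases from the previous index generation to the new one
            IndexShiftResult shiftResult = client.shiftIndex(definition, Collections.<String>emptyList());
            System.out.println("moved aliases: " + shiftResult.getMovedAliases());
            System.out.println("new aliases: " + shiftResult.getNewAliases());

            // delete superseded index generations according to the retention policy
            IndexPruneResult pruneResult = client.pruneIndex(definition);
            if (pruneResult.getState() == IndexPruneResult.State.SUCCESS && pruneResult.isAcknowledged()) {
                System.out.println("deleted indices: " + pruneResult.getDeletedIndices());
            }
        }
    }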
@@ -4,7 +4,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
@@ -44,16 +43,12 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
 import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
-import org.elasticsearch.action.bulk.BulkItemResponse;
-import org.elasticsearch.action.bulk.BulkRequest;
-import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchAction;
 import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.update.UpdateRequest;
-import org.elasticsearch.client.Client;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.client.transport.NoNodeAvailableException;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
@@ -62,7 +57,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.query.QueryBuilder;
@@ -71,12 +65,14 @@ import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.sort.SortBuilder;
 import org.elasticsearch.search.sort.SortBuilders;
 import org.elasticsearch.search.sort.SortOrder;
-import org.xbib.elx.api.BulkControl;
+import org.xbib.elx.api.BulkController;
 import org.xbib.elx.api.BulkMetric;
 import org.xbib.elx.api.ExtendedClient;
 import org.xbib.elx.api.IndexAliasAdder;
 import org.xbib.elx.api.IndexDefinition;
+import org.xbib.elx.api.IndexPruneResult;
 import org.xbib.elx.api.IndexRetention;
+import org.xbib.elx.api.IndexShiftResult;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -99,6 +95,7 @@ import java.util.TreeSet;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Consumer;
 import java.util.regex.Matcher;
@@ -120,27 +117,58 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
     */
    private ElasticsearchClient client;
 
-    /**
-     * Our replacement for the buk processor.
-     */
-    private BulkProcessor bulkProcessor;
-
    private BulkMetric bulkMetric;
 
-    private BulkControl bulkControl;
+    private BulkController bulkController;
 
-    private Throwable throwable;
+    private AtomicBoolean closed;
 
-    private boolean closed;
+    private static final IndexShiftResult EMPTY_INDEX_SHIFT_RESULT = new IndexShiftResult() {
+        @Override
+        public List<String> getMovedAliases() {
+            return Collections.emptyList();
+        }
+
+        @Override
+        public List<String> getNewAliases() {
+            return Collections.emptyList();
+        }
+    };
+
+    private static final IndexPruneResult EMPTY_INDEX_PRUNE_RESULT = new IndexPruneResult() {
+        @Override
+        public State getState() {
+            return State.NONE;
+        }
+
+        @Override
+        public List<String> getCandidateIndices() {
+            return Collections.emptyList();
+        }
+
+        @Override
+        public List<String> getDeletedIndices() {
+            return Collections.emptyList();
+        }
+
+        @Override
+        public boolean isAcknowledged() {
+            return false;
+        }
+    };
 
     protected abstract ElasticsearchClient createClient(Settings settings) throws IOException;
 
     protected AbstractExtendedClient() {
+        closed = new AtomicBoolean(false);
     }
 
     @Override
     public AbstractExtendedClient setClient(ElasticsearchClient client) {
         this.client = client;
+        this.bulkMetric = new DefaultBulkMetric();
+        bulkMetric.start();
+        this.bulkController = new DefaultBulkController(this, bulkMetric);
         return this;
     }
 
@@ -149,28 +177,14 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
         return client;
     }
 
-    @Override
-    public AbstractExtendedClient setBulkMetric(BulkMetric metric) {
-        this.bulkMetric = metric;
-        // you must start bulk metric or it will bail out at stop()
-        bulkMetric.start();
-        return this;
-    }
-
     @Override
     public BulkMetric getBulkMetric() {
         return bulkMetric;
     }
 
     @Override
-    public AbstractExtendedClient setBulkControl(BulkControl bulkControl) {
-        this.bulkControl = bulkControl;
-        return this;
-    }
-
-    @Override
-    public BulkControl getBulkControl() {
-        return bulkControl;
+    public BulkController getBulkController() {
+        return bulkController;
     }
 
     @Override
@@ -181,120 +195,33 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
         if (bulkMetric != null) {
             bulkMetric.start();
         }
-        BulkProcessor.Listener listener = new BulkProcessor.Listener() {
-
-            private final Logger logger = LogManager.getLogger("org.xbib.elx.BulkProcessor.Listener");
-
-            @Override
-            public void beforeBulk(long executionId, BulkRequest request) {
-                long l = 0;
-                if (bulkMetric != null) {
-                    l = bulkMetric.getCurrentIngest().getCount();
-                    bulkMetric.getCurrentIngest().inc();
-                    int n = request.numberOfActions();
-                    bulkMetric.getSubmitted().inc(n);
-                    bulkMetric.getCurrentIngestNumDocs().inc(n);
-                    bulkMetric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes());
-                }
-                logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]",
-                        executionId,
-                        request.numberOfActions(),
-                        request.estimatedSizeInBytes(),
-                        l);
-            }
-
-            @Override
-            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
-                long l = 0;
-                if (bulkMetric != null) {
-                    l = bulkMetric.getCurrentIngest().getCount();
-                    bulkMetric.getCurrentIngest().dec();
-                    bulkMetric.getSucceeded().inc(response.getItems().length);
-                }
-                int n = 0;
-                for (BulkItemResponse itemResponse : response.getItems()) {
-                    if (bulkMetric != null) {
-                        bulkMetric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId());
-                    }
-                    if (itemResponse.isFailed()) {
-                        n++;
-                        if (bulkMetric != null) {
-                            bulkMetric.getSucceeded().dec(1);
-                            bulkMetric.getFailed().inc(1);
-                        }
-                    }
-                }
-                if (bulkMetric != null) {
-                    logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests",
-                            executionId,
-                            bulkMetric.getSucceeded().getCount(),
-                            bulkMetric.getFailed().getCount(),
-                            response.getTook().millis(),
-                            l);
-                }
-                if (n > 0) {
-                    logger.error("bulk [{}] failed with {} failed items, failure message = {}",
-                            executionId, n, response.buildFailureMessage());
-                } else {
-                    if (bulkMetric != null) {
-                        bulkMetric.getCurrentIngestNumDocs().dec(response.getItems().length);
-                    }
-                }
-            }
-
-            @Override
-            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
-                if (bulkMetric != null) {
-                    bulkMetric.getCurrentIngest().dec();
-                }
-                throwable = failure;
-                closed = true;
-                logger.error("after bulk [" + executionId + "] error", failure);
-            }
-        };
-        if (client != null) {
-            int maxActionsPerRequest = settings.getAsInt(Parameters.MAX_ACTIONS_PER_REQUEST.name(),
-                    Parameters.DEFAULT_MAX_ACTIONS_PER_REQUEST.getNum());
-            int maxConcurrentRequests = settings.getAsInt(Parameters.MAX_CONCURRENT_REQUESTS.name(),
-                    Parameters.DEFAULT_MAX_CONCURRENT_REQUESTS.getNum());
-            TimeValue flushIngestInterval = settings.getAsTime(Parameters.FLUSH_INTERVAL.name(),
-                    TimeValue.timeValueSeconds(Parameters.DEFAULT_FLUSH_INTERVAL.getNum()));
-            ByteSizeValue maxVolumePerRequest = settings.getAsBytesSize(Parameters.MAX_VOLUME_PER_REQUEST.name(),
-                    ByteSizeValue.parseBytesSizeValue(Parameters.DEFAULT_MAX_VOLUME_PER_REQUEST.getString(),
-                            "maxVolumePerRequest"));
-            logger.info("bulk processor up with maxActionsPerRequest = {} maxConcurrentRequests = {} " +
-                    "flushIngestInterval = {} maxVolumePerRequest = {}",
-                    maxActionsPerRequest, maxConcurrentRequests, flushIngestInterval, maxVolumePerRequest);
-            BulkProcessor.Builder builder = BulkProcessor.builder((Client) client, listener)
-                    .setBulkActions(maxActionsPerRequest)
-                    .setConcurrentRequests(maxConcurrentRequests)
-                    .setFlushInterval(flushIngestInterval)
-                    .setBulkSize(maxVolumePerRequest);
-            this.bulkProcessor = builder.build();
+        if (bulkController != null) {
+            bulkController.init(settings);
         }
-        this.closed = false;
-        this.throwable = null;
         return this;
     }
 
     @Override
-    public synchronized void shutdown() throws IOException {
+    public void flush() throws IOException {
+        if (bulkController != null) {
+            bulkController.flush();
+        }
+    }
+
+    @Override
+    public void close() throws IOException {
         ensureActive();
-        if (bulkProcessor != null) {
-            logger.info("closing bulk processor");
-            bulkProcessor.close();
-        }
-        if (bulkMetric != null) {
-            logger.info("stopping metric before bulk stop (for precise measurement)");
-            bulkMetric.stop();
-        }
-        if (bulkControl != null && bulkControl.indices() != null && !bulkControl.indices().isEmpty()) {
-            logger.info("stopping bulk mode for indices {}...", bulkControl.indices());
-            for (String index : bulkControl.indices()) {
-                stopBulk(index, bulkControl.getMaxWaitTime());
+        if (closed.compareAndSet(false, true)) {
+            if (bulkMetric != null) {
+                logger.info("closing bulk metric before bulk controller (for precise measurement)");
+                bulkMetric.close();
             }
+            if (bulkController != null) {
+                logger.info("closing bulk controller");
+                bulkController.close();
+            }
+            logger.info("shutdown complete");
         }
-        logger.info("shutdown complete");
     }
 
     @Override
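With shutdown() gone, the client is released through the standard Closeable contract, which in turn closes the bulk metric and the bulk controller. A sketch, assuming construction and initialization of the ExtendedClient happen elsewhere:

    import org.xbib.elx.api.ExtendedClient;

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    public class LifecycleSketch {

        static void runAndRelease(ExtendedClient client) throws IOException {
            try {
                // ... index, delete, update calls ...
                client.waitForResponses(30L, TimeUnit.SECONDS);
            } finally {
                client.flush();
                client.close(); // replaces the former shutdown()
            }
        }
    }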
@@ -320,7 +247,7 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
     @Override
     public ExtendedClient newIndex(IndexDefinition indexDefinition) throws IOException {
         ensureActive();
-        waitForCluster("YELLOW", "30s");
+        waitForCluster("YELLOW", 30L, TimeUnit.SECONDS);
         URL indexSettings = indexDefinition.getSettingsUrl();
         if (indexSettings == null) {
             logger.warn("warning while creating index '{}', no settings/mappings",
@@ -417,37 +344,27 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
     @Override
     public ExtendedClient startBulk(String index, long startRefreshIntervalSeconds, long stopRefreshIntervalSeconds)
             throws IOException {
-        ensureActive();
-        if (bulkControl == null) {
-            return this;
-        }
-        if (!bulkControl.isBulk(index) && startRefreshIntervalSeconds > 0L && stopRefreshIntervalSeconds > 0L) {
-            bulkControl.startBulk(index, startRefreshIntervalSeconds, stopRefreshIntervalSeconds);
-            updateIndexSetting(index, "refresh_interval", startRefreshIntervalSeconds + "s");
+        if (bulkController != null) {
+            ensureActive();
+            bulkController.startBulkMode(index, startRefreshIntervalSeconds, stopRefreshIntervalSeconds);
         }
         return this;
     }
 
     @Override
     public ExtendedClient stopBulk(IndexDefinition indexDefinition) throws IOException {
-        return stopBulk(indexDefinition.getFullIndexName(), indexDefinition.getMaxWaitTime());
+        if (bulkController != null) {
+            ensureActive();
+            bulkController.stopBulkMode(indexDefinition);
+        }
+        return this;
     }
 
     @Override
-    public ExtendedClient stopBulk(String index, String maxWaitTime) throws IOException {
-        ensureActive();
-        if (bulkControl == null) {
-            return this;
-        }
-        flushIngest();
-        if (waitForResponses(maxWaitTime)) {
-            if (bulkControl.isBulk(index)) {
-                long secs = bulkControl.getStopBulkRefreshIntervals().get(index);
-                if (secs > 0L) {
-                    updateIndexSetting(index, "refresh_interval", secs + "s");
-                }
-                bulkControl.finishBulk(index);
-            }
+    public ExtendedClient stopBulk(String index, long timeout, TimeUnit timeUnit) throws IOException {
+        if (bulkController != null) {
+            ensureActive();
+            bulkController.stopBulkMode(index, timeout, timeUnit);
         }
         return this;
     }
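At the client level the bulk life cycle stays the same in spirit, enter bulk mode, feed requests, stop bulk mode, but the refresh-interval bookkeeping is now delegated to the BulkController and timeouts are explicit. A sketch, assuming an initialized ExtendedClient; index name, interval, and timeout values are illustrative:

    import org.elasticsearch.action.index.IndexRequest;
    import org.xbib.elx.api.ExtendedClient;

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    public class BulkLifecycleSketch {

        static void bulkIndex(ExtendedClient client) throws IOException {
            // put the index into bulk mode; refresh interval handling is delegated to the bulk controller
            client.startBulk("myindex", 60L, 30L);
            for (int i = 0; i < 1000; i++) {
                client.index(new IndexRequest("myindex", "doc", Integer.toString(i))
                        .source("{\"n\":" + i + "}"));
            }
            client.waitForResponses(60L, TimeUnit.SECONDS);
            client.stopBulk("myindex", 60L, TimeUnit.SECONDS);
        }
    }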
@@ -465,16 +382,7 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
     @Override
     public ExtendedClient index(IndexRequest indexRequest) {
         ensureActive();
-        try {
-            if (bulkMetric != null) {
-                bulkMetric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id());
-            }
-            bulkProcessor.add(indexRequest);
-        } catch (Exception e) {
-            throwable = e;
-            closed = true;
-            logger.error("bulk add of index request failed: " + e.getMessage(), e);
-        }
+        bulkController.index(indexRequest);
         return this;
     }
 
@@ -486,16 +394,7 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
     @Override
     public ExtendedClient delete(DeleteRequest deleteRequest) {
         ensureActive();
-        try {
-            if (bulkMetric != null) {
-                bulkMetric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
-            }
-            bulkProcessor.add(deleteRequest);
-        } catch (Exception e) {
-            throwable = e;
-            closed = true;
-            logger.error("bulk add of delete failed: " + e.getMessage(), e);
-        }
+        bulkController.delete(deleteRequest);
         return this;
     }
 
@@ -512,49 +411,23 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
     @Override
     public ExtendedClient update(UpdateRequest updateRequest) {
         ensureActive();
-        try {
-            if (bulkMetric != null) {
-                bulkMetric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id());
-            }
-            bulkProcessor.add(updateRequest);
-        } catch (Exception e) {
-            throwable = e;
-            closed = true;
-            logger.error("bulk add of update request failed: " + e.getMessage(), e);
-        }
+        bulkController.update(updateRequest);
         return this;
     }
 
     @Override
-    public ExtendedClient flushIngest() {
+    public boolean waitForResponses(long timeout, TimeUnit timeUnit) {
         ensureActive();
-        logger.debug("flushing bulk processor");
-        bulkProcessor.flush();
-        return this;
+        return bulkController.waitForResponses(timeout, timeUnit);
     }
 
     @Override
-    public boolean waitForResponses(String maxWaitTime) {
-        ensureActive();
-        long millis = TimeValue.parseTimeValue(maxWaitTime, TimeValue.timeValueMinutes(1),"millis").getMillis();
-        logger.debug("waiting for " + millis + " millis");
-        try {
-            return bulkProcessor.awaitFlush(millis, TimeUnit.MILLISECONDS);
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            logger.error("interrupted");
-            return false;
-        }
-    }
-
-    @Override
-    public boolean waitForRecovery(String index, String maxWaitTime) {
+    public boolean waitForRecovery(String index, long maxWaitTime, TimeUnit timeUnit) {
         ensureActive();
         ensureIndexGiven(index);
         RecoveryResponse response = client.execute(RecoveryAction.INSTANCE, new RecoveryRequest(index)).actionGet();
         int shards = response.getTotalShards();
-        TimeValue timeout = TimeValue.parseTimeValue(maxWaitTime, TimeValue.timeValueSeconds(10),
-                getClass().getSimpleName() + ".timeout");
+        TimeValue timeout = toTimeValue(maxWaitTime, timeUnit);
         ClusterHealthResponse healthResponse =
                 client.execute(ClusterHealthAction.INSTANCE, new ClusterHealthRequest(index)
                         .waitForActiveShards(shards).timeout(timeout)).actionGet();
@@ -566,26 +439,26 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
     }
 
     @Override
-    public boolean waitForCluster(String statusString, String maxWaitTime) {
+    public boolean waitForCluster(String statusString, long maxWaitTime, TimeUnit timeUnit) {
         ensureActive();
         ClusterHealthStatus status = ClusterHealthStatus.fromString(statusString);
-        TimeValue timeout = TimeValue.parseTimeValue(maxWaitTime, TimeValue.timeValueSeconds(10),
-                getClass().getSimpleName() + ".timeout");
+        TimeValue timeout = toTimeValue(maxWaitTime, timeUnit);
         ClusterHealthResponse healthResponse = client.execute(ClusterHealthAction.INSTANCE,
                 new ClusterHealthRequest().timeout(timeout).waitForStatus(status)).actionGet();
         if (healthResponse != null && healthResponse.isTimedOut()) {
-            logger.error("timeout, cluster state is " + healthResponse.getStatus().name() + " and not " + status.name());
+            if (logger.isErrorEnabled()) {
+                logger.error("timeout, cluster state is " + healthResponse.getStatus().name() + " and not " + status.name());
+            }
             return false;
         }
         return true;
     }
 
     @Override
-    public String getHealthColor(String maxWaitTime) {
+    public String getHealthColor(long maxWaitTime, TimeUnit timeUnit) {
         ensureActive();
         try {
-            TimeValue timeout = TimeValue.parseTimeValue(maxWaitTime, TimeValue.timeValueSeconds(10),
-                    getClass().getSimpleName() + ".timeout");
+            TimeValue timeout = toTimeValue(maxWaitTime, timeUnit);
             ClusterHealthResponse healthResponse = client.execute(ClusterHealthAction.INSTANCE,
                     new ClusterHealthRequest().timeout(timeout)).actionGet();
             ClusterHealthStatus status = healthResponse.getStatus();
@@ -604,15 +477,16 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
 
     @Override
     public ExtendedClient updateReplicaLevel(IndexDefinition indexDefinition, int level) throws IOException {
-        return updateReplicaLevel(indexDefinition.getFullIndexName(), level, indexDefinition.getMaxWaitTime());
+        return updateReplicaLevel(indexDefinition.getFullIndexName(), level,
+                indexDefinition.getMaxWaitTime(), indexDefinition.getMaxWaitTimeUnit());
     }
 
     @Override
-    public ExtendedClient updateReplicaLevel(String index, int level, String maxWaitTime) throws IOException {
-        waitForCluster("YELLOW", maxWaitTime); // let cluster settle down from critical operations
+    public ExtendedClient updateReplicaLevel(String index, int level, long maxWaitTime, TimeUnit timeUnit) throws IOException {
+        waitForCluster("YELLOW", maxWaitTime, timeUnit); // let cluster settle down from critical operations
         if (level > 0) {
             updateIndexSetting(index, "number_of_replicas", level);
-            waitForRecovery(index, maxWaitTime);
+            waitForRecovery(index, maxWaitTime, timeUnit);
         }
         return this;
     }
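The refactored methods funnel their long/TimeUnit pairs through a toTimeValue(...) helper that does not appear in these hunks. A plausible implementation, offered as an assumption rather than code from this commit, simply wraps Elasticsearch's TimeValue:

    import org.elasticsearch.common.unit.TimeValue;

    import java.util.concurrent.TimeUnit;

    final class TimeValues {

        private TimeValues() {
        }

        // hypothetical equivalent of the private toTimeValue(...) helper used above
        static TimeValue toTimeValue(long timeValue, TimeUnit timeUnit) {
            if (timeValue < 0L) {
                throw new IllegalArgumentException("time value must not be negative");
            }
            return new TimeValue(timeValue, timeUnit);
        }
    }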
@@ -684,12 +558,6 @@ public abstract class AbstractExtendedClient {
         return indices.isEmpty() ? alias : indices.iterator().next();
     }
 
-    @Override
-    public Map<String, String> getAliasFilters(String alias) {
-        GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client, GetAliasesAction.INSTANCE);
-        return getFilters(getAliasesRequestBuilder.setIndices(resolveAlias(alias)).execute().actionGet());
-    }
-
     @Override
     public Map<String, String> getIndexFilters(String index) {
         GetAliasesRequestBuilder getAliasesRequestBuilder = new GetAliasesRequestBuilder(client, GetAliasesAction.INSTANCE);
@@ -697,50 +565,49 @@ public abstract class AbstractExtendedClient {
     }
 
     @Override
-    public ExtendedClient switchIndex(IndexDefinition indexDefinition, List<String> extraAliases) {
-        return switchIndex(indexDefinition, extraAliases, null);
+    public IndexShiftResult shiftIndex(IndexDefinition indexDefinition, List<String> additionalAliases) {
+        return shiftIndex(indexDefinition, additionalAliases, null);
     }
 
     @Override
-    public ExtendedClient switchIndex(IndexDefinition indexDefinition,
-                                      List<String> extraAliases, IndexAliasAdder indexAliasAdder) {
-        if (extraAliases == null) {
-            return this;
+    public IndexShiftResult shiftIndex(IndexDefinition indexDefinition,
+                                       List<String> additionalAliases, IndexAliasAdder indexAliasAdder) {
+        if (additionalAliases == null) {
+            return EMPTY_INDEX_SHIFT_RESULT;
         }
-        if (indexDefinition.isSwitchAliases()) {
-            switchIndex(indexDefinition.getIndex(),
-                    indexDefinition.getFullIndexName(), extraAliases.stream()
+        if (indexDefinition.isShiftEnabled()) {
+            return shiftIndex(indexDefinition.getIndex(),
+                    indexDefinition.getFullIndexName(), additionalAliases.stream()
                             .filter(a -> a != null && !a.isEmpty())
                             .collect(Collectors.toList()), indexAliasAdder);
         }
-        return this;
+        return EMPTY_INDEX_SHIFT_RESULT;
     }
 
     @Override
-    public ExtendedClient switchIndex(String index, String fullIndexName, List<String> extraAliases) {
-        return switchIndex(index, fullIndexName, extraAliases, null);
+    public IndexShiftResult shiftIndex(String index, String fullIndexName, List<String> additionalAliases) {
+        return shiftIndex(index, fullIndexName, additionalAliases, null);
     }
 
     @Override
-    public ExtendedClient switchIndex(String index, String fullIndexName,
-                                      List<String> extraAliases, IndexAliasAdder adder) {
+    public IndexShiftResult shiftIndex(String index, String fullIndexName,
+                                       List<String> additionalAliases, IndexAliasAdder adder) {
         ensureActive();
         if (index.equals(fullIndexName)) {
-            return this; // nothing to switch to
+            return EMPTY_INDEX_SHIFT_RESULT; // nothing to shift to
         }
         // two situations: 1. there is a new alias 2. there is already an old index with the alias
         String oldIndex = resolveAlias(index);
         final Map<String, String> oldFilterMap = oldIndex.equals(index) ? null : getIndexFilters(oldIndex);
         final List<String> newAliases = new LinkedList<>();
-        final List<String> switchAliases = new LinkedList<>();
+        final List<String> moveAliases = new LinkedList<>();
         IndicesAliasesRequestBuilder requestBuilder = new IndicesAliasesRequestBuilder(client, IndicesAliasesAction.INSTANCE);
         if (oldFilterMap == null || !oldFilterMap.containsKey(index)) {
             // never apply a filter for trunk index name
             requestBuilder.addAlias(fullIndexName, index);
             newAliases.add(index);
         }
-        // switch existing aliases
+        // move existing aliases
         if (oldFilterMap != null) {
             for (Map.Entry<String, String> entry : oldFilterMap.entrySet()) {
                 String alias = entry.getKey();
@@ -751,12 +618,12 @@ public abstract class AbstractExtendedClient {
@ -751,12 +618,12 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
|
||||||
} else {
|
} else {
|
||||||
requestBuilder.addAlias(fullIndexName, alias);
|
requestBuilder.addAlias(fullIndexName, alias);
|
||||||
}
|
}
|
||||||
switchAliases.add(alias);
|
moveAliases.add(alias);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// a list of aliases that should be added, check if new or old
|
// a list of aliases that should be added, check if new or old
|
||||||
if (extraAliases != null) {
|
if (additionalAliases != null) {
|
||||||
for (String extraAlias : extraAliases) {
|
for (String extraAlias : additionalAliases) {
|
||||||
if (oldFilterMap == null || !oldFilterMap.containsKey(extraAlias)) {
|
if (oldFilterMap == null || !oldFilterMap.containsKey(extraAlias)) {
|
||||||
// index alias adder only active on extra aliases, and if alias is new
|
// index alias adder only active on extra aliases, and if alias is new
|
||||||
if (adder != null) {
|
if (adder != null) {
|
||||||
|
@ -773,82 +640,72 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
|
||||||
} else {
|
} else {
|
||||||
requestBuilder.addAlias(fullIndexName, extraAlias);
|
requestBuilder.addAlias(fullIndexName, extraAlias);
|
||||||
}
|
}
|
||||||
switchAliases.add(extraAlias);
|
moveAliases.add(extraAlias);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (!newAliases.isEmpty() || !switchAliases.isEmpty()) {
|
if (!newAliases.isEmpty() || !moveAliases.isEmpty()) {
|
||||||
logger.info("new aliases = {}, switch aliases = {}", newAliases, switchAliases);
|
logger.info("new aliases = {}, moved aliases = {}", newAliases, moveAliases);
|
||||||
requestBuilder.execute().actionGet();
|
requestBuilder.execute().actionGet();
|
||||||
}
|
}
|
||||||
return this;
|
return new SuccessIndexShiftResult(moveAliases, newAliases);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void pruneIndex(IndexDefinition indexDefinition) {
|
public IndexPruneResult pruneIndex(IndexDefinition indexDefinition) {
|
||||||
pruneIndex(indexDefinition.getIndex(), indexDefinition.getFullIndexName(),
|
return pruneIndex(indexDefinition.getIndex(), indexDefinition.getFullIndexName(),
|
||||||
indexDefinition.getRetention().getDelta(), indexDefinition.getRetention().getMinToKeep());
|
indexDefinition.getRetention().getDelta(), indexDefinition.getRetention().getMinToKeep(), true);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void pruneIndex(String index, String fullIndexName, int delta, int mintokeep) {
|
public IndexPruneResult pruneIndex(String index, String fullIndexName, int delta, int mintokeep, boolean perform) {
|
||||||
if (delta == 0 && mintokeep == 0) {
|
if (delta == 0 && mintokeep == 0) {
|
||||||
return;
|
return EMPTY_INDEX_PRUNE_RESULT;
|
||||||
|
}
|
||||||
|
if (index.equals(fullIndexName)) {
|
||||||
|
return EMPTY_INDEX_PRUNE_RESULT;
|
||||||
}
|
}
|
||||||
ensureActive();
|
ensureActive();
|
||||||
if (index.equals(fullIndexName)) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
GetIndexRequestBuilder getIndexRequestBuilder = new GetIndexRequestBuilder(client, GetIndexAction.INSTANCE);
|
GetIndexRequestBuilder getIndexRequestBuilder = new GetIndexRequestBuilder(client, GetIndexAction.INSTANCE);
|
||||||
GetIndexResponse getIndexResponse = getIndexRequestBuilder.execute().actionGet();
|
GetIndexResponse getIndexResponse = getIndexRequestBuilder.execute().actionGet();
|
||||||
Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
|
Pattern pattern = Pattern.compile("^(.*?)(\\d+)$");
|
||||||
Set<String> indices = new TreeSet<>();
|
|
||||||
logger.info("{} indices", getIndexResponse.getIndices().length);
|
logger.info("{} indices", getIndexResponse.getIndices().length);
|
||||||
|
List<String> candidateIndices = new ArrayList<>();
|
||||||
for (String s : getIndexResponse.getIndices()) {
|
for (String s : getIndexResponse.getIndices()) {
|
||||||
Matcher m = pattern.matcher(s);
|
Matcher m = pattern.matcher(s);
|
||||||
if (m.matches() && index.equals(m.group(1)) && !s.equals(fullIndexName)) {
|
if (m.matches() && index.equals(m.group(1)) && !s.equals(fullIndexName)) {
|
||||||
indices.add(s);
|
candidateIndices.add(s);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (indices.isEmpty()) {
|
if (candidateIndices.isEmpty()) {
|
||||||
logger.info("no indices found, retention policy skipped");
|
return EMPTY_INDEX_PRUNE_RESULT;
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
if (mintokeep > 0 && indices.size() <= mintokeep) {
|
if (mintokeep > 0 && candidateIndices.size() <= mintokeep) {
|
||||||
logger.info("{} indices found, not enough for retention policy ({}), skipped",
|
return new NothingToDoPruneResult(candidateIndices, Collections.emptyList());
|
||||||
indices.size(), mintokeep);
|
|
||||||
return;
|
|
||||||
} else {
|
|
||||||
logger.info("candidates for deletion = {}", indices);
|
|
||||||
}
|
}
|
||||||
List<String> indicesToDelete = new ArrayList<>();
|
List<String> indicesToDelete = new ArrayList<>();
|
||||||
// our index
|
|
||||||
Matcher m1 = pattern.matcher(fullIndexName);
|
Matcher m1 = pattern.matcher(fullIndexName);
|
||||||
if (m1.matches()) {
|
if (m1.matches()) {
|
||||||
Integer i1 = Integer.parseInt(m1.group(2));
|
Integer i1 = Integer.parseInt(m1.group(2));
|
||||||
for (String s : indices) {
|
for (String s : candidateIndices) {
|
||||||
Matcher m2 = pattern.matcher(s);
|
Matcher m2 = pattern.matcher(s);
|
||||||
if (m2.matches()) {
|
if (m2.matches()) {
|
||||||
Integer i2 = Integer.parseInt(m2.group(2));
|
Integer i2 = Integer.parseInt(m2.group(2));
|
||||||
int kept = indices.size() - indicesToDelete.size();
|
int kept = candidateIndices.size() - indicesToDelete.size();
|
||||||
if ((delta == 0 || (delta > 0 && i1 - i2 > delta)) && mintokeep <= kept) {
|
if ((delta == 0 || (delta > 0 && i1 - i2 > delta)) && mintokeep <= kept) {
|
||||||
indicesToDelete.add(s);
|
indicesToDelete.add(s);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
logger.info("indices to delete = {}", indicesToDelete);
|
|
||||||
if (indicesToDelete.isEmpty()) {
|
if (indicesToDelete.isEmpty()) {
|
||||||
logger.info("not enough indices found to delete, retention policy complete");
|
return new NothingToDoPruneResult(candidateIndices, indicesToDelete);
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
String[] s = new String[indicesToDelete.size()];
|
String[] s = new String[indicesToDelete.size()];
|
||||||
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest()
|
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest()
|
||||||
.indices(indicesToDelete.toArray(s));
|
.indices(indicesToDelete.toArray(s));
|
||||||
DeleteIndexResponse response = client.execute(DeleteIndexAction.INSTANCE, deleteIndexRequest).actionGet();
|
DeleteIndexResponse response = client.execute(DeleteIndexAction.INSTANCE, deleteIndexRequest).actionGet();
|
||||||
if (!response.isAcknowledged()) {
|
return new SuccessPruneResult(candidateIndices, indicesToDelete, response);
|
||||||
logger.warn("retention delete index operation was not acknowledged");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -875,15 +732,15 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
|
||||||
@Override
|
@Override
|
||||||
public boolean forceMerge(IndexDefinition indexDefinition) {
|
public boolean forceMerge(IndexDefinition indexDefinition) {
|
||||||
if (indexDefinition.hasForceMerge()) {
|
if (indexDefinition.hasForceMerge()) {
|
||||||
return forceMerge(indexDefinition.getFullIndexName(), indexDefinition.getMaxWaitTime());
|
return forceMerge(indexDefinition.getFullIndexName(), indexDefinition.getMaxWaitTime(),
|
||||||
|
indexDefinition.getMaxWaitTimeUnit());
|
||||||
}
|
}
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean forceMerge(String index, String maxWaitTime) {
|
public boolean forceMerge(String index, long maxWaitTime, TimeUnit timeUnit) {
|
||||||
TimeValue timeout = TimeValue.parseTimeValue(maxWaitTime, TimeValue.timeValueSeconds(10),
|
TimeValue timeout = toTimeValue(maxWaitTime, timeUnit);
|
||||||
getClass().getSimpleName() + ".timeout");
|
|
||||||
ForceMergeRequestBuilder forceMergeRequestBuilder =
|
ForceMergeRequestBuilder forceMergeRequestBuilder =
|
||||||
new ForceMergeRequestBuilder(client, ForceMergeAction.INSTANCE);
|
new ForceMergeRequestBuilder(client, ForceMergeAction.INSTANCE);
|
||||||
forceMergeRequestBuilder.setIndices(index);
|
forceMergeRequestBuilder.setIndices(index);
|
||||||
|
@ -909,44 +766,35 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
|
||||||
String fullIndexName;
|
String fullIndexName;
|
||||||
String dateTimePattern = settings.get("dateTimePattern");
|
String dateTimePattern = settings.get("dateTimePattern");
|
||||||
if (dateTimePattern != null) {
|
if (dateTimePattern != null) {
|
||||||
fullIndexName = resolveAlias(indexName +
|
// check if index name with current date already exists, resolve to it
|
||||||
DateTimeFormatter.ofPattern(dateTimePattern)
|
fullIndexName = resolveAlias(indexName + DateTimeFormatter.ofPattern(dateTimePattern)
|
||||||
.withZone(ZoneId.systemDefault()) // not GMT
|
.withZone(ZoneId.systemDefault()) // not GMT
|
||||||
.format(LocalDate.now()));
|
.format(LocalDate.now()));
|
||||||
logger.info("index name {} resolved to full index name = {}", indexName, fullIndexName);
|
|
||||||
} else {
|
} else {
|
||||||
|
// check if index name already exists, resolve to it
|
||||||
fullIndexName = resolveMostRecentIndex(indexName);
|
fullIndexName = resolveMostRecentIndex(indexName);
|
||||||
logger.info("index name {} resolved to full index name = {}", indexName, fullIndexName);
|
|
||||||
}
|
}
|
||||||
IndexRetention indexRetention = new IndexRetention()
|
IndexRetention indexRetention = new DefaultIndexRetention()
|
||||||
.setMinToKeep(settings.getAsInt("retention.mintokeep", 0))
|
.setMinToKeep(settings.getAsInt("retention.mintokeep", 0))
|
||||||
.setDelta(settings.getAsInt("retention.delta", 0));
|
.setDelta(settings.getAsInt("retention.delta", 0));
|
||||||
|
return new DefaultIndexDefinition()
|
||||||
return new IndexDefinition()
|
.setEnabled(isEnabled)
|
||||||
.setIndex(indexName)
|
.setIndex(indexName)
|
||||||
.setFullIndexName(fullIndexName)
|
.setFullIndexName(fullIndexName)
|
||||||
.setSettingsUrl(settings.get("settings"))
|
.setSettingsUrl(settings.get("settings"))
|
||||||
.setMappingsUrl(settings.get("mapping"))
|
.setMappingsUrl(settings.get("mapping"))
|
||||||
.setDateTimePattern(dateTimePattern)
|
.setDateTimePattern(dateTimePattern)
|
||||||
.setEnabled(isEnabled)
|
|
||||||
.setIgnoreErrors(settings.getAsBoolean("skiperrors", false))
|
.setIgnoreErrors(settings.getAsBoolean("skiperrors", false))
|
||||||
.setSwitchAliases(settings.getAsBoolean("aliases", true))
|
.setShift(settings.getAsBoolean("shift", true))
|
||||||
.setReplicaLevel(settings.getAsInt("replica", 0))
|
.setReplicaLevel(settings.getAsInt("replica", 0))
|
||||||
.setMaxWaitTime(settings.get("timout", "30s"))
|
.setMaxWaitTime(settings.getAsLong("timeout", 30L), TimeUnit.SECONDS)
|
||||||
.setRetention(indexRetention);
|
.setRetention(indexRetention)
|
||||||
|
.setStartRefreshInterval(settings.getAsLong("bulk.startrefreshinterval", -1L))
|
||||||
|
.setStopRefreshInterval(settings.getAsLong("bulk.stoprefreshinterval", -1L));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean hasThrowable() {
|
public void updateIndexSetting(String index, String key, Object value) throws IOException {
|
||||||
return throwable != null;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Throwable getThrowable() {
|
|
||||||
return throwable;
|
|
||||||
}
|
|
||||||
|
|
||||||
private void updateIndexSetting(String index, String key, Object value) throws IOException {
|
|
||||||
ensureActive();
|
ensureActive();
|
||||||
if (index == null) {
|
if (index == null) {
|
||||||
throw new IOException("no index name given");
|
throw new IOException("no index name given");
|
||||||
|
@ -971,9 +819,6 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
|
||||||
if (client == null) {
|
if (client == null) {
|
||||||
throw new IllegalStateException("no client");
|
throw new IllegalStateException("no client");
|
||||||
}
|
}
|
||||||
if (closed) {
|
|
||||||
throw new ElasticsearchException("client is closed");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private void ensureIndexGiven(String index) {
|
private void ensureIndexGiven(String index) {
|
||||||
|
@ -1096,4 +941,115 @@ public abstract class AbstractExtendedClient implements ExtendedClient {
|
||||||
.forEachOrdered(e -> result.put(e.getKey(), e.getValue()));
|
.forEachOrdered(e -> result.put(e.getKey(), e.getValue()));
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static TimeValue toTimeValue(long timeValue, TimeUnit timeUnit) {
|
||||||
|
switch (timeUnit) {
|
||||||
|
case DAYS:
|
||||||
|
return TimeValue.timeValueHours(24 * timeValue);
|
||||||
|
case HOURS:
|
||||||
|
return TimeValue.timeValueHours(timeValue);
|
||||||
|
case MINUTES:
|
||||||
|
return TimeValue.timeValueMinutes(timeValue);
|
||||||
|
case SECONDS:
|
||||||
|
return TimeValue.timeValueSeconds(timeValue);
|
||||||
|
case MILLISECONDS:
|
||||||
|
return TimeValue.timeValueMillis(timeValue);
|
||||||
|
case MICROSECONDS:
|
||||||
|
return TimeValue.timeValueNanos(1000 * timeValue);
|
||||||
|
case NANOSECONDS:
|
||||||
|
return TimeValue.timeValueNanos(timeValue);
|
||||||
|
default:
|
||||||
|
throw new IllegalArgumentException("unknown time unit: " + timeUnit);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static class SuccessIndexShiftResult implements IndexShiftResult {
|
||||||
|
|
||||||
|
List<String> movedAliases;
|
||||||
|
|
||||||
|
List<String> newAliases;
|
||||||
|
|
||||||
|
SuccessIndexShiftResult(List<String> movedAliases, List<String> newAliases) {
|
||||||
|
this.movedAliases = movedAliases;
|
||||||
|
this.newAliases = newAliases;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<String> getMovedAliases() {
|
||||||
|
return movedAliases;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<String> getNewAliases() {
|
||||||
|
return newAliases;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static class SuccessPruneResult implements IndexPruneResult {
|
||||||
|
|
||||||
|
List<String> candidateIndices;
|
||||||
|
|
||||||
|
List<String> indicesToDelete;
|
||||||
|
|
||||||
|
DeleteIndexResponse response;
|
||||||
|
|
||||||
|
SuccessPruneResult(List<String> candidateIndices, List<String> indicesToDelete,
|
||||||
|
DeleteIndexResponse response) {
|
||||||
|
this.candidateIndices = candidateIndices;
|
||||||
|
this.indicesToDelete = indicesToDelete;
|
||||||
|
this.response = response;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public IndexPruneResult.State getState() {
|
||||||
|
return IndexPruneResult.State.SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<String> getCandidateIndices() {
|
||||||
|
return candidateIndices;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<String> getDeletedIndices() {
|
||||||
|
return indicesToDelete;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isAcknowledged() {
|
||||||
|
return response.isAcknowledged();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static class NothingToDoPruneResult implements IndexPruneResult {
|
||||||
|
|
||||||
|
List<String> candidateIndices;
|
||||||
|
|
||||||
|
List<String> indicesToDelete;
|
||||||
|
|
||||||
|
NothingToDoPruneResult(List<String> candidateIndices, List<String> indicesToDelete) {
|
||||||
|
this.candidateIndices = candidateIndices;
|
||||||
|
this.indicesToDelete = indicesToDelete;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public IndexPruneResult.State getState() {
|
||||||
|
return IndexPruneResult.State.SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<String> getCandidateIndices() {
|
||||||
|
return candidateIndices;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<String> getDeletedIndices() {
|
||||||
|
return indicesToDelete;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isAcknowledged() {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
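The reworked client API above returns result objects instead of the client itself, and its wait times are now explicit (long, TimeUnit) pairs. The following is only a rough caller sketch against these signatures; the client, indexDefinition and logger variables, and the index name, are placeholders and not part of this commit:

    // shift aliases onto the new full index name and inspect what moved
    IndexShiftResult shiftResult = client.shiftIndex(indexDefinition, Collections.emptyList());
    logger.info("new = {}, moved = {}", shiftResult.getNewAliases(), shiftResult.getMovedAliases());
    // apply the retention policy and check the outcome instead of parsing log output
    IndexPruneResult pruneResult = client.pruneIndex(indexDefinition);
    if (pruneResult.getState() == IndexPruneResult.State.SUCCESS && pruneResult.isAcknowledged()) {
        logger.info("deleted = {}", pruneResult.getDeletedIndices());
    }
    client.forceMerge("myindex20190101", 30L, TimeUnit.SECONDS); // hypothetical full index name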
@@ -5,8 +5,6 @@ import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
-import org.xbib.elx.api.BulkControl;
-import org.xbib.elx.api.BulkMetric;
import org.xbib.elx.api.ExtendedClient;
import org.xbib.elx.api.ExtendedClientProvider;

@@ -26,10 +24,6 @@ public class ClientBuilder {

    private Class<? extends ExtendedClientProvider> provider;

-   private BulkMetric metric;
-
-   private BulkControl control;
-
    public ClientBuilder() {
        this(null);
    }

@@ -48,8 +42,6 @@ public class ClientBuilder {
        for (ExtendedClientProvider provider : serviceLoader) {
            providerMap.put(provider.getClass(), provider);
        }
-       this.metric = new SimpleBulkMetric();
-       this.control = new SimpleBulkControl();
    }

    public static ClientBuilder builder() {

@@ -100,25 +92,11 @@ public class ClientBuilder {
        return this;
    }

-   public ClientBuilder setMetric(BulkMetric metric) {
-       this.metric = metric;
-       return this;
-   }
-
-   public ClientBuilder setControl(BulkControl control) {
-       this.control = control;
-       return this;
-   }
-
    @SuppressWarnings("unchecked")
    public <C extends ExtendedClient> C build() throws IOException {
        if (provider == null) {
            throw new IllegalArgumentException("no provider");
        }
-       return (C) providerMap.get(provider).getExtendedClient()
-               .setClient(client)
-               .setBulkMetric(metric)
-               .setBulkControl(control)
-               .init(settingsBuilder.build());
+       return (C) providerMap.get(provider).getExtendedClient().setClient(client).init(settingsBuilder.build());
    }
}
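With the metric and control setters removed, build() now reduces to wiring up the provider's client and handing it the settings. A condensed sketch of what the builder does internally, using only the names visible in this diff (provider, client, settingsBuilder):

    ExtendedClient extendedClient = providerMap.get(provider).getExtendedClient()
            .setClient(client)
            .init(settingsBuilder.build());

Metrics and bulk control are no longer injected from the outside; presumably the client implementation creates them itself during init(Settings), as the new DefaultBulkController and DefaultBulkMetric classes below suggest.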
@@ -0,0 +1,309 @@
package org.xbib.elx.common;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.xbib.elx.api.BulkController;
import org.xbib.elx.api.BulkMetric;
import org.xbib.elx.api.BulkProcessor;
import org.xbib.elx.api.ExtendedClient;
import org.xbib.elx.api.IndexDefinition;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class DefaultBulkController implements BulkController {

    private static final Logger logger = LogManager.getLogger(DefaultBulkController.class);

    private final ExtendedClient client;

    private final BulkMetric bulkMetric;

    private final List<String> indexNames;

    private final Map<String, Long> startBulkRefreshIntervals;

    private final Map<String, Long> stopBulkRefreshIntervals;

    private long maxWaitTime;

    private TimeUnit maxWaitTimeUnit;

    private BulkProcessor bulkProcessor;

    private BulkListener bulkListener;

    private AtomicBoolean active;

    public DefaultBulkController(ExtendedClient client, BulkMetric bulkMetric) {
        this.client = client;
        this.bulkMetric = bulkMetric;
        this.indexNames = new ArrayList<>();
        this.active = new AtomicBoolean(false);
        this.startBulkRefreshIntervals = new HashMap<>();
        this.stopBulkRefreshIntervals = new HashMap<>();
        this.maxWaitTime = 30L;
        this.maxWaitTimeUnit = TimeUnit.SECONDS;
    }

    @Override
    public Throwable getLastBulkError() {
        return bulkListener.getLastBulkError();
    }

    @Override
    public void init(Settings settings) {
        int maxActionsPerRequest = settings.getAsInt(Parameters.MAX_ACTIONS_PER_REQUEST.name(),
                Parameters.DEFAULT_MAX_ACTIONS_PER_REQUEST.getNum());
        int maxConcurrentRequests = settings.getAsInt(Parameters.MAX_CONCURRENT_REQUESTS.name(),
                Parameters.DEFAULT_MAX_CONCURRENT_REQUESTS.getNum());
        TimeValue flushIngestInterval = settings.getAsTime(Parameters.FLUSH_INTERVAL.name(),
                TimeValue.timeValueSeconds(Parameters.DEFAULT_FLUSH_INTERVAL.getNum()));
        ByteSizeValue maxVolumePerRequest = settings.getAsBytesSize(Parameters.MAX_VOLUME_PER_REQUEST.name(),
                ByteSizeValue.parseBytesSizeValue(Parameters.DEFAULT_MAX_VOLUME_PER_REQUEST.getString(),
                        "maxVolumePerRequest"));
        if (logger.isInfoEnabled()) {
            logger.info("bulk processor up with maxActionsPerRequest = {} maxConcurrentRequests = {} " +
                    "flushIngestInterval = {} maxVolumePerRequest = {}",
                    maxActionsPerRequest, maxConcurrentRequests, flushIngestInterval, maxVolumePerRequest);
        }
        this.bulkListener = new BulkListener();
        DefaultBulkProcessor.Builder builder = DefaultBulkProcessor.builder((Client) client.getClient(), bulkListener)
                .setBulkActions(maxActionsPerRequest)
                .setConcurrentRequests(maxConcurrentRequests)
                .setFlushInterval(flushIngestInterval)
                .setBulkSize(maxVolumePerRequest);
        this.bulkProcessor = builder.build();
        this.active.set(true);
    }

    @Override
    public void startBulkMode(IndexDefinition indexDefinition) throws IOException {
        startBulkMode(indexDefinition.getFullIndexName(), indexDefinition.getStartRefreshInterval(),
                indexDefinition.getStopRefreshInterval());
    }

    @Override
    public void startBulkMode(String indexName,
            long startRefreshIntervalInSeconds,
            long stopRefreshIntervalInSeconds) throws IOException {
        if (!indexNames.contains(indexName)) {
            indexNames.add(indexName);
            startBulkRefreshIntervals.put(indexName, startRefreshIntervalInSeconds);
            stopBulkRefreshIntervals.put(indexName, stopRefreshIntervalInSeconds);
            if (startRefreshIntervalInSeconds != 0L) {
                client.updateIndexSetting(indexName, "refresh_interval", startRefreshIntervalInSeconds + "s");
            }
        }
    }

    @Override
    public void index(IndexRequest indexRequest) {
        if (!active.get()) {
            throw new IllegalStateException("inactive");
        }
        try {
            if (bulkMetric != null) {
                bulkMetric.getCurrentIngest().inc(indexRequest.index(), indexRequest.type(), indexRequest.id());
            }
            bulkProcessor.add(indexRequest);
        } catch (Exception e) {
            bulkListener.lastBulkError = e;
            active.set(false);
            if (logger.isErrorEnabled()) {
                logger.error("bulk add of index failed: " + e.getMessage(), e);
            }
        }
    }

    @Override
    public void delete(DeleteRequest deleteRequest) {
        if (!active.get()) {
            throw new IllegalStateException("inactive");
        }
        try {
            if (bulkMetric != null) {
                bulkMetric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
            }
            bulkProcessor.add(deleteRequest);
        } catch (Exception e) {
            bulkListener.lastBulkError = e;
            active.set(false);
            if (logger.isErrorEnabled()) {
                logger.error("bulk add of delete failed: " + e.getMessage(), e);
            }
        }
    }

    @Override
    public void update(UpdateRequest updateRequest) {
        if (!active.get()) {
            throw new IllegalStateException("inactive");
        }
        try {
            if (bulkMetric != null) {
                bulkMetric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id());
            }
            bulkProcessor.add(updateRequest);
        } catch (Exception e) {
            bulkListener.lastBulkError = e;
            active.set(false);
            if (logger.isErrorEnabled()) {
                logger.error("bulk add of update failed: " + e.getMessage(), e);
            }
        }
    }

    @Override
    public boolean waitForResponses(long timeout, TimeUnit timeUnit) {
        try {
            return bulkProcessor.awaitFlush(timeout, timeUnit);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.error("interrupted");
            return false;
        }
    }

    @Override
    public void stopBulkMode(IndexDefinition indexDefinition) throws IOException {
        stopBulkMode(indexDefinition.getFullIndexName(),
                indexDefinition.getMaxWaitTime(), indexDefinition.getMaxWaitTimeUnit());
    }

    @Override
    public void stopBulkMode(String index, long timeout, TimeUnit timeUnit) throws IOException {
        flush();
        if (waitForResponses(timeout, timeUnit)) {
            if (indexNames.contains(index)) {
                Long secs = stopBulkRefreshIntervals.get(index);
                if (secs != null && secs != 0L) {
                    client.updateIndexSetting(index, "refresh_interval", secs + "s");
                }
                indexNames.remove(index);
            }
        }
    }

    @Override
    public void flush() throws IOException {
        if (bulkProcessor != null) {
            bulkProcessor.flush();
        }
    }

    @Override
    public void close() throws IOException {
        flush();
        if (client.waitForResponses(maxWaitTime, maxWaitTimeUnit)) {
            for (String index : indexNames) {
                Long secs = stopBulkRefreshIntervals.get(index);
                if (secs != null && secs != 0L)
                    client.updateIndexSetting(index, "refresh_interval", secs + "s");
            }
            indexNames.clear();
        }
        if (bulkProcessor != null) {
            bulkProcessor.close();
        }
    }

    private class BulkListener implements DefaultBulkProcessor.Listener {

        private final Logger logger = LogManager.getLogger("org.xbib.elx.BulkProcessor.Listener");

        private Throwable lastBulkError = null;

        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            long l = 0;
            if (bulkMetric != null) {
                l = bulkMetric.getCurrentIngest().getCount();
                bulkMetric.getCurrentIngest().inc();
                int n = request.numberOfActions();
                bulkMetric.getSubmitted().inc(n);
                bulkMetric.getCurrentIngestNumDocs().inc(n);
                bulkMetric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes());
            }
            if (logger.isDebugEnabled()) {
                logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]",
                        executionId,
                        request.numberOfActions(),
                        request.estimatedSizeInBytes(),
                        l);
            }
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            long l = 0;
            if (bulkMetric != null) {
                l = bulkMetric.getCurrentIngest().getCount();
                bulkMetric.getCurrentIngest().dec();
                bulkMetric.getSucceeded().inc(response.getItems().length);
            }
            int n = 0;
            for (BulkItemResponse itemResponse : response.getItems()) {
                if (bulkMetric != null) {
                    bulkMetric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId());
                }
                if (itemResponse.isFailed()) {
                    n++;
                    if (bulkMetric != null) {
                        bulkMetric.getSucceeded().dec(1);
                        bulkMetric.getFailed().inc(1);
                    }
                }
            }
            if (bulkMetric != null && logger.isDebugEnabled()) {
                logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] {} concurrent requests",
                        executionId,
                        bulkMetric.getSucceeded().getCount(),
                        bulkMetric.getFailed().getCount(),
                        response.getTook().millis(),
                        l);
            }
            if (n > 0) {
                if (logger.isErrorEnabled()) {
                    logger.error("bulk [{}] failed with {} failed items, failure message = {}",
                            executionId, n, response.buildFailureMessage());
                }
            } else {
                if (bulkMetric != null) {
                    bulkMetric.getCurrentIngestNumDocs().dec(response.getItems().length);
                }
            }
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            if (bulkMetric != null) {
                bulkMetric.getCurrentIngest().dec();
            }
            lastBulkError = failure;
            active.set(false);
            if (logger.isErrorEnabled()) {
                logger.error("after bulk [" + executionId + "] error", failure);
            }
        }

        Throwable getLastBulkError() {
            return lastBulkError;
        }
    }
}
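The controller above wraps the bulk processor lifecycle: init, per-index bulk mode, request submission, and shutdown. A minimal sketch of the intended call sequence, assuming an already initialized ExtendedClient, BulkMetric and Settings; the index name and refresh intervals are made-up values:

    BulkController controller = new DefaultBulkController(client, bulkMetric);
    controller.init(settings);                        // builds the underlying DefaultBulkProcessor
    controller.startBulkMode("myindex2", 0L, 30L);    // hypothetical index, restore 30s refresh on stop
    controller.index(new IndexRequest("myindex2", "doc", "1").source("{\"key\":\"value\"}"));
    controller.waitForResponses(30L, TimeUnit.SECONDS);
    controller.stopBulkMode("myindex2", 30L, TimeUnit.SECONDS);
    controller.close();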
@@ -1,5 +1,6 @@
package org.xbib.elx.common;

+import org.elasticsearch.common.settings.Settings;
import org.xbib.elx.api.BulkMetric;
import org.xbib.metrics.Count;
import org.xbib.metrics.CountMetric;

@@ -7,9 +8,8 @@ import org.xbib.metrics.Meter;
import org.xbib.metrics.Metered;

import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;

-public class SimpleBulkMetric implements BulkMetric {
+public class DefaultBulkMetric implements BulkMetric {

    private final Meter totalIngest;

@@ -29,12 +29,8 @@ public class SimpleBulkMetric implements BulkMetric {

    private Long stopped;

-   public SimpleBulkMetric() {
-       this(Executors.newSingleThreadScheduledExecutor());
-   }
-
-   public SimpleBulkMetric(ScheduledExecutorService executorService) {
-       totalIngest = new Meter(executorService);
+   public DefaultBulkMetric() {
+       totalIngest = new Meter(Executors.newSingleThreadScheduledExecutor());
        totalIngestSizeInBytes = new CountMetric();
        currentIngest = new CountMetric();
        currentIngestNumDocs = new CountMetric();

@@ -43,6 +39,11 @@ public class SimpleBulkMetric implements BulkMetric {
        failed = new CountMetric();
    }

+   @Override
+   public void init(Settings settings) {
+       start();
+   }
+
    @Override
    public Metered getTotalIngest() {
        return totalIngest;

@@ -78,6 +79,11 @@ public class SimpleBulkMetric implements BulkMetric {
        return failed;
    }

+   @Override
+   public long elapsed() {
+       return started != null ? ((stopped != null ? stopped : System.nanoTime()) - started) : -1L;
+   }
+
    @Override
    public void start() {
        this.started = System.nanoTime();

@@ -91,8 +97,8 @@ public class SimpleBulkMetric implements BulkMetric {
    }

    @Override
-   public long elapsed() {
-       return (stopped != null ? stopped : System.nanoTime()) - started;
+   public void close() {
+       stop();
+       totalIngest.shutdown();
    }

}
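The renamed metric now owns its own scheduler, so close() has to be called to shut the meter down. A short reading sketch with placeholder values; the counters are assumed to be updated by a running bulk controller:

    DefaultBulkMetric metric = new DefaultBulkMetric();
    metric.init(Settings.EMPTY);              // init() simply calls start()
    // ... bulk activity updates the counters ...
    long succeeded = metric.getSucceeded().getCount();
    long failed = metric.getFailed().getCount();
    long elapsedNanos = metric.elapsed();     // -1L if the metric was never started
    metric.close();                           // stop() plus meter shutdown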
@@ -5,16 +5,14 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
-import org.elasticsearch.action.delete.DeleteRequest;
-import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.xbib.elx.api.BulkProcessor;

-import java.io.Closeable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;

@@ -28,7 +26,7 @@ import java.util.concurrent.atomic.AtomicLong;
 * requests allowed to be executed in parallel.
 * In order to create a new bulk processor, use the {@link Builder}.
 */
-public class BulkProcessor implements Closeable {
+public class DefaultBulkProcessor implements BulkProcessor {

    private final int bulkActions;

@@ -46,8 +44,8 @@ public class BulkProcessor implements Closeable {

    private volatile boolean closed;

-   private BulkProcessor(Client client, Listener listener, String name, int concurrentRequests,
+   private DefaultBulkProcessor(Client client, Listener listener, String name, int concurrentRequests,
            int bulkActions, ByteSizeValue bulkSize, TimeValue flushInterval) {
        this.executionIdGen = new AtomicLong();
        this.closed = false;
        this.bulkActions = bulkActions;

@@ -77,19 +75,6 @@ public class BulkProcessor implements Closeable {
        return new Builder(client, listener);
    }

-   /**
-    * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are flushed.
-    */
-   @Override
-   public void close() {
-       try {
-           // 0 = immediate close
-           awaitClose(0, TimeUnit.NANOSECONDS);
-       } catch (InterruptedException exc) {
-           Thread.currentThread().interrupt();
-       }
-   }
-
    /**
     * Wait for bulk request handler with flush.
     * @param timeout the timeout value

@@ -97,7 +82,8 @@ public class BulkProcessor implements Closeable {
     * @return true is method was successful, false if timeout
     * @throws InterruptedException if timeout
     */
-   public boolean awaitFlush(long timeout, TimeUnit unit) throws InterruptedException {
+   @Override
+   public synchronized boolean awaitFlush(long timeout, TimeUnit unit) throws InterruptedException {
        if (closed) {
            return true;
        }

@@ -124,6 +110,7 @@
     * bulk requests completed
     * @throws InterruptedException If the current thread is interrupted
     */
+   @Override
    public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
        if (closed) {
            return true;

@@ -139,34 +126,14 @@
        return this.bulkRequestHandler.close(timeout, unit);
    }

-   /**
-    * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest}
-    * (for example, if no id is provided, one will be generated, or usage of the create flag).
-    *
-    * @param request request
-    * @return his bulk processor
-    */
-   public BulkProcessor add(IndexRequest request) {
-       return add((ActionRequest) request);
-   }
-
-   /**
-    * Adds an {@link DeleteRequest} to the list of actions to execute.
-    *
-    * @param request request
-    * @return his bulk processor
-    */
-   public BulkProcessor add(DeleteRequest request) {
-       return add((ActionRequest) request);
-   }
-
    /**
     * Adds either a delete or an index request.
     *
     * @param request request
     * @return his bulk processor
     */
-   public BulkProcessor add(ActionRequest<?> request) {
+   @Override
+   public DefaultBulkProcessor add(ActionRequest<?> request) {
        return add(request, null);
    }

@@ -177,11 +144,36 @@
     * @param payload payload
     * @return his bulk processor
     */
-   public BulkProcessor add(ActionRequest<?> request, Object payload) {
+   @Override
+   public DefaultBulkProcessor add(ActionRequest<?> request, Object payload) {
        internalAdd(request, payload);
        return this;
    }

+   /**
+    * Flush pending delete or index requests.
+    */
+   @Override
+   public synchronized void flush() {
+       ensureOpen();
+       if (bulkRequest.numberOfActions() > 0) {
+           execute();
+       }
+   }
+
+   /**
+    * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are flushed.
+    */
+   @Override
+   public void close() {
+       try {
+           // 0 = immediate close
+           awaitClose(0, TimeUnit.NANOSECONDS);
+       } catch (InterruptedException exc) {
+           Thread.currentThread().interrupt();
+       }
+   }
+
    private void ensureOpen() {
        if (closed) {
            throw new IllegalStateException("bulk process already closed");

@@ -213,53 +205,7 @@
        return bulkActions != -1 &&
                bulkRequest.numberOfActions() >= bulkActions ||
                bulkSize != -1 &&
                bulkRequest.estimatedSizeInBytes() >= bulkSize;
-   }
-
-   /**
-    * Flush pending delete or index requests.
-    */
-   public synchronized void flush() {
-       ensureOpen();
-       if (bulkRequest.numberOfActions() > 0) {
-           execute();
-       }
-   }
-
-   /**
-    * A listener for the execution.
-    */
-   public interface Listener {
-
-       /**
-        * Callback before the bulk is executed.
-        *
-        * @param executionId execution ID
-        * @param request request
-        */
-       void beforeBulk(long executionId, BulkRequest request);
-
-       /**
-        * Callback after a successful execution of bulk request.
-        *
-        * @param executionId execution ID
-        * @param request request
-        * @param response response
-        */
-       void afterBulk(long executionId, BulkRequest request, BulkResponse response);
-
-       /**
-        * Callback after a failed execution of bulk request.
-        *
-        * Note that in case an instance of <code>InterruptedException</code> is passed, which means that request
-        * processing has been
-        * cancelled externally, the thread's interruption status has been restored prior to calling this method.
-        *
-        * @param executionId execution ID
-        * @param request request
-        * @param failure failure
-        */
-       void afterBulk(long executionId, BulkRequest request, Throwable failure);
    }

    /**

@@ -359,8 +305,8 @@
     *
     * @return a bulk processor
     */
-   public BulkProcessor build() {
-       return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
+   public DefaultBulkProcessor build() {
+       return new DefaultBulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
    }
}

@@ -368,7 +314,7 @@

    @Override
    public void run() {
-       synchronized (BulkProcessor.this) {
+       synchronized (DefaultBulkProcessor.this) {
            if (closed) {
                return;
            }

@@ -380,24 +326,13 @@
        }
    }

-   /**
-    * Abstracts the low-level details of bulk request handling.
-    */
-   interface BulkRequestHandler {
-
-       void execute(BulkRequest bulkRequest, long executionId);
-
-       boolean close(long timeout, TimeUnit unit) throws InterruptedException;
-
-   }
-
    private static class SyncBulkRequestHandler implements BulkRequestHandler {

        private final Client client;

-       private final BulkProcessor.Listener listener;
+       private final DefaultBulkProcessor.Listener listener;

-       SyncBulkRequestHandler(Client client, BulkProcessor.Listener listener) {
+       SyncBulkRequestHandler(Client client, DefaultBulkProcessor.Listener listener) {
            this.client = client;
            this.listener = listener;
        }

@@ -427,13 +362,13 @@

        private final Client client;

-       private final BulkProcessor.Listener listener;
+       private final DefaultBulkProcessor.Listener listener;

        private final Semaphore semaphore;

        private final int concurrentRequests;

-       private AsyncBulkRequestHandler(Client client, BulkProcessor.Listener listener, int concurrentRequests) {
+       private AsyncBulkRequestHandler(Client client, DefaultBulkProcessor.Listener listener, int concurrentRequests) {
            this.client = client;
            this.listener = listener;
            this.concurrentRequests = concurrentRequests;
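Construction of the processor still goes through its Builder; only the type names changed. A rough sketch mirroring the builder call in DefaultBulkController.init, with example limits; the client and listener variables are assumed to exist and the request values are hypothetical:

    DefaultBulkProcessor bulkProcessor = DefaultBulkProcessor.builder(client, listener)
            .setBulkActions(1000)
            .setConcurrentRequests(1)
            .setFlushInterval(TimeValue.timeValueSeconds(30))
            .setBulkSize(new ByteSizeValue(10, ByteSizeUnit.MB))
            .build();
    bulkProcessor.add(new IndexRequest("myindex", "doc", "1").source("{}")); // example request
    bulkProcessor.flush();
    bulkProcessor.awaitClose(30L, TimeUnit.SECONDS);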
@ -0,0 +1,214 @@
|
||||||
|
package org.xbib.elx.common;
|
||||||
|
|
||||||
|
import org.xbib.elx.api.IndexDefinition;
|
||||||
|
import org.xbib.elx.api.IndexRetention;
|
||||||
|
|
||||||
|
import java.net.MalformedURLException;
|
||||||
|
import java.net.URL;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
|
public class DefaultIndexDefinition implements IndexDefinition {
|
||||||
|
|
||||||
|
private String index;
|
||||||
|
|
||||||
|
private String fullIndexName;
|
||||||
|
|
||||||
|
private String dateTimePattern;
|
||||||
|
|
||||||
|
private URL settingsUrl;
|
||||||
|
|
||||||
|
private URL mappingsUrl;
|
||||||
|
|
||||||
|
private boolean enabled;
|
||||||
|
|
||||||
|
private boolean ignoreErrors;
|
||||||
|
|
||||||
|
private boolean switchAliases;
|
||||||
|
|
||||||
|
private boolean hasForceMerge;
|
||||||
|
|
||||||
|
private int replicaLevel;
|
||||||
|
|
||||||
|
private IndexRetention indexRetention;
|
||||||
|
|
||||||
|
private long maxWaitTime;
|
||||||
|
|
||||||
|
private TimeUnit maxWaitTimeUnit;
|
||||||
|
|
||||||
|
private long startRefreshInterval;
|
||||||
|
|
||||||
|
private long stopRefreshInterval;
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public IndexDefinition setIndex(String index) {
|
||||||
|
this.index = index;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getIndex() {
|
||||||
|
return index;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public IndexDefinition setFullIndexName(String fullIndexName) {
|
||||||
|
this.fullIndexName = fullIndexName;
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getFullIndexName() {
|
||||||
|
return fullIndexName;
|
    }

    @Override
    public IndexDefinition setSettingsUrl(String settingsUrlString) throws MalformedURLException {
        this.settingsUrl = settingsUrlString != null ? new URL(settingsUrlString) : null;
        return this;
    }

    @Override
    public IndexDefinition setSettingsUrl(URL settingsUrl) {
        this.settingsUrl = settingsUrl;
        return this;
    }

    @Override
    public URL getSettingsUrl() {
        return settingsUrl;
    }

    @Override
    public IndexDefinition setMappingsUrl(String mappingsUrlString) throws MalformedURLException {
        this.mappingsUrl = mappingsUrlString != null ? new URL(mappingsUrlString) : null;
        return this;
    }

    @Override
    public IndexDefinition setMappingsUrl(URL mappingsUrl) {
        this.mappingsUrl = mappingsUrl;
        return this;
    }

    @Override
    public URL getMappingsUrl() {
        return mappingsUrl;
    }

    @Override
    public IndexDefinition setDateTimePattern(String timeWindow) {
        this.dateTimePattern = timeWindow;
        return this;
    }

    @Override
    public String getDateTimePattern() {
        return dateTimePattern;
    }

    @Override
    public IndexDefinition setEnabled(boolean enabled) {
        this.enabled = enabled;
        return this;
    }

    @Override
    public boolean isEnabled() {
        return enabled;
    }

    @Override
    public IndexDefinition setIgnoreErrors(boolean ignoreErrors) {
        this.ignoreErrors = ignoreErrors;
        return this;
    }

    @Override
    public boolean ignoreErrors() {
        return ignoreErrors;
    }

    @Override
    public IndexDefinition setShift(boolean switchAliases) {
        this.switchAliases = switchAliases;
        return this;
    }

    @Override
    public boolean isShiftEnabled() {
        return switchAliases;
    }

    @Override
    public IndexDefinition setForceMerge(boolean hasForceMerge) {
        this.hasForceMerge = hasForceMerge;
        return this;
    }

    @Override
    public boolean hasForceMerge() {
        return hasForceMerge;
    }

    @Override
    public IndexDefinition setReplicaLevel(int replicaLevel) {
        this.replicaLevel = replicaLevel;
        return this;
    }

    @Override
    public int getReplicaLevel() {
        return replicaLevel;
    }

    @Override
    public IndexDefinition setRetention(IndexRetention indexRetention) {
        this.indexRetention = indexRetention;
        return this;
    }

    @Override
    public IndexRetention getRetention() {
        return indexRetention;
    }

    @Override
    public IndexDefinition setMaxWaitTime(long maxWaitTime, TimeUnit timeUnit) {
        this.maxWaitTime = maxWaitTime;
        this.maxWaitTimeUnit = timeUnit;
        return this;
    }

    @Override
    public long getMaxWaitTime() {
        return maxWaitTime;
    }

    @Override
    public TimeUnit getMaxWaitTimeUnit() {
        return maxWaitTimeUnit;
    }

    @Override
    public IndexDefinition setStartRefreshInterval(long seconds) {
        this.startRefreshInterval = seconds;
        return this;
    }

    @Override
    public long getStartRefreshInterval() {
        return startRefreshInterval;
    }

    @Override
    public IndexDefinition setStopRefreshInterval(long seconds) {
        this.stopRefreshInterval = seconds;
        return this;
    }

    @Override
    public long getStopRefreshInterval() {
        return stopRefreshInterval;
    }

}
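The setters above are fluent and each returns the IndexDefinition, so a definition can be assembled in one chain. A minimal sketch under assumptions: the concrete class name DefaultIndexDefinition, its package, and every value below are illustrative only; just the method names and signatures come from this file.

    import org.xbib.elx.api.IndexDefinition;
    import org.xbib.elx.common.DefaultIndexDefinition; // assumed package, mirroring DefaultIndexRetention below

    import java.net.MalformedURLException;
    import java.util.concurrent.TimeUnit;

    class IndexDefinitionSketch {

        // Hypothetical helper; values are placeholders, not project defaults.
        static IndexDefinition example() throws MalformedURLException {
            return new DefaultIndexDefinition()
                    .setSettingsUrl("file:///tmp/settings.json")  // hypothetical location
                    .setMappingsUrl("file:///tmp/mappings.json")  // hypothetical location
                    .setDateTimePattern("yyyyMMdd")
                    .setEnabled(true)
                    .setIgnoreErrors(false)
                    .setShift(true)
                    .setForceMerge(true)
                    .setReplicaLevel(1)
                    .setMaxWaitTime(30L, TimeUnit.SECONDS)
                    .setStartRefreshInterval(-1L)   // e.g. disable refresh while bulk indexing
                    .setStopRefreshInterval(30L);   // restore refresh afterwards
        }
    }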
@@ -0,0 +1,32 @@
package org.xbib.elx.common;

import org.xbib.elx.api.IndexRetention;

public class DefaultIndexRetention implements IndexRetention {

    private int delta;

    private int minToKeep;

    @Override
    public IndexRetention setDelta(int delta) {
        this.delta = delta;
        return this;
    }

    @Override
    public int getDelta() {
        return delta;
    }

    @Override
    public IndexRetention setMinToKeep(int minToKeep) {
        this.minToKeep = minToKeep;
        return this;
    }

    @Override
    public int getMinToKeep() {
        return minToKeep;
    }
}
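How a retention policy might be attached to a definition, as a sketch only: the exact semantics of delta and minToKeep are not spelled out in this hunk, so the comments below are assumptions drawn from the field names.

    import org.xbib.elx.api.IndexDefinition;
    import org.xbib.elx.api.IndexRetention;
    import org.xbib.elx.common.DefaultIndexRetention;

    class RetentionSketch {

        static IndexDefinition withRetention(IndexDefinition indexDefinition) {
            IndexRetention retention = new DefaultIndexRetention()
                    .setDelta(7)       // assumed meaning: prune indices older than 7 time-window steps
                    .setMinToKeep(2);  // assumed meaning: always keep at least 2 generations
            return indexDefinition.setRetention(retention);
        }
    }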
@@ -6,8 +6,10 @@ import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.settings.Settings;
 
+import java.util.concurrent.TimeUnit;
+
 /**
- * Mock client, it does not perform actions on a cluster. Useful for testing or dry runs.
+ * Mock client, it does not perform any actions on a cluster. Useful for testing.
  */
 public class MockExtendedClient extends AbstractExtendedClient {
 
@@ -56,18 +58,13 @@ public class MockExtendedClient extends AbstractExtendedClient {
         return this;
     }
 
-    @Override
-    public MockExtendedClient flushIngest() {
-        return this;
-    }
-
     @Override
     public MockExtendedClient startBulk(String index, long startRefreshInterval, long stopRefreshIterval) {
         return this;
     }
 
     @Override
-    public MockExtendedClient stopBulk(String index, String maxWaitTime) {
+    public MockExtendedClient stopBulk(String index, long maxWaitTime, TimeUnit timeUnit) {
         return this;
     }
 
@@ -92,32 +89,37 @@ public class MockExtendedClient extends AbstractExtendedClient {
     }
 
     @Override
-    public boolean forceMerge(String index, String maxWaitTime) {
+    public boolean forceMerge(String index, long maxWaitTime, TimeUnit timeUnit) {
         return true;
     }
 
     @Override
-    public boolean waitForCluster(String healthColor, String timeValue) {
+    public boolean waitForCluster(String healthColor, long timeValue, TimeUnit timeUnit) {
         return true;
     }
 
     @Override
-    public boolean waitForResponses(String maxWaitTime) {
+    public boolean waitForResponses(long maxWaitTime, TimeUnit timeUnit) {
         return true;
     }
 
     @Override
-    public boolean waitForRecovery(String index, String maxWaitTime) {
+    public boolean waitForRecovery(String index, long maxWaitTime, TimeUnit timeUnit) {
         return true;
     }
 
     @Override
-    public MockExtendedClient updateReplicaLevel(String index, int level, String maxWaitTime) {
+    public MockExtendedClient updateReplicaLevel(String index, int level, long maxWaitTime, TimeUnit timeUnit) {
         return this;
     }
 
     @Override
-    public void shutdown() {
+    public void flush() {
+        // nothing to do
+    }
+
+    @Override
+    public void close() {
         // nothing to do
     }
 }
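The mock mirrors the reworked time-based API: every former String duration such as "30s" becomes a long plus TimeUnit, flushIngest() gives way to flush(), and shutdown() is replaced by close(). A sketch against those signatures; how the mock instance is obtained (constructor vs. ClientBuilder) is not shown in this hunk, and the import package is an assumption.

    import org.xbib.elx.common.MockExtendedClient; // package assumed

    import java.util.concurrent.TimeUnit;

    class MockClientSketch {

        static void exercise(MockExtendedClient client) throws Exception {
            client.startBulk("test", -1L, 1000L);            // refresh intervals, as before
            client.waitForResponses(30L, TimeUnit.SECONDS);  // was waitForResponses("30s")
            client.stopBulk("test", 30L, TimeUnit.SECONDS);  // was stopBulk("test", "30s")
            client.flush();                                  // replaces flushIngest()
            client.close();                                  // replaces shutdown()
        }
    }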
@@ -1,66 +0,0 @@
package org.xbib.elx.common;

import org.xbib.elx.api.BulkControl;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 */
public class SimpleBulkControl implements BulkControl {

    private final Set<String> indexNames;

    private final Map<String, Long> startBulkRefreshIntervals;

    private final Map<String, Long> stopBulkRefreshIntervals;

    private String maxWaitTime;

    public SimpleBulkControl() {
        indexNames = new HashSet<>();
        startBulkRefreshIntervals = new HashMap<>();
        stopBulkRefreshIntervals = new HashMap<>();
        maxWaitTime = "30s";
    }

    @Override
    public void startBulk(String indexName, long startRefreshInterval, long stopRefreshInterval) {
        indexNames.add(indexName);
        startBulkRefreshIntervals.put(indexName, startRefreshInterval);
        stopBulkRefreshIntervals.put(indexName, stopRefreshInterval);
    }

    @Override
    public boolean isBulk(String indexName) {
        return indexNames.contains(indexName);
    }

    @Override
    public void finishBulk(String indexName) {
        indexNames.remove(indexName);
    }

    @Override
    public Set<String> indices() {
        return indexNames;
    }

    @Override
    public Map<String, Long> getStartBulkRefreshIntervals() {
        return startBulkRefreshIntervals;
    }

    @Override
    public Map<String, Long> getStopBulkRefreshIntervals() {
        return stopBulkRefreshIntervals;
    }

    @Override
    public String getMaxWaitTime() {
        return maxWaitTime;
    }

}
@@ -1 +1,4 @@
+/**
+ * I/O helpers for Elasticsearch client extensions.
+ */
 package org.xbib.elx.common.io;
@@ -1 +1,4 @@
+/**
+ * Utilities for Elasticsearch client extensions.
+ */
 package org.xbib.elx.common.util;
@@ -47,10 +47,9 @@ public class ExtendedNodeClient extends AbstractExtendedClient {
         return null;
     }
 
-
     @Override
-    public void shutdown() throws IOException {
-        super.shutdown();
+    public void close() throws IOException {
+        super.close();
         try {
             if (node != null) {
                 logger.debug("closing node...");
@@ -0,0 +1,4 @@
/**
 * Node client extensions.
 */
package org.xbib.elx.node;
@@ -2,7 +2,7 @@ package org.xbib.elx.node;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import org.apache.logging.log4j.LogManager;
@@ -27,9 +27,9 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-public class ExtendedNodeClientTest extends NodeTestUtils {
+public class ClientTest extends NodeTestUtils {
 
-    private static final Logger logger = LogManager.getLogger(ExtendedNodeClientTest.class.getSimpleName());
+    private static final Logger logger = LogManager.getLogger(ClientTest.class.getSimpleName());
 
     private static final Long ACTIONS = 25000L;
 
@@ -55,17 +55,17 @@ public class ExtendedNodeClientTest extends NodeTestUtils {
         try {
             client.newIndex("test");
             client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest
-            client.flushIngest();
-            client.waitForResponses("30s");
+            client.flush();
+            client.waitForResponses(30L, TimeUnit.SECONDS);
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
             assertEquals(1, client.getBulkMetric().getSucceeded().getCount());
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
-            client.shutdown();
+            assertNull(client.getBulkController().getLastBulkError());
+            client.close();
         }
     }
 
@@ -76,11 +76,7 @@ public class ExtendedNodeClientTest extends NodeTestUtils {
                 .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(5))
                 .build();
         client.newIndex("test");
-        if (client.hasThrowable()) {
-            logger.error("error", client.getThrowable());
-        }
-        assertFalse(client.hasThrowable());
-        client.shutdown();
+        client.close();
     }
 
     @Test
@@ -105,11 +101,7 @@ public class ExtendedNodeClientTest extends NodeTestUtils {
                 client.getClient().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet();
         logger.info("mappings={}", getMappingsResponse.getMappings());
         assertTrue(getMappingsResponse.getMappings().get("test").containsKey("doc"));
-        if (client.hasThrowable()) {
-            logger.error("error", client.getThrowable());
-        }
-        assertFalse(client.hasThrowable());
-        client.shutdown();
+        client.close();
     }
 
     @Test
@@ -125,22 +117,22 @@ public class ExtendedNodeClientTest extends NodeTestUtils {
             for (int i = 0; i < ACTIONS; i++) {
                 client.index("test", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
-            client.flushIngest();
-            client.waitForResponses("30s");
+            client.flush();
+            client.waitForResponses(30L, TimeUnit.SECONDS);
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
             assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount());
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
+            assertNull(client.getBulkController().getLastBulkError());
             client.refreshIndex("test");
             SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
                     .setQuery(QueryBuilders.matchAllQuery()).setSize(0);
             assertEquals(numactions,
                     searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
-            client.shutdown();
+            client.close();
         }
     }
 
@@ -172,9 +164,9 @@ public class ExtendedNodeClientTest extends NodeTestUtils {
             }
             logger.info("waiting for latch...");
             if (latch.await(5, TimeUnit.MINUTES)) {
-                logger.info("last flush...");
-                client.flushIngest();
-                client.waitForResponses("60s");
+                logger.info("flush...");
+                client.flush();
+                client.waitForResponses(60L, TimeUnit.SECONDS);
                 logger.info("got all responses, pool shutdown...");
                 pool.shutdown();
                 logger.info("pool is shut down");
@@ -184,18 +176,18 @@ public class ExtendedNodeClientTest extends NodeTestUtils {
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
-            client.stopBulk("test", "30s");
+            client.stopBulk("test", 30L, TimeUnit.SECONDS);
             assertEquals(maxthreads * actions, client.getBulkMetric().getSucceeded().getCount());
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
+            assertNull(client.getBulkController().getLastBulkError());
             client.refreshIndex("test");
             SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
                     .setQuery(QueryBuilders.matchAllQuery()).setSize(0);
             assertEquals(maxthreads * actions,
                     searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
-            client.shutdown();
+            client.close();
         }
     }
}
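Across these tests the old client.hasThrowable()/getThrowable() pair plus shutdown() gives way to getBulkController().getLastBulkError() plus close(). A compact sketch of the new error-check pattern; builder arguments (settings, a node client) are omitted here and would be needed in practice, and the provider packages are assumed from the test classes' own package.

    import org.xbib.elx.common.ClientBuilder;
    import org.xbib.elx.node.ExtendedNodeClient;          // package assumed
    import org.xbib.elx.node.ExtendedNodeClientProvider;  // package assumed

    import java.util.concurrent.TimeUnit;

    class BulkErrorCheckSketch {

        static void run() throws Exception {
            ExtendedNodeClient client = ClientBuilder.builder()  // settings/node wiring omitted
                    .provider(ExtendedNodeClientProvider.class)
                    .build();
            try {
                client.newIndex("test");
                client.index("test", "1", true, "{ \"name\" : \"Hello World\"}");
                client.flush();
                client.waitForResponses(30L, TimeUnit.SECONDS);
            } finally {
                // the last bulk error replaces the former hasThrowable()/getThrowable()
                if (client.getBulkController().getLastBulkError() != null) {
                    throw new IllegalStateException(client.getBulkController().getLastBulkError());
                }
                client.close();
            }
        }
    }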
@@ -4,17 +4,16 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-
-@Ignore
-public class ExtendedNodeClusterBlockTest extends NodeTestUtils {
+public class ClusterBlockTest extends NodeTestUtils {
 
     private static final Logger logger = LogManager.getLogger("test");
 
@@ -23,7 +22,6 @@ public class ExtendedNodeClusterBlockTest extends NodeTestUtils {
         try {
             setClusterName();
             startNode("1");
-            findNodeAddress();
             // do not wait for green health state
             logger.info("ready");
         } catch (Throwable t) {
@@ -41,11 +39,11 @@ public class ExtendedNodeClusterBlockTest extends NodeTestUtils {
 
     @Test(expected = ClusterBlockException.class)
     public void testClusterBlock() throws Exception {
-        BulkRequestBuilder brb = client("1").prepareBulk();
-        XContentBuilder builder = jsonBuilder().startObject().field("field1", "value1").endObject();
-        IndexRequestBuilder irb = client("1").prepareIndex("test", "test", "1").setSource(builder);
+        Client client = client("1");
+        XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("field1", "value1").endObject();
+        IndexRequestBuilder irb = client.prepareIndex("test", "test", "1").setSource(builder);
+        BulkRequestBuilder brb = client.prepareBulk();
         brb.add(irb);
         brb.execute().actionGet();
     }
 
 }
@@ -10,13 +10,14 @@ import org.junit.Test;
 import org.xbib.elx.common.ClientBuilder;
 import org.xbib.elx.common.Parameters;
 
+import java.util.concurrent.TimeUnit;
+
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.junit.Assert.*;
 
-@Ignore
-public class ExtendeNodeDuplicateIDTest extends NodeTestUtils {
+public class DuplicateIDTest extends NodeTestUtils {
 
-    private static final Logger logger = LogManager.getLogger(ExtendeNodeDuplicateIDTest.class.getSimpleName());
+    private static final Logger logger = LogManager.getLogger(DuplicateIDTest.class.getSimpleName());
 
     private static final Long MAX_ACTIONS_PER_REQUEST = 1000L;
 
@@ -34,8 +35,8 @@ public class ExtendeNodeDuplicateIDTest extends NodeTestUtils {
             for (int i = 0; i < ACTIONS; i++) {
                 client.index("test", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
-            client.flushIngest();
-            client.waitForResponses("30s");
+            client.flush();
+            client.waitForResponses(30L, TimeUnit.SECONDS);
             client.refreshIndex("test");
             SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
                     .setIndices("test")
@@ -47,12 +48,12 @@ public class ExtendeNodeDuplicateIDTest extends NodeTestUtils {
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
-            client.shutdown();
+            client.close();
            assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount());
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
+            assertNull(client.getBulkController().getLastBulkError());
         }
     }
}
@@ -1,63 +0,0 @@
package org.xbib.elx.node;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.settings.Settings;
import org.junit.Ignore;
import org.junit.Test;
import org.xbib.elx.common.ClientBuilder;

import java.util.HashMap;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;

@Ignore
public class ExtendedNodeUpdateReplicaLevelTest extends NodeTestUtils {

    private static final Logger logger = LogManager.getLogger(ExtendedNodeUpdateReplicaLevelTest.class.getSimpleName());

    @Test
    public void testUpdateReplicaLevel() throws Exception {

        long numberOfShards = 2;
        int replicaLevel = 3;

        // we need 3 nodes for replica level 3
        startNode("2");
        startNode("3");

        long shardsAfterReplica;

        Settings settings = Settings.settingsBuilder()
                .put("index.number_of_shards", numberOfShards)
                .put("index.number_of_replicas", 0)
                .build();

        final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
                .provider(ExtendedNodeClientProvider.class)
                .build();

        try {
            client.newIndex("replicatest", settings, new HashMap<>());
            client.waitForCluster("GREEN", "30s");
            for (int i = 0; i < 12345; i++) {
                client.index("replicatest", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
            }
            client.flushIngest();
            client.waitForResponses("30s");
            client.updateReplicaLevel("replicatest", replicaLevel, "30s");
            //assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1));
        } catch (NoNodeAvailableException e) {
            logger.warn("skipping, no node available");
        } finally {
            client.shutdown();
            if (client.hasThrowable()) {
                logger.error("error", client.getThrowable());
            }
            assertFalse(client.hasThrowable());
        }
    }

}
@@ -11,16 +11,18 @@ import org.xbib.elx.common.ClientBuilder;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 @Ignore
-public class ExtendedNodeIndexAliasTest extends NodeTestUtils {
+public class IndexShiftTest extends NodeTestUtils {
 
-    private static final Logger logger = LogManager.getLogger(ExtendedNodeIndexAliasTest.class.getSimpleName());
+    private static final Logger logger = LogManager.getLogger(IndexShiftTest.class.getSimpleName());
 
     @Test
-    public void testIndexAlias() throws Exception {
+    public void testIndexShift() throws Exception {
         final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
                 .provider(ExtendedNodeClientProvider.class)
                 .build();
@@ -29,37 +31,47 @@ public class ExtendedNodeIndexAliasTest extends NodeTestUtils {
             for (int i = 0; i < 1; i++) {
                 client.index("test1234", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
-            client.flushIngest();
+            client.flush();
             client.refreshIndex("test1234");
 
             List<String> simpleAliases = Arrays.asList("a", "b", "c");
-            client.switchIndex("test", "test1234", simpleAliases);
+            client.shiftIndex("test", "test1234", simpleAliases);
 
             client.newIndex("test5678");
             for (int i = 0; i < 1; i++) {
                 client.index("test5678", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
-            client.flushIngest();
+            client.flush();
             client.refreshIndex("test5678");
 
             simpleAliases = Arrays.asList("d", "e", "f");
-            client.switchIndex("test", "test5678", simpleAliases, (builder, index, alias) ->
+            client.shiftIndex("test", "test5678", simpleAliases, (builder, index, alias) ->
                     builder.addAlias(index, alias, QueryBuilders.termQuery("my_key", alias)));
-            Map<String, String> aliases = client.getIndexFilters("test5678");
-            logger.info("aliases of index test5678 = {}", aliases);
+            Map<String, String> indexFilters = client.getIndexFilters("test5678");
+            logger.info("aliases of index test5678 = {}", indexFilters);
+            assertTrue(indexFilters.containsKey("a"));
+            assertTrue(indexFilters.containsKey("b"));
+            assertTrue(indexFilters.containsKey("c"));
+            assertTrue(indexFilters.containsKey("d"));
+            assertTrue(indexFilters.containsKey("e"));
 
-            aliases = client.getAliasFilters("test");
+            Map<String, String> aliases = client.getIndexFilters(client.resolveAlias("test"));
             logger.info("aliases of alias test = {}", aliases);
+            assertTrue(aliases.containsKey("a"));
+            assertTrue(aliases.containsKey("b"));
+            assertTrue(aliases.containsKey("c"));
+            assertTrue(aliases.containsKey("d"));
+            assertTrue(aliases.containsKey("e"));
 
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
-            client.waitForResponses("30s");
-            client.shutdown();
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            client.waitForResponses(30L, TimeUnit.SECONDS);
+            client.close();
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
+            assertNull(client.getBulkController().getLastBulkError());
         }
     }
}
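The alias switch is now called shiftIndex, and alias filters are read back through getIndexFilters together with resolveAlias. A sketch of that flow, assuming the client is already built and the concrete indices already exist and are filled; only the method names and argument shapes come from the hunk above.

    import org.elasticsearch.index.query.QueryBuilders;
    import org.xbib.elx.node.ExtendedNodeClient; // package assumed

    import java.util.Arrays;
    import java.util.Map;

    class IndexShiftSketch {

        static void shift(ExtendedNodeClient client) throws Exception {
            // plain aliases onto the concrete index behind "test"
            client.shiftIndex("test", "test1234", Arrays.asList("a", "b", "c"));
            // filtered aliases via the extra builder callback
            client.shiftIndex("test", "test5678", Arrays.asList("d", "e", "f"),
                    (builder, index, alias) ->
                            builder.addAlias(index, alias, QueryBuilders.termQuery("my_key", alias)));
            // read the filters back from the index the alias currently resolves to
            Map<String, String> filters = client.getIndexFilters(client.resolveAlias("test"));
            filters.forEach((alias, filter) -> System.out.println(alias + " -> " + filter));
        }
    }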
@@ -63,9 +63,7 @@ public class NodeTestUtils {
                 Files.delete(dir);
                 return FileVisitResult.CONTINUE;
             }
-
         });
-
     }
 
     @Before
@@ -74,7 +72,6 @@ public class NodeTestUtils {
         logger.info("starting");
         setClusterName();
         startNode("1");
-        findNodeAddress();
         try {
             ClusterHealthResponse healthResponse = client("1").execute(ClusterHealthAction.INSTANCE,
                     new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN)
@@ -160,18 +157,6 @@ public class NodeTestUtils {
         logger.info("all nodes closed");
     }
 
-    protected void findNodeAddress() {
-        NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().transport(true);
-        NodesInfoResponse response = client("1").admin().cluster().nodesInfo(nodesInfoRequest).actionGet();
-        Object obj = response.iterator().next().getTransport().getAddress()
-                .publishAddress();
-        if (obj instanceof InetSocketTransportAddress) {
-            InetSocketTransportAddress address = (InetSocketTransportAddress) obj;
-            String host = address.address().getHostName();
-            int port = address.address().getPort();
-        }
-    }
-
     private Node buildNode(String id) {
         Settings nodeSettings = settingsBuilder()
                 .put(getNodeSettings())
@@ -19,15 +19,16 @@ import org.xbib.elx.common.ClientBuilder;
 
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 
 @Ignore
-public class ExtendedNodeReplicaTest extends NodeTestUtils {
+public class ReplicaTest extends NodeTestUtils {
 
-    private static final Logger logger = LogManager.getLogger(ExtendedNodeReplicaTest.class.getSimpleName());
+    private static final Logger logger = LogManager.getLogger(ReplicaTest.class.getSimpleName());
 
     @Test
     public void testReplicaLevel() throws Exception {
@@ -54,15 +55,15 @@ public class ExtendedNodeReplicaTest extends NodeTestUtils {
         try {
             client.newIndex("test1", settingsTest1, new HashMap<>())
                     .newIndex("test2", settingsTest2, new HashMap<>());
-            client.waitForCluster("GREEN", "30s");
+            client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS);
             for (int i = 0; i < 1234; i++) {
                 client.index("test1", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
             for (int i = 0; i < 1234; i++) {
                 client.index("test2", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
-            client.flushIngest();
-            client.waitForResponses("30s");
+            client.flush();
+            client.waitForResponses(30L, TimeUnit.SECONDS);
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
@@ -97,11 +98,51 @@ public class ExtendedNodeReplicaTest extends NodeTestUtils {
             } catch (Exception e) {
                 logger.error("delete index failed, ignored. Reason:", e);
             }
-            client.shutdown();
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            client.close();
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
+            assertNull(client.getBulkController().getLastBulkError());
+        }
+    }
+
+    @Test
+    public void testUpdateReplicaLevel() throws Exception {
+
+        long numberOfShards = 2;
+        int replicaLevel = 3;
+
+        // we need 3 nodes for replica level 3
+        startNode("2");
+        startNode("3");
+
+        Settings settings = Settings.settingsBuilder()
+                .put("index.number_of_shards", numberOfShards)
+                .put("index.number_of_replicas", 0)
+                .build();
+
+        final ExtendedNodeClient client = ClientBuilder.builder(client("1"))
+                .provider(ExtendedNodeClientProvider.class)
+                .build();
+
+        try {
+            client.newIndex("replicatest", settings, new HashMap<>());
+            client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS);
+            for (int i = 0; i < 12345; i++) {
+                client.index("replicatest", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
+            }
+            client.flush();
+            client.waitForResponses(30L, TimeUnit.SECONDS);
+            client.updateReplicaLevel("replicatest", replicaLevel, 30L, TimeUnit.SECONDS);
+            assertEquals(replicaLevel, client.getReplicaLevel("replicatest"));
+        } catch (NoNodeAvailableException e) {
+            logger.warn("skipping, no node available");
+        } finally {
+            client.close();
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
+            }
+            assertNull(client.getBulkController().getLastBulkError());
         }
     }
@@ -6,16 +6,16 @@ import org.elasticsearch.client.transport.NoNodeAvailableException;
 import org.elasticsearch.common.settings.Settings;
 import org.junit.Test;
 import org.xbib.elx.common.ClientBuilder;
-import org.xbib.elx.common.SimpleBulkControl;
-import org.xbib.elx.common.SimpleBulkMetric;
 import org.xbib.elx.api.IndexDefinition;
 
+import java.util.concurrent.TimeUnit;
+
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 
-public class ExtendedNodeSmokeTest extends NodeTestUtils {
+public class SmokeTest extends NodeTestUtils {
 
-    private static final Logger logger = LogManager.getLogger(ExtendedNodeSmokeTest.class.getSimpleName());
+    private static final Logger logger = LogManager.getLogger(SmokeTest.class.getSimpleName());
 
     @Test
     public void smokeTest() throws Exception {
@@ -23,21 +23,19 @@ public class ExtendedNodeSmokeTest extends NodeTestUtils {
                 .provider(ExtendedNodeClientProvider.class)
                 .build();
         try {
-            client.setBulkControl(new SimpleBulkControl());
-            client.setBulkMetric(new SimpleBulkMetric());
             client.newIndex("test");
             client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest
-            client.flushIngest();
-            client.waitForResponses("30s");
+            client.flush();
+            client.waitForResponses(30, TimeUnit.SECONDS);
 
             assertEquals(clusterName, client.getClusterName());
 
             client.checkMapping("test");
 
             client.update("test", "1", "{ \"name\" : \"Another name\"}");
-            client.flushIngest();
+            client.flush();
 
-            client.waitForRecovery("test", "10s");
+            client.waitForRecovery("test", 10L, TimeUnit.SECONDS);
 
             client.delete("test", "1");
             client.deleteIndex("test");
@@ -47,7 +45,7 @@ public class ExtendedNodeSmokeTest extends NodeTestUtils {
             assertEquals(0, indexDefinition.getReplicaLevel());
             client.newIndex(indexDefinition);
             client.index(indexDefinition.getFullIndexName(), "1", true, "{ \"name\" : \"Hello World\"}");
-            client.flushIngest();
+            client.flush();
             client.updateReplicaLevel(indexDefinition, 2);
 
             int replica = client.getReplicaLevel(indexDefinition);
@@ -59,11 +57,11 @@ public class ExtendedNodeSmokeTest extends NodeTestUtils {
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            client.close();
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
-            client.shutdown();
+            assertNull(client.getBulkController().getLastBulkError());
         }
     }
}
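SmokeTest also exercises the IndexDefinition-driven variants of newIndex, updateReplicaLevel and getReplicaLevel. A sketch of that flow; how the IndexDefinition itself is produced lies outside this hunk, so it is passed in here, and the client package is assumed.

    import org.xbib.elx.api.IndexDefinition;
    import org.xbib.elx.node.ExtendedNodeClient; // package assumed

    class IndexDefinitionFlowSketch {

        static int run(ExtendedNodeClient client, IndexDefinition indexDefinition) throws Exception {
            client.newIndex(indexDefinition);
            client.index(indexDefinition.getFullIndexName(), "1", true, "{ \"name\" : \"Hello World\"}");
            client.flush();
            client.updateReplicaLevel(indexDefinition, 2);
            return client.getReplicaLevel(indexDefinition); // expected to be 2 afterwards
        }
    }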
@@ -74,15 +74,15 @@ public class ExtendedTransportClient extends AbstractExtendedClient {
     }
 
     @Override
-    public synchronized void shutdown() throws IOException {
-        super.shutdown();
-        logger.info("shutting down...");
+    public synchronized void close() throws IOException {
+        super.close();
+        logger.info("closing");
         if (getClient() != null) {
             TransportClient client = (TransportClient) getClient();
             client.close();
             client.threadPool().shutdown();
         }
-        logger.info("shutting down completed");
+        logger.info("close completed");
     }
 
     private Collection<InetSocketTransportAddress> findAddresses(Settings settings) throws IOException {
@@ -308,7 +308,9 @@ public class TransportClient extends AbstractClient {
                     transportService.connectToNode(node);
                 } catch (Exception e) {
                     it.remove();
-                    logger.debug("failed to connect to discovered node [" + node + "]", e);
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("failed to connect to discovered node [" + node + "]", e);
+                    }
                 }
             }
         }
@@ -25,12 +25,12 @@ import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
-public class ExtendedTransportClientTest extends NodeTestUtils {
+public class ClientTest extends NodeTestUtils {
 
-    private static final Logger logger = LogManager.getLogger(ExtendedTransportClientTest.class.getSimpleName());
+    private static final Logger logger = LogManager.getLogger(ClientTest.class.getSimpleName());
 
     private static final Long MAX_ACTIONS_PER_REQUEST = 1000L;
 
@@ -54,10 +54,6 @@ public class ExtendedTransportClientTest extends NodeTestUtils {
                 .put(Parameters.FLUSH_INTERVAL.name(), TimeValue.timeValueSeconds(60))
                 .build();
         client.newIndex("test");
-        if (client.hasThrowable()) {
-            logger.error("error", client.getThrowable());
-        }
-        assertFalse(client.hasThrowable());
         try {
             client.deleteIndex("test")
                     .newIndex("test")
@@ -65,11 +61,7 @@ public class ExtendedTransportClientTest extends NodeTestUtils {
         } catch (NoNodeAvailableException e) {
             logger.error("no node available");
         } finally {
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
-            }
-            assertFalse(client.hasThrowable());
-            client.shutdown();
+            client.close();
         }
     }
 
@@ -84,17 +76,17 @@ public class ExtendedTransportClientTest extends NodeTestUtils {
         try {
             client.newIndex("test");
             client.index("test", "1", true, "{ \"name\" : \"Hello World\"}");
-            client.flushIngest();
-            client.waitForResponses("30s");
+            client.flush();
+            client.waitForResponses(30L, TimeUnit.SECONDS);
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
             assertEquals(1, client.getBulkMetric().getSucceeded().getCount());
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
-            client.shutdown();
+            assertNull(client.getBulkController().getLastBulkError());
+            client.close();
         }
     }
 
@@ -121,11 +113,7 @@ public class ExtendedTransportClientTest extends NodeTestUtils {
                 client.getClient().execute(GetMappingsAction.INSTANCE, getMappingsRequest).actionGet();
         logger.info("mappings={}", getMappingsResponse.getMappings());
         assertTrue(getMappingsResponse.getMappings().get("test").containsKey("doc"));
-        if (client.hasThrowable()) {
-            logger.error("error", client.getThrowable());
-        }
-        assertFalse(client.hasThrowable());
-        client.shutdown();
+        client.close();
     }
 
     @Test
@@ -142,17 +130,17 @@ public class ExtendedTransportClientTest extends NodeTestUtils {
             for (int i = 0; i < ACTIONS; i++) {
                 client.index("test", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
-            client.flushIngest();
-            client.waitForResponses("30s");
+            client.flush();
+            client.waitForResponses(30L, TimeUnit.SECONDS);
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
             assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount());
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
-            client.shutdown();
+            assertNull(client.getBulkController().getLastBulkError());
+            client.close();
         }
     }
 
@@ -190,21 +178,21 @@ public class ExtendedTransportClientTest extends NodeTestUtils {
             logger.info("waiting for latch...");
             if (latch.await(60, TimeUnit.SECONDS)) {
                 logger.info("flush ...");
-                client.flushIngest();
-                client.waitForResponses("30s");
+                client.flush();
+                client.waitForResponses(30L, TimeUnit.SECONDS);
                 logger.info("pool to be shut down ...");
                 pool.shutdown();
                 logger.info("poot shut down");
             }
-            client.stopBulk("test", "30s");
+            client.stopBulk("test", 30L, TimeUnit.SECONDS);
             assertEquals(maxthreads * maxloop, client.getBulkMetric().getSucceeded().getCount());
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
+            assertNull(client.getBulkController().getLastBulkError());
             // extra search lookup
             client.refreshIndex("test");
             SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
@@ -214,7 +202,7 @@ public class ExtendedTransportClientTest extends NodeTestUtils {
                     .setSize(0);
             assertEquals(maxthreads * maxloop,
                     searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
-            client.shutdown();
+            client.close();
         }
     }
}
@@ -9,12 +9,16 @@ import org.junit.Test;
 import org.xbib.elx.common.ClientBuilder;
 import org.xbib.elx.common.Parameters;
 
+import java.util.concurrent.TimeUnit;
+
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
-public class ExtendedTransportDuplicateIDTest extends NodeTestUtils {
+public class DuplicateIDTest extends NodeTestUtils {
 
-    private final static Logger logger = LogManager.getLogger(ExtendedTransportDuplicateIDTest.class.getSimpleName());
+    private final static Logger logger = LogManager.getLogger(DuplicateIDTest.class.getSimpleName());
 
     private final static Long MAX_ACTIONS_PER_REQUEST = 1000L;
 
@@ -33,8 +37,8 @@ public class ExtendedTransportDuplicateIDTest extends NodeTestUtils {
             for (int i = 0; i < ACTIONS; i++) {
                 client.index("test", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
-            client.flushIngest();
-            client.waitForResponses("30s");
+            client.flush();
+            client.waitForResponses(30L, TimeUnit.SECONDS);
             client.refreshIndex("test");
             SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.getClient(), SearchAction.INSTANCE)
                     .setIndices("test")
@@ -46,12 +50,12 @@ public class ExtendedTransportDuplicateIDTest extends NodeTestUtils {
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
-            client.shutdown();
+            client.close();
             assertEquals(numactions, client.getBulkMetric().getSucceeded().getCount());
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
+            assertNull(client.getBulkController().getLastBulkError());
         }
     }
}
@@ -1,61 +0,0 @@
package org.xbib.elx.transport;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.common.settings.Settings;
import org.junit.Test;
import org.xbib.elx.common.ClientBuilder;

import java.util.HashMap;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;

public class ExtendedTransportUpdateReplicaLevelTest extends NodeTestUtils {

    private static final Logger logger = LogManager.getLogger(ExtendedTransportUpdateReplicaLevelTest.class.getSimpleName());

    @Test
    public void testUpdateReplicaLevel() throws Exception {

        long numberOfShards = 2;
        int replicaLevel = 3;

        // we need 3 nodes for replica level 3
        startNode("2");
        startNode("3");

        int shardsAfterReplica;

        final ExtendedTransportClient client = ClientBuilder.builder()
                .provider(ExtendedTransportClientProvider.class)
                .put(getSettings())
                .build();

        Settings settings = Settings.settingsBuilder()
                .put("index.number_of_shards", numberOfShards)
                .put("index.number_of_replicas", 0)
                .build();

        try {
            client.newIndex("replicatest", settings, new HashMap<>());
            client.waitForCluster("GREEN", "30s");
            for (int i = 0; i < 12345; i++) {
                client.index("replicatest", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
            }
            client.flushIngest();
            client.waitForResponses("30s");
            client.updateReplicaLevel("replicatest", replicaLevel, "30s");
            //assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1));
        } catch (NoNodeAvailableException e) {
            logger.warn("skipping, no node available");
        } finally {
            client.shutdown();
            if (client.hasThrowable()) {
                logger.error("error", client.getThrowable());
            }
            assertFalse(client.hasThrowable());
        }
    }
}
@@ -10,13 +10,14 @@ import org.xbib.elx.common.ClientBuilder;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
-public class ExtendedTransportIndexAliasTest extends NodeTestUtils {
+public class IndexShiftTest extends NodeTestUtils {
 
-    private static final Logger logger = LogManager.getLogger(ExtendedTransportIndexAliasTest.class.getSimpleName());
+    private static final Logger logger = LogManager.getLogger(IndexShiftTest.class.getSimpleName());
 
     @Test
     public void testIndexAlias() throws Exception {
@@ -28,21 +29,21 @@ public class ExtendedTransportIndexAliasTest extends NodeTestUtils {
             for (int i = 0; i < 1; i++) {
                 client.index("test1234", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
-            client.flushIngest();
+            client.flush();
             client.refreshIndex("test1234");
 
             List<String> simpleAliases = Arrays.asList("a", "b", "c");
-            client.switchIndex("test", "test1234", simpleAliases);
+            client.shiftIndex("test", "test1234", simpleAliases);
 
             client.newIndex("test5678");
             for (int i = 0; i < 1; i++) {
                 client.index("test5678", randomString(1), false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
-            client.flushIngest();
+            client.flush();
             client.refreshIndex("test5678");
 
             simpleAliases = Arrays.asList("d", "e", "f");
-            client.switchIndex("test", "test5678", simpleAliases, (builder, index, alias) ->
+            client.shiftIndex("test", "test5678", simpleAliases, (builder, index, alias) ->
                     builder.addAlias(index, alias, QueryBuilders.termQuery("my_key", alias)));
             Map<String, String> indexFilters = client.getIndexFilters("test5678");
             logger.info("index filters of index test5678 = {}", indexFilters);
@@ -52,7 +53,7 @@ public class ExtendedTransportIndexAliasTest extends NodeTestUtils {
             assertTrue(indexFilters.containsKey("d"));
             assertTrue(indexFilters.containsKey("e"));
 
-            Map<String, String> aliases = client.getAliasFilters("test");
+            Map<String, String> aliases = client.getIndexFilters(client.resolveAlias("test"));
             logger.info("aliases of alias test = {}", aliases);
             assertTrue(aliases.containsKey("a"));
             assertTrue(aliases.containsKey("b"));
@@ -60,15 +61,15 @@ public class ExtendedTransportIndexAliasTest extends NodeTestUtils {
             assertTrue(aliases.containsKey("d"));
             assertTrue(aliases.containsKey("e"));
 
-            client.waitForResponses("30s");
-            assertFalse(client.hasThrowable());
+            client.waitForResponses(30L, TimeUnit.SECONDS);
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            client.shutdown();
+            assertNull(client.getBulkController().getLastBulkError());
+            client.close();
         }
     }
 }
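
For quick reference, a condensed sketch of the renamed alias API exercised by the test above; method names and argument order are taken from this diff, while the helper class and the literal index/alias names are merely illustrative:

import org.elasticsearch.index.query.QueryBuilders;
import org.xbib.elx.transport.ExtendedTransportClient;

import java.util.Arrays;
import java.util.Map;

class IndexShiftSketch {

    static Map<String, String> shift(ExtendedTransportClient client) throws Exception {
        // plain shift: the "test" alias now points at test1234 and additionally carries a, b, c
        client.shiftIndex("test", "test1234", Arrays.asList("a", "b", "c"));   // was switchIndex(...)

        // shift with filtered aliases, built through the extra alias-builder callback
        client.shiftIndex("test", "test5678", Arrays.asList("d", "e", "f"),
                (builder, index, alias) ->
                        builder.addAlias(index, alias, QueryBuilders.termQuery("my_key", alias)));

        // filters can be read back per concrete index
        Map<String, String> filtersOnIndex = client.getIndexFilters("test5678");
        System.out.println("filters on test5678: " + filtersOnIndex);
        // the old getAliasFilters("test") becomes resolveAlias() followed by getIndexFilters()
        return client.getIndexFilters(client.resolveAlias("test"));
    }
}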
@@ -18,14 +18,15 @@ import org.xbib.elx.common.ClientBuilder;
 
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 
-public class ExtendedTransportReplicaTest extends NodeTestUtils {
+public class ReplicaTest extends NodeTestUtils {
 
-    private static final Logger logger = LogManager.getLogger(ExtendedTransportReplicaTest.class.getSimpleName());
+    private static final Logger logger = LogManager.getLogger(ReplicaTest.class.getSimpleName());
 
     @Test
     public void testReplicaLevel() throws Exception {
@@ -53,15 +54,15 @@ public class ExtendedTransportReplicaTest extends NodeTestUtils {
         try {
             client.newIndex("test1", settingsTest1, new HashMap<>())
                     .newIndex("test2", settingsTest2, new HashMap<>());
-            client.waitForCluster("GREEN", "30s");
+            client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS);
             for (int i = 0; i < 1234; i++) {
                 client.index("test1", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
             for (int i = 0; i < 1234; i++) {
                 client.index("test2", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
             }
-            client.flushIngest();
-            client.waitForResponses("30s");
+            client.flush();
+            client.waitForResponses(30L, TimeUnit.SECONDS);
             client.refreshIndex("test1");
             client.refreshIndex("test2");
         } catch (NoNodeAvailableException e) {
@@ -96,12 +97,54 @@ public class ExtendedTransportReplicaTest extends NodeTestUtils {
             } catch (Exception e) {
                 logger.error("delete index failed, ignored. Reason:", e);
             }
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
-            client.shutdown();
+            assertNull(client.getBulkController().getLastBulkError());
+            client.close();
         }
     }
 
+    @Test
+    public void testUpdateReplicaLevel() throws Exception {
+
+        long numberOfShards = 2;
+        int replicaLevel = 3;
+
+        // we need 3 nodes for replica level 3
+        startNode("2");
+        startNode("3");
+
+        int shardsAfterReplica;
+
+        final ExtendedTransportClient client = ClientBuilder.builder()
+                .provider(ExtendedTransportClientProvider.class)
+                .put(getSettings())
+                .build();
+
+        Settings settings = Settings.settingsBuilder()
+                .put("index.number_of_shards", numberOfShards)
+                .put("index.number_of_replicas", 0)
+                .build();
+
+        try {
+            client.newIndex("replicatest", settings, new HashMap<>());
+            client.waitForCluster("GREEN", 30L, TimeUnit.SECONDS);
+            for (int i = 0; i < 12345; i++) {
+                client.index("replicatest", null, false, "{ \"name\" : \"" + randomString(32) + "\"}");
+            }
+            client.flush();
+            client.waitForResponses(30L, TimeUnit.SECONDS);
+            client.updateReplicaLevel("replicatest", replicaLevel, 30L, TimeUnit.SECONDS);
+            assertEquals(replicaLevel, client.getReplicaLevel("replicatest"));
+        } catch (NoNodeAvailableException e) {
+            logger.warn("skipping, no node available");
+        } finally {
+            client.close();
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
+            }
+            assertNull(client.getBulkController().getLastBulkError());
+        }
+    }
 }
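
The re-added testUpdateReplicaLevel swaps the previously commented-out shard arithmetic for a direct read-back of the replica level. A short sketch of that step, assuming only the updateReplicaLevel and getReplicaLevel calls shown in the diff (the helper class and index name are placeholders):

import org.xbib.elx.transport.ExtendedTransportClient;

import java.util.concurrent.TimeUnit;

class ReplicaLevelSketch {

    static void raiseAndVerify(ExtendedTransportClient client) throws Exception {
        int replicaLevel = 3; // needs enough data nodes (3 here) for the cluster to go green
        client.updateReplicaLevel("replicatest", replicaLevel, 30L, TimeUnit.SECONDS);
        // the old test only carried a commented-out shard-count check;
        // the reworked API exposes the applied level directly
        if (client.getReplicaLevel("replicatest") != replicaLevel) {
            throw new AssertionError("replica level not applied");
        }
    }
}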
@@ -6,12 +6,14 @@ import org.elasticsearch.client.transport.NoNodeAvailableException;
 import org.junit.Test;
 import org.xbib.elx.common.ClientBuilder;
 
+import java.util.concurrent.TimeUnit;
+
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 
-public class ExtendedTransportClientSingleNodeTest extends NodeTestUtils {
+public class SmokeTest extends NodeTestUtils {
 
-    private static final Logger logger = LogManager.getLogger(ExtendedTransportClientSingleNodeTest.class.getSimpleName());
+    private static final Logger logger = LogManager.getLogger(SmokeTest.class.getSimpleName());
 
     @Test
     public void testSingleDocNodeClient() throws Exception {
@@ -22,17 +24,17 @@ public class ExtendedTransportClientSingleNodeTest extends NodeTestUtils {
         try {
             client.newIndex("test");
             client.index("test", "1", true, "{ \"name\" : \"Hello World\"}"); // single doc ingest
-            client.flushIngest();
-            client.waitForResponses("30s");
+            client.flush();
+            client.waitForResponses(30, TimeUnit.SECONDS);
         } catch (NoNodeAvailableException e) {
             logger.warn("skipping, no node available");
         } finally {
             assertEquals(1, client.getBulkMetric().getSucceeded().getCount());
-            if (client.hasThrowable()) {
-                logger.error("error", client.getThrowable());
+            client.close();
+            if (client.getBulkController().getLastBulkError() != null) {
+                logger.error("error", client.getBulkController().getLastBulkError());
             }
-            assertFalse(client.hasThrowable());
-            client.shutdown();
+            assertNull(client.getBulkController().getLastBulkError());
         }
     }
 }
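
The SmokeTest changes also adjust the teardown order: the client is closed first, then the bulk state is inspected. A tiny sketch of that pattern, assuming getLastBulkError() hands back a Throwable as the logging calls above suggest (the helper class name is illustrative):

import org.xbib.elx.transport.ExtendedTransportClient;

class SmokeTeardownSketch {

    static long closeAndCheck(ExtendedTransportClient client) throws Exception {
        long succeeded = client.getBulkMetric().getSucceeded().getCount(); // per-request success counter
        client.close();                                                    // replaces shutdown(), done before the error check
        Throwable lastError = client.getBulkController().getLastBulkError();
        if (lastError != null) {                                           // replaces hasThrowable()/getThrowable()
            throw new IllegalStateException(lastError);
        }
        return succeeded;
    }
}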
@@ -1,6 +1,6 @@
 group = org.xbib
 name = elx
-version = 2.2.1.4
+version = 2.2.1.5
 
 xbib-metrics.version = 1.1.0
 xbib-guice.version = 4.0.4