drop bulk controller, merge bulk handlers, experimenting with dynamic bulk volume adaptation
This commit is contained in: parent 6279a79ec6, commit 6731ad986b
19 changed files with 376 additions and 708 deletions
BulkClient:

@@ -10,12 +10,6 @@ import java.util.concurrent.TimeUnit;
 
 public interface BulkClient extends BasicClient, Flushable {
 
-    /**
-     * Get bulk control.
-     * @return the bulk control
-     */
-    BulkController getBulkController();
-
     /**
      * Create a new index.
      * @param indexDefinition the index definition
@@ -154,4 +148,6 @@ public interface BulkClient extends BasicClient, Flushable {
      * @param indexDefinition index definition
      */
     void flushIndex(IndexDefinition indexDefinition);
+
+    BulkProcessor getBulkController();
 }
BulkController (deleted):

@@ -1,37 +0,0 @@
-package org.xbib.elx.api;
-
-import org.elasticsearch.action.delete.DeleteRequest;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.update.UpdateRequest;
-import org.elasticsearch.common.settings.Settings;
-
-import java.io.Closeable;
-import java.io.Flushable;
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-public interface BulkController extends Closeable, Flushable {
-
-    void init(Settings settings);
-
-    void inactivate();
-
-    BulkProcessor getBulkProcessor();
-
-    BulkMetric getBulkMetric();
-
-    Throwable getLastBulkError();
-
-    void startBulkMode(IndexDefinition indexDefinition) throws IOException;
-
-    void bulkIndex(IndexRequest indexRequest);
-
-    void bulkDelete(DeleteRequest deleteRequest);
-
-    void bulkUpdate(UpdateRequest updateRequest);
-
-    boolean waitForBulkResponses(long timeout, TimeUnit timeUnit);
-
-    void stopBulkMode(IndexDefinition indexDefinition) throws IOException;
-
-}
BulkMetric:

@@ -1,5 +1,7 @@
 package org.xbib.elx.api;
 
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
 import org.xbib.metrics.api.Count;
 import org.xbib.metrics.api.Metered;
 
@@ -28,4 +30,6 @@ public interface BulkMetric extends Closeable {
     void start();
 
     void stop();
+
+    void recalculate(BulkRequest request, BulkResponse response);
 }
BulkProcessor:

@@ -9,17 +9,25 @@ import java.util.concurrent.TimeUnit;
 
 public interface BulkProcessor extends Closeable, Flushable {
 
-    void setBulkActions(int bulkActions);
+    void setEnabled(boolean enabled);
 
-    int getBulkActions();
+    void startBulkMode(IndexDefinition indexDefinition) throws IOException;
 
-    void setBulkSize(long bulkSize);
+    void stopBulkMode(IndexDefinition indexDefinition) throws IOException;
 
-    long getBulkSize();
-
-    BulkRequestHandler getBulkRequestHandler();
-
     void add(ActionRequest<?> request);
 
-    boolean awaitFlush(long timeout, TimeUnit unit) throws InterruptedException, IOException;
+    boolean waitForBulkResponses(long timeout, TimeUnit unit);
+
+    BulkMetric getBulkMetric();
+
+    Throwable getLastBulkError();
+
+    void setMaxBulkActions(int bulkActions);
+
+    int getMaxBulkActions();
+
+    void setMaxBulkVolume(long bulkSize);
+
+    long getMaxBulkVolume();
 }
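A hedged usage sketch of the merged interface above, assuming an already initialized BulkClient named bulkClient and an IndexDefinition named indexDefinition. The call sequence is inferred from the client code later in this diff, not copied from the repository.

// Sketch only: the BulkController indirection is gone, the client exposes the processor directly.
BulkProcessor processor = bulkClient.getBulkController();   // accessor name kept, return type changed
processor.startBulkMode(indexDefinition);                    // widen refresh_interval for bulk loading
bulkClient.index(indexDefinition, "1", true, "{\"key\":\"value\"}"); // routed through processor.add(...)
bulkClient.flush();                                          // push any buffered actions
bulkClient.waitForResponses(30L, TimeUnit.SECONDS);          // drains the concurrency semaphore
processor.stopBulkMode(indexDefinition);                     // flush, wait, restore refresh_interval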
BulkRequestHandler (deleted):

@@ -1,18 +0,0 @@
-package org.xbib.elx.api;
-
-import org.elasticsearch.action.bulk.BulkRequest;
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-public interface BulkRequestHandler {
-
-    void execute(BulkRequest bulkRequest);
-
-    boolean flush(long timeout, TimeUnit unit) throws IOException, InterruptedException;
-
-    int getPermits();
-
-    void increase();
-
-    void reduce();
-}
AbstractBasicClient:

@@ -45,7 +45,6 @@ public abstract class AbstractBasicClient implements BasicClient {
 
     @Override
     public void setClient(ElasticsearchClient client) {
-        logger.log(Level.DEBUG, "setting client = " + client);
         this.client = client;
     }
 
AbstractBulkClient:

@@ -1,7 +1,5 @@
 package org.xbib.elx.common;
 
-import com.google.common.base.Charsets;
-import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
@@ -20,7 +18,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.xbib.elx.api.BulkClient;
-import org.xbib.elx.api.BulkController;
+import org.xbib.elx.api.BulkProcessor;
 import org.xbib.elx.api.IndexDefinition;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
@@ -32,7 +30,7 @@ public abstract class AbstractBulkClient extends AbstractBasicClient implements
 
     private static final Logger logger = LogManager.getLogger(AbstractBulkClient.class.getName());
 
-    private BulkController bulkController;
+    private BulkProcessor bulkProcessor;
 
     private final AtomicBoolean closed = new AtomicBoolean(true);
 
@@ -40,21 +38,19 @@ public abstract class AbstractBulkClient extends AbstractBasicClient implements
     public void init(Settings settings) throws IOException {
         if (closed.compareAndSet(true, false)) {
             super.init(settings);
-            bulkController = new DefaultBulkController(this);
-            logger.log(Level.INFO, "initializing bulk controller with settings = " + settings.toDelimitedString(','));
-            bulkController.init(settings);
+            bulkProcessor = new DefaultBulkProcessor(this, settings);
         }
     }
 
     @Override
-    public BulkController getBulkController() {
-        return bulkController;
+    public BulkProcessor getBulkController() {
+        return bulkProcessor;
     }
 
     @Override
     public void flush() throws IOException {
-        if (bulkController != null) {
-            bulkController.flush();
+        if (bulkProcessor != null) {
+            bulkProcessor.flush();
         }
     }
 
@@ -62,9 +58,9 @@ public abstract class AbstractBulkClient extends AbstractBasicClient implements
     public void close() throws IOException {
         if (closed.compareAndSet(false, true)) {
             ensureClientIsPresent();
-            if (bulkController != null) {
-                logger.info("closing bulk controller");
-                bulkController.close();
+            if (bulkProcessor != null) {
+                logger.info("closing bulk");
+                bulkProcessor.close();
             }
             closeClient(settings);
         }
@@ -125,7 +121,7 @@ public abstract class AbstractBulkClient extends AbstractBasicClient implements
             return;
         }
         ensureClientIsPresent();
-        bulkController.startBulkMode(indexDefinition);
+        bulkProcessor.startBulkMode(indexDefinition);
     }
 
     @Override
@@ -134,12 +130,12 @@ public abstract class AbstractBulkClient extends AbstractBasicClient implements
             return;
         }
         ensureClientIsPresent();
-        bulkController.stopBulkMode(indexDefinition);
+        bulkProcessor.stopBulkMode(indexDefinition);
     }
 
     @Override
     public BulkClient index(IndexDefinition indexDefinition, String id, boolean create, String source) {
-        return index(indexDefinition, id, create, new BytesArray(source.getBytes(Charsets.UTF_8)));
+        return index(indexDefinition, id, create, new BytesArray(source.getBytes(StandardCharsets.UTF_8)));
     }
 
     @Override
@@ -156,7 +152,7 @@ public abstract class AbstractBulkClient extends AbstractBasicClient implements
     @Override
     public BulkClient index(IndexRequest indexRequest) {
         ensureClientIsPresent();
-        bulkController.bulkIndex(indexRequest);
+        bulkProcessor.add(indexRequest);
         return this;
     }
 
@@ -174,7 +170,7 @@ public abstract class AbstractBulkClient extends AbstractBasicClient implements
     @Override
     public BulkClient delete(DeleteRequest deleteRequest) {
         ensureClientIsPresent();
-        bulkController.bulkDelete(deleteRequest);
+        bulkProcessor.add(deleteRequest);
         return this;
     }
 
@@ -197,14 +193,14 @@ public abstract class AbstractBulkClient extends AbstractBasicClient implements
     @Override
     public BulkClient update(UpdateRequest updateRequest) {
         ensureClientIsPresent();
-        bulkController.bulkUpdate(updateRequest);
+        bulkProcessor.add(updateRequest);
         return this;
     }
 
     @Override
     public boolean waitForResponses(long timeout, TimeUnit timeUnit) {
         ensureClientIsPresent();
-        return bulkController.waitForBulkResponses(timeout, timeUnit);
+        return bulkProcessor.waitForBulkResponses(timeout, timeUnit);
     }
 
     @Override
DefaultBulkController (deleted):

@@ -1,211 +0,0 @@
-package org.xbib.elx.common;
-
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.elasticsearch.action.delete.DeleteRequest;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.update.UpdateRequest;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.common.unit.TimeValue;
-import org.xbib.elx.api.BulkClient;
-import org.xbib.elx.api.BulkController;
-import org.xbib.elx.api.BulkMetric;
-import org.xbib.elx.api.BulkProcessor;
-import org.xbib.elx.api.IndexDefinition;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-public class DefaultBulkController implements BulkController {
-
-    private static final Logger logger = LogManager.getLogger(DefaultBulkController.class);
-
-    private final BulkClient bulkClient;
-
-    private BulkProcessor bulkProcessor;
-
-    private DefaultBulkListener bulkListener;
-
-    private long maxWaitTime;
-
-    private TimeUnit maxWaitTimeUnit;
-
-    private final AtomicBoolean active;
-
-    public DefaultBulkController(BulkClient bulkClient) {
-        this.bulkClient = bulkClient;
-        this.active = new AtomicBoolean(false);
-    }
-
-    @Override
-    public void init(Settings settings) {
-        String maxWaitTimeStr = settings.get(Parameters.MAX_WAIT_BULK_RESPONSE.getName(),
-                Parameters.MAX_WAIT_BULK_RESPONSE.getString());
-        TimeValue maxWaitTimeValue = TimeValue.parseTimeValue(maxWaitTimeStr,
-                TimeValue.timeValueSeconds(30), "");
-        this.maxWaitTime = maxWaitTimeValue.seconds();
-        this.maxWaitTimeUnit = TimeUnit.SECONDS;
-        int maxActionsPerRequest = settings.getAsInt(Parameters.MAX_ACTIONS_PER_REQUEST.getName(),
-                Parameters.MAX_ACTIONS_PER_REQUEST.getInteger());
-        int maxConcurrentRequests = settings.getAsInt(Parameters.MAX_CONCURRENT_REQUESTS.getName(),
-                Parameters.MAX_CONCURRENT_REQUESTS.getInteger());
-        String flushIngestIntervalStr = settings.get(Parameters.FLUSH_INTERVAL.getName(),
-                Parameters.FLUSH_INTERVAL.getString());
-        TimeValue flushIngestInterval = TimeValue.parseTimeValue(flushIngestIntervalStr,
-                TimeValue.timeValueSeconds(30), "");
-        ByteSizeValue maxVolumePerRequest = settings.getAsBytesSize(Parameters.MAX_VOLUME_PER_REQUEST.getName(),
-                ByteSizeValue.parseBytesSizeValue(Parameters.MAX_VOLUME_PER_REQUEST.getString(), "1m"));
-        boolean enableBulkLogging = settings.getAsBoolean(Parameters.ENABLE_BULK_LOGGING.getName(),
-                Parameters.ENABLE_BULK_LOGGING.getBoolean());
-        boolean failOnBulkError = settings.getAsBoolean(Parameters.FAIL_ON_BULK_ERROR.getName(),
-                Parameters.FAIL_ON_BULK_ERROR.getBoolean());
-        int responseTimeCount = settings.getAsInt(Parameters.RESPONSE_TIME_COUNT.getName(),
-                Parameters.RESPONSE_TIME_COUNT.getInteger());
-        this.bulkListener = new DefaultBulkListener(this,
-                enableBulkLogging, failOnBulkError, responseTimeCount);
-        this.bulkProcessor = DefaultBulkProcessor.builder(bulkClient.getClient(), bulkListener)
-                .setBulkActions(maxActionsPerRequest)
-                .setConcurrentRequests(maxConcurrentRequests)
-                .setFlushInterval(flushIngestInterval)
-                .setBulkSize(maxVolumePerRequest)
-                .build();
-        this.active.set(true);
-        if (logger.isInfoEnabled()) {
-            logger.info("bulk processor now active with maxWaitTime = {} maxActionsPerRequest = {} maxConcurrentRequests = {} " +
-                    "flushIngestInterval = {} maxVolumePerRequest = {} " +
-                    "bulk logging = {} fail on bulk error = {} " +
-                    "logger debug = {} from settings = {}",
-                    maxWaitTimeStr, maxActionsPerRequest, maxConcurrentRequests,
-                    flushIngestInterval, maxVolumePerRequest,
-                    enableBulkLogging, failOnBulkError,
-                    logger.isDebugEnabled(), settings.toDelimitedString(','));
-        }
-    }
-
-    @Override
-    public BulkProcessor getBulkProcessor() {
-        return bulkProcessor;
-    }
-
-    @Override
-    public BulkMetric getBulkMetric() {
-        return bulkListener != null ? bulkListener.getBulkMetric() : null;
-    }
-
-    @Override
-    public Throwable getLastBulkError() {
-        return bulkListener != null ? bulkListener.getLastBulkError() : null;
-    }
-
-    @Override
-    public void inactivate() {
-        this.active.set(false);
-    }
-
-    @Override
-    public void startBulkMode(IndexDefinition indexDefinition) throws IOException {
-        String indexName = indexDefinition.getFullIndexName();
-        if (indexDefinition.getStartBulkRefreshSeconds() != 0) {
-            bulkClient.updateIndexSetting(indexName, "refresh_interval",
-                    indexDefinition.getStartBulkRefreshSeconds() + "s",
-                    30L, TimeUnit.SECONDS);
-        }
-    }
-
-    @Override
-    public void bulkIndex(IndexRequest indexRequest) {
-        ensureActiveAndBulk();
-        try {
-            bulkProcessor.add(indexRequest);
-        } catch (Exception e) {
-            if (logger.isErrorEnabled()) {
-                logger.error("bulk add of index failed: " + e.getMessage(), e);
-            }
-            inactivate();
-        }
-    }
-
-    @Override
-    public void bulkDelete(DeleteRequest deleteRequest) {
-        ensureActiveAndBulk();
-        try {
-            bulkProcessor.add(deleteRequest);
-        } catch (Exception e) {
-            if (logger.isErrorEnabled()) {
-                logger.error("bulk add of delete failed: " + e.getMessage(), e);
-            }
-            inactivate();
-        }
-    }
-
-    @Override
-    public void bulkUpdate(UpdateRequest updateRequest) {
-        ensureActiveAndBulk();
-        try {
-            bulkProcessor.add(updateRequest);
-        } catch (Exception e) {
-            if (logger.isErrorEnabled()) {
-                logger.error("bulk add of update failed: " + e.getMessage(), e);
-            }
-            inactivate();
-        }
-    }
-
-    @Override
-    public boolean waitForBulkResponses(long timeout, TimeUnit timeUnit) {
-        try {
-            if (bulkProcessor != null) {
-                bulkProcessor.flush();
-                return bulkProcessor.awaitFlush(timeout, timeUnit);
-            }
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            logger.error("interrupted");
-            return false;
-        } catch (IOException e) {
-            logger.error(e.getMessage(), e);
-            return false;
-        }
-        return false;
-    }
-
-    @Override
-    public void stopBulkMode(IndexDefinition indexDefinition) throws IOException {
-        flush();
-        if (waitForBulkResponses(indexDefinition.getMaxWaitTime(), indexDefinition.getMaxWaitTimeUnit())) {
-            if (indexDefinition.getStopBulkRefreshSeconds() != 0) {
-                bulkClient.updateIndexSetting(indexDefinition.getFullIndexName(),
-                        "refresh_interval",
-                        indexDefinition.getStopBulkRefreshSeconds() + "s",
-                        30L, TimeUnit.SECONDS);
-            }
-        }
-    }
-
-    @Override
-    public void flush() throws IOException {
-        if (bulkProcessor != null) {
-            bulkProcessor.flush();
-        }
-    }
-
-    @Override
-    public void close() throws IOException {
-        flush();
-        bulkClient.waitForResponses(maxWaitTime, maxWaitTimeUnit);
-        if (bulkProcessor != null) {
-            bulkProcessor.close();
-        }
-    }
-
-    private void ensureActiveAndBulk() {
-        if (!active.get()) {
-            throw new IllegalStateException("inactive");
-        }
-        if (bulkProcessor == null) {
-            throw new UnsupportedOperationException("bulk processor not present");
-        }
-    }
-}
DefaultBulkListener:

@@ -5,21 +5,19 @@ import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
-import org.xbib.elx.api.BulkController;
 import org.xbib.elx.api.BulkListener;
 import org.xbib.elx.api.BulkMetric;
+import org.xbib.elx.api.BulkProcessor;
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.LongSummaryStatistics;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.LongStream;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
 
 public class DefaultBulkListener implements BulkListener {
 
     private final Logger logger = LogManager.getLogger(DefaultBulkListener.class.getName());
 
-    private final BulkController bulkController;
+    private final BulkProcessor bulkProcessor;
 
     private final BulkMetric bulkMetric;
 
@@ -29,20 +27,15 @@ public class DefaultBulkListener implements BulkListener {
 
     private Throwable lastBulkError;
 
-    private final int ringBufferSize;
-
-    private final LongRingBuffer ringBuffer;
-
-    public DefaultBulkListener(BulkController bulkController,
+    public DefaultBulkListener(BulkProcessor bulkProcessor,
+                               ScheduledThreadPoolExecutor scheduler,
                                boolean isBulkLoggingEnabled,
                                boolean failOnError,
                                int ringBufferSize) {
-        this.bulkController = bulkController;
+        this.bulkProcessor = bulkProcessor;
         this.isBulkLoggingEnabled = isBulkLoggingEnabled;
         this.failOnError = failOnError;
-        this.ringBufferSize = ringBufferSize;
-        this.ringBuffer = new LongRingBuffer(ringBufferSize);
-        this.bulkMetric = new DefaultBulkMetric();
+        this.bulkMetric = new DefaultBulkMetric(bulkProcessor, scheduler, ringBufferSize);
         bulkMetric.start();
     }
 
@@ -59,7 +52,7 @@ public class DefaultBulkListener implements BulkListener {
         bulkMetric.getCurrentIngestNumDocs().inc(n);
         bulkMetric.getTotalIngestSizeInBytes().inc(request.estimatedSizeInBytes());
         if (isBulkLoggingEnabled && logger.isDebugEnabled()) {
-            logger.debug("before bulk [{}] [actions={}] [bytes={}] [concurrent requests={}]",
+            logger.debug("before bulk [{}] [actions={}] [bytes={}] [requests={}]",
                     executionId,
                     request.numberOfActions(),
                     request.estimatedSizeInBytes(),
@@ -69,22 +62,10 @@ public class DefaultBulkListener implements BulkListener {
 
     @Override
     public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
+        bulkMetric.recalculate(request, response);
         long l = bulkMetric.getCurrentIngest().getCount();
         bulkMetric.getCurrentIngest().dec();
         bulkMetric.getSucceeded().inc(response.getItems().length);
-        if (ringBufferSize > 0 && ringBuffer.add(response.getTook().millis(), request.estimatedSizeInBytes()) == 0) {
-            LongSummaryStatistics stat1 = ringBuffer.longStreamValues1().summaryStatistics();
-            LongSummaryStatistics stat2 = ringBuffer.longStreamValues2().summaryStatistics();
-            if (isBulkLoggingEnabled && logger.isDebugEnabled()) {
-                logger.debug("bulk response millis: avg = " + stat1.getAverage() +
-                        " min = " + stat1.getMin() +
-                        " max = " + stat1.getMax() +
-                        " size: avg = " + stat2.getAverage() +
-                        " min = " + stat2.getMin() +
-                        " max = " + stat2.getMax() +
-                        " throughput: " + (stat2.getAverage() / stat1.getAverage()) + " bytes/ms");
-            }
-        }
         int n = 0;
         for (BulkItemResponse itemResponse : response.getItems()) {
             bulkMetric.getCurrentIngest().dec(itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId());
@@ -95,7 +76,7 @@ public class DefaultBulkListener implements BulkListener {
             }
         }
         if (isBulkLoggingEnabled && logger.isDebugEnabled()) {
-            logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] [concurrent requests={}]",
+            logger.debug("after bulk [{}] [succeeded={}] [failed={}] [{}ms] [requests={}]",
                     executionId,
                     bulkMetric.getSucceeded().getCount(),
                     bulkMetric.getFailed().getCount(),
@@ -123,7 +104,7 @@ public class DefaultBulkListener implements BulkListener {
         if (logger.isErrorEnabled()) {
             logger.error("after bulk [" + executionId + "] error", failure);
         }
-        bulkController.inactivate();
+        bulkProcessor.setEnabled(false);
     }
 
     @Override
@@ -135,37 +116,4 @@ public class DefaultBulkListener implements BulkListener {
     public void close() throws IOException {
         bulkMetric.close();
     }
-
-    private static class LongRingBuffer {
-
-        private final Long[] values1, values2;
-
-        private final int limit;
-
-        private final AtomicInteger index;
-
-        public LongRingBuffer(int limit) {
-            this.values1 = new Long[limit];
-            this.values2 = new Long[limit];
-            Arrays.fill(values1, -1L);
-            Arrays.fill(values2, -1L);
-            this.limit = limit;
-            this.index = new AtomicInteger();
-        }
-
-        public int add(Long v1, Long v2) {
-            int i = index.incrementAndGet() % limit;
-            values1[i] = v1;
-            values2[i] = v2;
-            return i;
-        }
-
-        public LongStream longStreamValues1() {
-            return Arrays.stream(values1).filter(v -> v != -1L).mapToLong(Long::longValue);
-        }
-
-        public LongStream longStreamValues2() {
-            return Arrays.stream(values2).filter(v -> v != -1L).mapToLong(Long::longValue);
-        }
-    }
 }
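The LongRingBuffer inner class removed from the listener above reappears inside DefaultBulkMetric in the next section. As a reading aid, this is the gist of that structure — a fixed-size buffer holding the last N (took-millis, size-bytes) pairs so a window average and a bytes/ms throughput can be computed. This is a hedged re-sketch with illustrative names, not the removed code itself:

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.LongStream;

// Gist of the two-track ring buffer: slot i holds one observation (millis, bytes);
// -1 marks an empty slot so partially filled windows are excluded from the statistics.
class PairRingBuffer {
    private final long[] millis, bytes;
    private final int limit;
    private final AtomicInteger index = new AtomicInteger();

    PairRingBuffer(int limit) {
        this.limit = limit;
        this.millis = new long[limit];
        this.bytes = new long[limit];
        Arrays.fill(millis, -1L);
        Arrays.fill(bytes, -1L);
    }

    // Returns 0 whenever the write index wraps around, i.e. a full window has been collected.
    int add(long tookMillis, long sizeInBytes) {
        int i = index.incrementAndGet() % limit;
        millis[i] = tookMillis;
        bytes[i] = sizeInBytes;
        return i;
    }

    double averageThroughputBytesPerMillis() {
        double avgMillis = LongStream.of(millis).filter(v -> v != -1L).average().orElse(1.0);
        double avgBytes = LongStream.of(bytes).filter(v -> v != -1L).average().orElse(0.0);
        return avgBytes / avgMillis;
    }
}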
DefaultBulkMetric:

@@ -1,15 +1,25 @@
 package org.xbib.elx.common;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
 import org.xbib.elx.api.BulkMetric;
+import org.xbib.elx.api.BulkProcessor;
 import org.xbib.metrics.api.Count;
 import org.xbib.metrics.api.Metered;
 import org.xbib.metrics.common.CountMetric;
 import org.xbib.metrics.common.Meter;
 
-import java.util.concurrent.Executors;
+import java.util.LongSummaryStatistics;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
 
 public class DefaultBulkMetric implements BulkMetric {
 
+    private static final Logger logger = LogManager.getLogger(DefaultBulkMetric.class.getName());
+
+    private final BulkProcessor bulkProcessor;
+
     private final Meter totalIngest;
 
     private final Count totalIngestSizeInBytes;
@@ -28,14 +38,31 @@ public class DefaultBulkMetric implements BulkMetric {
 
     private Long stopped;
 
-    public DefaultBulkMetric() {
-        totalIngest = new Meter(Executors.newSingleThreadScheduledExecutor());
-        totalIngestSizeInBytes = new CountMetric();
-        currentIngest = new CountMetric();
-        currentIngestNumDocs = new CountMetric();
-        submitted = new CountMetric();
-        succeeded = new CountMetric();
-        failed = new CountMetric();
+    private final int ringBufferSize;
+
+    private final LongRingBuffer ringBuffer;
+
+    private Double lastThroughput;
+
+    private long currentMaxVolume;
+
+    private int currentPermits;
+
+    public DefaultBulkMetric(BulkProcessor bulkProcessor,
+                             ScheduledThreadPoolExecutor scheduledThreadPoolExecutor,
+                             int ringBufferSize) {
+        this.bulkProcessor = bulkProcessor;
+        this.totalIngest = new Meter(scheduledThreadPoolExecutor);
+        this.ringBufferSize = ringBufferSize;
+        this.ringBuffer = new LongRingBuffer(ringBufferSize);
+        this.totalIngestSizeInBytes = new CountMetric();
+        this.currentIngest = new CountMetric();
+        this.currentIngestNumDocs = new CountMetric();
+        this.submitted = new CountMetric();
+        this.succeeded = new CountMetric();
+        this.failed = new CountMetric();
+        this.currentMaxVolume = 1024;
+        this.currentPermits = 1;
     }
 
     @Override
@@ -100,4 +127,56 @@ public class DefaultBulkMetric implements BulkMetric {
         stop();
         totalIngest.shutdown();
     }
+
+    private int x = 0;
+
+    @Override
+    public void recalculate(BulkRequest request, BulkResponse response) {
+        if (ringBufferSize > 0 && ringBuffer.add(response.getTook().millis(), request.estimatedSizeInBytes()) == 0) {
+            x++;
+            LongSummaryStatistics stat1 = ringBuffer.longStreamValues1().summaryStatistics();
+            LongSummaryStatistics stat2 = ringBuffer.longStreamValues2().summaryStatistics();
+            double throughput = stat2.getAverage() / stat1.getAverage();
+            double delta = lastThroughput != null ? throughput - lastThroughput : 0.0d;
+            if (logger.isDebugEnabled()) {
+                logger.debug("metric: avg = " + stat1.getAverage() +
+                        " min = " + stat1.getMin() +
+                        " max = " + stat1.getMax() +
+                        " size: avg = " + stat2.getAverage() +
+                        " min = " + stat2.getMin() +
+                        " max = " + stat2.getMax() +
+                        " last throughput: " + lastThroughput + " bytes/ms" +
+                        " throughput: " + throughput + " bytes/ms" +
+                        " delta = " + delta +
+                        " vol = " + currentMaxVolume);
+            }
+            if (lastThroughput == null || throughput < 10000) {
+                double k = 0.5;
+                double d = (1 / (1 + Math.exp(-(((double)x)) * k)));
+                logger.debug("inc: x = " + x + " d = " + d);
+                currentMaxVolume += d * currentMaxVolume;
+                if (currentMaxVolume > 5 + 1024 * 1024) {
+                    currentMaxVolume = 5 * 1024 * 1024;
+                }
+                bulkProcessor.setMaxBulkVolume(currentMaxVolume);
+                if (logger.isDebugEnabled()) {
+                    logger.debug("metric: increase volume to " + currentMaxVolume);
+                }
+            } else if (delta < -100) {
+                double k = 0.5;
+                double d = (1 / (1 + Math.exp(-(((double)x)) * k)));
+                d = -1/d;
+                logger.debug("dec: x = " + x + " d = " + d);
+                currentMaxVolume += d * currentMaxVolume;
+                if (currentMaxVolume > 5 + 1024 * 1024) {
+                    currentMaxVolume = 5 * 1024 * 1024;
+                }
+                bulkProcessor.setMaxBulkVolume(currentMaxVolume);
+                if (logger.isDebugEnabled()) {
+                    logger.debug("metric: decrease volume to " + currentMaxVolume);
+                }
+            }
+            lastThroughput = throughput;
+        }
+    }
 }
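To make the adjustment step in recalculate() above concrete, here is a small, self-contained demo of the sigmoid damping factor it uses. The numbers are illustrative only; the 5 MB cap and the throughput/delta conditions from the real method are omitted.

// Worked example of d = 1 / (1 + e^(-k * x)) with k = 0.5, which approaches 1
// as more full adjustment windows (x) have been observed.
public class DampingDemo {
    public static void main(String[] args) {
        double k = 0.5;
        long volume = 1024L;                       // starting volume, as in the constructor above
        for (int x = 1; x <= 5; x++) {
            double d = 1.0 / (1.0 + Math.exp(-x * k));
            volume += (long) (d * volume);         // growth step: roughly +62 %, +73 %, +82 %, ...
            System.out.printf("x=%d d=%.2f volume=%d%n", x, d, volume);
        }
        // when throughput degrades, the real code applies d = -1/d instead, cutting the volume sharply
    }
}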
@ -1,26 +1,29 @@
|
||||||
package org.xbib.elx.common;
|
package org.xbib.elx.common;
|
||||||
|
|
||||||
|
import org.apache.logging.log4j.LogManager;
|
||||||
|
import org.apache.logging.log4j.Logger;
|
||||||
import org.elasticsearch.action.ActionListener;
|
import org.elasticsearch.action.ActionListener;
|
||||||
import org.elasticsearch.action.ActionRequest;
|
import org.elasticsearch.action.ActionRequest;
|
||||||
import org.elasticsearch.action.bulk.BulkAction;
|
import org.elasticsearch.action.bulk.BulkAction;
|
||||||
import org.elasticsearch.action.bulk.BulkRequest;
|
import org.elasticsearch.action.bulk.BulkRequest;
|
||||||
import org.elasticsearch.action.bulk.BulkResponse;
|
import org.elasticsearch.action.bulk.BulkResponse;
|
||||||
import org.elasticsearch.client.ElasticsearchClient;
|
import org.elasticsearch.client.ElasticsearchClient;
|
||||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||||
import org.elasticsearch.common.unit.TimeValue;
|
import org.elasticsearch.common.unit.TimeValue;
|
||||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||||
import org.xbib.elx.api.BulkListener;
|
import org.xbib.elx.api.BulkClient;
|
||||||
|
import org.xbib.elx.api.BulkMetric;
|
||||||
import org.xbib.elx.api.BulkProcessor;
|
import org.xbib.elx.api.BulkProcessor;
|
||||||
import org.xbib.elx.api.BulkRequestHandler;
|
import org.xbib.elx.api.IndexDefinition;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.Objects;
|
|
||||||
import java.util.concurrent.Executors;
|
import java.util.concurrent.Executors;
|
||||||
import java.util.concurrent.ScheduledFuture;
|
import java.util.concurrent.ScheduledFuture;
|
||||||
import java.util.concurrent.ScheduledThreadPoolExecutor;
|
import java.util.concurrent.ScheduledThreadPoolExecutor;
|
||||||
import java.util.concurrent.Semaphore;
|
import java.util.concurrent.Semaphore;
|
||||||
import java.util.concurrent.TimeUnit;
|
import java.util.concurrent.TimeUnit;
|
||||||
|
import java.util.concurrent.atomic.AtomicBoolean;
|
||||||
import java.util.concurrent.atomic.AtomicLong;
|
import java.util.concurrent.atomic.AtomicLong;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -29,343 +32,238 @@ import java.util.concurrent.atomic.AtomicLong;
|
||||||
* (either based on number of actions, based on the size, or time), and
|
* (either based on number of actions, based on the size, or time), and
|
||||||
* to easily control the number of concurrent bulk
|
* to easily control the number of concurrent bulk
|
||||||
* requests allowed to be executed in parallel.
|
* requests allowed to be executed in parallel.
|
||||||
* In order to create a new bulk processor, use the {@link Builder}.
|
|
||||||
*/
|
*/
|
||||||
public class DefaultBulkProcessor implements BulkProcessor {
|
public class DefaultBulkProcessor implements BulkProcessor {
|
||||||
|
|
||||||
|
private static final Logger logger = LogManager.getLogger(DefaultBulkProcessor.class);
|
||||||
|
|
||||||
|
private final BulkClient bulkClient;
|
||||||
|
|
||||||
|
private final AtomicBoolean enabled;
|
||||||
|
|
||||||
|
private final ElasticsearchClient client;
|
||||||
|
|
||||||
|
private final DefaultBulkListener bulkListener;
|
||||||
|
|
||||||
private final ScheduledThreadPoolExecutor scheduler;
|
private final ScheduledThreadPoolExecutor scheduler;
|
||||||
|
|
||||||
private final ScheduledFuture<?> scheduledFuture;
|
private ScheduledFuture<?> scheduledFuture;
|
||||||
|
|
||||||
private final BulkRequestHandler bulkRequestHandler;
|
|
||||||
|
|
||||||
private BulkRequest bulkRequest;
|
private BulkRequest bulkRequest;
|
||||||
|
|
||||||
private long bulkSize;
|
private long maxBulkVolume;
|
||||||
|
|
||||||
private int bulkActions;
|
private int maxBulkActions;
|
||||||
|
|
||||||
private volatile boolean closed;
|
private final AtomicBoolean closed;
|
||||||
|
|
||||||
private DefaultBulkProcessor(ElasticsearchClient client,
|
private final AtomicLong executionIdGen;
|
||||||
BulkListener bulkListener,
|
|
||||||
String name,
|
private ResizeableSemaphore semaphore;
|
||||||
int concurrentRequests,
|
|
||||||
int bulkActions,
|
private int permits;
|
||||||
ByteSizeValue bulkSize,
|
|
||||||
TimeValue flushInterval) {
|
public DefaultBulkProcessor(BulkClient bulkClient, Settings settings) {
|
||||||
this.closed = false;
|
this.bulkClient = bulkClient;
|
||||||
this.bulkActions = bulkActions;
|
int maxActionsPerRequest = settings.getAsInt(Parameters.MAX_ACTIONS_PER_REQUEST.getName(),
|
||||||
this.bulkSize = bulkSize.getBytes();
|
Parameters.MAX_ACTIONS_PER_REQUEST.getInteger());
|
||||||
this.bulkRequest = new BulkRequest();
|
int maxConcurrentRequests = settings.getAsInt(Parameters.MAX_CONCURRENT_REQUESTS.getName(),
|
||||||
this.bulkRequestHandler = concurrentRequests == 0 ?
|
Parameters.MAX_CONCURRENT_REQUESTS.getInteger());
|
||||||
new SyncBulkRequestHandler(client, bulkListener) :
|
String flushIngestIntervalStr = settings.get(Parameters.FLUSH_INTERVAL.getName(),
|
||||||
new AsyncBulkRequestHandler(client, bulkListener, concurrentRequests);
|
Parameters.FLUSH_INTERVAL.getString());
|
||||||
if (flushInterval != null) {
|
TimeValue flushInterval = TimeValue.parseTimeValue(flushIngestIntervalStr,
|
||||||
this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
|
TimeValue.timeValueSeconds(30), "");
|
||||||
EsExecutors.daemonThreadFactory(name != null ? "[" + name + "]" : "" + "bulk_processor"));
|
ByteSizeValue maxVolumePerRequest = settings.getAsBytesSize(Parameters.MAX_VOLUME_PER_REQUEST.getName(),
|
||||||
|
ByteSizeValue.parseBytesSizeValue(Parameters.MAX_VOLUME_PER_REQUEST.getString(), "1m"));
|
||||||
|
boolean enableBulkLogging = settings.getAsBoolean(Parameters.ENABLE_BULK_LOGGING.getName(),
|
||||||
|
Parameters.ENABLE_BULK_LOGGING.getBoolean());
|
||||||
|
boolean failOnBulkError = settings.getAsBoolean(Parameters.FAIL_ON_BULK_ERROR.getName(),
|
||||||
|
Parameters.FAIL_ON_BULK_ERROR.getBoolean());
|
||||||
|
int ringBufferSize = settings.getAsInt(Parameters.RESPONSE_TIME_COUNT.getName(),
|
||||||
|
Parameters.RESPONSE_TIME_COUNT.getInteger());
|
||||||
|
this.client = bulkClient.getClient();
|
||||||
|
this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(2,
|
||||||
|
EsExecutors.daemonThreadFactory("elx-bulk-processor"));
|
||||||
this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
|
this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
|
||||||
this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
|
this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
|
||||||
this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(),
|
if (flushInterval.millis() > 0L) {
|
||||||
|
this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(this::flush, flushInterval.millis(),
|
||||||
flushInterval.millis(), TimeUnit.MILLISECONDS);
|
flushInterval.millis(), TimeUnit.MILLISECONDS);
|
||||||
|
}
|
||||||
|
this.bulkListener = new DefaultBulkListener(this, scheduler,
|
||||||
|
enableBulkLogging, failOnBulkError, ringBufferSize);
|
||||||
|
this.permits = maxConcurrentRequests;
|
||||||
|
this.maxBulkActions = maxActionsPerRequest;
|
||||||
|
this.maxBulkVolume = maxVolumePerRequest != null ? maxVolumePerRequest.getBytes() : -1;
|
||||||
|
this.bulkRequest = new BulkRequest();
|
||||||
|
this.closed = new AtomicBoolean(false);
|
||||||
|
this.enabled = new AtomicBoolean(false);
|
||||||
|
this.executionIdGen = new AtomicLong();
|
||||||
|
if (permits > 0) {
|
||||||
|
this.semaphore = new ResizeableSemaphore(permits);
|
||||||
|
}
|
||||||
|
if (logger.isInfoEnabled()) {
|
||||||
|
logger.info("bulk processor now active with maxActionsPerRequest = {} maxConcurrentRequests = {} " +
|
||||||
|
"flushInterval = {} maxVolumePerRequest = {} " +
|
||||||
|
"bulk logging = {} fail on bulk error = {} " +
|
||||||
|
"logger debug = {} from settings = {}",
|
||||||
|
maxActionsPerRequest, maxConcurrentRequests,
|
||||||
|
flushInterval, maxVolumePerRequest,
|
||||||
|
enableBulkLogging, failOnBulkError,
|
||||||
|
logger.isDebugEnabled(), settings.toDelimitedString(','));
|
||||||
|
}
|
||||||
|
setEnabled(true);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void setEnabled(boolean enabled) {
|
||||||
|
this.enabled.set(enabled);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void startBulkMode(IndexDefinition indexDefinition) throws IOException {
|
||||||
|
String indexName = indexDefinition.getFullIndexName();
|
||||||
|
int interval = indexDefinition.getStartBulkRefreshSeconds();
|
||||||
|
if (interval != 0) {
|
||||||
|
logger.info("starting bulk on " + indexName + " with new refresh interval " + interval);
|
||||||
|
bulkClient.updateIndexSetting(indexName, "refresh_interval", interval + "s", 30L, TimeUnit.SECONDS);
|
||||||
} else {
|
} else {
|
||||||
this.scheduler = null;
|
logger.warn("ignoring starting bulk on " + indexName + " with refresh interval " + interval);
|
||||||
this.scheduledFuture = null;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public static Builder builder(ElasticsearchClient client, BulkListener bulkListener) {
|
|
||||||
Objects.requireNonNull(bulkListener, "A listener for the BulkProcessor is required but null");
|
|
||||||
return new Builder(client, bulkListener);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void setBulkActions(int bulkActions) {
|
public void stopBulkMode(IndexDefinition indexDefinition) throws IOException {
|
||||||
this.bulkActions = bulkActions;
|
String indexName = indexDefinition.getFullIndexName();
|
||||||
|
int interval = indexDefinition.getStopBulkRefreshSeconds();
|
||||||
|
flush();
|
||||||
|
if (waitForBulkResponses(indexDefinition.getMaxWaitTime(), indexDefinition.getMaxWaitTimeUnit())) {
|
||||||
|
if (interval != 0) {
|
||||||
|
logger.info("stopping bulk on " + indexName + " with new refresh interval " + interval);
|
||||||
|
bulkClient.updateIndexSetting(indexName, "refresh_interval", interval + "s", 30L, TimeUnit.SECONDS);
|
||||||
|
} else {
|
||||||
|
logger.warn("ignoring stopping bulk on " + indexName + " with refresh interval " + interval);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public int getBulkActions() {
|
public void setMaxBulkActions(int bulkActions) {
|
||||||
return bulkActions;
|
this.maxBulkActions = bulkActions;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void setBulkSize(long bulkSize) {
|
public int getMaxBulkActions() {
|
||||||
this.bulkSize = bulkSize;
|
return maxBulkActions;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public long getBulkSize() {
|
public void setMaxBulkVolume(long bulkSize) {
|
||||||
return bulkSize;
|
this.maxBulkVolume = bulkSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
public BulkRequestHandler getBulkRequestHandler() {
|
@Override
|
||||||
return bulkRequestHandler;
|
public long getMaxBulkVolume() {
|
||||||
|
return maxBulkVolume;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public BulkMetric getBulkMetric() {
|
||||||
|
return bulkListener.getBulkMetric();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Throwable getLastBulkError() {
|
||||||
|
return bulkListener.getLastBulkError();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public synchronized void add(ActionRequest<?> request) {
|
public synchronized void add(ActionRequest<?> request) {
|
||||||
ensureOpen();
|
ensureOpenAndActive();
|
||||||
bulkRequest.add(request);
|
bulkRequest.add(request);
|
||||||
if ((bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions) ||
|
if ((maxBulkActions != -1 && bulkRequest.numberOfActions() >= maxBulkActions) ||
|
||||||
(bulkSize != -1 && bulkRequest.estimatedSizeInBytes() >= bulkSize)) {
|
(maxBulkVolume != -1 && bulkRequest.estimatedSizeInBytes() >= maxBulkVolume)) {
|
||||||
execute();
|
execute();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public synchronized void flush() {
|
public synchronized void flush() {
|
||||||
ensureOpen();
|
ensureOpenAndActive();
|
||||||
if (bulkRequest.numberOfActions() > 0) {
|
if (bulkRequest.numberOfActions() > 0) {
|
||||||
execute();
|
execute();
|
||||||
}
|
}
|
||||||
|
// do not drain semaphore
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public synchronized boolean awaitFlush(long timeout, TimeUnit unit) throws InterruptedException, IOException {
|
public synchronized boolean waitForBulkResponses(long timeout, TimeUnit unit) {
|
||||||
if (closed) {
|
try {
|
||||||
|
if (closed.get()) {
|
||||||
|
// silently skip closed condition
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (bulkRequest.numberOfActions() > 0) {
|
if (bulkRequest.numberOfActions() > 0) {
|
||||||
execute();
|
execute();
|
||||||
}
|
}
|
||||||
return bulkRequestHandler.flush(timeout, unit);
|
return drainSemaphore(timeout, unit);
|
||||||
|
} catch (InterruptedException e) {
|
||||||
|
Thread.currentThread().interrupt();
|
||||||
|
logger.error("interrupted while waiting for bulk responses");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public synchronized void close() throws IOException {
|
public synchronized void close() throws IOException {
|
||||||
|
if (closed.compareAndSet(false, true)) {
|
||||||
try {
|
try {
|
||||||
if (closed) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
closed = true;
|
|
||||||
if (scheduledFuture != null) {
|
if (scheduledFuture != null) {
|
||||||
scheduledFuture.cancel(true);
|
scheduledFuture.cancel(true);
|
||||||
|
}
|
||||||
|
if (scheduler != null) {
|
||||||
scheduler.shutdown();
|
scheduler.shutdown();
|
||||||
}
|
}
|
||||||
|
// like flush but without ensuring open
|
||||||
if (bulkRequest.numberOfActions() > 0) {
|
if (bulkRequest.numberOfActions() > 0) {
|
||||||
execute();
|
execute();
|
||||||
}
|
}
|
||||||
bulkRequestHandler.flush(0, TimeUnit.NANOSECONDS);
|
drainSemaphore(0L, TimeUnit.NANOSECONDS);
|
||||||
|
bulkListener.close();
|
||||||
} catch (InterruptedException exc) {
|
} catch (InterruptedException exc) {
|
||||||
Thread.currentThread().interrupt();
|
Thread.currentThread().interrupt();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private void ensureOpen() {
|
|
||||||
if (closed) {
|
|
||||||
throw new IllegalStateException("bulk processor already closed");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private void execute() {
|
private void execute() {
|
||||||
BulkRequest myBulkRequest = this.bulkRequest;
|
BulkRequest myBulkRequest = this.bulkRequest;
|
||||||
this.bulkRequest = new BulkRequest();
|
this.bulkRequest = new BulkRequest();
|
||||||
this.bulkRequestHandler.execute(myBulkRequest);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* A builder used to create a build an instance of a bulk processor.
|
|
||||||
*/
|
|
||||||
public static class Builder {
|
|
||||||
|
|
||||||
private final ElasticsearchClient client;
|
|
||||||
|
|
||||||
private final BulkListener bulkListener;
|
|
||||||
|
|
||||||
private String name;
|
|
||||||
|
|
||||||
private int concurrentRequests = 1;
|
|
||||||
|
|
||||||
private int bulkActions = 1000;
|
|
||||||
|
|
||||||
private ByteSizeValue bulkSize = new ByteSizeValue(10, ByteSizeUnit.MB);
|
|
||||||
|
|
||||||
private TimeValue flushInterval = null;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a builder of bulk processor with the client to use and the listener that will be used
|
|
||||||
* to be notified on the completion of bulk requests.
|
|
||||||
*
|
|
||||||
* @param client the client
|
|
||||||
* @param bulkListener the listener
|
|
||||||
*/
|
|
||||||
Builder(ElasticsearchClient client, BulkListener bulkListener) {
|
|
||||||
this.client = client;
|
|
||||||
this.bulkListener = bulkListener;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Sets an optional name to identify this bulk processor.
|
|
||||||
*
|
|
||||||
* @param name name
|
|
||||||
* @return this builder
|
|
||||||
*/
|
|
||||||
public Builder setName(String name) {
|
|
||||||
this.name = name;
|
|
||||||
return this;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Sets the number of concurrent requests allowed to be executed. A value of 0 means that only a single
|
|
||||||
* request will be allowed to be executed. A value of 1 means 1 concurrent request is allowed to be executed
|
|
||||||
* while accumulating new bulk requests. Defaults to {@code 1}.
|
|
||||||
*
|
|
||||||
* @param concurrentRequests maximum number of concurrent requests
|
|
||||||
* @return this builder
|
|
||||||
*/
|
|
||||||
public Builder setConcurrentRequests(int concurrentRequests) {
|
|
||||||
this.concurrentRequests = concurrentRequests;
|
|
||||||
return this;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Sets when to flush a new bulk request based on the number of actions currently added. Defaults to
|
|
||||||
* {@code 1000}. Can be set to {@code -1} to disable it.
|
|
||||||
*
|
|
||||||
* @param bulkActions bulk actions
|
|
||||||
* @return this builder
|
|
||||||
*/
|
|
||||||
public Builder setBulkActions(int bulkActions) {
|
|
||||||
this.bulkActions = bulkActions;
|
|
||||||
return this;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Sets when to flush a new bulk request based on the size of actions currently added. Defaults to
|
|
||||||
* {@code 1mb}. Can be set to {@code -1} to disable it.
|
|
||||||
*
|
|
||||||
* @param bulkSize bulk size
|
|
||||||
* @return this builder
|
|
||||||
*/
|
|
||||||
public Builder setBulkSize(ByteSizeValue bulkSize) {
|
|
||||||
this.bulkSize = bulkSize;
|
|
||||||
return this;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Sets a flush interval flushing *any* bulk actions pending if the interval passes. Defaults to not set.
|
|
||||||
* Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(org.elasticsearch.common.unit.ByteSizeValue)}
|
|
||||||
* can be set to {@code -1} with the flush interval set allowing for complete async processing of bulk actions.
|
|
||||||
*
|
|
||||||
* @param flushInterval flush interval
|
|
||||||
* @return this builder
|
|
||||||
*/
|
|
||||||
public Builder setFlushInterval(TimeValue flushInterval) {
|
|
||||||
this.flushInterval = flushInterval;
|
|
||||||
-            return this;
-        }
-
-        /**
-         * Builds a new bulk processor.
-         *
-         * @return a bulk processor
-         */
-        public DefaultBulkProcessor build() {
-            return new DefaultBulkProcessor(client, bulkListener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
-        }
-    }
-
-    private class Flush implements Runnable {
-
-        @Override
-        public void run() {
-            synchronized (DefaultBulkProcessor.this) {
-                if (closed) {
-                    return;
-                }
-                if (bulkRequest.numberOfActions() > 0) {
-                    execute();
-                }
-            }
-        }
-    }
-
-    private static class SyncBulkRequestHandler implements BulkRequestHandler {
-
-        private final ElasticsearchClient client;
-
-        private final BulkListener bulkListener;
-
-        private final AtomicLong executionIdGen;
-
-        SyncBulkRequestHandler(ElasticsearchClient client, BulkListener bulkListener) {
-            this.client = client;
-            this.bulkListener = bulkListener;
-            this.executionIdGen = new AtomicLong();
-        }
-
-        @Override
-        public void execute(BulkRequest bulkRequest) {
             long executionId = executionIdGen.incrementAndGet();
+            if (semaphore == null) {
                 boolean afterCalled = false;
                 try {
-                    bulkListener.beforeBulk(executionId, bulkRequest);
+                    bulkListener.beforeBulk(executionId, myBulkRequest);
-                    BulkResponse bulkResponse = client.execute(BulkAction.INSTANCE, bulkRequest).actionGet();
+                    BulkResponse bulkResponse = client.execute(BulkAction.INSTANCE, myBulkRequest).actionGet();
                     afterCalled = true;
-                    bulkListener.afterBulk(executionId, bulkRequest, bulkResponse);
+                    bulkListener.afterBulk(executionId, myBulkRequest, bulkResponse);
                 } catch (Exception e) {
                     if (!afterCalled) {
-                        bulkListener.afterBulk(executionId, bulkRequest, e);
+                        bulkListener.afterBulk(executionId, myBulkRequest, e);
                     }
                 }
-        }
+            } else {
-
-        @Override
-        public boolean flush(long timeout, TimeUnit unit) {
-            return true;
-        }
-
-        @Override
-        public int getPermits() {
-            return 1;
-        }
-
-        @Override
-        public void increase() {
-        }
-
-        @Override
-        public void reduce() {
-        }
-    }
-
-    private static class AsyncBulkRequestHandler implements BulkRequestHandler {
-
-        private final ElasticsearchClient client;
-
-        private final BulkListener bulkListener;
-
-        private final ResizeableSemaphore semaphore;
-
-        private final AtomicLong executionIdGen;
-
-        private int permits;
-
-        private AsyncBulkRequestHandler(ElasticsearchClient client,
-                                        BulkListener bulkListener,
-                                        int permits) {
-            this.client = client;
-            this.bulkListener = bulkListener;
-            this.permits = permits;
-            this.semaphore = new ResizeableSemaphore(permits);
-            this.executionIdGen = new AtomicLong();
-        }
-
-        @Override
-        public void execute(BulkRequest bulkRequest) {
             boolean bulkRequestSetupSuccessful = false;
             boolean acquired = false;
-            long executionId = executionIdGen.incrementAndGet();
             try {
-                bulkListener.beforeBulk(executionId, bulkRequest);
+                bulkListener.beforeBulk(executionId, myBulkRequest);
                 semaphore.acquire();
                 acquired = true;
-                client.execute(BulkAction.INSTANCE, bulkRequest, new ActionListener<>() {
+                client.execute(BulkAction.INSTANCE, myBulkRequest, new ActionListener<>() {
                     @Override
                     public void onResponse(BulkResponse response) {
                         try {
-                            bulkListener.afterBulk(executionId, bulkRequest, response);
+                            bulkListener.afterBulk(executionId, myBulkRequest, response);
                         } finally {
                             semaphore.release();
                         }
                     }

@@ -374,7 +272,7 @@ public class DefaultBulkProcessor implements BulkProcessor {

                     @Override
                     public void onFailure(Throwable e) {
                         try {
-                            bulkListener.afterBulk(executionId, bulkRequest, e);
+                            bulkListener.afterBulk(executionId, myBulkRequest, e);
                         } finally {
                             semaphore.release();
                         }
                     }

@@ -383,45 +281,36 @@ public class DefaultBulkProcessor implements BulkProcessor {

                 bulkRequestSetupSuccessful = true;
             } catch (InterruptedException e) {
                 Thread.currentThread().interrupt();
-                bulkListener.afterBulk(executionId, bulkRequest, e);
+                bulkListener.afterBulk(executionId, myBulkRequest, e);
             } catch (Exception e) {
-                bulkListener.afterBulk(executionId, bulkRequest, e);
+                bulkListener.afterBulk(executionId, myBulkRequest, e);
             } finally {
                 if (!bulkRequestSetupSuccessful && acquired) {
                     semaphore.release();
                 }
             }
         }
+    }
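Note: the SyncBulkRequestHandler and AsyncBulkRequestHandler removed above are folded into a single execute path that branches on whether a semaphore is configured: no semaphore means the bulk request is executed synchronously, otherwise a permit bounds the number of asynchronous in-flight requests. The following is a minimal sketch of that merged dispatch, assembled from the lines of this diff; the method name executeBulk and its placement are assumptions, while the names client, bulkListener, semaphore, executionIdGen and myBulkRequest are taken from the diff, so this is an illustration, not the verbatim method of the new DefaultBulkProcessor.

    // Sketch only: merged synchronous/asynchronous bulk dispatch, modeled on the
    // two handler classes removed above. Assumes the enclosing class provides
    // client, bulkListener, an optional Semaphore (null = synchronous) and an
    // AtomicLong executionIdGen, as this diff suggests.
    private void executeBulk(BulkRequest myBulkRequest) {
        long executionId = executionIdGen.incrementAndGet();
        if (semaphore == null) {
            // synchronous path: block on the bulk response
            boolean afterCalled = false;
            try {
                bulkListener.beforeBulk(executionId, myBulkRequest);
                BulkResponse bulkResponse = client.execute(BulkAction.INSTANCE, myBulkRequest).actionGet();
                afterCalled = true;
                bulkListener.afterBulk(executionId, myBulkRequest, bulkResponse);
            } catch (Exception e) {
                if (!afterCalled) {
                    bulkListener.afterBulk(executionId, myBulkRequest, e);
                }
            }
        } else {
            // asynchronous path: a permit bounds the number of in-flight requests,
            // released again in the response/failure callbacks
            boolean setupSuccessful = false;
            boolean acquired = false;
            try {
                bulkListener.beforeBulk(executionId, myBulkRequest);
                semaphore.acquire();
                acquired = true;
                client.execute(BulkAction.INSTANCE, myBulkRequest, new ActionListener<>() {
                    @Override
                    public void onResponse(BulkResponse response) {
                        try {
                            bulkListener.afterBulk(executionId, myBulkRequest, response);
                        } finally {
                            semaphore.release();
                        }
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        try {
                            bulkListener.afterBulk(executionId, myBulkRequest, e);
                        } finally {
                            semaphore.release();
                        }
                    }
                });
                setupSuccessful = true;
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                bulkListener.afterBulk(executionId, myBulkRequest, e);
            } catch (Exception e) {
                bulkListener.afterBulk(executionId, myBulkRequest, e);
            } finally {
                if (!setupSuccessful && acquired) {
                    semaphore.release();
                }
            }
        }
    }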
-        @Override
-        public boolean flush(long timeout, TimeUnit unit) throws IOException, InterruptedException {
-            bulkListener.close();
-            if (semaphore.tryAcquire(permits, timeout, unit)) {
-                semaphore.release(permits);
-                return true;
-            }
-            return false;
-        }
+    private boolean drainSemaphore(long timeValue, TimeUnit timeUnit) throws InterruptedException {
+        if (semaphore != null) {
+            if (semaphore.tryAcquire(permits, timeValue, timeUnit)) {
+                semaphore.release(permits);
+                return true;
+            }
+        }
+        return false;
+    }

-        @Override
-        public int getPermits() {
-            return permits;
-        }
-
-        @Override
-        public void increase() {
-            semaphore.release(1);
-            this.permits++;
-        }
-
-        @Override
-        public void reduce() {
-            semaphore.reducePermits(1);
-            this.permits--;
-        }
-    }
+    private void ensureOpenAndActive() {
+        if (closed.get()) {
+            throw new IllegalStateException("bulk processor is closed");
+        }
+        if (!enabled.get()) {
+            throw new IllegalStateException("bulk processor is no longer enabled");
+        }
+    }

-    @SuppressWarnings("serial")
     private static class ResizeableSemaphore extends Semaphore {

         ResizeableSemaphore(int permits) {
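Note: ResizeableSemaphore exists because java.util.concurrent.Semaphore only exposes reducePermits(int) as a protected method, so shrinking capacity at runtime requires a subclass. A self-contained, plain-JDK sketch of the pattern follows; the class and method names below are illustrative and not taken from this repository, only the subclassing idea is.

    import java.util.concurrent.Semaphore;

    // Standalone illustration of the resizeable-semaphore pattern used above:
    // release(n) grows the permit count, reducePermits(n) shrinks it without
    // blocking threads that currently hold permits.
    public class ResizeableSemaphoreDemo {

        @SuppressWarnings("serial")
        static class ResizeableSemaphore extends Semaphore {
            ResizeableSemaphore(int permits) {
                super(permits);
            }

            void increasePermits(int n) {
                release(n);        // adding permits is releasing more than were acquired
            }

            void decreasePermits(int n) {
                reducePermits(n);  // protected in Semaphore, hence the subclass
            }
        }

        public static void main(String[] args) {
            ResizeableSemaphore semaphore = new ResizeableSemaphore(2);
            System.out.println("permits: " + semaphore.availablePermits()); // 2
            semaphore.increasePermits(1);
            System.out.println("permits: " + semaphore.availablePermits()); // 3
            semaphore.decreasePermits(2);
            System.out.println("permits: " + semaphore.availablePermits()); // 1
        }
    }

In the removed AsyncBulkRequestHandler this is exactly what increase() and reduce() did with release(1) and reducePermits(1); the merged processor keeps the same semaphore type.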
@@ -65,7 +65,7 @@ public class DefaultIndexDefinition implements IndexDefinition {
         setDateTimeFormatter(DateTimeFormatter.ofPattern("yyyyMMdd", Locale.getDefault()));
         setDateTimePattern(Pattern.compile("^(.*?)(\\d+)$"));
         setFullIndexName(index + getDateTimeFormatter().format(LocalDateTime.now()));
-        setMaxWaitTime(Parameters.MAX_WAIT_BULK_RESPONSE_SECONDS.getInteger(), TimeUnit.SECONDS);
+        setMaxWaitTime(30, TimeUnit.SECONDS);
         setShift(false);
         setPrune(false);
         setEnabled(true);

@@ -73,7 +73,9 @@ public class DefaultIndexDefinition implements IndexDefinition {
     public DefaultIndexDefinition(AdminClient adminClient, String index, String type, Settings settings)
             throws IOException {
-        TimeValue timeValue = settings.getAsTime(Parameters.MAX_WAIT_BULK_RESPONSE.getName(), TimeValue.timeValueSeconds(30));
+        String timeValueStr = settings.get(Parameters.MAX_WAIT_BULK_RESPONSE.getName(),
+                Parameters.MAX_WAIT_BULK_RESPONSE.getString());
+        TimeValue timeValue = TimeValue.parseTimeValue(timeValueStr, TimeValue.timeValueSeconds(30), "");
         setMaxWaitTime(timeValue.seconds(), TimeUnit.SECONDS);
         String indexName = settings.get("name", index);
         String indexType = settings.get("type", type);

@@ -83,11 +85,13 @@ public class DefaultIndexDefinition implements IndexDefinition {
         setEnabled(enabled);
         String fullIndexName = adminClient.resolveAlias(indexName).stream().findFirst().orElse(indexName);
         setFullIndexName(fullIndexName);
+        setStartBulkRefreshSeconds(settings.getAsInt(Parameters.START_BULK_REFRESH_SECONDS.getName(),
+                Parameters.START_BULK_REFRESH_SECONDS.getInteger()));
+        setStopBulkRefreshSeconds(settings.getAsInt(Parameters.STOP_BULK_REFRESH_SECONDS.getName(),
+                Parameters.STOP_BULK_REFRESH_SECONDS.getInteger()));
         if (settings.get("settings") != null && settings.get("mapping") != null) {
             setSettings(findSettingsFrom(settings.get("settings")));
             setMappings(findMappingsFrom(settings.get("mapping")));
-            setStartBulkRefreshSeconds(settings.getAsInt(Parameters.START_BULK_REFRESH_SECONDS.getName(), -1));
-            setStopBulkRefreshSeconds(settings.getAsInt(Parameters.STOP_BULK_REFRESH_SECONDS.getName(), -1));
             setReplicaLevel(settings.getAsInt("replica", 0));
         boolean shift = settings.getAsBoolean("shift", false);
         setShift(shift);
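Note: the maximum wait time for bulk responses is now read as a duration string (default "30s") and parsed with TimeValue.parseTimeValue, replacing the removed integer parameter MAX_WAIT_BULK_RESPONSE_SECONDS. A small standalone sketch of that parsing step, using the same Elasticsearch TimeValue calls as the constructor above; the class name and the hard-coded input are illustrative only.

    import org.elasticsearch.common.unit.TimeValue;

    // Sketch: parsing a "bulk.max_wait_response" duration string into seconds.
    public class MaxWaitTimeDemo {
        public static void main(String[] args) {
            String timeValueStr = "45s"; // would normally come from settings.get(...)
            TimeValue timeValue = TimeValue.parseTimeValue(timeValueStr, TimeValue.timeValueSeconds(30), "");
            System.out.println(timeValue.seconds()); // prints 45; falls back to 30s on null input
        }
    }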
@@ -0,0 +1,38 @@
+package org.xbib.elx.common;
+
+import java.util.Arrays;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.LongStream;
+
+public class LongRingBuffer {
+
+    private final Long[] values1, values2;
+
+    private final int limit;
+
+    private final AtomicInteger index;
+
+    public LongRingBuffer(int limit) {
+        this.values1 = new Long[limit];
+        this.values2 = new Long[limit];
+        Arrays.fill(values1, -1L);
+        Arrays.fill(values2, -1L);
+        this.limit = limit;
+        this.index = new AtomicInteger();
+    }
+
+    public int add(Long v1, Long v2) {
+        int i = index.incrementAndGet() % limit;
+        values1[i] = v1;
+        values2[i] = v2;
+        return i;
+    }
+
+    public LongStream longStreamValues1() {
+        return Arrays.stream(values1).filter(v -> v != -1L).mapToLong(Long::longValue);
+    }
+
+    public LongStream longStreamValues2() {
+        return Arrays.stream(values2).filter(v -> v != -1L).mapToLong(Long::longValue);
+    }
+}
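Note: LongRingBuffer is the data structure behind the "dynamic bulk volume adaptation" experiment named in the commit message: it retains the last N pairs of long values (for example response time and request volume per bulk round trip) and exposes each series as a LongStream. How the averages feed back into the bulk volume is not part of this diff; the sketch below shows one possible use, and the adaptation rule in it is invented purely for illustration.

    import org.xbib.elx.common.LongRingBuffer;

    // Sketch: recording (took-millis, request-bytes) per bulk response and deriving
    // averages a processor could use to grow its max volume per request.
    // The "grow by 10% when responses are fast" rule is an assumption, not taken
    // from this commit.
    public class VolumeAdaptationSketch {

        private final LongRingBuffer ringBuffer = new LongRingBuffer(16);

        private long maxVolumePerRequest = 1024L; // start small, as the new "1kb" default does

        void afterBulk(long tookMillis, long requestBytes) {
            ringBuffer.add(tookMillis, requestBytes);
            long avgTook = (long) ringBuffer.longStreamValues1().average().orElse(0.0d);
            long avgVolume = (long) ringBuffer.longStreamValues2().average().orElse(0.0d);
            if (avgTook < 1000L && avgVolume >= maxVolumePerRequest) {
                // responses are fast and requests are filling up: allow bigger bulk requests
                maxVolumePerRequest += maxVolumePerRequest / 10;
            }
        }

        public static void main(String[] args) {
            VolumeAdaptationSketch sketch = new VolumeAdaptationSketch();
            sketch.afterBulk(200L, 1024L);
            sketch.afterBulk(250L, 1100L);
            System.out.println("adapted max volume: " + sketch.maxVolumePerRequest);
        }
    }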
@@ -6,9 +6,7 @@ public enum Parameters {

     MAX_WAIT_BULK_RESPONSE("bulk.max_wait_response", String.class, "30s"),

-    MAX_WAIT_BULK_RESPONSE_SECONDS("bulk.max_wait_response_seconds", Integer.class, 30),
-
-    START_BULK_REFRESH_SECONDS("bulk.start_refresh_seconds", Integer.class, 0),
+    START_BULK_REFRESH_SECONDS("bulk.start_refresh_seconds", Integer.class, -1),

     STOP_BULK_REFRESH_SECONDS("bulk.stop_refresh_seconds", Integer.class, 30),

@@ -16,14 +14,13 @@ public enum Parameters {

     FAIL_ON_BULK_ERROR("bulk.failonerror", Boolean.class, true),

-    MAX_ACTIONS_PER_REQUEST("bulk.max_actions_per_request", Integer.class, 1000),
+    MAX_ACTIONS_PER_REQUEST("bulk.max_actions_per_request", Integer.class, -1),

-    RESPONSE_TIME_COUNT("bulk.response_time_count", Integer.class, 64),
+    RESPONSE_TIME_COUNT("bulk.response_time_count", Integer.class, 16),

-    // 0 = 1 CPU, synchronous requests, > 0 = n + 1 CPUs, asynchronous requests
-    MAX_CONCURRENT_REQUESTS("bulk.max_concurrent_requests", Integer.class, Runtime.getRuntime().availableProcessors() - 1),
+    MAX_CONCURRENT_REQUESTS("bulk.max_concurrent_requests", Integer.class, 1 /*Runtime.getRuntime().availableProcessors() - 1*/),

-    MAX_VOLUME_PER_REQUEST("bulk.max_volume_per_request", String.class, "1mb"),
+    MAX_VOLUME_PER_REQUEST("bulk.max_volume_per_request", String.class, "1kb"),

     FLUSH_INTERVAL("bulk.flush_interval", String.class, "30s");
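Note: with these defaults a bulk request is no longer capped by an action count (bulk.max_actions_per_request is now -1) but by volume and flush interval, starting from a deliberately small "1kb" volume, presumably so the dynamic volume adaptation can grow it. The defaults can still be overridden per client; a sketch in the same put(...) style as the tests below, where the values are examples only and the cluster connection settings are omitted for brevity.

    import org.xbib.elx.common.ClientBuilder;
    import org.xbib.elx.common.Parameters;
    import org.xbib.elx.transport.TransportBulkClient;
    import org.xbib.elx.transport.TransportBulkClientProvider;

    // Sketch: overriding the new bulk defaults through ClientBuilder.
    // Connection settings (cluster name, host) are omitted and would normally
    // be added with another put(...) call, as in the tests below.
    public class BulkSettingsExample {
        public static void main(String[] args) throws Exception {
            try (TransportBulkClient bulkClient = ClientBuilder.builder()
                    .setBulkClientProvider(TransportBulkClientProvider.class)
                    .put(Parameters.MAX_VOLUME_PER_REQUEST.getName(), "5mb")
                    .put(Parameters.FLUSH_INTERVAL.getName(), "60s")
                    .build()) {
                // create an index and feed documents as shown in the tests below
            }
        }
    }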
@@ -36,10 +36,10 @@ public class NodeClientHelper {
                 key -> innerCreateClient(settings));
     }

-    public void closeClient(Settings settings) throws IOException {
+    public void closeClient(Settings settings) {
         ElasticsearchClient client = clientMap.remove(settings.get("cluster.name"));
         if (client != null) {
-            logger.debug("closing node...");
+            logger.debug("closing node");
             node.close();
             node = null;
         }

@@ -77,10 +77,12 @@ public class NodeClientHelper {
     }

     private static boolean isPrivateSettings(String key) {
-        return key.equals(Parameters.MAX_ACTIONS_PER_REQUEST.name()) ||
-                key.equals(Parameters.MAX_CONCURRENT_REQUESTS.name()) ||
-                key.equals(Parameters.MAX_VOLUME_PER_REQUEST.name()) ||
-                key.equals(Parameters.FLUSH_INTERVAL.name());
+        for (Parameters p : Parameters.values()) {
+            if (key.equals(p.getName())) {
+                return true;
+            }
+        }
+        return false;
     }

     private static class BulkNode extends Node {
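Note: isPrivateSettings now treats every key defined in the Parameters enum as client-internal instead of checking a hard-coded list, so newly added bulk parameters are filtered automatically; the call site is not part of this diff, but the name suggests these keys are kept out of the settings handed to the embedded node. A standalone sketch of the same enum-driven filtering, using a stand-in enum instead of org.xbib.elx.common.Parameters.

    import java.util.Map;
    import java.util.stream.Collectors;

    // Standalone sketch: any key that matches a known parameter name is treated
    // as private and dropped before the remaining settings are passed on.
    public class PrivateSettingsFilterDemo {

        enum Params {
            MAX_VOLUME_PER_REQUEST("bulk.max_volume_per_request"),
            FLUSH_INTERVAL("bulk.flush_interval");

            private final String name;

            Params(String name) {
                this.name = name;
            }

            String getName() {
                return name;
            }
        }

        static boolean isPrivateSettings(String key) {
            for (Params p : Params.values()) {
                if (key.equals(p.getName())) {
                    return true;
                }
            }
            return false;
        }

        public static void main(String[] args) {
            Map<String, String> settings = Map.of(
                    "cluster.name", "elasticsearch",
                    "bulk.flush_interval", "30s");
            Map<String, String> nodeSettings = settings.entrySet().stream()
                    .filter(e -> !isPrivateSettings(e.getKey()))
                    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
            System.out.println(nodeSettings); // {cluster.name=elasticsearch}
        }
    }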
@@ -1,4 +0,0 @@
-/**
- * Node client extensions.
- */
-package org.xbib.elx.node;
@@ -9,7 +9,6 @@ import org.junit.jupiter.api.extension.ExtendWith;
 import org.xbib.elx.api.IndexDefinition;
 import org.xbib.elx.common.ClientBuilder;
 import org.xbib.elx.common.DefaultIndexDefinition;
-import org.xbib.elx.common.Parameters;
 import org.xbib.elx.node.NodeAdminClient;
 import org.xbib.elx.node.NodeAdminClientProvider;
 import org.xbib.elx.node.NodeBulkClient;

@@ -31,8 +30,6 @@ class BulkClientTest {

     private static final Long ACTIONS = 10000L;

-    private static final Long MAX_ACTIONS_PER_REQUEST = 1000L;
-
     private final TestExtension.Helper helper;

     BulkClientTest(TestExtension.Helper helper) {

@@ -44,8 +41,6 @@ class BulkClientTest {
         try (NodeBulkClient bulkClient = ClientBuilder.builder(helper.client)
                 .setBulkClientProvider(NodeBulkClientProvider.class)
                 .put(helper.getNodeSettings())
-                .put(Parameters.MAX_ACTIONS_PER_REQUEST.getName(), MAX_ACTIONS_PER_REQUEST)
-                .put(Parameters.FLUSH_INTERVAL.getName(), "30s")
                 .build()) {
             IndexDefinition indexDefinition = new DefaultIndexDefinition("test", "doc");
             bulkClient.newIndex(indexDefinition);

@@ -65,7 +60,6 @@ class BulkClientTest {
         try (NodeBulkClient bulkClient = ClientBuilder.builder(helper.client)
                 .setBulkClientProvider(NodeBulkClientProvider.class)
                 .put(helper.getNodeSettings())
-                .put(Parameters.FLUSH_INTERVAL.getName(), "5s")
                 .build()) {
             IndexDefinition indexDefinition = new DefaultIndexDefinition("test", "doc");
             bulkClient.newIndex(indexDefinition);

@@ -105,8 +99,6 @@ class BulkClientTest {
         try (NodeBulkClient bulkClient = ClientBuilder.builder(helper.client)
                 .setBulkClientProvider(NodeBulkClientProvider.class)
                 .put(helper.getNodeSettings())
-                .put(Parameters.MAX_ACTIONS_PER_REQUEST.getName(), MAX_ACTIONS_PER_REQUEST)
-                .put(Parameters.FLUSH_INTERVAL.getName(), "60s")
                 .build()) {
             IndexDefinition indexDefinition = new DefaultIndexDefinition("test", "doc");
             bulkClient.newIndex(indexDefinition);

@@ -130,14 +122,11 @@ class BulkClientTest {
     @Test
     void testThreadedRandomDocs() throws Exception {
         int maxthreads = Runtime.getRuntime().availableProcessors();
-        long maxActionsPerRequest = MAX_ACTIONS_PER_REQUEST;
         final long actions = ACTIONS;
         long timeout = 120L;
         try (NodeBulkClient bulkClient = ClientBuilder.builder(helper.client)
                 .setBulkClientProvider(NodeBulkClientProvider.class)
                 .put(helper.getNodeSettings())
-                .put(Parameters.MAX_ACTIONS_PER_REQUEST.getName(), maxActionsPerRequest)
-                .put(Parameters.FLUSH_INTERVAL.getName(), "60s")
                 .build()) {
             IndexDefinition indexDefinition = new DefaultIndexDefinition("test", "doc");
             indexDefinition.setStartBulkRefreshSeconds(0);
@@ -9,7 +9,6 @@ import org.junit.jupiter.api.extension.ExtendWith;
 import org.xbib.elx.api.IndexDefinition;
 import org.xbib.elx.common.ClientBuilder;
 import org.xbib.elx.common.DefaultIndexDefinition;
-import org.xbib.elx.common.Parameters;
 import org.xbib.elx.transport.TransportAdminClient;
 import org.xbib.elx.transport.TransportAdminClientProvider;
 import org.xbib.elx.transport.TransportBulkClient;

@@ -31,8 +30,6 @@ class BulkClientTest {

     private static final Long ACTIONS = 100000L;

-    private static final Long MAX_ACTIONS_PER_REQUEST = 1000L;
-
     private final TestExtension.Helper helper;

     BulkClientTest(TestExtension.Helper helper) {

@@ -44,8 +41,6 @@ class BulkClientTest {
         try (TransportBulkClient bulkClient = ClientBuilder.builder()
                 .setBulkClientProvider(TransportBulkClientProvider.class)
                 .put(helper.getTransportSettings())
-                .put(Parameters.MAX_ACTIONS_PER_REQUEST.getName(), MAX_ACTIONS_PER_REQUEST)
-                .put(Parameters.FLUSH_INTERVAL.getName(), "30s")
                 .build()) {
             IndexDefinition indexDefinition = new DefaultIndexDefinition("test", "doc");
             bulkClient.newIndex(indexDefinition);

@@ -104,8 +99,6 @@ class BulkClientTest {
         try (TransportBulkClient bulkClient = ClientBuilder.builder()
                 .setBulkClientProvider(TransportBulkClientProvider.class)
                 .put(helper.getTransportSettings())
-                .put(Parameters.MAX_ACTIONS_PER_REQUEST.getName(), MAX_ACTIONS_PER_REQUEST)
-                .put(Parameters.FLUSH_INTERVAL.getName(), "60s")
                 .build()) {
             IndexDefinition indexDefinition = new DefaultIndexDefinition("test", "doc");
             bulkClient.newIndex(indexDefinition);

@@ -128,23 +121,19 @@ class BulkClientTest {
     @Test
     void testThreadedRandomDocs() {
         int maxthreads = Runtime.getRuntime().availableProcessors();
-        long maxActionsPerRequest = MAX_ACTIONS_PER_REQUEST;
+        //long maxActionsPerRequest = MAX_ACTIONS_PER_REQUEST;
         final long actions = ACTIONS;
         long timeout = 120L;
         try (TransportBulkClient bulkClient = ClientBuilder.builder()
                 .setBulkClientProvider(TransportBulkClientProvider.class)
                 .put(helper.getTransportSettings())
-                .put(Parameters.MAX_ACTIONS_PER_REQUEST.getName(), maxActionsPerRequest)
-                .put(Parameters.FLUSH_INTERVAL.getName(), "60s")
-                .put(Parameters.ENABLE_BULK_LOGGING.getName(), Boolean.TRUE)
                 .build()) {
             IndexDefinition indexDefinition = new DefaultIndexDefinition("test", "doc");
             indexDefinition.setFullIndexName("test");
-            indexDefinition.setStartBulkRefreshSeconds(0);
+            indexDefinition.setStartBulkRefreshSeconds(-1);
             indexDefinition.setStopBulkRefreshSeconds(60);
             bulkClient.newIndex(indexDefinition);
             bulkClient.startBulk(indexDefinition);
-            logger.info("index created");
             ExecutorService executorService = Executors.newFixedThreadPool(maxthreads);
             final CountDownLatch latch = new CountDownLatch(maxthreads);
             for (int i = 0; i < maxthreads; i++) {
@@ -1,6 +1,6 @@
 group = org.xbib
 name = elx
-version = 2.2.1.36
+version = 2.2.1.37

 gradle.wrapper.version = 6.6.1
 xbib-metrics.version = 2.1.0