diff --git a/A b/A
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
index 372c8bc70004c..9857a1bc29ed7 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
@@ -59,7 +59,7 @@ class RestIntegTestTask extends DefaultTask {
     Boolean includePackaged = false
 
     RestIntegTestTask() {
-        runner = project.tasks.create("${name}Runner", Test.class)
+        runner = project.tasks.create("${name}Runner", RestTestRunnerTask.class)
         super.dependsOn(runner)
         clusterInit = project.tasks.create(name: "${name}Cluster#init", dependsOn: project.testClasses)
         runner.dependsOn(clusterInit)
@@ -77,10 +77,6 @@ class RestIntegTestTask extends DefaultTask {
             runner.useCluster project.testClusters."$name"
         }
 
-        // disable the build cache for rest test tasks
-        // there are a number of inputs we aren't properly tracking here so we'll just not cache these for now
-        runner.getOutputs().doNotCacheIf("Caching is disabled for REST integration tests", Specs.SATISFIES_ALL)
-
         // override/add more for rest tests
         runner.maxParallelForks = 1
         runner.include('**/*IT.class')
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/AbstractLazyPropertyCollection.java b/buildSrc/src/main/java/org/elasticsearch/gradle/AbstractLazyPropertyCollection.java
new file mode 100644
index 0000000000000..94cdb091eac1b
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/AbstractLazyPropertyCollection.java
@@ -0,0 +1,27 @@
+package org.elasticsearch.gradle;
+
+import java.util.List;
+
+public abstract class AbstractLazyPropertyCollection {
+
+    final String name;
+    final Object owner;
+
+    public AbstractLazyPropertyCollection(String name) {
+        this(name, null);
+    }
+
+    public AbstractLazyPropertyCollection(String name, Object owner) {
+        this.name = name;
+        this.owner = owner;
+    }
+
+    abstract List<? extends Object> getNormalizedCollection();
+
+    void assertNotNull(Object value, String description) {
+        if (value == null) {
+            throw new NullPointerException(name + " " + description + " was null" + (owner != null ?
" when configuring " + owner : "")); + } + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyList.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyList.java new file mode 100644 index 0000000000000..d23c9b9e748d5 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyList.java @@ -0,0 +1,205 @@ +package org.elasticsearch.gradle; + +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.Nested; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +public class LazyPropertyList extends AbstractLazyPropertyCollection implements List { + + private final List> delegate = new ArrayList<>(); + + public LazyPropertyList(String name) { + super(name); + } + + public LazyPropertyList(String name, Object owner) { + super(name, owner); + } + + @Override + public int size() { + return delegate.size(); + } + + @Override + public boolean isEmpty() { + return delegate.isEmpty(); + } + + @Override + public boolean contains(Object o) { + return delegate.stream().anyMatch(entry -> entry.getValue().equals(o)); + } + + @Override + public Iterator iterator() { + return delegate.stream().peek(this::validate).map(PropertyListEntry::getValue).iterator(); + } + + @Override + public Object[] toArray() { + return delegate.stream().peek(this::validate).map(PropertyListEntry::getValue).toArray(); + } + + @Override + public T1[] toArray(T1[] a) { + return delegate.stream().peek(this::validate).map(PropertyListEntry::getValue).collect(Collectors.toList()).toArray(a); + } + + @Override + public boolean add(T t) { + return delegate.add(new PropertyListEntry<>(() -> t, PropertyNormalization.DEFAULT)); + } + + public boolean add(Supplier supplier) { + return delegate.add(new PropertyListEntry<>(supplier, PropertyNormalization.DEFAULT)); + } + + public boolean add(Supplier supplier, PropertyNormalization normalization) { + return delegate.add(new PropertyListEntry<>(supplier, normalization)); + } + + @Override + public boolean remove(Object o) { + throw new UnsupportedOperationException(this.getClass().getName() + " does not support remove()"); + } + + @Override + public boolean containsAll(Collection c) { + return delegate.stream().map(PropertyListEntry::getValue).collect(Collectors.toList()).containsAll(c); + } + + @Override + public boolean addAll(Collection c) { + c.forEach(this::add); + return true; + } + + @Override + public boolean addAll(int index, Collection c) { + int i = index; + for (T item : c) { + this.add(i++, item); + } + return true; + } + + @Override + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(this.getClass().getName() + " does not support removeAll()"); + } + + @Override + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(this.getClass().getName() + " does not support retainAll()"); + } + + @Override + public void clear() { + delegate.clear(); + } + + @Override + public T get(int index) { + PropertyListEntry entry = delegate.get(index); + validate(entry); + return entry.getValue(); + } + + @Override + public T set(int index, T element) { + return delegate.set(index, new PropertyListEntry<>(() -> element, PropertyNormalization.DEFAULT)).getValue(); + } + + @Override + public void add(int index, T element) { + delegate.add(index, new PropertyListEntry<>(() -> element, 
PropertyNormalization.DEFAULT)); + } + + @Override + public T remove(int index) { + return delegate.remove(index).getValue(); + } + + @Override + public int indexOf(Object o) { + for (int i = 0; i < delegate.size(); i++) { + if (delegate.get(i).getValue().equals(o)) { + return i; + } + } + + return -1; + } + + @Override + public int lastIndexOf(Object o) { + int lastIndex = -1; + for (int i = 0; i < delegate.size(); i++) { + if (delegate.get(i).getValue().equals(o)) { + lastIndex = i; + } + } + + return lastIndex; + } + + @Override + public ListIterator listIterator() { + return delegate.stream().map(PropertyListEntry::getValue).collect(Collectors.toList()).listIterator(); + } + + @Override + public ListIterator listIterator(int index) { + return delegate.stream().peek(this::validate).map(PropertyListEntry::getValue).collect(Collectors.toList()).listIterator(index); + } + + @Override + public List subList(int fromIndex, int toIndex) { + return delegate.stream() + .peek(this::validate) + .map(PropertyListEntry::getValue) + .collect(Collectors.toList()) + .subList(fromIndex, toIndex); + } + + @Override + @Nested + List getNormalizedCollection() { + return delegate.stream() + .peek(this::validate) + .filter(entry -> entry.getNormalization() != PropertyNormalization.IGNORE_VALUE) + .collect(Collectors.toList()); + } + + private void validate(PropertyListEntry entry) { + assertNotNull(entry.getValue(), "entry"); + } + + private class PropertyListEntry { + private final Supplier supplier; + private final PropertyNormalization normalization; + + PropertyListEntry(Supplier supplier, PropertyNormalization normalization) { + this.supplier = supplier; + this.normalization = normalization; + } + + public PropertyNormalization getNormalization() { + return normalization; + } + + @Input + public T getValue() { + assertNotNull(supplier, "supplier"); + return supplier.get(); + } + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyMap.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyMap.java new file mode 100644 index 0000000000000..781bb095bb5cb --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/LazyPropertyMap.java @@ -0,0 +1,167 @@ +package org.elasticsearch.gradle; + +import org.gradle.api.Named; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.Nested; + +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +public class LazyPropertyMap extends AbstractLazyPropertyCollection implements Map { + + private final Map> delegate = new LinkedHashMap<>(); + private final BiFunction normalizationMapper; + + public LazyPropertyMap(String name) { + this(name, null); + } + + public LazyPropertyMap(String name, Object owner) { + this(name, owner, null); + } + + public LazyPropertyMap(String name, Object owner, BiFunction normalizationMapper) { + super(name, owner); + this.normalizationMapper = normalizationMapper; + } + + @Override + public int size() { + return delegate.size(); + } + + @Override + public boolean isEmpty() { + return delegate.isEmpty(); + } + + @Override + public boolean containsKey(Object key) { + return delegate.containsKey(key); + } + + @Override + public boolean containsValue(Object value) { + return delegate.values().stream().map(PropertyMapEntry::getValue).anyMatch(v -> v.equals(value)); + } + + @Override + public V 
get(Object key) { + PropertyMapEntry entry = delegate.get(key); + if (entry != null) { + V value = entry.getValue(); + assertNotNull(value, "value for key '" + key + "'"); + return value; + } else { + return null; + } + } + + @Override + public V put(K key, V value) { + return put(key, value, PropertyNormalization.DEFAULT); + } + + public V put(K key, V value, PropertyNormalization normalization) { + assertNotNull(value, "value for key '" + key + "'"); + return put(key, () -> value, normalization); + } + + public V put(K key, Supplier supplier) { + return put(key, supplier, PropertyNormalization.DEFAULT); + } + + public V put(K key, Supplier supplier, PropertyNormalization normalization) { + assertNotNull(supplier, "supplier for key '" + key + "'"); + PropertyMapEntry previous = delegate.put(key, new PropertyMapEntry<>(key, supplier, normalization)); + return previous == null ? null : previous.getValue(); + } + + @Override + public V remove(Object key) { + PropertyMapEntry previous = delegate.remove(key); + return previous == null ? null : previous.getValue(); + } + + @Override + public void putAll(Map m) { + throw new UnsupportedOperationException(this.getClass().getName() + " does not support putAll()"); + } + + @Override + public void clear() { + delegate.clear(); + } + + @Override + public Set keySet() { + return delegate.keySet(); + } + + @Override + public Collection values() { + return delegate.values().stream().peek(this::validate).map(PropertyMapEntry::getValue).collect(Collectors.toList()); + } + + @Override + public Set> entrySet() { + return delegate.entrySet().stream() + .peek(this::validate) + .collect(Collectors.toMap(Entry::getKey, entry -> entry.getValue().getValue())).entrySet(); + } + + @Override + @Nested + List getNormalizedCollection() { + return delegate.values().stream() + .peek(this::validate) + .filter(entry -> entry.getNormalization() != PropertyNormalization.IGNORE_VALUE) + .map(entry -> normalizationMapper == null ? entry : normalizationMapper.apply(entry.getKey(), entry.getValue())) + .collect(Collectors.toList()); + } + + private void validate(Map.Entry> entry) { + validate(entry.getValue()); + } + + private void validate(PropertyMapEntry supplier) { + assertNotNull(supplier, "key '" + supplier.getKey() + "' supplier value"); + } + + private static class PropertyMapEntry implements Named { + private final K key; + private final Supplier value; + private final PropertyNormalization normalization; + + PropertyMapEntry(K key, Supplier value, PropertyNormalization normalization) { + this.key = key; + this.value = value; + this.normalization = normalization; + } + + public PropertyNormalization getNormalization() { + return normalization; + } + + @Override + public String getName() { + return getKey().toString(); + } + + @Input + public K getKey() { + return key; + } + + @Input + public V getValue() { + return value.get(); + } + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/PropertyNormalization.java b/buildSrc/src/main/java/org/elasticsearch/gradle/PropertyNormalization.java new file mode 100644 index 0000000000000..85fdede56c392 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/PropertyNormalization.java @@ -0,0 +1,13 @@ +package org.elasticsearch.gradle; + +public enum PropertyNormalization { + /** + * Uses default strategy based on runtime property type. + */ + DEFAULT, + + /** + * Ignores property value completely for the purposes of input snapshotting. 
+     */
+    IGNORE_VALUE
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/test/RestTestRunnerTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/test/RestTestRunnerTask.java
new file mode 100644
index 0000000000000..eff05f64f9c33
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/test/RestTestRunnerTask.java
@@ -0,0 +1,37 @@
+package org.elasticsearch.gradle.test;
+
+import org.elasticsearch.gradle.testclusters.ElasticsearchCluster;
+import org.gradle.api.tasks.CacheableTask;
+import org.gradle.api.tasks.Nested;
+import org.gradle.api.tasks.testing.Test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+import static org.elasticsearch.gradle.Distribution.INTEG_TEST;
+
+/**
+ * Customized version of Gradle {@link Test} task which tracks a collection of {@link ElasticsearchCluster} as a task input. We must do this
+ * as a custom task type because the current {@link org.gradle.api.tasks.TaskInputs} runtime API does not have a way to register
+ * {@link Nested} inputs.
+ */
+@CacheableTask
+public class RestTestRunnerTask extends Test {
+
+    private Collection<ElasticsearchCluster> clusters = new ArrayList<>();
+
+    public RestTestRunnerTask() {
+        super();
+        this.getOutputs().doNotCacheIf("Build cache is only enabled for tests against clusters using the 'integ-test' distribution",
+            task -> clusters.stream().flatMap(c -> c.getNodes().stream()).anyMatch(n -> n.getDistribution() != INTEG_TEST));
+    }
+
+    @Nested
+    public Collection<ElasticsearchCluster> getClusters() {
+        return clusters;
+    }
+
+    public void testCluster(ElasticsearchCluster cluster) {
+        this.clusters.add(cluster);
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
index 9cc03dd7e371d..c343f56525aea 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
@@ -18,15 +18,17 @@
  */
 package org.elasticsearch.gradle.testclusters;
 
-import org.elasticsearch.GradleServicesAdapter;
 import org.elasticsearch.gradle.Distribution;
 import org.elasticsearch.gradle.FileSupplier;
+import org.elasticsearch.gradle.PropertyNormalization;
 import org.elasticsearch.gradle.Version;
 import org.elasticsearch.gradle.http.WaitForHttpResource;
+import org.gradle.api.Named;
 import org.gradle.api.NamedDomainObjectContainer;
 import org.gradle.api.Project;
 import org.gradle.api.logging.Logger;
 import org.gradle.api.logging.Logging;
+import org.gradle.api.tasks.Nested;
 
 import java.io.File;
 import java.io.IOException;
@@ -47,7 +49,7 @@
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
-public class ElasticsearchCluster implements TestClusterConfiguration {
+public class ElasticsearchCluster implements TestClusterConfiguration, Named {
 
     private static final Logger LOGGER = Logging.getLogger(ElasticsearchNode.class);
     private static final int CLUSTER_UP_TIMEOUT = 40;
@@ -60,19 +62,19 @@ public class ElasticsearchCluster implements TestClusterConfiguration {
     private final File workingDirBase;
     private final File artifactsExtractDir;
     private final LinkedHashMap<String, Predicate<TestClusterConfiguration>> waitConditions = new LinkedHashMap<>();
-    private final GradleServicesAdapter services;
+    private final Project project;
 
     public ElasticsearchCluster(String path, String clusterName, Project project, File artifactsExtractDir, File workingDirBase) {
         this.path = path;
         this.clusterName = clusterName;
+        this.project =
project; this.workingDirBase = workingDirBase; this.artifactsExtractDir = artifactsExtractDir; - this.services = GradleServicesAdapter.getInstance(project); this.nodes = project.container(ElasticsearchNode.class); this.nodes.add( new ElasticsearchNode( path, clusterName + "-0", - services, artifactsExtractDir, workingDirBase + project, artifactsExtractDir, workingDirBase ) ); // configure the cluster name eagerly so nodes know about it @@ -96,7 +98,7 @@ public void setNumberOfNodes(int numberOfNodes) { for (int i = nodes.size() ; i < numberOfNodes; i++) { this.nodes.add(new ElasticsearchNode( - path, clusterName + "-" + i, services, artifactsExtractDir, workingDirBase + path, clusterName + "-" + i, project, artifactsExtractDir, workingDirBase )); } } @@ -153,6 +155,11 @@ public void keystore(String key, File value) { nodes.all(each -> each.keystore(key, value)); } + @Override + public void keystore(String key, File value, PropertyNormalization normalization) { + nodes.all(each -> each.keystore(key, value, normalization)); + } + @Override public void keystore(String key, FileSupplier valueSupplier) { nodes.all(each -> each.keystore(key, valueSupplier)); @@ -163,11 +170,21 @@ public void setting(String key, String value) { nodes.all(each -> each.setting(key, value)); } + @Override + public void setting(String key, String value, PropertyNormalization normalization) { + nodes.all(each -> each.setting(key, value, normalization)); + } + @Override public void setting(String key, Supplier valueSupplier) { nodes.all(each -> each.setting(key, valueSupplier)); } + @Override + public void setting(String key, Supplier valueSupplier, PropertyNormalization normalization) { + nodes.all(each -> each.setting(key, valueSupplier, normalization)); + } + @Override public void systemProperty(String key, String value) { nodes.all(each -> each.systemProperty(key, value)); @@ -178,6 +195,11 @@ public void systemProperty(String key, Supplier valueSupplier) { nodes.all(each -> each.systemProperty(key, valueSupplier)); } + @Override + public void systemProperty(String key, Supplier valueSupplier, PropertyNormalization normalization) { + nodes.all(each -> each.systemProperty(key, valueSupplier, normalization)); + } + @Override public void environment(String key, String value) { nodes.all(each -> each.environment(key, value)); @@ -189,13 +211,13 @@ public void environment(String key, Supplier valueSupplier) { } @Override - public void jvmArgs(String... values) { - nodes.all(each -> each.jvmArgs(values)); + public void environment(String key, Supplier valueSupplier, PropertyNormalization normalization) { + nodes.all(each -> each.environment(key, valueSupplier, normalization)); } @Override - public void jvmArgs(Supplier valueSupplier) { - nodes.all(each -> each.jvmArgs(valueSupplier)); + public void jvmArgs(String... 
values) { + nodes.all(each -> each.jvmArgs(values)); } @Override @@ -229,6 +251,7 @@ public void start() { if (Version.fromString(node.getVersion()).getMajor() >= 7) { node.defaultConfig.put("cluster.initial_master_nodes", "[" + nodeNames + "]"); node.defaultConfig.put("discovery.seed_providers", "file"); + node.defaultConfig.put("discovery.seed_hosts", "[]"); } } node.start(); @@ -245,6 +268,11 @@ public void extraConfigFile(String destination, File from) { nodes.all(node -> node.extraConfigFile(destination, from)); } + @Override + public void extraConfigFile(String destination, File from, PropertyNormalization normalization) { + nodes.all(node -> node.extraConfigFile(destination, from, normalization)); + } + @Override public void user(Map userSpec) { nodes.all(node -> node.user(userSpec)); @@ -286,14 +314,13 @@ public List getAllTransportPortURI() { } public void waitForAllConditions() { - long startedAt = System.currentTimeMillis(); LOGGER.info("Waiting for nodes"); nodes.forEach(ElasticsearchNode::waitForAllConditions); writeUnicastHostsFiles(); LOGGER.info("Starting to wait for cluster to form"); - waitForConditions(waitConditions, startedAt, CLUSTER_UP_TIMEOUT, CLUSTER_UP_TIMEOUT_UNIT, this); + waitForConditions(waitConditions, System.currentTimeMillis(), CLUSTER_UP_TIMEOUT, CLUSTER_UP_TIMEOUT_UNIT, this); } @Override @@ -356,6 +383,11 @@ private void addWaitForClusterHealth() { }); } + @Nested + public NamedDomainObjectContainer getNodes() { + return nodes; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 1641ef3dac4a3..f3727b0363102 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -18,14 +18,29 @@ */ package org.elasticsearch.gradle.testclusters; -import org.elasticsearch.GradleServicesAdapter; import org.elasticsearch.gradle.Distribution; import org.elasticsearch.gradle.FileSupplier; +import org.elasticsearch.gradle.LazyPropertyList; +import org.elasticsearch.gradle.LazyPropertyMap; +import org.elasticsearch.gradle.LoggedExec; import org.elasticsearch.gradle.OS; +import org.elasticsearch.gradle.PropertyNormalization; import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.http.WaitForHttpResource; +import org.gradle.api.Action; +import org.gradle.api.Named; +import org.gradle.api.Project; +import org.gradle.api.file.FileCollection; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; +import org.gradle.api.tasks.Classpath; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputFile; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.Nested; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; +import org.gradle.api.tasks.util.PatternFilterable; import java.io.ByteArrayInputStream; import java.io.File; @@ -37,9 +52,10 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -65,8 +81,10 @@ public class ElasticsearchNode implements 
TestClusterConfiguration { private static final Logger LOGGER = Logging.getLogger(ElasticsearchNode.class); private static final int ES_DESTROY_TIMEOUT = 20; private static final TimeUnit ES_DESTROY_TIMEOUT_UNIT = TimeUnit.SECONDS; - private static final int NODE_UP_TIMEOUT = 60; - private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.SECONDS; + private static final int NODE_UP_TIMEOUT = 2; + private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.MINUTES; + private static final int ADDITIONAL_CONFIG_TIMEOUT = 15; + private static final TimeUnit ADDITIONAL_CONFIG_TIMEOUT_UNIT = TimeUnit.SECONDS; private static final List OVERRIDABLE_SETTINGS = Arrays.asList( "path.repo", "discovery.seed_providers" @@ -74,24 +92,23 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final String path; private final String name; - private final GradleServicesAdapter services; + private final Project project; private final AtomicBoolean configurationFrozen = new AtomicBoolean(false); private final Path artifactsExtractDir; private final Path workingDir; - private final LinkedHashMap> waitConditions = new LinkedHashMap<>(); private final List plugins = new ArrayList<>(); private final List modules = new ArrayList<>(); - private final Map> settings = new LinkedHashMap<>(); - private final Map> keystoreSettings = new LinkedHashMap<>(); - private final Map keystoreFiles = new LinkedHashMap<>(); - private final Map> systemProperties = new LinkedHashMap<>(); - private final Map> environment = new LinkedHashMap<>(); - private final List>> jvmArgs = new ArrayList<>(); - private final Map extraConfigFiles = new HashMap<>(); - final LinkedHashMap defaultConfig = new LinkedHashMap<>(); + private final LazyPropertyMap settings = new LazyPropertyMap<>("Settings", this); + private final LazyPropertyMap keystoreSettings = new LazyPropertyMap<>("Keystore", this); + private final LazyPropertyMap keystoreFiles = new LazyPropertyMap<>("Keystore files", this, FileEntry::new); + private final LazyPropertyMap systemProperties = new LazyPropertyMap<>("System properties", this); + private final LazyPropertyMap environment = new LazyPropertyMap<>("Environment", this); + private final LazyPropertyList jvmArgs = new LazyPropertyList<>("JVM arguments", this); + private final LazyPropertyMap extraConfigFiles = new LazyPropertyMap<>("Extra config files", this, FileEntry::new); private final List> credentials = new ArrayList<>(); + final LinkedHashMap defaultConfig = new LinkedHashMap<>(); private final Path confPathRepo; private final Path configFile; @@ -110,10 +127,10 @@ public class ElasticsearchNode implements TestClusterConfiguration { private Function nameCustomization = Function.identity(); private boolean isWorkingDirConfigured = false; - ElasticsearchNode(String path, String name, GradleServicesAdapter services, File artifactsExtractDir, File workingDirBase) { + ElasticsearchNode(String path, String name, Project project, File artifactsExtractDir, File workingDirBase) { this.path = path; this.name = name; - this.services = services; + this.project = project; this.artifactsExtractDir = artifactsExtractDir.toPath(); this.workingDir = workingDirBase.toPath().resolve(safeName(name)).toAbsolutePath(); confPathRepo = workingDir.resolve("repo"); @@ -132,6 +149,7 @@ public String getName() { return nameCustomization.apply(name); } + @Input public String getVersion() { return version; } @@ -143,6 +161,7 @@ public void setVersion(String version) { this.version = version; } + @Input public 
Distribution getDistribution() { return distribution; } @@ -173,88 +192,81 @@ public void module(File module) { @Override public void keystore(String key, String value) { - addSupplier("Keystore", keystoreSettings, key, value); + keystoreSettings.put(key, value); } @Override public void keystore(String key, Supplier valueSupplier) { - addSupplier("Keystore", keystoreSettings, key, valueSupplier); + keystoreSettings.put(key, valueSupplier); } @Override public void keystore(String key, File value) { - requireNonNull(value, "keystore value was null when configuring test cluster`" + this + "`"); - keystore(key, () -> value); + keystoreFiles.put(key, value); + } + + @Override + public void keystore(String key, File value, PropertyNormalization normalization) { + keystoreFiles.put(key, value, normalization); } @Override public void keystore(String key, FileSupplier valueSupplier) { - requireNonNull(key, "Keystore" + " key was null when configuring test cluster `" + this + "`"); - requireNonNull(valueSupplier, "Keystore" + " value supplier was null when configuring test cluster `" + this + "`"); keystoreFiles.put(key, valueSupplier); } @Override public void setting(String key, String value) { - addSupplier("Settings", settings, key, value); + settings.put(key, value); } @Override - public void setting(String key, Supplier valueSupplier) { - addSupplier("Setting", settings, key, valueSupplier); + public void setting(String key, String value, PropertyNormalization normalization) { + settings.put(key, value, normalization); } @Override - public void systemProperty(String key, String value) { - addSupplier("Java System property", systemProperties, key, value); + public void setting(String key, Supplier valueSupplier) { + settings.put(key, valueSupplier); } @Override - public void systemProperty(String key, Supplier valueSupplier) { - addSupplier("Java System property", systemProperties, key, valueSupplier); + public void setting(String key, Supplier valueSupplier, PropertyNormalization normalization) { + settings.put(key, valueSupplier, normalization); } @Override - public void environment(String key, String value) { - addSupplier("Environment variable", environment, key, value); + public void systemProperty(String key, String value) { + systemProperties.put(key, value); } @Override - public void environment(String key, Supplier valueSupplier) { - addSupplier("Environment variable", environment, key, valueSupplier); + public void systemProperty(String key, Supplier valueSupplier) { + systemProperties.put(key, valueSupplier); } - - public void jvmArgs(String... 
values) { - for (String value : values) { - requireNonNull(value, "jvm argument was null when configuring test cluster `" + this + "`"); - } - jvmArgs.add(() -> Arrays.asList(values)); + @Override + public void systemProperty(String key, Supplier valueSupplier, PropertyNormalization normalization) { + systemProperties.put(key, valueSupplier, normalization); } - public void jvmArgs(Supplier valueSupplier) { - requireNonNull(valueSupplier, "jvm argument supplier was null when configuring test cluster `" + this + "`"); - jvmArgs.add(() -> Arrays.asList(valueSupplier.get())); + @Override + public void environment(String key, String value) { + environment.put(key, value); } - private void addSupplier(String name, Map> collector, String key, Supplier valueSupplier) { - requireNonNull(key, name + " key was null when configuring test cluster `" + this + "`"); - requireNonNull(valueSupplier, name + " value supplier was null when configuring test cluster `" + this + "`"); - collector.put(key, valueSupplier); + @Override + public void environment(String key, Supplier valueSupplier) { + environment.put(key, valueSupplier); } - private void addSupplier(String name, Map> collector, String key, String actualValue) { - requireNonNull(actualValue, name + " value was null when configuring test cluster `" + this + "`"); - addSupplier(name, collector, key, () -> actualValue); + @Override + public void environment(String key, Supplier valueSupplier, PropertyNormalization normalization) { + environment.put(key, valueSupplier, normalization); } - private void checkSuppliers(String name, Collection> collector) { - collector.forEach(suplier -> - requireNonNull( - suplier.get().toString(), - name + " supplied value was null when configuring test cluster `" + this + "`" - ) - ); + public void jvmArgs(String... 
values) { + jvmArgs.addAll(Arrays.asList(values)); } public Path getConfigDir() { @@ -297,43 +309,43 @@ public Stream logLines() throws IOException { public synchronized void start() { LOGGER.info("Starting `{}`", this); - Path distroArtifact = artifactsExtractDir - .resolve(distribution.getGroup()) - .resolve("elasticsearch-" + getVersion()); - - if (Files.exists(distroArtifact) == false) { - throw new TestClustersException("Can not start " + this + ", missing: " + distroArtifact); + if (Files.exists(getExtractedDistributionDir()) == false) { + throw new TestClustersException("Can not start " + this + ", missing: " + getExtractedDistributionDir()); } - if (Files.isDirectory(distroArtifact) == false) { - throw new TestClustersException("Can not start " + this + ", is not a directory: " + distroArtifact); + if (Files.isDirectory(getExtractedDistributionDir()) == false) { + throw new TestClustersException("Can not start " + this + ", is not a directory: " + getExtractedDistributionDir()); } try { if (isWorkingDirConfigured == false) { + logToProcessStdout("Configuring working directory: " + workingDir); // Only configure working dir once so we don't loose data on restarts isWorkingDirConfigured = true; - createWorkingDir(distroArtifact); + createWorkingDir(getExtractedDistributionDir()); } } catch (IOException e) { throw new UncheckedIOException("Failed to create working directory for " + this, e); } createConfiguration(); - plugins.forEach(plugin -> runElaticsearchBinScript( - "elasticsearch-plugin", - "install", "--batch", plugin.toString()) - ); + if (plugins.isEmpty() == false) { + logToProcessStdout("Installing " + plugins.size() + " plugins"); + plugins.forEach(plugin -> runElaticsearchBinScript( + "elasticsearch-plugin", + "install", "--batch", plugin.toString()) + ); + } if (keystoreSettings.isEmpty() == false || keystoreFiles.isEmpty() == false) { + logToProcessStdout("Adding " + keystoreSettings.size() + " keystore settings and " + keystoreFiles.size() + " keystore files"); runElaticsearchBinScript("elasticsearch-keystore", "create"); - checkSuppliers("Keystore", keystoreSettings.values()); keystoreSettings.forEach((key, value) -> - runElaticsearchBinScriptWithInput(value.get().toString(), "elasticsearch-keystore", "add", "-x", key) + runElaticsearchBinScriptWithInput(value.toString(), "elasticsearch-keystore", "add", "-x", key) ); - for (Map.Entry entry : keystoreFiles.entrySet()) { - File file = entry.getValue().get(); + for (Map.Entry entry : keystoreFiles.entrySet()) { + File file = entry.getValue(); requireNonNull(file, "supplied keystoreFile was null when configuring " + this); if (file.exists() == false) { throw new TestClustersException("supplied keystore file " + file + " does not exist, require for " + this); @@ -347,20 +359,37 @@ public synchronized void start() { copyExtraConfigFiles(); if (isSettingMissingOrTrue("xpack.security.enabled")) { + logToProcessStdout("Setting up " + credentials.size() + " users"); if (credentials.isEmpty()) { user(Collections.emptyMap()); } credentials.forEach(paramMap -> runElaticsearchBinScript( "elasticsearch-users", - paramMap.entrySet().stream() - .flatMap(entry -> Stream.of(entry.getKey(), entry.getValue())) - .toArray(String[]::new) + paramMap.entrySet().stream() + .flatMap(entry -> Stream.of(entry.getKey(), entry.getValue())) + .toArray(String[]::new) )); } + logToProcessStdout("Starting Elasticsearch process"); startElasticsearchProcess(); } + private void logToProcessStdout(String message) { + try { + if 
(Files.exists(esStdoutFile.getParent()) == false) { + Files.createDirectories(esStdoutFile.getParent()); + } + Files.write( + esStdoutFile, + ("[" + Instant.now().toString() + "] [BUILD] " + message + "\n").getBytes(StandardCharsets.UTF_8), + StandardOpenOption.CREATE, StandardOpenOption.APPEND + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + @Override public void restart() { LOGGER.info("Restarting {}", this); @@ -376,36 +405,40 @@ public void restart() { } private boolean isSettingMissingOrTrue(String name) { - return Boolean.valueOf(settings.getOrDefault(name, () -> "false").get().toString()); + return Boolean.valueOf(settings.getOrDefault(name, "false").toString()); } private void copyExtraConfigFiles() { + if (extraConfigFiles.isEmpty() == false) { + logToProcessStdout("Setting up " + extraConfigFiles.size() + " additional config files"); + } extraConfigFiles.forEach((destination, from) -> { - if (Files.exists(from.toPath()) == false) { - throw new TestClustersException("Can't create extra config file from " + from + " for " + this + - " as it does not exist"); - } - Path dst = configFile.getParent().resolve(destination); - try { - Files.createDirectories(dst.getParent()); - Files.copy(from.toPath(), dst, StandardCopyOption.REPLACE_EXISTING); - LOGGER.info("Added extra config file {} for {}", destination, this); - } catch (IOException e) { - throw new UncheckedIOException("Can't create extra config file for", e); - } - }); + if (Files.exists(from.toPath()) == false) { + throw new TestClustersException("Can't create extra config file from " + from + " for " + this + + " as it does not exist"); + } + Path dst = configFile.getParent().resolve(destination); + try { + Files.createDirectories(dst.getParent()); + Files.copy(from.toPath(), dst, StandardCopyOption.REPLACE_EXISTING); + LOGGER.info("Added extra config file {} for {}", destination, this); + } catch (IOException e) { + throw new UncheckedIOException("Can't create extra config file for", e); + } + }); } private void installModules() { if (distribution == Distribution.INTEG_TEST) { + logToProcessStdout("Installing " + modules.size() + "modules"); for (File module : modules) { Path destination = workingDir.resolve("modules").resolve(module.getName().replace(".zip", "").replace("-" + version, "")); // only install modules that are not already bundled with the integ-test distribution if (Files.exists(destination) == false) { - services.copy(spec -> { + project.copy(spec -> { if (module.getName().toLowerCase().endsWith(".zip")) { - spec.from(services.zipTree(module)); + spec.from(project.zipTree(module)); } else if (module.isDirectory()) { spec.from(module); } else { @@ -430,6 +463,15 @@ public void extraConfigFile(String destination, File from) { extraConfigFiles.put(destination, from); } + @Override + public void extraConfigFile(String destination, File from, PropertyNormalization normalization) { + if (destination.contains("..")) { + throw new IllegalArgumentException("extra config file destination can't be relative, was " + destination + + " for " + this); + } + extraConfigFiles.put(destination, from, normalization); + } + @Override public void user(Map userSpec) { Set keys = new HashSet<>(userSpec.keySet()); @@ -439,9 +481,9 @@ public void user(Map userSpec) { if (keys.isEmpty() == false) { throw new TestClustersException("Unknown keys in user definition " + keys + " for " + this); } - Map cred = new LinkedHashMap<>(); - cred.put("useradd", userSpec.getOrDefault("username","test_user")); - 
cred.put("-p", userSpec.getOrDefault("password","x-pack-test-password")); + Map cred = new LinkedHashMap<>(); + cred.put("useradd", userSpec.getOrDefault("username", "test_user")); + cred.put("-p", userSpec.getOrDefault("password", "x-pack-test-password")); cred.put("-r", userSpec.getOrDefault("role", "superuser")); credentials.add(cred); } @@ -455,7 +497,7 @@ private void runElaticsearchBinScriptWithInput(String input, String tool, String "Is this the distribution you expect it to be ?"); } try (InputStream byteArrayInputStream = new ByteArrayInputStream(input.getBytes(StandardCharsets.UTF_8))) { - services.loggedExec(spec -> { + LoggedExec.exec(project, spec -> { spec.setEnvironment(getESEnvironment()); spec.workingDir(workingDir); spec.executable( @@ -496,19 +538,14 @@ private Map getESEnvironment() { defaultEnv.put("ES_PATH_CONF", configFile.getParent().toString()); String systemPropertiesString = ""; if (systemProperties.isEmpty() == false) { - checkSuppliers("Java System property", systemProperties.values()); systemPropertiesString = " " + systemProperties.entrySet().stream() - .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue().get()) + .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue()) .collect(Collectors.joining(" ")); } String jvmArgsString = ""; if (jvmArgs.isEmpty() == false) { jvmArgsString = " " + jvmArgs.stream() - .map(Supplier::get) - .peek(charSequences -> requireNonNull(charSequences, "Jvm argument supplier returned null while configuring " + this)) - .flatMap(Collection::stream) .peek(argument -> { - requireNonNull(argument, "Jvm argument supplier returned null while configuring " + this); if (argument.toString().startsWith("-D")) { throw new TestClustersException("Invalid jvm argument `" + argument + "` configure as systemProperty instead for " + this @@ -532,8 +569,7 @@ private Map getESEnvironment() { ); } - checkSuppliers("Environment variable", environment.values()); - environment.forEach((key, value) -> defaultEnv.put(key, value.get().toString())); + environment.forEach((key, value) -> defaultEnv.put(key, value.toString())); return defaultEnv; } @@ -657,7 +693,7 @@ private void logProcessInfo(String prefix, ProcessHandle.Info info) { private void logFileContents(String description, Path from) { LOGGER.error("{} `{}`", description, this); - try(Stream lines = Files.lines(from, StandardCharsets.UTF_8)) { + try (Stream lines = Files.lines(from, StandardCharsets.UTF_8)) { lines .map(line -> " " + line) .forEach(LOGGER::error); @@ -693,12 +729,12 @@ private void createWorkingDir(Path distroExtractDir) throws IOException { * We remove write permissions to make sure files are note mistakenly edited ( e.x. the config file ) and changes * reflected across all copies. Permissions are retained to be able to replace the links. 
* - * @param sourceRoot where to copy from + * @param sourceRoot where to copy from * @param destinationRoot destination to link to */ private void syncWithLinks(Path sourceRoot, Path destinationRoot) { if (Files.exists(destinationRoot)) { - services.delete(destinationRoot); + project.delete(destinationRoot); } try (Stream stream = Files.walk(sourceRoot)) { @@ -731,7 +767,7 @@ private void syncWithLinks(Path sourceRoot, Path destinationRoot) { } } - private void createConfiguration() { + private void createConfiguration() { String nodeName = nameCustomization.apply(safeName(name)); if (nodeName != null) { defaultConfig.put("node.name", nodeName); @@ -760,24 +796,21 @@ private void createConfiguration() { // over and the REST client will not retry on circuit breaking exceptions yet (see #31986 for details). Once the REST client // can retry on circuit breaking exceptions, we can revert again to the default configuration. if (Version.fromString(version).getMajor() >= 7) { - defaultConfig.put("indices.breaker.total.use_real_memory", "false"); + defaultConfig.put("indices.breaker.total.use_real_memory", "false"); } // Don't wait for state, just start up quickly. This will also allow new and old nodes in the BWC case to become the master - defaultConfig.put("discovery.initial_state_timeout", "0s"); + defaultConfig.put("discovery.initial_state_timeout", "0s"); - checkSuppliers("Settings", settings.values()); - Map userConfig = settings.entrySet().stream() - .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue().get().toString())); HashSet overriden = new HashSet<>(defaultConfig.keySet()); - overriden.retainAll(userConfig.keySet()); + overriden.retainAll(settings.keySet()); overriden.removeAll(OVERRIDABLE_SETTINGS); - if (overriden.isEmpty() ==false) { + if (overriden.isEmpty() == false) { throw new IllegalArgumentException( "Testclusters does not allow the following settings to be changed:" + overriden + " for " + this ); } // Make sure no duplicate config keys - userConfig.keySet().stream() + settings.keySet().stream() .filter(OVERRIDABLE_SETTINGS::contains) .forEach(defaultConfig::remove); @@ -788,7 +821,7 @@ private void createConfiguration() { Files.write( configFile, Stream.concat( - userConfig.entrySet().stream(), + settings.entrySet().stream(), defaultConfig.entrySet().stream() ) .map(entry -> entry.getKey() + ": " + entry.getValue()) @@ -803,7 +836,7 @@ private void createConfiguration() { private void checkFrozen() { if (configurationFrozen.get()) { - throw new IllegalStateException("Configuration for " + this + " can not be altered, already locked"); + throw new IllegalStateException("Configuration for " + this + " can not be altered, already locked"); } } @@ -828,11 +861,91 @@ private List getHttpPortInternal() { } private List readPortsFile(Path file) throws IOException { - try(Stream lines = Files.lines(file, StandardCharsets.UTF_8)) { + try (Stream lines = Files.lines(file, StandardCharsets.UTF_8)) { return lines.map(String::trim).collect(Collectors.toList()); } } + private Path getExtractedDistributionDir() { + return artifactsExtractDir.resolve(distribution.getGroup()).resolve("elasticsearch-" + getVersion()); + } + + private List getInstalledFileSet(Action filter) { + return Stream.concat( + plugins.stream().filter(uri -> uri.getScheme().equalsIgnoreCase("file")).map(File::new), + modules.stream() + ) + .filter(File::exists) + // TODO: We may be able to simplify this with Gradle 5.6 + // 
https://docs.gradle.org/nightly/release-notes.html#improved-handling-of-zip-archives-on-classpaths + .map(zipFile -> project.zipTree(zipFile).matching(filter)) + .flatMap(tree -> tree.getFiles().stream()) + .collect(Collectors.toList()); + } + + @Input + private Set getRemotePlugins() { + Set file = plugins.stream().filter(uri -> uri.getScheme().equalsIgnoreCase("file") == false).collect(Collectors.toSet()); + return file; + } + + @Classpath + private List getInstalledClasspath() { + return getInstalledFileSet(filter -> filter.include("**/*.jar")); + } + + @InputFiles + @PathSensitive(PathSensitivity.RELATIVE) + private List getInstalledFiles() { + return getInstalledFileSet(filter -> filter.exclude("**/*.jar")); + } + + @Classpath + private Set getDistributionClasspath() { + return project.fileTree(getExtractedDistributionDir()).matching(filter -> filter.include("**/*.jar")).getFiles(); + } + + @InputFiles + @PathSensitive(PathSensitivity.RELATIVE) + private FileCollection getDistributionFiles() { + return project.fileTree(getExtractedDistributionDir()).minus(project.files(getDistributionClasspath())); + } + + @Nested + private Map getKeystoreSettings() { + return keystoreSettings; + } + + @Nested + private Map getKeystoreFiles() { + return keystoreFiles; + } + + @Nested + private Map getSettings() { + return settings; + } + + @Nested + private Map getSystemProperties() { + return systemProperties; + } + + @Nested + private Map getEnvironment() { + return environment; + } + + @Nested + private List getJvmArgs() { + return jvmArgs; + } + + @Nested + private Map getExtraConfigFiles() { + return extraConfigFiles; + } + @Override public boolean isProcessAlive() { requireNonNull( @@ -843,7 +956,23 @@ public boolean isProcessAlive() { } void waitForAllConditions() { - waitForConditions(waitConditions, System.currentTimeMillis(), NODE_UP_TIMEOUT, NODE_UP_TIMEOUT_UNIT, this); + waitForConditions( + waitConditions, + System.currentTimeMillis(), + NODE_UP_TIMEOUT_UNIT.toMillis(NODE_UP_TIMEOUT) + + // Installing plugins at config time and loading them when nods start requires additional time we need to + // account for + ADDITIONAL_CONFIG_TIMEOUT_UNIT.toMillis(ADDITIONAL_CONFIG_TIMEOUT * + ( + plugins.size() + + keystoreFiles.size() + + keystoreSettings.size() + + credentials.size() + ) + ), + TimeUnit.MILLISECONDS, + this + ); } @Override @@ -865,6 +994,7 @@ public String toString() { return "node{" + path + ":" + name + "}"; } + @Input List> getCredentials() { return credentials; } @@ -884,7 +1014,7 @@ private boolean checkPortsFilesExistWithDelay(TestClusterConfiguration node) { public boolean isHttpSslEnabled() { return Boolean.valueOf( - settings.getOrDefault("xpack.security.http.ssl.enabled", () -> "false").get().toString() + settings.getOrDefault("xpack.security.http.ssl.enabled", "false").toString() ); } @@ -892,28 +1022,50 @@ void configureHttpWait(WaitForHttpResource wait) { if (settings.containsKey("xpack.security.http.ssl.certificate_authorities")) { wait.setCertificateAuthorities( getConfigDir() - .resolve(settings.get("xpack.security.http.ssl.certificate_authorities").get().toString()) + .resolve(settings.get("xpack.security.http.ssl.certificate_authorities").toString()) .toFile() ); } if (settings.containsKey("xpack.security.http.ssl.certificate")) { wait.setCertificateAuthorities( getConfigDir() - .resolve(settings.get("xpack.security.http.ssl.certificate").get().toString()) + .resolve(settings.get("xpack.security.http.ssl.certificate").toString()) .toFile() ); } if 
(settings.containsKey("xpack.security.http.ssl.keystore.path")) {
             wait.setTrustStoreFile(
                 getConfigDir()
-                    .resolve(settings.get("xpack.security.http.ssl.keystore.path").get().toString())
+                    .resolve(settings.get("xpack.security.http.ssl.keystore.path").toString())
                     .toFile()
             );
         }
         if (keystoreSettings.containsKey("xpack.security.http.ssl.keystore.secure_password")) {
             wait.setTrustStorePassword(
-                keystoreSettings.get("xpack.security.http.ssl.keystore.secure_password").get().toString()
+                keystoreSettings.get("xpack.security.http.ssl.keystore.secure_password").toString()
             );
         }
     }
+
+    private static class FileEntry implements Named {
+        private String name;
+        private File file;
+
+        FileEntry(String name, File file) {
+            this.name = name;
+            this.file = file;
+        }
+
+        @Input
+        @Override
+        public String getName() {
+            return name;
+        }
+
+        @InputFile
+        @PathSensitive(PathSensitivity.NONE)
+        public File getFile() {
+            return file;
+        }
+    }
 }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterCleanupOnShutdown.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterCleanupOnShutdown.java
new file mode 100644
index 0000000000000..0381cece108e2
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterCleanupOnShutdown.java
@@ -0,0 +1,59 @@
+package org.elasticsearch.gradle.testclusters;
+
+import org.gradle.api.logging.Logger;
+import org.gradle.api.logging.Logging;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+
+/**
+ * Keep an inventory of all running Clusters and stop them when interrupted
+ *
+ * This takes advantage of the fact that Gradle interrupts all the threads in the daemon when the build completes.
+ */
+public class TestClusterCleanupOnShutdown implements Runnable {
+
+    private final Logger logger = Logging.getLogger(TestClusterCleanupOnShutdown.class);
+
+    private Set<ElasticsearchCluster> clustersToWatch = new HashSet<>();
+
+    public void watch(Collection<ElasticsearchCluster> cluster) {
+        synchronized (clustersToWatch) {
+            clustersToWatch.addAll(clustersToWatch);
+        }
+    }
+
+    public void unWatch(Collection<ElasticsearchCluster> cluster) {
+        synchronized (clustersToWatch) {
+            clustersToWatch.removeAll(clustersToWatch);
+        }
+    }
+
+    @Override
+    public void run() {
+        try {
+            while (true) {
+                Thread.sleep(Long.MAX_VALUE);
+            }
+        } catch (InterruptedException interrupted) {
+            synchronized (clustersToWatch) {
+                if (clustersToWatch.isEmpty()) {
+                    return;
+                }
+                logger.info("Cleanup thread was interrupted, shutting down all clusters");
+                Iterator<ElasticsearchCluster> iterator = clustersToWatch.iterator();
+                while (iterator.hasNext()) {
+                    ElasticsearchCluster cluster = iterator.next();
+                    iterator.remove();
+                    try {
+                        cluster.stop(false);
+                    } catch (Exception e) {
+                        logger.warn("Could not shut down {}", cluster, e);
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java
index cae3b083af2e5..f290b4aa91b8f 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java
@@ -20,6 +20,7 @@
 
 import org.elasticsearch.gradle.Distribution;
 import org.elasticsearch.gradle.FileSupplier;
+import org.elasticsearch.gradle.PropertyNormalization;
 import org.gradle.api.logging.Logging;
 import org.slf4j.Logger;
 
@@ -52,23 +53,31 @@ public interface TestClusterConfiguration {
 
     void keystore(String key, File value);
 
+    void keystore(String key, File value, PropertyNormalization normalization);
+
     void keystore(String key, FileSupplier valueSupplier);
 
     void setting(String key, String value);
 
+    void setting(String key, String value, PropertyNormalization normalization);
+
     void setting(String key, Supplier<CharSequence> valueSupplier);
 
+    void setting(String key, Supplier<CharSequence> valueSupplier, PropertyNormalization normalization);
+
     void systemProperty(String key, String value);
 
     void systemProperty(String key, Supplier<CharSequence> valueSupplier);
 
+    void systemProperty(String key, Supplier<CharSequence> valueSupplier, PropertyNormalization normalization);
+
     void environment(String key, String value);
 
     void environment(String key, Supplier<CharSequence> valueSupplier);
 
-    void jvmArgs(String... values);
+    void environment(String key, Supplier<CharSequence> valueSupplier, PropertyNormalization normalization);
 
-    void jvmArgs(Supplier<CharSequence[]> valueSupplier);
+    void jvmArgs(String... values);
 
     void freeze();
 
@@ -80,6 +89,8 @@
     void extraConfigFile(String destination, File from);
 
+    void extraConfigFile(String destination, File from, PropertyNormalization normalization);
+
     void user(Map<String, String> userSpec);
 
     String getHttpSocketURI();
 
@@ -158,7 +169,5 @@ default String safeName(String name) {
             .replaceAll("[^a-zA-Z0-9]+", "-");
     }
 
-
-
     boolean isProcessAlive();
 }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersCleanupExtension.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersCleanupExtension.java
new file mode 100644
index 0000000000000..14bdfa952db0f
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersCleanupExtension.java
@@ -0,0 +1,74 @@
+package org.elasticsearch.gradle.testclusters;
+
+import org.gradle.api.Project;
+import org.gradle.api.logging.Logger;
+import org.gradle.api.logging.Logging;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This extensions was meant to be used internally by testclusters
+ *
+ * It holds synchronization primitives needed to implement the rate limiting.
+ * This is tricky because we can't use Gradle workers as there's no way to make sure that tests and their clusters are
+ * allocated atomically, so we could be in a situation where all workers are tests waiting for clusters to start up.
+ *
+ * Also auto configures cleanup of executors to make sure we don't leak threads in the daemon.
+ */ +public class TestClustersCleanupExtension { + + private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1; + private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES; + + private static final Logger logger = Logging.getLogger(TestClustersCleanupExtension.class); + + private final ExecutorService executorService; + private final TestClusterCleanupOnShutdown cleanupThread; + + public TestClustersCleanupExtension() { + executorService = Executors.newSingleThreadExecutor(); + cleanupThread = new TestClusterCleanupOnShutdown(); + executorService.submit(cleanupThread); + } + + + public static void createExtension(Project project) { + if (project.getRootProject().getExtensions().findByType(TestClustersCleanupExtension.class) != null) { + return; + } + // Configure the extension on the root project so we have a single instance per run + TestClustersCleanupExtension ext = project.getRootProject().getExtensions().create( + "__testclusters_rate_limit", + TestClustersCleanupExtension.class + ); + Thread shutdownHook = new Thread(ext.cleanupThread::run); + Runtime.getRuntime().addShutdownHook(shutdownHook); + project.getGradle().buildFinished(buildResult -> { + ext.executorService.shutdownNow(); + try { + if (ext.executorService.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIMEOUT_UNIT) == false) { + throw new IllegalStateException( + "Failed to shut down executor service after " + + EXECUTOR_SHUTDOWN_TIMEOUT + " " + EXECUTOR_SHUTDOWN_TIMEOUT_UNIT + ); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + try { + if (false == Runtime.getRuntime().removeShutdownHook(shutdownHook)) { + logger.warn("Trying to deregister shutdown hook when it was not registered."); + } + } catch (IllegalStateException ese) { + // Thrown when shutdown is in progress + logger.warn("Can't remove shutdown hook", ese); + } + }); + } + + public TestClusterCleanupOnShutdown getCleanupThread() { + return cleanupThread; + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 3f2a7b4dcc744..3c50108d9a179 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -21,6 +21,7 @@ import groovy.lang.Closure; import org.elasticsearch.gradle.BwcVersions; import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.test.RestTestRunnerTask; import org.elasticsearch.gradle.tool.Boilerplate; import org.gradle.api.Action; import org.gradle.api.NamedDomainObjectContainer; @@ -43,13 +44,9 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; public class TestClustersPlugin implements Plugin { @@ -58,18 +55,14 @@ public class TestClustersPlugin implements Plugin { public static final String EXTENSION_NAME = "testClusters"; private static final String HELPER_CONFIGURATION_PREFIX = "testclusters"; private static final String SYNC_ARTIFACTS_TASK_NAME = "syncTestClustersArtifacts"; - private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1; - private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = 
TimeUnit.MINUTES; private static final Logger logger = Logging.getLogger(TestClustersPlugin.class); private static final String TESTCLUSTERS_INSPECT_FAILURE = "testclusters.inspect.failure"; private final Map> usedClusters = new HashMap<>(); private final Map claimsInventory = new HashMap<>(); - private final Set runningClusters =new HashSet<>(); - private final Thread shutdownHook = new Thread(this::shutDownAllClusters); + private final Set runningClusters = new HashSet<>(); private final Boolean allowClusterToSurvive = Boolean.valueOf(System.getProperty(TESTCLUSTERS_INSPECT_FAILURE, "false")); - private ExecutorService executorService = Executors.newSingleThreadExecutor(); public static String getHelperConfigurationName(String version) { return HELPER_CONFIGURATION_PREFIX + "-" + version; @@ -82,6 +75,8 @@ public void apply(Project project) { // enable the DSL to describe clusters NamedDomainObjectContainer container = createTestClustersContainerExtension(project); + TestClustersCleanupExtension.createExtension(project); + // provide a task to be able to list defined clusters. createListClustersTask(project, container); @@ -100,9 +95,6 @@ public void apply(Project project) { // After each task we determine if there are clusters that are no longer needed. configureStopClustersHook(project); - // configure hooks to make sure no test cluster processes survive the build - configureCleanupHooks(project); - // Since we have everything modeled in the DSL, add all the required dependencies e.x. the distribution to the // configuration so the user doesn't have to repeat this. autoConfigureClusterDependencies(project, rootProject, container); @@ -164,6 +156,9 @@ public void doCall(ElasticsearchCluster cluster) { ((Task) thisObject).dependsOn( project.getRootProject().getTasks().getByName(SYNC_ARTIFACTS_TASK_NAME) ); + if (thisObject instanceof RestTestRunnerTask) { + ((RestTestRunnerTask) thisObject).testCluster(cluster); + } } }) ); @@ -196,8 +191,19 @@ private void configureStartClustersHook(Project project) { @Override public void beforeActions(Task task) { // we only start the cluster before the actions, so we'll not start it if the task is up-to-date - usedClusters.getOrDefault(task, Collections.emptyList()).stream() + List neededButNotRunning = usedClusters.getOrDefault( + task, + Collections.emptyList() + ) + .stream() .filter(cluster -> runningClusters.contains(cluster) == false) + .collect(Collectors.toList()); + + project.getRootProject().getExtensions() + .getByType(TestClustersCleanupExtension.class) + .getCleanupThread() + .watch(neededButNotRunning); + neededButNotRunning .forEach(elasticsearchCluster -> { elasticsearchCluster.start(); runningClusters.add(elasticsearchCluster); @@ -220,22 +226,36 @@ public void afterExecute(Task task, TaskState state) { task, Collections.emptyList() ); + if (clustersUsedByTask.isEmpty()) { + return; + } + logger.info("Clusters were used, stopping and releasing permits"); + final int permitsToRelease; if (state.getFailure() != null) { // If the task fails, and other tasks use this cluster, the other task will likely never be - // executed at all, so we will never get to un-claim and terminate it. + // executed at all, so we will never be called again to un-claim and terminate it. 
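The claim bookkeeping that these task-execution hooks implement boils down to per-cluster reference counting. The sketch below restates that idea in isolation; it is not the plugin's code: the Gradle task and ElasticsearchCluster types are replaced with plain strings, and the caller is expected to stop a cluster when release() reports that the last claim is gone.

import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the claim counting used by the hooks: a cluster keeps running
// while any task still holds a claim on it, and is stopped once the last claim is
// released by a successfully finished task.
class ClusterClaims {
    private final Map<String, Integer> claims = new HashMap<>();

    void claim(String cluster) {                  // a task is configured to use the cluster
        claims.merge(cluster, 1, Integer::sum);
    }

    boolean release(String cluster) {             // the task finished successfully
        return claims.merge(cluster, -1, Integer::sum) == 0;
    }
}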
clustersUsedByTask.forEach(cluster -> stopCluster(cluster, true)); + permitsToRelease = clustersUsedByTask.stream() + .map(cluster -> cluster.getNumberOfNodes()) + .reduce(Integer::sum).get(); } else { clustersUsedByTask.forEach( cluster -> claimsInventory.put(cluster, claimsInventory.getOrDefault(cluster, 0) - 1) ); - claimsInventory.entrySet().stream() + List stoppingClusers = claimsInventory.entrySet().stream() .filter(entry -> entry.getValue() == 0) .filter(entry -> runningClusters.contains(entry.getKey())) .map(Map.Entry::getKey) - .forEach(cluster -> { - stopCluster(cluster, false); - runningClusters.remove(cluster); - }); + .collect(Collectors.toList()); + stoppingClusers.forEach(cluster -> { + stopCluster(cluster, false); + runningClusters.remove(cluster); + }); + + project.getRootProject().getExtensions() + .getByType(TestClustersCleanupExtension.class) + .getCleanupThread() + .unWatch(stoppingClusers); } } @Override @@ -406,62 +426,4 @@ public void execute(Task task) { }))); } - private void configureCleanupHooks(Project project) { - // When the Gradle daemon is used, it will interrupt all threads when the build concludes. - // This is our signal to clean up - executorService.submit(() -> { - while (true) { - try { - Thread.sleep(Long.MAX_VALUE); - } catch (InterruptedException interrupted) { - shutDownAllClusters(); - Thread.currentThread().interrupt(); - return; - } - } - }); - - // When the Daemon is not used, or runs into issues, rely on a shutdown hook - // When the daemon is used, but does not work correctly and eventually dies off (e.x. due to non interruptible - // thread in the build) process will be stopped eventually when the daemon dies. - Runtime.getRuntime().addShutdownHook(shutdownHook); - - // When we don't run into anything out of the ordinary, and the build completes, makes sure to clean up - project.getGradle().buildFinished(buildResult -> { - shutdownExecutorService(); - if (false == Runtime.getRuntime().removeShutdownHook(shutdownHook)) { - logger.info("Trying to deregister shutdown hook when it was not registered."); - } - }); - } - - private void shutdownExecutorService() { - executorService.shutdownNow(); - try { - if (executorService.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIMEOUT_UNIT) == false) { - throw new IllegalStateException( - "Failed to shut down executor service after " + - EXECUTOR_SHUTDOWN_TIMEOUT + " " + EXECUTOR_SHUTDOWN_TIMEOUT_UNIT - ); - } - } catch (InterruptedException e) { - logger.info("Wait for testclusters shutdown interrupted", e); - Thread.currentThread().interrupt(); - } - } - - private void shutDownAllClusters() { - synchronized (runningClusters) { - if (runningClusters.isEmpty()) { - return; - } - Iterator iterator = runningClusters.iterator(); - while (iterator.hasNext()) { - ElasticsearchCluster next = iterator.next(); - iterator.remove(); - next.stop(false); - } - } - } - } diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 04edabda285a6..e5e7441d3e938 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -5.4.1 \ No newline at end of file +5.5 \ No newline at end of file diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java index 2bfd3b0cc8ed4..e6412099fee72 100644 --- 
a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkAction.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.plugin.noop.action.bulk; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.bulk.BulkResponse; -public class NoopBulkAction extends Action { +public class NoopBulkAction extends StreamableResponseActionType { public static final String NAME = "mock:data/write/bulk"; public static final NoopBulkAction INSTANCE = new NoopBulkAction(); diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java index 9b390e1ffddbc..fb83bda148b11 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchAction.java @@ -18,11 +18,11 @@ */ package org.elasticsearch.plugin.noop.action.search; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.io.stream.Writeable; -public class NoopSearchAction extends Action { +public class NoopSearchAction extends ActionType { public static final NoopSearchAction INSTANCE = new NoopSearchAction(); public static final String NAME = "mock:data/read/search"; @@ -30,11 +30,6 @@ private NoopSearchAction() { super(NAME); } - @Override - public SearchResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return SearchResponse::new; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java index 00d2651a1aeb8..18dfc2305575d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java @@ -37,6 +37,7 @@ import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; import static org.elasticsearch.client.RequestConverters.createEntity; +import static org.elasticsearch.client.dataframe.GetDataFrameTransformRequest.ALLOW_NO_MATCH; final class DataFrameRequestConverters { @@ -64,6 +65,9 @@ static Request getDataFrameTransform(GetDataFrameTransformRequest getRequest) { if (getRequest.getPageParams() != null && getRequest.getPageParams().getSize() != null) { request.addParameter(PageParams.SIZE.getPreferredName(), getRequest.getPageParams().getSize().toString()); } + if (getRequest.getAllowNoMatch() != null) { + request.addParameter(ALLOW_NO_MATCH, getRequest.getAllowNoMatch().toString()); + } return request; } @@ -91,21 +95,24 @@ static Request startDataFrameTransform(StartDataFrameTransformRequest startReque } static Request stopDataFrameTransform(StopDataFrameTransformRequest stopRequest) { - String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_data_frame", "transforms") 
- .addPathPart(stopRequest.getId()) - .addPathPartAsIs("_stop") - .build(); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - RequestConverters.Params params = new RequestConverters.Params(); - if (stopRequest.getWaitForCompletion() != null) { - params.withWaitForCompletion(stopRequest.getWaitForCompletion()); - } - if (stopRequest.getTimeout() != null) { - params.withTimeout(stopRequest.getTimeout()); - } - request.addParameters(params.asMap()); - return request; + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_data_frame", "transforms") + .addPathPart(stopRequest.getId()) + .addPathPartAsIs("_stop") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(); + if (stopRequest.getWaitForCompletion() != null) { + params.withWaitForCompletion(stopRequest.getWaitForCompletion()); + } + if (stopRequest.getTimeout() != null) { + params.withTimeout(stopRequest.getTimeout()); + } + if (stopRequest.getAllowNoMatch() != null) { + request.addParameter(ALLOW_NO_MATCH, stopRequest.getAllowNoMatch().toString()); + } + request.addParameters(params.asMap()); + return request; } static Request previewDataFrameTransform(PreviewDataFrameTransformRequest previewRequest) throws IOException { @@ -130,6 +137,9 @@ static Request getDataFrameTransformStats(GetDataFrameTransformStatsRequest stat if (statsRequest.getPageParams() != null && statsRequest.getPageParams().getSize() != null) { request.addParameter(PageParams.SIZE.getPreferredName(), statsRequest.getPageParams().getSize().toString()); } + if (statsRequest.getAllowNoMatch() != null) { + request.addParameter(ALLOW_NO_MATCH, statsRequest.getAllowNoMatch().toString()); + } return request; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index a5a57e4d6b8fa..584bdad745026 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -61,6 +61,8 @@ import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersResponse; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.client.indices.rollover.RolloverResponse; @@ -1328,4 +1330,28 @@ public void deleteTemplateAsync(DeleteIndexTemplateRequest request, RequestOptio restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::deleteTemplate, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } + + /** + * Synchronously calls the _reload_search_analyzers API + * + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public ReloadAnalyzersResponse reloadAnalyzers(ReloadAnalyzersRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndicesRequestConverters::reloadAnalyzers, options, + ReloadAnalyzersResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously calls the _reload_search_analyzers API + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void reloadAnalyzersAsync(ReloadAnalyzersRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::reloadAnalyzers, options, + ReloadAnalyzersResponse::fromXContent, listener, emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index 1e7e810a91b8f..7de53510b5080 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -50,6 +50,7 @@ import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersRequest; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.common.Strings; @@ -644,4 +645,13 @@ static Request deleteTemplate(DeleteIndexTemplateRequest deleteIndexTemplateRequ request.addParameters(params.asMap()); return request; } + + static Request reloadAnalyzers(ReloadAnalyzersRequest reloadAnalyzersRequest) { + String endpoint = RequestConverters.endpoint(reloadAnalyzersRequest.getIndices(), "_reload_search_analyzers"); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + RequestConverters.Params parameters = new RequestConverters.Params(); + parameters.withIndicesOptions(reloadAnalyzersRequest.indicesOptions()); + request.addParameters(parameters.asMap()); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index e5a98b4632432..651851e345df9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -676,6 +676,9 @@ static Request stopDataFrameAnalytics(StopDataFrameAnalyticsRequest stopRequest) params.putParam( StopDataFrameAnalyticsRequest.ALLOW_NO_MATCH.getPreferredName(), Boolean.toString(stopRequest.getAllowNoMatch())); } + if (stopRequest.getForce() != null) { + params.putParam(StopDataFrameAnalyticsRequest.FORCE.getPreferredName(), Boolean.toString(stopRequest.getForce())); + } request.addParameters(params.asMap()); return request; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index fefb5771dc801..8c29cfaae54e1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -43,6 +43,8 @@ import org.elasticsearch.client.security.EnableUserRequest; import org.elasticsearch.client.security.GetApiKeyRequest; import org.elasticsearch.client.security.GetApiKeyResponse; +import org.elasticsearch.client.security.GetBuiltinPrivilegesRequest; +import org.elasticsearch.client.security.GetBuiltinPrivilegesResponse; import org.elasticsearch.client.security.GetPrivilegesRequest; import org.elasticsearch.client.security.GetPrivilegesResponse; import org.elasticsearch.client.security.GetRoleMappingsRequest; @@ -751,6 +753,34 @@ public void invalidateTokenAsync(InvalidateTokenRequest request, RequestOptions InvalidateTokenResponse::fromXContent, listener, emptySet()); } + /** + * Synchronously get builtin (cluster & index) privilege(s). + * See + * the docs for more. + * + * @param options the request options (e.g. headers), use + * {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the get builtin privileges call + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetBuiltinPrivilegesResponse getBuiltinPrivileges(final RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(GetBuiltinPrivilegesRequest.INSTANCE, + GetBuiltinPrivilegesRequest::getRequest, options, GetBuiltinPrivilegesResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously get builtin (cluster & index) privilege(s). + * See + * the docs for more. + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getBuiltinPrivilegesAsync(final RequestOptions options, final ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(GetBuiltinPrivilegesRequest.INSTANCE, + GetBuiltinPrivilegesRequest::getRequest, options, GetBuiltinPrivilegesResponse::fromXContent, listener, emptySet()); + } + /** * Synchronously get application privilege(s). * See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java index a20dfd1ba328a..69cdd329e3984 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java @@ -35,8 +35,8 @@ * default distribution of Elasticsearch. All of these APIs will 404 if run * against the OSS distribution of Elasticsearch. *
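A minimal usage sketch for the builtin-privileges API added to SecurityClient above. The client method and response names come from this diff; the RestHighLevelClient instance, the wrapping method, and its name are assumptions for illustration only.

// Hedged sketch: fetch the builtin cluster and index privileges through the new API.
static GetBuiltinPrivilegesResponse fetchBuiltinPrivileges(RestHighLevelClient client) throws IOException {
    return client.security().getBuiltinPrivileges(RequestOptions.DEFAULT);
}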

- * See the - * X-Pack APIs on elastic.co for more information. + * See the + * REST APIs on elastic.co for more information. */ public final class XPackClient { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java index 3665ba5bf5009..35ce0f55d717b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java @@ -45,7 +45,7 @@ public Shards shards() { return shards; } - BroadcastResponse(final Shards shards) { + protected BroadcastResponse(final Shards shards) { this.shards = Objects.requireNonNull(shards); } @@ -56,7 +56,7 @@ public Shards shards() { a -> new BroadcastResponse((Shards) a[0])); static { - PARSER.declareObject(ConstructingObjectParser.constructorArg(), Shards.SHARDS_PARSER, SHARDS_FIELD); + declareShardsField(PARSER); } /** @@ -70,6 +70,10 @@ public static BroadcastResponse fromXContent(final XContentParser parser) throws return PARSER.parse(parser, null); } + protected static void declareShardsField(ConstructingObjectParser PARSER) { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), Shards.SHARDS_PARSER, SHARDS_FIELD); + } + /** * Represents the results of a collection of shards on which a request was executed against. */ diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java index c50f37a27c885..cc69e0bd4cd4e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java @@ -30,6 +30,7 @@ public class GetDataFrameTransformRequest implements Validatable { + public static final String ALLOW_NO_MATCH = "allow_no_match"; /** * Helper method to create a request that will get ALL Data Frame Transforms * @return new {@link GetDataFrameTransformRequest} object for the id "_all" @@ -40,6 +41,7 @@ public static GetDataFrameTransformRequest getAllDataFrameTransformsRequest() { private final List ids; private PageParams pageParams; + private Boolean allowNoMatch; public GetDataFrameTransformRequest(String... 
ids) { this.ids = Arrays.asList(ids); @@ -57,6 +59,14 @@ public void setPageParams(PageParams pageParams) { this.pageParams = pageParams; } + public Boolean getAllowNoMatch() { + return allowNoMatch; + } + + public void setAllowNoMatch(Boolean allowNoMatch) { + this.allowNoMatch = allowNoMatch; + } + @Override public Optional validate() { if (ids == null || ids.isEmpty()) { @@ -70,7 +80,7 @@ public Optional validate() { @Override public int hashCode() { - return Objects.hash(ids, pageParams); + return Objects.hash(ids, pageParams, allowNoMatch); } @Override @@ -83,6 +93,8 @@ public boolean equals(Object obj) { return false; } GetDataFrameTransformRequest other = (GetDataFrameTransformRequest) obj; - return Objects.equals(ids, other.ids) && Objects.equals(pageParams, other.pageParams); + return Objects.equals(ids, other.ids) + && Objects.equals(pageParams, other.pageParams) + && Objects.equals(allowNoMatch, other.allowNoMatch); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java index 4a105f7b40c7e..7522ae0d67c26 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java @@ -29,6 +29,7 @@ public class GetDataFrameTransformStatsRequest implements Validatable { private final String id; private PageParams pageParams; + private Boolean allowNoMatch; public GetDataFrameTransformStatsRequest(String id) { this.id = id; @@ -46,6 +47,14 @@ public void setPageParams(PageParams pageParams) { this.pageParams = pageParams; } + public Boolean getAllowNoMatch() { + return allowNoMatch; + } + + public void setAllowNoMatch(Boolean allowNoMatch) { + this.allowNoMatch = allowNoMatch; + } + @Override public Optional validate() { if (id == null) { @@ -59,7 +68,7 @@ public Optional validate() { @Override public int hashCode() { - return Objects.hash(id, pageParams); + return Objects.hash(id, pageParams, allowNoMatch); } @Override @@ -72,6 +81,8 @@ public boolean equals(Object obj) { return false; } GetDataFrameTransformStatsRequest other = (GetDataFrameTransformStatsRequest) obj; - return Objects.equals(id, other.id) && Objects.equals(pageParams, other.pageParams); + return Objects.equals(id, other.id) + && Objects.equals(pageParams, other.pageParams) + && Objects.equals(allowNoMatch, other.allowNoMatch); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java index 5d5f67dd65ed2..40e87b5768b5d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java @@ -29,23 +29,32 @@ public class PreviewDataFrameTransformResponse { private static final String PREVIEW = "preview"; + private static final String MAPPINGS = "mappings"; @SuppressWarnings("unchecked") public static PreviewDataFrameTransformResponse fromXContent(final XContentParser parser) throws IOException { - Object previewDocs = parser.map().get(PREVIEW); - return new PreviewDataFrameTransformResponse((List>) previewDocs); + Map 
previewMap = parser.mapOrdered(); + Object previewDocs = previewMap.get(PREVIEW); + Object mappings = previewMap.get(MAPPINGS); + return new PreviewDataFrameTransformResponse((List>) previewDocs, (Map) mappings); } private List> docs; + private Map mappings; - public PreviewDataFrameTransformResponse(List> docs) { + public PreviewDataFrameTransformResponse(List> docs, Map mappings) { this.docs = docs; + this.mappings = mappings; } public List> getDocs() { return docs; } + public Map getMappings() { + return mappings; + } + @Override public boolean equals(Object obj) { if (obj == this) { @@ -57,12 +66,12 @@ public boolean equals(Object obj) { } PreviewDataFrameTransformResponse other = (PreviewDataFrameTransformResponse) obj; - return Objects.equals(other.docs, docs); + return Objects.equals(other.docs, docs) && Objects.equals(other.mappings, mappings); } @Override public int hashCode() { - return Objects.hashCode(docs); + return Objects.hash(docs, mappings); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformRequest.java index 0bc690ad79076..4fb6164f2cca9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformRequest.java @@ -31,6 +31,7 @@ public class StopDataFrameTransformRequest implements Validatable { private final String id; private Boolean waitForCompletion; private TimeValue timeout; + private Boolean allowNoMatch; public StopDataFrameTransformRequest(String id) { this.id = id; @@ -64,6 +65,14 @@ public TimeValue getTimeout() { return timeout; } + public Boolean getAllowNoMatch() { + return allowNoMatch; + } + + public void setAllowNoMatch(Boolean allowNoMatch) { + this.allowNoMatch = allowNoMatch; + } + @Override public Optional validate() { if (id == null) { @@ -77,7 +86,7 @@ public Optional validate() { @Override public int hashCode() { - return Objects.hash(id, waitForCompletion, timeout); + return Objects.hash(id, waitForCompletion, timeout, allowNoMatch); } @Override @@ -92,7 +101,8 @@ public boolean equals(Object obj) { StopDataFrameTransformRequest other = (StopDataFrameTransformRequest) obj; return Objects.equals(this.id, other.id) && Objects.equals(this.waitForCompletion, other.waitForCompletion) - && Objects.equals(this.timeout, other.timeout); + && Objects.equals(this.timeout, other.timeout) + && Objects.equals(this.allowNoMatch, other.allowNoMatch); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java index 352cbfb67fcde..186c67bf42ce2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java @@ -43,6 +43,7 @@ public class DataFrameTransformState { private static final ParseField CHECKPOINT = new ParseField("checkpoint"); private static final ParseField REASON = new ParseField("reason"); private static final ParseField PROGRESS = new ParseField("progress"); + private static final ParseField NODE = new ParseField("node"); @SuppressWarnings("unchecked") public static final 
ConstructingObjectParser PARSER = @@ -52,7 +53,8 @@ public class DataFrameTransformState { (Map) args[2], (long) args[3], (String) args[4], - (DataFrameTransformProgress) args[5])); + (DataFrameTransformProgress) args[5], + (NodeAttributes) args[6])); static { PARSER.declareField(constructorArg(), p -> DataFrameTransformTaskState.fromString(p.text()), TASK_STATE, ValueType.STRING); @@ -61,6 +63,7 @@ public class DataFrameTransformState { PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CHECKPOINT); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REASON); PARSER.declareField(optionalConstructorArg(), DataFrameTransformProgress::fromXContent, PROGRESS, ValueType.OBJECT); + PARSER.declareField(optionalConstructorArg(), NodeAttributes.PARSER::apply, NODE, ValueType.OBJECT); } public static DataFrameTransformState fromXContent(XContentParser parser) throws IOException { @@ -73,19 +76,22 @@ public static DataFrameTransformState fromXContent(XContentParser parser) throws private final Map currentPosition; private final String reason; private final DataFrameTransformProgress progress; + private final NodeAttributes node; public DataFrameTransformState(DataFrameTransformTaskState taskState, IndexerState indexerState, @Nullable Map position, long checkpoint, @Nullable String reason, - @Nullable DataFrameTransformProgress progress) { + @Nullable DataFrameTransformProgress progress, + @Nullable NodeAttributes node) { this.taskState = taskState; this.indexerState = indexerState; this.currentPosition = position == null ? null : Collections.unmodifiableMap(new LinkedHashMap<>(position)); this.checkpoint = checkpoint; this.reason = reason; this.progress = progress; + this.node = node; } public IndexerState getIndexerState() { @@ -115,6 +121,11 @@ public DataFrameTransformProgress getProgress() { return progress; } + @Nullable + public NodeAttributes getNode() { + return node; + } + @Override public boolean equals(Object other) { if (this == other) { @@ -132,12 +143,13 @@ public boolean equals(Object other) { Objects.equals(this.currentPosition, that.currentPosition) && Objects.equals(this.progress, that.progress) && this.checkpoint == that.checkpoint && + Objects.equals(this.node, that.node) && Objects.equals(this.reason, that.reason); } @Override public int hashCode() { - return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason, progress); + return Objects.hash(taskState, indexerState, currentPosition, checkpoint, reason, progress, node); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/NodeAttributes.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/NodeAttributes.java new file mode 100644 index 0000000000000..85c2b9644c2fd --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/NodeAttributes.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * A Pojo class containing an Elastic Node's attributes + */ +public class NodeAttributes implements ToXContentObject { + + public static final ParseField ID = new ParseField("id"); + public static final ParseField NAME = new ParseField("name"); + public static final ParseField EPHEMERAL_ID = new ParseField("ephemeral_id"); + public static final ParseField TRANSPORT_ADDRESS = new ParseField("transport_address"); + public static final ParseField ATTRIBUTES = new ParseField("attributes"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("node", true, + (a) -> { + int i = 0; + String id = (String) a[i++]; + String name = (String) a[i++]; + String ephemeralId = (String) a[i++]; + String transportAddress = (String) a[i++]; + Map attributes = (Map) a[i]; + return new NodeAttributes(id, name, ephemeralId, transportAddress, attributes); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); + PARSER.declareString(ConstructingObjectParser.constructorArg(), EPHEMERAL_ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), TRANSPORT_ADDRESS); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p, c) -> p.mapStrings(), + ATTRIBUTES, + ObjectParser.ValueType.OBJECT); + } + + private final String id; + private final String name; + private final String ephemeralId; + private final String transportAddress; + private final Map attributes; + + public NodeAttributes(String id, String name, String ephemeralId, String transportAddress, Map attributes) { + this.id = id; + this.name = name; + this.ephemeralId = ephemeralId; + this.transportAddress = transportAddress; + this.attributes = Collections.unmodifiableMap(attributes); + } + + /** + * The unique identifier of the node. + */ + public String getId() { + return id; + } + + /** + * The node name. + */ + public String getName() { + return name; + } + + /** + * The ephemeral id of the node. + */ + public String getEphemeralId() { + return ephemeralId; + } + + /** + * The host and port where transport HTTP connections are accepted. 
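Since DataFrameTransformState now carries the assigned node (the NODE field added above), callers can report where a transform task runs. The sketch below uses only accessors that appear in this diff; the wrapping method and the "unassigned" fallback are illustrative assumptions.

// Hedged sketch: summarize the node a transform task is assigned to, if any.
static String describeAssignedNode(DataFrameTransformState state) {
    NodeAttributes node = state.getNode();    // null when the task is not assigned to a node
    if (node == null) {
        return "unassigned";
    }
    return node.getName() + " [" + node.getId() + "] @ " + node.getTransportAddress();
}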
+ */ + public String getTransportAddress() { + return transportAddress; + } + + /** + * Additional attributes related to this node + */ + public Map getAttributes() { + return attributes; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID.getPreferredName(), id); + builder.field(NAME.getPreferredName(), name); + builder.field(EPHEMERAL_ID.getPreferredName(), ephemeralId); + builder.field(TRANSPORT_ADDRESS.getPreferredName(), transportAddress); + builder.field(ATTRIBUTES.getPreferredName(), attributes); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id, name, ephemeralId, transportAddress, attributes); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + NodeAttributes that = (NodeAttributes) other; + return Objects.equals(id, that.id) && + Objects.equals(name, that.name) && + Objects.equals(ephemeralId, that.ephemeralId) && + Objects.equals(transportAddress, that.transportAddress) && + Objects.equals(attributes, that.attributes); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java index d880bfd82140b..c8fb885896d9c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java @@ -45,7 +45,6 @@ public class DateHistogramGroupSource extends SingleGroupSource implements ToXContentObject { private static final ParseField TIME_ZONE = new ParseField("time_zone"); - private static final ParseField FORMAT = new ParseField("format"); // From DateHistogramAggregationBuilder in core, transplanted and modified to a set // so we don't need to import a dependency on the class @@ -195,8 +194,7 @@ public int hashCode() { } ZoneId zoneId = (ZoneId) args[3]; - String format = (String) args[4]; - return new DateHistogramGroupSource(field, interval, format, zoneId); + return new DateHistogramGroupSource(field, interval, zoneId); }); static { @@ -212,8 +210,6 @@ public int hashCode() { return ZoneOffset.ofHours(p.intValue()); } }, TIME_ZONE, ObjectParser.ValueType.LONG); - - PARSER.declareString(optionalConstructorArg(), FORMAT); } public static DateHistogramGroupSource fromXContent(final XContentParser parser) { @@ -221,13 +217,11 @@ public static DateHistogramGroupSource fromXContent(final XContentParser parser) } private final Interval interval; - private final String format; private final ZoneId timeZone; - DateHistogramGroupSource(String field, Interval interval, String format, ZoneId timeZone) { + DateHistogramGroupSource(String field, Interval interval, ZoneId timeZone) { super(field); this.interval = interval; - this.format = format; this.timeZone = timeZone; } @@ -240,10 +234,6 @@ public Interval getInterval() { return interval; } - public String getFormat() { - return format; - } - public ZoneId getTimeZone() { return timeZone; } @@ -258,9 +248,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if 
(timeZone != null) { builder.field(TIME_ZONE.getPreferredName(), timeZone.toString()); } - if (format != null) { - builder.field(FORMAT.getPreferredName(), format); - } builder.endObject(); return builder; } @@ -279,13 +266,12 @@ public boolean equals(Object other) { return Objects.equals(this.field, that.field) && Objects.equals(this.interval, that.interval) && - Objects.equals(this.timeZone, that.timeZone) && - Objects.equals(this.format, that.format); + Objects.equals(this.timeZone, that.timeZone); } @Override public int hashCode() { - return Objects.hash(field, interval, timeZone, format); + return Objects.hash(field, interval, timeZone); } public static Builder builder() { @@ -296,7 +282,6 @@ public static class Builder { private String field; private Interval interval; - private String format; private ZoneId timeZone; /** @@ -319,16 +304,6 @@ public Builder setInterval(Interval interval) { return this; } - /** - * Set the optional String formatting for the time interval. - * @param format The format of the output for the time interval key - * @return The {@link Builder} with the format set. - */ - public Builder setFormat(String format) { - this.format = format; - return this; - } - /** * Sets the time zone to use for this aggregation * @param timeZone The zoneId for the timeZone @@ -340,7 +315,7 @@ public Builder setTimeZone(ZoneId timeZone) { } public DateHistogramGroupSource build() { - return new DateHistogramGroupSource(field, interval, format, timeZone); + return new DateHistogramGroupSource(field, interval, timeZone); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersRequest.java new file mode 100644 index 0000000000000..e815d91bbe8f5 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersRequest.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Validatable; + +import java.util.Objects; + +/** + * Request for the _reload_search_analyzers API + */ +public final class ReloadAnalyzersRequest implements Validatable { + + private final String[] indices; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + /** + * Creates a new reload analyzers request + * @param indices the index for which to reload analyzers + */ + public ReloadAnalyzersRequest(String... 
indices) { + this.indices = Objects.requireNonNull(indices); + } + + /** + * Returns the indices + */ + public String[] getIndices() { + return indices; + } + + /** + * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. + * For example indices that don't exist. + * + * @return the current behaviour when it comes to index names and wildcard indices expressions + */ + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + /** + * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. + * For example indices that don't exist. + * + * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions + */ + public void setIndicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersResponse.java new file mode 100644 index 0000000000000..e2c39d0a7aeba --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/ReloadAnalyzersResponse.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
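A usage sketch for the reload-search-analyzers API that the new ReloadAnalyzersRequest (above) and ReloadAnalyzersResponse (below) support. The request, response, and IndicesClient method names are taken from this diff; the client instance, index name, and wrapping method are placeholders.

// Hedged sketch: reload search analyzers for one index and print what was reloaded where.
static void reloadSearchAnalyzers(RestHighLevelClient client) throws IOException {
    ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("my-index");
    request.setIndicesOptions(IndicesOptions.lenientExpandOpen());
    ReloadAnalyzersResponse response = client.indices().reloadAnalyzers(request, RequestOptions.DEFAULT);
    response.getReloadedDetails().forEach((index, details) ->
        System.out.println(index + ": reloaded " + details.getReloadedAnalyzers()
            + " on nodes " + details.getReloadedIndicesNodes()));
}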
+ */ +package org.elasticsearch.client.indices; + +import org.elasticsearch.client.core.BroadcastResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * The response object that will be returned when reloading analyzers + */ +public class ReloadAnalyzersResponse extends BroadcastResponse { + + private final Map reloadDetails; + + ReloadAnalyzersResponse(final Shards shards, Map reloadDetails) { + super(shards); + this.reloadDetails = reloadDetails; + } + + @SuppressWarnings({ "unchecked" }) + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("reload_analyzer", + true, arg -> { + Shards shards = (Shards) arg[0]; + List> results = (List>) arg[1]; + Map reloadDetails = new HashMap<>(); + for (Tuple result : results) { + reloadDetails.put(result.v1(), result.v2()); + } + return new ReloadAnalyzersResponse(shards, reloadDetails); + }); + + @SuppressWarnings({ "unchecked" }) + private static final ConstructingObjectParser, Void> ENTRY_PARSER = new ConstructingObjectParser<>( + "reload_analyzer.entry", true, arg -> { + String index = (String) arg[0]; + Set nodeIds = new HashSet<>((List) arg[1]); + Set analyzers = new HashSet<>((List) arg[2]); + return new Tuple<>(index, new ReloadDetails(index, nodeIds, analyzers)); + }); + + static { + declareShardsField(PARSER); + PARSER.declareObjectArray(constructorArg(), ENTRY_PARSER, new ParseField("reload_details")); + ENTRY_PARSER.declareString(constructorArg(), new ParseField("index")); + ENTRY_PARSER.declareStringArray(constructorArg(), new ParseField("reloaded_node_ids")); + ENTRY_PARSER.declareStringArray(constructorArg(), new ParseField("reloaded_analyzers")); + } + + public static ReloadAnalyzersResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public Map getReloadedDetails() { + return reloadDetails; + } + + public static class ReloadDetails { + + private final String indexName; + private final Set reloadedIndicesNodes; + private final Set reloadedAnalyzers; + + public ReloadDetails(String name, Set reloadedIndicesNodes, Set reloadedAnalyzers) { + this.indexName = name; + this.reloadedIndicesNodes = reloadedIndicesNodes; + this.reloadedAnalyzers = reloadedAnalyzers; + } + + public String getIndexName() { + return indexName; + } + + public Set getReloadedIndicesNodes() { + return reloadedIndicesNodes; + } + + public Set getReloadedAnalyzers() { + return reloadedAnalyzers; + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDataFrameAnalyticsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDataFrameAnalyticsRequest.java index 14950a74c9187..2624b68a98318 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDataFrameAnalyticsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDataFrameAnalyticsRequest.java @@ -22,6 +22,7 @@ import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; import org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsConfig; +import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -67,4 +68,9 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(config); } + + @Override + public String toString() { + return Strings.toString(this); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StopDataFrameAnalyticsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StopDataFrameAnalyticsRequest.java index 9608d40fc7d16..4ba6af852f61c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StopDataFrameAnalyticsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/StopDataFrameAnalyticsRequest.java @@ -31,10 +31,12 @@ public class StopDataFrameAnalyticsRequest implements Validatable { public static final ParseField ALLOW_NO_MATCH = new ParseField("allow_no_match"); + public static final ParseField FORCE = new ParseField("force"); private final String id; - private TimeValue timeout; private Boolean allowNoMatch; + private Boolean force; + private TimeValue timeout; public StopDataFrameAnalyticsRequest(String id) { this.id = id; @@ -62,6 +64,15 @@ public StopDataFrameAnalyticsRequest setAllowNoMatch(boolean allowNoMatch) { return this; } + public Boolean getForce() { + return force; + } + + public StopDataFrameAnalyticsRequest setForce(boolean force) { + this.force = force; + return this; + } + @Override public Optional validate() { if (id == null) { @@ -78,11 +89,12 @@ public boolean equals(Object o) { StopDataFrameAnalyticsRequest other = (StopDataFrameAnalyticsRequest) o; return Objects.equals(id, other.id) && Objects.equals(timeout, other.timeout) - && Objects.equals(allowNoMatch, other.allowNoMatch); + && Objects.equals(allowNoMatch, other.allowNoMatch) + && Objects.equals(force, other.force); } @Override public int hashCode() { - return Objects.hash(id, timeout, allowNoMatch); + return Objects.hash(id, timeout, allowNoMatch, force); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java index b1309e66afcd4..62adb06294558 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java @@ -19,11 +19,14 @@ package org.elasticsearch.client.ml.dataframe; +import org.elasticsearch.Version; +import org.elasticsearch.client.dataframe.transforms.util.TimeUtil; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -31,11 +34,9 @@ import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; +import java.time.Instant; import java.util.Objects; -import static org.elasticsearch.common.xcontent.ObjectParser.ValueType.OBJECT_ARRAY_BOOLEAN_OR_STRING; -import static org.elasticsearch.common.xcontent.ObjectParser.ValueType.VALUE; - public class 
DataFrameAnalyticsConfig implements ToXContentObject { public static DataFrameAnalyticsConfig fromXContent(XContentParser parser) { @@ -52,6 +53,8 @@ public static Builder builder(String id) { private static final ParseField ANALYSIS = new ParseField("analysis"); private static final ParseField ANALYZED_FIELDS = new ParseField("analyzed_fields"); private static final ParseField MODEL_MEMORY_LIMIT = new ParseField("model_memory_limit"); + private static final ParseField CREATE_TIME = new ParseField("create_time"); + private static final ParseField VERSION = new ParseField("version"); private static ObjectParser PARSER = new ObjectParser<>("data_frame_analytics_config", true, Builder::new); @@ -63,9 +66,24 @@ public static Builder builder(String id) { PARSER.declareField(Builder::setAnalyzedFields, (p, c) -> FetchSourceContext.fromXContent(p), ANALYZED_FIELDS, - OBJECT_ARRAY_BOOLEAN_OR_STRING); + ValueType.OBJECT_ARRAY_BOOLEAN_OR_STRING); PARSER.declareField(Builder::setModelMemoryLimit, - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()), MODEL_MEMORY_LIMIT, VALUE); + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()), + MODEL_MEMORY_LIMIT, + ValueType.VALUE); + PARSER.declareField(Builder::setCreateTime, + p -> TimeUtil.parseTimeFieldToInstant(p, CREATE_TIME.getPreferredName()), + CREATE_TIME, + ValueType.VALUE); + PARSER.declareField(Builder::setVersion, + p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Version.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, + VERSION, + ValueType.STRING); } private static DataFrameAnalysis parseAnalysis(XContentParser parser) throws IOException { @@ -82,15 +100,20 @@ private static DataFrameAnalysis parseAnalysis(XContentParser parser) throws IOE private final DataFrameAnalysis analysis; private final FetchSourceContext analyzedFields; private final ByteSizeValue modelMemoryLimit; + private final Instant createTime; + private final Version version; private DataFrameAnalyticsConfig(String id, DataFrameAnalyticsSource source, DataFrameAnalyticsDest dest, DataFrameAnalysis analysis, - @Nullable FetchSourceContext analyzedFields, @Nullable ByteSizeValue modelMemoryLimit) { + @Nullable FetchSourceContext analyzedFields, @Nullable ByteSizeValue modelMemoryLimit, + @Nullable Instant createTime, @Nullable Version version) { this.id = Objects.requireNonNull(id); this.source = Objects.requireNonNull(source); this.dest = Objects.requireNonNull(dest); this.analysis = Objects.requireNonNull(analysis); this.analyzedFields = analyzedFields; this.modelMemoryLimit = modelMemoryLimit; + this.createTime = createTime == null ? 
null : Instant.ofEpochMilli(createTime.toEpochMilli());; + this.version = version; } public String getId() { @@ -117,6 +140,14 @@ public ByteSizeValue getModelMemoryLimit() { return modelMemoryLimit; } + public Instant getCreateTime() { + return createTime; + } + + public Version getVersion() { + return version; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -132,6 +163,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (modelMemoryLimit != null) { builder.field(MODEL_MEMORY_LIMIT.getPreferredName(), modelMemoryLimit.getStringRep()); } + if (createTime != null) { + builder.timeField(CREATE_TIME.getPreferredName(), CREATE_TIME.getPreferredName() + "_string", createTime.toEpochMilli()); + } + if (version != null) { + builder.field(VERSION.getPreferredName(), version); + } builder.endObject(); return builder; } @@ -147,12 +184,14 @@ public boolean equals(Object o) { && Objects.equals(dest, other.dest) && Objects.equals(analysis, other.analysis) && Objects.equals(analyzedFields, other.analyzedFields) - && Objects.equals(modelMemoryLimit, other.modelMemoryLimit); + && Objects.equals(modelMemoryLimit, other.modelMemoryLimit) + && Objects.equals(createTime, other.createTime) + && Objects.equals(version, other.version); } @Override public int hashCode() { - return Objects.hash(id, source, dest, analysis, analyzedFields, getModelMemoryLimit()); + return Objects.hash(id, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime, version); } @Override @@ -168,6 +207,8 @@ public static class Builder { private DataFrameAnalysis analysis; private FetchSourceContext analyzedFields; private ByteSizeValue modelMemoryLimit; + private Instant createTime; + private Version version; private Builder() {} @@ -201,8 +242,18 @@ public Builder setModelMemoryLimit(ByteSizeValue modelMemoryLimit) { return this; } + public Builder setCreateTime(Instant createTime) { + this.createTime = createTime; + return this; + } + + public Builder setVersion(Version version) { + this.version = version; + return this; + } + public DataFrameAnalyticsConfig build() { - return new DataFrameAnalyticsConfig(id, source, dest, analysis, analyzedFields, modelMemoryLimit); + return new DataFrameAnalyticsConfig(id, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime, version); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java index c36799cd3b4a7..9a6de159bea3e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSource.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Arrays; +import java.util.List; import java.util.Objects; public class DataFrameAnalyticsSource implements ToXContentObject { @@ -46,19 +48,19 @@ public static Builder builder() { private static ObjectParser PARSER = new ObjectParser<>("data_frame_analytics_source", true, Builder::new); static { - PARSER.declareString(Builder::setIndex, INDEX); + PARSER.declareStringArray(Builder::setIndex, INDEX); PARSER.declareObject(Builder::setQueryConfig, (p, c) -> QueryConfig.fromXContent(p), QUERY); } - private final 
String index; + private final String[] index; private final QueryConfig queryConfig; - private DataFrameAnalyticsSource(String index, @Nullable QueryConfig queryConfig) { + private DataFrameAnalyticsSource(String[] index, @Nullable QueryConfig queryConfig) { this.index = Objects.requireNonNull(index); this.queryConfig = queryConfig; } - public String getIndex() { + public String[] getIndex() { return index; } @@ -83,13 +85,13 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; DataFrameAnalyticsSource other = (DataFrameAnalyticsSource) o; - return Objects.equals(index, other.index) + return Arrays.equals(index, other.index) && Objects.equals(queryConfig, other.queryConfig); } @Override public int hashCode() { - return Objects.hash(index, queryConfig); + return Objects.hash(Arrays.asList(index), queryConfig); } @Override @@ -99,16 +101,21 @@ public String toString() { public static class Builder { - private String index; + private String[] index; private QueryConfig queryConfig; private Builder() {} - public Builder setIndex(String index) { + public Builder setIndex(String... index) { this.index = index; return this; } + public Builder setIndex(List index) { + this.index = index.toArray(new String[0]); + return this; + } + public Builder setQueryConfig(QueryConfig queryConfig) { this.queryConfig = queryConfig; return this; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java index 5c652f33edb2e..4e04204e65021 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsStats.java @@ -41,6 +41,7 @@ public static DataFrameAnalyticsStats fromXContent(XContentParser parser) throws static final ParseField ID = new ParseField("id"); static final ParseField STATE = new ParseField("state"); + static final ParseField FAILURE_REASON = new ParseField("failure_reason"); static final ParseField PROGRESS_PERCENT = new ParseField("progress_percent"); static final ParseField NODE = new ParseField("node"); static final ParseField ASSIGNMENT_EXPLANATION = new ParseField("assignment_explanation"); @@ -50,9 +51,10 @@ public static DataFrameAnalyticsStats fromXContent(XContentParser parser) throws args -> new DataFrameAnalyticsStats( (String) args[0], (DataFrameAnalyticsState) args[1], - (Integer) args[2], - (NodeAttributes) args[3], - (String) args[4])); + (String) args[2], + (Integer) args[3], + (NodeAttributes) args[4], + (String) args[5])); static { PARSER.declareString(constructorArg(), ID); @@ -62,6 +64,7 @@ public static DataFrameAnalyticsStats fromXContent(XContentParser parser) throws } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, STATE, ObjectParser.ValueType.STRING); + PARSER.declareString(optionalConstructorArg(), FAILURE_REASON); PARSER.declareInt(optionalConstructorArg(), PROGRESS_PERCENT); PARSER.declareObject(optionalConstructorArg(), NodeAttributes.PARSER, NODE); PARSER.declareString(optionalConstructorArg(), ASSIGNMENT_EXPLANATION); @@ -69,14 +72,17 @@ public static DataFrameAnalyticsStats fromXContent(XContentParser parser) throws private final String id; private final DataFrameAnalyticsState state; + private final String failureReason; private final Integer progressPercent; private final 
NodeAttributes node; private final String assignmentExplanation; - public DataFrameAnalyticsStats(String id, DataFrameAnalyticsState state, @Nullable Integer progressPercent, - @Nullable NodeAttributes node, @Nullable String assignmentExplanation) { + public DataFrameAnalyticsStats(String id, DataFrameAnalyticsState state, @Nullable String failureReason, + @Nullable Integer progressPercent, @Nullable NodeAttributes node, + @Nullable String assignmentExplanation) { this.id = id; this.state = state; + this.failureReason = failureReason; this.progressPercent = progressPercent; this.node = node; this.assignmentExplanation = assignmentExplanation; @@ -90,6 +96,10 @@ public DataFrameAnalyticsState getState() { return state; } + public String getFailureReason() { + return failureReason; + } + public Integer getProgressPercent() { return progressPercent; } @@ -110,6 +120,7 @@ public boolean equals(Object o) { DataFrameAnalyticsStats other = (DataFrameAnalyticsStats) o; return Objects.equals(id, other.id) && Objects.equals(state, other.state) + && Objects.equals(failureReason, other.failureReason) && Objects.equals(progressPercent, other.progressPercent) && Objects.equals(node, other.node) && Objects.equals(assignmentExplanation, other.assignmentExplanation); @@ -117,7 +128,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(id, state, progressPercent, node, assignmentExplanation); + return Objects.hash(id, state, failureReason, progressPercent, node, assignmentExplanation); } @Override @@ -125,6 +136,7 @@ public String toString() { return new ToStringBuilder(getClass()) .add("id", id) .add("state", state) + .add("failureReason", failureReason) .add("progressPercent", progressPercent) .add("node", node) .add("assignmentExplanation", assignmentExplanation) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java index 946c01ac5c835..fe5094fb7190a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java @@ -47,8 +47,7 @@ public static Builder builder() { public static final ParseField NAME = new ParseField("outlier_detection"); static final ParseField N_NEIGHBORS = new ParseField("n_neighbors"); static final ParseField METHOD = new ParseField("method"); - public static final ParseField MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE = - new ParseField("minimum_score_to_write_feature_influence"); + public static final ParseField FEATURE_INFLUENCE_THRESHOLD = new ParseField("feature_influence_threshold"); private static ObjectParser PARSER = new ObjectParser<>(NAME.getPreferredName(), true, Builder::new); @@ -60,23 +59,23 @@ public static Builder builder() { } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, METHOD, ObjectParser.ValueType.STRING); - PARSER.declareDouble(Builder::setMinScoreToWriteFeatureInfluence, MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE); + PARSER.declareDouble(Builder::setFeatureInfluenceThreshold, FEATURE_INFLUENCE_THRESHOLD); } private final Integer nNeighbors; private final Method method; - private final Double minScoreToWriteFeatureInfluence; + private final Double featureInfluenceThreshold; /** * Constructs the outlier detection configuration * @param nNeighbors The number of neighbors. 
Leave unspecified for dynamic detection. * @param method The method. Leave unspecified for a dynamic mixture of methods. - * @param minScoreToWriteFeatureInfluence The min outlier score required to calculate feature influence. Defaults to 0.1. + * @param featureInfluenceThreshold The min outlier score required to calculate feature influence. Defaults to 0.1. */ - private OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double minScoreToWriteFeatureInfluence) { + private OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double featureInfluenceThreshold) { this.nNeighbors = nNeighbors; this.method = method; - this.minScoreToWriteFeatureInfluence = minScoreToWriteFeatureInfluence; + this.featureInfluenceThreshold = featureInfluenceThreshold; } @Override @@ -92,8 +91,8 @@ public Method getMethod() { return method; } - public Double getMinScoreToWriteFeatureInfluence() { - return minScoreToWriteFeatureInfluence; + public Double getFeatureInfluenceThreshold() { + return featureInfluenceThreshold; } @Override @@ -105,8 +104,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (method != null) { builder.field(METHOD.getPreferredName(), method); } - if (minScoreToWriteFeatureInfluence != null) { - builder.field(MINIMUM_SCORE_TO_WRITE_FEATURE_INFLUENCE.getPreferredName(), minScoreToWriteFeatureInfluence); + if (featureInfluenceThreshold != null) { + builder.field(FEATURE_INFLUENCE_THRESHOLD.getPreferredName(), featureInfluenceThreshold); } builder.endObject(); return builder; @@ -120,12 +119,12 @@ public boolean equals(Object o) { OutlierDetection other = (OutlierDetection) o; return Objects.equals(nNeighbors, other.nNeighbors) && Objects.equals(method, other.method) - && Objects.equals(minScoreToWriteFeatureInfluence, other.minScoreToWriteFeatureInfluence); + && Objects.equals(featureInfluenceThreshold, other.featureInfluenceThreshold); } @Override public int hashCode() { - return Objects.hash(nNeighbors, method, minScoreToWriteFeatureInfluence); + return Objects.hash(nNeighbors, method, featureInfluenceThreshold); } @Override @@ -150,7 +149,7 @@ public static class Builder { private Integer nNeighbors; private Method method; - private Double minScoreToWriteFeatureInfluence; + private Double featureInfluenceThreshold; private Builder() {} @@ -164,13 +163,13 @@ public Builder setMethod(Method method) { return this; } - public Builder setMinScoreToWriteFeatureInfluence(Double minScoreToWriteFeatureInfluence) { - this.minScoreToWriteFeatureInfluence = minScoreToWriteFeatureInfluence; + public Builder setFeatureInfluenceThreshold(Double featureInfluenceThreshold) { + this.featureInfluenceThreshold = featureInfluenceThreshold; return this; } public OutlierDetection build() { - return new OutlierDetection(nNeighbors, method, minScoreToWriteFeatureInfluence); + return new OutlierDetection(nNeighbors, method, featureInfluenceThreshold); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java index 6c1b394355e1e..4f1302533d9b5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/CreateTokenRequest.java @@ -40,6 +40,7 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject { private final String 
username; private final char[] password; private final String refreshToken; + private final char[] kerberosTicket; /** * General purpose constructor. This constructor is typically not useful, and one of the following factory methods should be used @@ -48,10 +49,11 @@ public final class CreateTokenRequest implements Validatable, ToXContentObject { *

 * <ul>
 * <li>{@link #passwordGrant(String, char[])}</li>
 * <li>{@link #refreshTokenGrant(String)}</li>
 * <li>{@link #clientCredentialsGrant()}</li>
+ * <li>
  • * */ public CreateTokenRequest(String grantType, @Nullable String scope, @Nullable String username, @Nullable char[] password, - @Nullable String refreshToken) { + @Nullable String refreshToken, @Nullable char[] kerberosTicket) { if (Strings.isNullOrEmpty(grantType)) { throw new IllegalArgumentException("grant_type is required"); } @@ -60,6 +62,7 @@ public CreateTokenRequest(String grantType, @Nullable String scope, @Nullable St this.password = password; this.scope = scope; this.refreshToken = refreshToken; + this.kerberosTicket = kerberosTicket; } public static CreateTokenRequest passwordGrant(String username, char[] password) { @@ -69,18 +72,25 @@ public static CreateTokenRequest passwordGrant(String username, char[] password) if (password == null || password.length == 0) { throw new IllegalArgumentException("password is required"); } - return new CreateTokenRequest("password", null, username, password, null); + return new CreateTokenRequest("password", null, username, password, null, null); } public static CreateTokenRequest refreshTokenGrant(String refreshToken) { if (Strings.isNullOrEmpty(refreshToken)) { throw new IllegalArgumentException("refresh_token is required"); } - return new CreateTokenRequest("refresh_token", null, null, null, refreshToken); + return new CreateTokenRequest("refresh_token", null, null, null, refreshToken, null); } public static CreateTokenRequest clientCredentialsGrant() { - return new CreateTokenRequest("client_credentials", null, null, null, null); + return new CreateTokenRequest("client_credentials", null, null, null, null, null); + } + + public static CreateTokenRequest kerberosGrant(char[] kerberosTicket) { + if (kerberosTicket == null || kerberosTicket.length == 0) { + throw new IllegalArgumentException("kerberos ticket is required"); + } + return new CreateTokenRequest("_kerberos", null, null, null, null, kerberosTicket); } public String getGrantType() { @@ -103,6 +113,10 @@ public String getRefreshToken() { return refreshToken; } + public char[] getKerberosTicket() { + return kerberosTicket; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject() @@ -124,6 +138,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (refreshToken != null) { builder.field("refresh_token", refreshToken); } + if (kerberosTicket != null) { + byte[] kerberosTicketBytes = CharArrays.toUtf8Bytes(kerberosTicket); + try { + builder.field("kerberos_ticket").utf8Value(kerberosTicketBytes, 0, kerberosTicketBytes.length); + } finally { + Arrays.fill(kerberosTicketBytes, (byte) 0); + } + } return builder.endObject(); } @@ -140,13 +162,15 @@ public boolean equals(Object o) { Objects.equals(scope, that.scope) && Objects.equals(username, that.username) && Arrays.equals(password, that.password) && - Objects.equals(refreshToken, that.refreshToken); + Objects.equals(refreshToken, that.refreshToken) && + Arrays.equals(kerberosTicket, that.kerberosTicket); } @Override public int hashCode() { int result = Objects.hash(grantType, scope, username, refreshToken); result = 31 * result + Arrays.hashCode(password); + result = 31 * result + Arrays.hashCode(kerberosTicket); return result; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesRequest.java new file mode 100644 index 0000000000000..543665580756c --- /dev/null +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesRequest.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Validatable; + +/** + * Request object to retrieve the privilege that are builtin to the Elasticsearch cluster. + */ +public final class GetBuiltinPrivilegesRequest implements Validatable { + + public static final GetBuiltinPrivilegesRequest INSTANCE = new GetBuiltinPrivilegesRequest(); + + private GetBuiltinPrivilegesRequest() { + } + + public Request getRequest() { + return new Request(HttpGet.METHOD_NAME, "/_security/privilege/_builtin"); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesResponse.java new file mode 100644 index 0000000000000..a900ee4f976f4 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetBuiltinPrivilegesResponse.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collection; +import java.util.Objects; +import java.util.Set; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Get builtin privileges response + */ +public final class GetBuiltinPrivilegesResponse { + + private final Set clusterPrivileges; + private final Set indexPrivileges; + + public GetBuiltinPrivilegesResponse(Collection cluster, Collection index) { + this.clusterPrivileges = Set.copyOf(cluster); + this.indexPrivileges = Set.copyOf(index); + } + + public Set getClusterPrivileges() { + return clusterPrivileges; + } + + public Set getIndexPrivileges() { + return indexPrivileges; + } + + public static GetBuiltinPrivilegesResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetBuiltinPrivilegesResponse that = (GetBuiltinPrivilegesResponse) o; + return Objects.equals(this.clusterPrivileges, that.clusterPrivileges) + && Objects.equals(this.indexPrivileges, that.indexPrivileges); + } + + @Override + public int hashCode() { + return Objects.hash(clusterPrivileges, indexPrivileges); + } + + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_builtin_privileges", true, + args -> new GetBuiltinPrivilegesResponse((Collection) args[0], (Collection) args[1])); + + static { + PARSER.declareStringArray(constructorArg(), new ParseField("cluster")); + PARSER.declareStringArray(constructorArg(), new ParseField("index")); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java index a3263e7f6e920..e8e1a104d8c2c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java @@ -299,10 +299,12 @@ public static class ClusterPrivilegeName { public static final String NONE = "none"; public static final String ALL = "all"; public static final String MONITOR = "monitor"; + public static final String MONITOR_DATA_FRAME_TRANSFORMS = "monitor_data_frame_transforms"; public static final String MONITOR_ML = "monitor_ml"; public static final String MONITOR_WATCHER = "monitor_watcher"; public static final String MONITOR_ROLLUP = "monitor_rollup"; public static final String MANAGE = "manage"; + public static final String MANAGE_DATA_FRAME_TRANSFORMS = "manage_data_frame_transforms"; public static final String MANAGE_ML = "manage_ml"; public static final String MANAGE_WATCHER = "manage_watcher"; public static final String MANAGE_ROLLUP = "manage_rollup"; @@ -318,8 +320,9 @@ public static class ClusterPrivilegeName { public static final String READ_CCR = "read_ccr"; public static final String MANAGE_ILM = "manage_ilm"; public static final String READ_ILM = "read_ilm"; - public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_ML, MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, - MANAGE_ML, MANAGE_WATCHER, 
MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT, + public static final String[] ALL_ARRAY = new String[] { NONE, ALL, MONITOR, MONITOR_DATA_FRAME_TRANSFORMS, MONITOR_ML, + MONITOR_WATCHER, MONITOR_ROLLUP, MANAGE, MANAGE_DATA_FRAME_TRANSFORMS, + MANAGE_ML, MANAGE_WATCHER, MANAGE_ROLLUP, MANAGE_INDEX_TEMPLATES, MANAGE_INGEST_PIPELINES, TRANSPORT_CLIENT, MANAGE_SECURITY, MANAGE_SAML, MANAGE_OIDC, MANAGE_TOKEN, MANAGE_PIPELINE, MANAGE_CCR, READ_CCR, MANAGE_ILM, READ_ILM}; } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java index 8565ca14a908a..2a91a639a5ac3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractResponseTestCase.java @@ -45,7 +45,7 @@ public final void testFromXContent() throws IOException { final S serverTestInstance = createServerTestInstance(); final XContentType xContentType = randomFrom(XContentType.values()); - final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, getParams(), randomBoolean()); final XContent xContent = XContentFactory.xContent(xContentType); final XContentParser parser = xContent.createParser( @@ -62,4 +62,8 @@ public final void testFromXContent() throws IOException { protected abstract void assertInstances(S serverTestInstance, C clientInstance); + protected ToXContent.Params getParams() { + return ToXContent.EMPTY_PARAMS; + } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java index 26a4ade504682..db111904f4704 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java @@ -46,6 +46,7 @@ import java.util.Collections; import java.util.List; +import static org.elasticsearch.client.dataframe.GetDataFrameTransformRequest.ALLOW_NO_MATCH; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; @@ -115,7 +116,6 @@ public void testStopDataFrameTransform() { } StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, waitForCompletion, timeValue); - Request request = DataFrameRequestConverters.stopDataFrameTransform(stopRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/" + stopRequest.getId() + "/_stop")); @@ -133,6 +133,11 @@ public void testStopDataFrameTransform() { } else { assertFalse(request.getParameters().containsKey("timeout")); } + + assertFalse(request.getParameters().containsKey(ALLOW_NO_MATCH)); + stopRequest.setAllowNoMatch(randomBoolean()); + request = DataFrameRequestConverters.stopDataFrameTransform(stopRequest); + assertEquals(stopRequest.getAllowNoMatch(), Boolean.parseBoolean(request.getParameters().get(ALLOW_NO_MATCH))); } public void testPreviewDataFrameTransform() throws IOException { @@ -158,6 +163,7 @@ public void testGetDataFrameTransformStats() { 
assertFalse(request.getParameters().containsKey("from")); assertFalse(request.getParameters().containsKey("size")); + assertFalse(request.getParameters().containsKey(ALLOW_NO_MATCH)); getStatsRequest.setPageParams(new PageParams(0, null)); request = DataFrameRequestConverters.getDataFrameTransformStats(getStatsRequest); @@ -172,6 +178,10 @@ public void testGetDataFrameTransformStats() { getStatsRequest.setPageParams(new PageParams(0, 10)); request = DataFrameRequestConverters.getDataFrameTransformStats(getStatsRequest); assertThat(request.getParameters(), allOf(hasEntry("from", "0"), hasEntry("size", "10"))); + + getStatsRequest.setAllowNoMatch(false); + request = DataFrameRequestConverters.getDataFrameTransformStats(getStatsRequest); + assertThat(request.getParameters(), hasEntry("allow_no_match", "false")); } public void testGetDataFrameTransform() { @@ -183,6 +193,7 @@ public void testGetDataFrameTransform() { assertFalse(request.getParameters().containsKey("from")); assertFalse(request.getParameters().containsKey("size")); + assertFalse(request.getParameters().containsKey(ALLOW_NO_MATCH)); getRequest.setPageParams(new PageParams(0, null)); request = DataFrameRequestConverters.getDataFrameTransform(getRequest); @@ -197,6 +208,10 @@ public void testGetDataFrameTransform() { getRequest.setPageParams(new PageParams(0, 10)); request = DataFrameRequestConverters.getDataFrameTransform(getRequest); assertThat(request.getParameters(), allOf(hasEntry("from", "0"), hasEntry("size", "10"))); + + getRequest.setAllowNoMatch(false); + request = DataFrameRequestConverters.getDataFrameTransform(getRequest); + assertThat(request.getParameters(), hasEntry("allow_no_match", "false")); } public void testGetDataFrameTransform_givenMulitpleIds() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 45c3038b662a8..d7371468b1506 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -71,6 +71,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.oneOf; @@ -258,8 +259,10 @@ public void testStartStop() throws IOException { GetDataFrameTransformStatsResponse statsResponse = execute(new GetDataFrameTransformStatsRequest(id), client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); assertThat(statsResponse.getTransformsStateAndStats(), hasSize(1)); - IndexerState indexerState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState(); - assertThat(indexerState, is(oneOf(IndexerState.STARTED, IndexerState.INDEXING))); + DataFrameTransformTaskState taskState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getTaskState(); + + // Since we are non-continuous, the transform could auto-stop between being started earlier and us gathering the statistics + assertThat(taskState, is(oneOf(DataFrameTransformTaskState.STARTED, DataFrameTransformTaskState.STOPPED))); StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null); StopDataFrameTransformResponse stopResponse = @@ -267,8 
+270,15 @@ public void testStartStop() throws IOException { assertTrue(stopResponse.isAcknowledged()); assertThat(stopResponse.getNodeFailures(), empty()); assertThat(stopResponse.getTaskFailures(), empty()); + + // Calling stop with wait_for_completion assures that we will be in the `STOPPED` state for the transform task + statsResponse = execute(new GetDataFrameTransformStatsRequest(id), + client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); + taskState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getTaskState(); + assertThat(taskState, is(DataFrameTransformTaskState.STOPPED)); } + @SuppressWarnings("unchecked") public void testPreview() throws IOException { String sourceIndex = "transform-source"; createIndex(sourceIndex); @@ -290,6 +300,12 @@ public void testPreview() throws IOException { Optional> michel = docs.stream().filter(doc -> "michel".equals(doc.get("reviewer"))).findFirst(); assertTrue(michel.isPresent()); assertEquals(3.6d, (double) michel.get().get("avg_rating"), 0.1d); + + Map mappings = preview.getMappings(); + assertThat(mappings, hasKey("properties")); + Map fields = (Map)mappings.get("properties"); + assertThat(fields.get("reviewer"), equalTo(Map.of("type", "keyword"))); + assertThat(fields.get("avg_rating"), equalTo(Map.of("type", "double"))); } private DataFrameTransformConfig validDataFrameTransformConfig(String id, String source, String destination) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 458e6371010b0..59d76142566e6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -73,6 +73,8 @@ import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersResponse; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.client.indices.rollover.RolloverResponse; @@ -1877,4 +1879,14 @@ public void testFreezeAndUnfreeze() throws IOException { assertTrue(unfreeze.isShardsAcknowledged()); assertTrue(unfreeze.isAcknowledged()); } + + public void testReloadAnalyzer() throws IOException { + createIndex("test", Settings.EMPTY); + RestHighLevelClient client = highLevelClient(); + + ReloadAnalyzersResponse reloadResponse = execute(new ReloadAnalyzersRequest("test"), client.indices()::reloadAnalyzers, + client.indices()::reloadAnalyzersAsync); + assertNotNull(reloadResponse.shards()); + assertTrue(reloadResponse.getReloadedDetails().containsKey("test")); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java index 8f52dd7b00b6a..d0f4177635797 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java @@ -54,6 +54,7 @@ import org.elasticsearch.client.indices.PutIndexTemplateRequest; import 
org.elasticsearch.client.indices.PutMappingRequest; import org.elasticsearch.client.indices.RandomCreateIndexGenerator; +import org.elasticsearch.client.indices.ReloadAnalyzersRequest; import org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Strings; @@ -1215,4 +1216,21 @@ public void testDeleteTemplateRequest() { Assert.assertThat(request.getParameters(), equalTo(expectedParams)); Assert.assertThat(request.getEntity(), nullValue()); } + + public void testReloadAnalyzers() { + String[] indices = RequestConvertersTests.randomIndicesNames(1, 5); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + endpoint.add(String.join(",", indices)); + } + ReloadAnalyzersRequest reloadRequest = new ReloadAnalyzersRequest(indices); + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomIndicesOptions(reloadRequest::setIndicesOptions, reloadRequest::indicesOptions, + expectedParams); + Request request = IndicesRequestConverters.reloadAnalyzers(reloadRequest); + Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + Assert.assertThat(request.getEndpoint(), equalTo(endpoint + "/_reload_search_analyzers")); + Assert.assertThat(request.getParameters(), equalTo(expectedParams)); + Assert.assertThat(request.getEntity(), nullValue()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 9bb2bb42fd9d7..aff25b4aa2d3b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -758,11 +758,15 @@ public void testStopDataFrameAnalytics() { public void testStopDataFrameAnalytics_WithParams() { StopDataFrameAnalyticsRequest stopRequest = new StopDataFrameAnalyticsRequest(randomAlphaOfLength(10)) .setTimeout(TimeValue.timeValueMinutes(1)) - .setAllowNoMatch(false); + .setAllowNoMatch(false) + .setForce(true); Request request = MLRequestConverters.stopDataFrameAnalytics(stopRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_ml/data_frame/analytics/" + stopRequest.getId() + "/_stop", request.getEndpoint()); - assertThat(request.getParameters(), allOf(hasEntry("timeout", "1m"), hasEntry("allow_no_match", "false"))); + assertThat(request.getParameters(), allOf( + hasEntry("timeout", "1m"), + hasEntry("allow_no_match", "false"), + hasEntry("force", "true"))); assertNull(request.getEntity()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 77efe43b2e174..e44883823c271 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -1359,6 +1359,7 @@ public void testGetDataFrameAnalyticsStats() throws Exception { DataFrameAnalyticsStats stats = statsResponse.getAnalyticsStats().get(0); assertThat(stats.getId(), equalTo(configId)); assertThat(stats.getState(), equalTo(DataFrameAnalyticsState.STOPPED)); + assertNull(stats.getFailureReason()); assertNull(stats.getProgressPercent()); assertNull(stats.getNode()); assertNull(stats.getAssignmentExplanation()); 
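For orientation only, a minimal sketch (not part of the change set) of how the force flag exercised in MLRequestConvertersTests above could be used end to end through the high-level client; the stopDataFrameAnalytics entry point on MachineLearningClient and the isStopped() accessor on the response are assumed from the existing client surface.

    import java.io.IOException;

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.client.ml.StopDataFrameAnalyticsRequest;
    import org.elasticsearch.client.ml.StopDataFrameAnalyticsResponse;
    import org.elasticsearch.common.unit.TimeValue;

    public class ForceStopAnalyticsSketch {

        /** Force-stops a data frame analytics job, combining the parameters exercised in the test above. */
        static boolean forceStop(RestHighLevelClient client, String analyticsId) throws IOException {
            StopDataFrameAnalyticsRequest request = new StopDataFrameAnalyticsRequest(analyticsId)
                .setTimeout(TimeValue.timeValueMinutes(1)) // give the task up to a minute to wind down
                .setAllowNoMatch(false)                    // fail if the id matches no job
                .setForce(true);                           // the force option added in this change
            StopDataFrameAnalyticsResponse response =
                client.machineLearning().stopDataFrameAnalytics(request, RequestOptions.DEFAULT);
            return response.isStopped();
        }
    }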
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 82ce5f6b9b946..a1946baa3e0e1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -427,11 +427,7 @@ public void testReindex() throws IOException { reindexRequest.setDestRouting("=cat"); } if (randomBoolean()) { - if (randomBoolean()) { - reindexRequest.setMaxDocs(randomIntBetween(100, 1000)); - } else { - reindexRequest.setSize(randomIntBetween(100, 1000)); - } + reindexRequest.setMaxDocs(randomIntBetween(100, 1000)); } if (randomBoolean()) { reindexRequest.setAbortOnVersionConflict(false); @@ -479,13 +475,9 @@ public void testUpdateByQuery() throws IOException { expectedParams.put("routing", "=cat"); } if (randomBoolean()) { - int size = randomIntBetween(100, 1000); - if (randomBoolean()) { - updateByQueryRequest.setMaxDocs(size); - } else { - updateByQueryRequest.setSize(size); - } - expectedParams.put("max_docs", Integer.toString(size)); + int maxDocs = randomIntBetween(100, 1000); + updateByQueryRequest.setMaxDocs(maxDocs); + expectedParams.put("max_docs", Integer.toString(maxDocs)); } if (randomBoolean()) { updateByQueryRequest.setAbortOnVersionConflict(false); @@ -528,13 +520,9 @@ public void testDeleteByQuery() throws IOException { expectedParams.put("routing", "=cat"); } if (randomBoolean()) { - int size = randomIntBetween(100, 1000); - if (randomBoolean()) { - deleteByQueryRequest.setMaxDocs(size); - } else { - deleteByQueryRequest.setSize(size); - } - expectedParams.put("max_docs", Integer.toString(size)); + int maxDocs = randomIntBetween(100, 1000); + deleteByQueryRequest.setMaxDocs(maxDocs); + expectedParams.put("max_docs", Integer.toString(maxDocs)); } if (randomBoolean()) { deleteByQueryRequest.setAbortOnVersionConflict(false); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 26e5842019675..ae1cd5eb45edf 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -141,7 +141,7 @@ public class RestHighLevelClientTests extends ESTestCase { // core "ping", "info", // security - "security.get_ssl_certificates", "security.authenticate", "security.get_user_privileges", + "security.get_ssl_certificates", "security.authenticate", "security.get_user_privileges", "security.get_builtin_privileges", // license "license.get_trial_status", "license.get_basic_status" @@ -730,8 +730,8 @@ public void testApiNamingConventions() throws Exception { "indices.exists_type", "indices.get_upgrade", "indices.put_alias", - "scripts_painless_execute", - "render_search_template" + "render_search_template", + "scripts_painless_execute" }; //These API are not required for high-level client feature completeness String[] notRequiredApi = new String[] { @@ -824,6 +824,7 @@ public void testApiNamingConventions() throws Exception { apiName.startsWith("ccr.") == false && apiName.startsWith("data_frame") == false && apiName.endsWith("freeze") == false && + apiName.endsWith("reload_analyzers") == false && // IndicesClientIT.getIndexTemplate should be renamed "getTemplate" in 
version 8.0 when we // can get rid of 7.0's deprecated "getTemplate" apiName.equals("indices.get_index_template") == false) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java index 99350fc29db8a..4c99cb323969e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java @@ -315,7 +315,7 @@ public void testGetApplicationPrivilege() throws Exception { assertNull(request.getEntity()); } - public void testGetAllApplicationPrivileges() throws Exception { + public void testGetAllPrivilegesForApplication() throws Exception { final String application = randomAlphaOfLength(6); GetPrivilegesRequest getPrivilegesRequest = GetPrivilegesRequest.getApplicationPrivileges(application); Request request = SecurityRequestConverters.getPrivileges(getPrivilegesRequest); @@ -339,7 +339,7 @@ public void testGetMultipleApplicationPrivileges() throws Exception { assertNull(request.getEntity()); } - public void testGetAllPrivileges() throws Exception { + public void testGetAllApplicationPrivileges() throws Exception { GetPrivilegesRequest getPrivilegesRequest = GetPrivilegesRequest.getAllPrivileges(); Request request = SecurityRequestConverters.getPrivileges(getPrivilegesRequest); assertEquals(HttpGet.METHOD_NAME, request.getMethod()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java index 31f1a26d6f1d0..c0e4cfb7eeb31 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java @@ -53,8 +53,13 @@ private PreviewDataFrameTransformResponse createTestInstance() { } docs.add(doc); } + int numMappingEntries = randomIntBetween(5, 10); + Map mappings = new HashMap<>(numMappingEntries); + for (int i = 0; i < numMappingEntries; i++) { + mappings.put(randomAlphaOfLength(10), Map.of("type", randomAlphaOfLength(10))); + } - return new PreviewDataFrameTransformResponse(docs); + return new PreviewDataFrameTransformResponse(docs, mappings); } private void toXContent(PreviewDataFrameTransformResponse response, XContentBuilder builder) throws IOException { @@ -64,6 +69,7 @@ private void toXContent(PreviewDataFrameTransformResponse response, XContentBuil builder.map(doc); } builder.endArray(); + builder.field("mappings", response.getMappings()); builder.endObject(); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java index 4ada50c20d219..ebb62890c3cdd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.client.dataframe.transforms; import org.elasticsearch.client.core.IndexerState; +import 
org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; @@ -37,7 +38,8 @@ public void testFromXContent() throws IOException { DataFrameTransformStateTests::toXContent, DataFrameTransformState::fromXContent) .supportsUnknownFields(true) - .randomFieldsExcludeFilter(field -> field.equals("current_position")) + .randomFieldsExcludeFilter(field -> field.equals("current_position") || + field.equals("node.attributes")) .test(); } @@ -47,7 +49,8 @@ public static DataFrameTransformState randomDataFrameTransformState() { randomPositionMap(), randomLongBetween(0,10), randomBoolean() ? null : randomAlphaOfLength(10), - randomBoolean() ? null : DataFrameTransformProgressTests.randomInstance()); + randomBoolean() ? null : DataFrameTransformProgressTests.randomInstance(), + randomBoolean() ? null : NodeAttributesTests.createRandom()); } public static void toXContent(DataFrameTransformState state, XContentBuilder builder) throws IOException { @@ -65,6 +68,10 @@ public static void toXContent(DataFrameTransformState state, XContentBuilder bui builder.field("progress"); DataFrameTransformProgressTests.toXContent(state.getProgress(), builder); } + if (state.getNode() != null) { + builder.field("node"); + state.getNode().toXContent(builder, ToXContent.EMPTY_PARAMS); + } builder.endObject(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/NodeAttributesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/NodeAttributesTests.java new file mode 100644 index 0000000000000..661aa9f7a30a4 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/NodeAttributesTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; + +public class NodeAttributesTests extends AbstractXContentTestCase { + + public static NodeAttributes createRandom() { + int numberOfAttributes = randomIntBetween(1, 10); + Map attributes = new HashMap<>(numberOfAttributes); + for(int i = 0; i < numberOfAttributes; i++) { + String val = randomAlphaOfLength(10); + attributes.put("key-"+i, val); + } + return new NodeAttributes(randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + attributes); + } + + @Override + protected NodeAttributes createTestInstance() { + return createRandom(); + } + + @Override + protected NodeAttributes doParseInstance(XContentParser parser) throws IOException { + return NodeAttributes.PARSER.parse(parser, null); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> !field.isEmpty(); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java index ad08881fb5641..dde44898bf90b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateAndStatsTests.java @@ -64,7 +64,7 @@ protected boolean supportsUnknownFields() { @Override protected Predicate getRandomFieldsExcludeFilter() { - return field -> field.equals("state.current_position"); + return field -> field.equals("state.current_position") || field.equals("state.node") || field.equals("state.node.attributes"); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java index 4c80365bc539a..b97e0a72c1fa2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStateTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; +import org.elasticsearch.xpack.core.dataframe.transforms.NodeAttributes; import org.elasticsearch.xpack.core.indexing.IndexerState; import java.io.IOException; @@ -40,8 +41,20 @@ public class DataFrameTransformStateTests extends AbstractHlrcXContentTestCase getRandomFieldsExcludeFilter() { - return field -> field.equals("current_position"); + return field -> field.equals("current_position") || field.equals("node.attributes"); } public static DataFrameTransformStateAndStats randomDataFrameTransformStateAndStats(String id) { @@ -97,6 +110,20 @@ public static 
DataFrameTransformProgress randomDataFrameTransformProgress() { return new DataFrameTransformProgress(totalDocs, remainingDocs); } + public static NodeAttributes randomNodeAttributes() { + int numberOfAttributes = randomIntBetween(1, 10); + Map attributes = new HashMap<>(numberOfAttributes); + for(int i = 0; i < numberOfAttributes; i++) { + String val = randomAlphaOfLength(10); + attributes.put("key-"+i, val); + } + return new NodeAttributes(randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + attributes); + } + public static DataFrameIndexerTransformStats randomStats(String transformId) { return new DataFrameIndexerTransformStats(transformId, randomLongBetween(10L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), @@ -110,7 +137,8 @@ public static DataFrameTransformState randomDataFrameTransformState() { randomPosition(), randomLongBetween(0,10), randomBoolean() ? null : randomAlphaOfLength(10), - randomBoolean() ? null : randomDataFrameTransformProgress()); + randomBoolean() ? null : randomDataFrameTransformProgress(), + randomBoolean() ? null : randomNodeAttributes()); } private static Map randomPosition() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java index 32605f5c286ad..ece1c4fb743ee 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java @@ -39,7 +39,6 @@ public static DateHistogramGroupSource randomDateHistogramGroupSource() { String field = randomAlphaOfLengthBetween(1, 20); return new DateHistogramGroupSource(field, randomDateHistogramInterval(), - randomBoolean() ? randomAlphaOfLength(10) : null, randomBoolean() ? 
randomZone() : null); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java index dc31004607dcd..fd98e52a1527e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java @@ -44,9 +44,6 @@ public static DateHistogramGroupSource randomDateHistogramGroupSource() { if (randomBoolean()) { dateHistogramGroupSource.setTimeZone(randomZone()); } - if (randomBoolean()) { - dateHistogramGroupSource.setFormat(randomAlphaOfLength(10)); - } return dateHistogramGroupSource; } @@ -64,7 +61,6 @@ protected org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroup protected void assertInstances(DateHistogramGroupSource serverTestInstance, org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource clientInstance) { assertThat(serverTestInstance.getField(), equalTo(clientInstance.getField())); - assertThat(serverTestInstance.getFormat(), equalTo(clientInstance.getFormat())); assertSameInterval(serverTestInstance.getInterval(), clientInstance.getInterval()); assertThat(serverTestInstance.getTimeZone(), equalTo(clientInstance.getTimeZone())); assertThat(serverTestInstance.getType().name(), equalTo(clientInstance.getType().name())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index b3fa85880b465..731d42f902c50 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -45,6 +45,7 @@ import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats; import org.elasticsearch.client.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.client.dataframe.transforms.DestConfig; +import org.elasticsearch.client.dataframe.transforms.NodeAttributes; import org.elasticsearch.client.dataframe.transforms.QueryConfig; import org.elasticsearch.client.dataframe.transforms.SourceConfig; import org.elasticsearch.client.dataframe.transforms.pivot.AggregationConfig; @@ -263,6 +264,7 @@ public void testStartStop() throws IOException, InterruptedException { // tag::stop-data-frame-transform-request-options request.setWaitForCompletion(Boolean.TRUE); // <1> request.setTimeout(TimeValue.timeValueSeconds(30)); // <2> + request.setAllowNoMatch(true); // <3> // end::stop-data-frame-transform-request-options // tag::stop-data-frame-transform-execute @@ -446,6 +448,7 @@ public void testPreview() throws IOException, InterruptedException { // end::preview-data-frame-transform-execute assertNotNull(response.getDocs()); + assertNotNull(response.getMappings()); } { // tag::preview-data-frame-transform-execute-listener @@ -506,6 +509,11 @@ public void testGetStats() throws IOException, InterruptedException { new GetDataFrameTransformStatsRequest(id); // <1> // end::get-data-frame-transform-stats-request + // tag::get-data-frame-transform-stats-request-options 
+ request.setPageParams(new PageParams(0, 100)); // <1> + request.setAllowNoMatch(true); // <2> + // end::get-data-frame-transform-stats-request-options + { // tag::get-data-frame-transform-stats-execute GetDataFrameTransformStatsResponse response = @@ -526,6 +534,8 @@ public void testGetStats() throws IOException, InterruptedException { stateAndStats.getTransformStats(); // <4> DataFrameTransformProgress progress = stateAndStats.getTransformState().getProgress(); // <5> + NodeAttributes node = + stateAndStats.getTransformState().getNode(); // <6> // end::get-data-frame-transform-stats-response assertEquals(IndexerState.STOPPED, indexerState); @@ -597,6 +607,7 @@ public void testGetDataFrameTransform() throws IOException, InterruptedException // tag::get-data-frame-transform-request-options request.setPageParams(new PageParams(0, 100)); // <1> + request.setAllowNoMatch(true); // <2> // end::get-data-frame-transform-request-options // tag::get-data-frame-transform-execute diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 8e0a3d2fd005b..f878f0f6f7d88 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -58,6 +58,7 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.SyncedFlushResponse; +import org.elasticsearch.client.core.BroadcastResponse.Shards; import org.elasticsearch.client.core.ShardsAcknowledgedResponse; import org.elasticsearch.client.indices.AnalyzeRequest; import org.elasticsearch.client.indices.AnalyzeResponse; @@ -77,6 +78,9 @@ import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersRequest; +import org.elasticsearch.client.indices.ReloadAnalyzersResponse; +import org.elasticsearch.client.indices.ReloadAnalyzersResponse.ReloadDetails; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.client.indices.rollover.RolloverResponse; @@ -2748,4 +2752,77 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + + public void testReloadSearchAnalyzers() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::reload-analyzers-request + ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("index"); // <1> + // end::reload-analyzers-request + + // tag::reload-analyzers-request-indicesOptions + request.setIndicesOptions(IndicesOptions.strictExpandOpen()); // <1> + // end::reload-analyzers-request-indicesOptions + + // tag::reload-analyzers-execute + ReloadAnalyzersResponse reloadResponse = client.indices().reloadAnalyzers(request, RequestOptions.DEFAULT); + // end::reload-analyzers-execute + + // tag::reload-analyzers-response + Shards shards = reloadResponse.shards(); // <1> + Map reloadDetails = 
reloadResponse.getReloadedDetails(); // <2> + ReloadDetails details = reloadDetails.get("index"); // <3> + String indexName = details.getIndexName(); // <4> + Set indicesNodes = details.getReloadedIndicesNodes(); // <5> + Set analyzers = details.getReloadedAnalyzers(); // <6> + // end::reload-analyzers-response + assertNotNull(shards); + assertEquals("index", indexName); + assertEquals(1, indicesNodes.size()); + assertEquals(0, analyzers.size()); + + // tag::reload-analyzers-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(ReloadAnalyzersResponse reloadResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::reload-analyzers-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::reload-analyzers-execute-async + client.indices().reloadAnalyzersAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::reload-analyzers-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + { + // tag::reload-analyzers-notfound + try { + ReloadAnalyzersRequest request = new ReloadAnalyzersRequest("does_not_exist"); + client.indices().reloadAnalyzers(request, RequestOptions.DEFAULT); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.BAD_REQUEST) { + // <1> + } + } + // end::reload-analyzers-notfound + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 8fd63a065d944..93f196212ca0c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -2802,7 +2802,7 @@ public void onFailure(Exception e) { } public void testGetDataFrameAnalytics() throws Exception { - createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()); + createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]); RestHighLevelClient client = highLevelClient(); client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT); @@ -2851,7 +2851,7 @@ public void onFailure(Exception e) { } public void testGetDataFrameAnalyticsStats() throws Exception { - createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()); + createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]); RestHighLevelClient client = highLevelClient(); client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT); @@ -2901,7 +2901,7 @@ public void onFailure(Exception e) { } public void testPutDataFrameAnalytics() throws Exception { - createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()); + createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]); RestHighLevelClient client = highLevelClient(); { @@ -2994,7 +2994,7 @@ public void onFailure(Exception e) { } public void testDeleteDataFrameAnalytics() throws Exception { - createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()); + createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]); RestHighLevelClient client = highLevelClient(); client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), 
RequestOptions.DEFAULT); @@ -3044,9 +3044,9 @@ public void onFailure(Exception e) { } public void testStartDataFrameAnalytics() throws Exception { - createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()); + createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]); highLevelClient().index( - new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()).source(XContentType.JSON, "total", 10000) + new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]).source(XContentType.JSON, "total", 10000) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); RestHighLevelClient client = highLevelClient(); client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT); @@ -3101,15 +3101,16 @@ public void onFailure(Exception e) { } public void testStopDataFrameAnalytics() throws Exception { - createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()); + createIndex(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]); highLevelClient().index( - new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()).source(XContentType.JSON, "total", 10000) + new IndexRequest(DF_ANALYTICS_CONFIG.getSource().getIndex()[0]).source(XContentType.JSON, "total", 10000) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); RestHighLevelClient client = highLevelClient(); client.machineLearning().putDataFrameAnalytics(new PutDataFrameAnalyticsRequest(DF_ANALYTICS_CONFIG), RequestOptions.DEFAULT); { // tag::stop-data-frame-analytics-request StopDataFrameAnalyticsRequest request = new StopDataFrameAnalyticsRequest("my-analytics-config"); // <1> + request.setForce(false); // <2> // end::stop-data-frame-analytics-request // tag::stop-data-frame-analytics-execute diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index f9a1c5c6571eb..9bbc3b2ea9072 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -50,6 +50,7 @@ import org.elasticsearch.client.security.ExpressionRoleMapping; import org.elasticsearch.client.security.GetApiKeyRequest; import org.elasticsearch.client.security.GetApiKeyResponse; +import org.elasticsearch.client.security.GetBuiltinPrivilegesResponse; import org.elasticsearch.client.security.GetPrivilegesRequest; import org.elasticsearch.client.security.GetPrivilegesResponse; import org.elasticsearch.client.security.GetRoleMappingsRequest; @@ -97,7 +98,6 @@ import javax.crypto.SecretKeyFactory; import javax.crypto.spec.PBEKeySpec; - import java.io.IOException; import java.time.Instant; import java.util.ArrayList; @@ -120,6 +120,7 @@ import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.iterableWithSize; @@ -1500,6 +1501,60 @@ public void onFailure(Exception e) { } } + public void testGetBuiltinPrivileges() throws Exception { + final RestHighLevelClient client = highLevelClient(); + { + //tag::get-builtin-privileges-execute + GetBuiltinPrivilegesResponse response = 
client.security().getBuiltinPrivileges(RequestOptions.DEFAULT); + //end::get-builtin-privileges-execute + + assertNotNull(response); + //tag::get-builtin-privileges-response + final Set cluster = response.getClusterPrivileges(); + final Set index = response.getIndexPrivileges(); + //end::get-builtin-privileges-response + + assertThat(cluster, hasItem("all")); + assertThat(cluster, hasItem("manage")); + assertThat(cluster, hasItem("monitor")); + assertThat(cluster, hasItem("manage_security")); + + assertThat(index, hasItem("all")); + assertThat(index, hasItem("manage")); + assertThat(index, hasItem("monitor")); + assertThat(index, hasItem("read")); + assertThat(index, hasItem("write")); + } + { + // tag::get-builtin-privileges-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetBuiltinPrivilegesResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::get-builtin-privileges-execute-listener + + // Replace the empty listener by a blocking listener in test + final PlainActionFuture future = new PlainActionFuture<>(); + listener = future; + + // tag::get-builtin-privileges-execute-async + client.security().getBuiltinPrivilegesAsync(RequestOptions.DEFAULT, listener); // <1> + // end::get-builtin-privileges-execute-async + + final GetBuiltinPrivilegesResponse response = future.get(30, TimeUnit.SECONDS); + assertNotNull(response); + assertThat(response.getClusterPrivileges(), hasItem("manage_security")); + assertThat(response.getIndexPrivileges(), hasItem("read")); + } + } + public void testGetPrivileges() throws Exception { final RestHighLevelClient client = highLevelClient(); final ApplicationPrivilege readTestappPrivilege = @@ -1559,9 +1614,9 @@ public void testGetPrivileges() throws Exception { assertNotNull(response); assertThat(response.getPrivileges().size(), equalTo(3)); - final GetPrivilegesResponse exptectedResponse = + final GetPrivilegesResponse expectedResponse = new GetPrivilegesResponse(Arrays.asList(readTestappPrivilege, writeTestappPrivilege, allTestappPrivilege)); - assertThat(response, equalTo(exptectedResponse)); + assertThat(response, equalTo(expectedResponse)); //tag::get-privileges-response Set privileges = response.getPrivileges(); //end::get-privileges-response diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/ReloadAnalyzersResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/ReloadAnalyzersResponseTests.java new file mode 100644 index 0000000000000..6719e10808e42 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/ReloadAnalyzersResponseTests.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; +import org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse.ReloadDetails; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.isIn; + +public class ReloadAnalyzersResponseTests + extends AbstractResponseTestCase { + + private String index; + private String id; + private Set shardIds; + + @Override + protected org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse createServerTestInstance() { + index = randomAlphaOfLength(8); + id = randomAlphaOfLength(8); + final int total = randomIntBetween(1, 16); + final int successful = total - scaledRandomIntBetween(0, total); + final int failed = scaledRandomIntBetween(0, total - successful); + final List failures = new ArrayList<>(); + shardIds = new HashSet<>(); + for (int i = 0; i < failed; i++) { + final DefaultShardOperationFailedException failure = new DefaultShardOperationFailedException( + index, + randomValueOtherThanMany(shardIds::contains, () -> randomIntBetween(0, total - 1)), + new RetentionLeaseNotFoundException(id)); + failures.add(failure); + shardIds.add(failure.shardId()); + } + Map reloadedDetailsMap = new HashMap<>(); + int randomIndices = randomIntBetween(0, 5); + for (int i = 0; i < randomIndices; i++) { + String indexName = randomAlphaOfLengthBetween(5, 10); + Set randomNodeIds = new HashSet<>(Arrays.asList(generateRandomStringArray(5, 5, false, true))); + Set randomAnalyzers = new HashSet<>(Arrays.asList(generateRandomStringArray(5, 5, false, true))); + + ReloadDetails reloadedDetails = new ReloadDetails(indexName, randomNodeIds, randomAnalyzers); + reloadedDetailsMap.put(indexName, reloadedDetails); + } + return new org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse(total, successful, failed, failures, reloadedDetailsMap); + } + + @Override + protected ReloadAnalyzersResponse doParseToClientInstance(XContentParser parser) throws IOException { + return ReloadAnalyzersResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(org.elasticsearch.xpack.core.action.ReloadAnalyzersResponse serverTestInstance, + ReloadAnalyzersResponse clientInstance) { + assertThat(clientInstance.shards().total(), equalTo(serverTestInstance.getTotalShards())); + assertThat(clientInstance.shards().successful(), equalTo(serverTestInstance.getSuccessfulShards())); + assertThat(clientInstance.shards().skipped(), equalTo(0)); + assertThat(clientInstance.shards().failed(), equalTo(serverTestInstance.getFailedShards())); + assertThat(clientInstance.shards().failures(), hasSize(clientInstance.shards().failed() == 0 ? 
0 : 1)); // failures are grouped + if (clientInstance.shards().failed() > 0) { + final DefaultShardOperationFailedException groupedFailure = clientInstance.shards().failures().iterator().next(); + assertThat(groupedFailure.index(), equalTo(index)); + assertThat(groupedFailure.shardId(), isIn(shardIds)); + assertThat(groupedFailure.reason(), containsString("reason=retention lease with ID [" + id + "] not found")); + } + Map serverDetails = serverTestInstance.getReloadDetails(); + assertThat(clientInstance.getReloadedDetails().size(), equalTo(serverDetails.size())); + for (Entry entry : clientInstance + .getReloadedDetails().entrySet()) { + String indexName = entry.getKey(); + assertTrue(serverDetails.keySet().contains(indexName)); + assertEquals(serverDetails.get(indexName).getIndexName(), entry.getValue().getIndexName()); + assertEquals(serverDetails.get(indexName).getReloadedAnalyzers(), entry.getValue().getReloadedAnalyzers()); + assertEquals(serverDetails.get(indexName).getReloadedIndicesNodes(), entry.getValue().getReloadedIndicesNodes()); + } + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java index f6826af551d0a..957afc69dd0c2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfigTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.client.ml.dataframe; +import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -29,6 +30,7 @@ import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -54,6 +56,12 @@ public static DataFrameAnalyticsConfig randomDataFrameAnalyticsConfig() { if (randomBoolean()) { builder.setModelMemoryLimit(new ByteSizeValue(randomIntBetween(1, 16), randomFrom(ByteSizeUnit.MB, ByteSizeUnit.GB))); } + if (randomBoolean()) { + builder.setCreateTime(Instant.now()); + } + if (randomBoolean()) { + builder.setVersion(Version.CURRENT); + } return builder.build(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSourceTests.java index 246cd67c1baf1..d82e1999f3034 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsSourceTests.java @@ -36,7 +36,7 @@ public class DataFrameAnalyticsSourceTests extends AbstractXContentTestCase new CreateTokenRequest(r.getGrantType(), r.getScope(), r.getUsername(), r.getPassword(), r.getRefreshToken()), + r -> new CreateTokenRequest(r.getGrantType(), r.getScope(), r.getUsername(), r.getPassword(), + r.getRefreshToken(), r.getKerberosTicket()), this::mutate); } private CreateTokenRequest mutate(CreateTokenRequest req) { - switch (randomIntBetween(1, 5)) { - case 1: - return new CreateTokenRequest("g", req.getScope(), req.getUsername(), req.getPassword(), req.getRefreshToken()); - case 2: - 
return new CreateTokenRequest(req.getGrantType(), "s", req.getUsername(), req.getPassword(), req.getRefreshToken()); - case 3: - return new CreateTokenRequest(req.getGrantType(), req.getScope(), "u", req.getPassword(), req.getRefreshToken()); - case 4: - final char[] password = {'p'}; - return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), password, req.getRefreshToken()); - case 5: - return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), req.getPassword(), "r"); + switch (randomIntBetween(1, 6)) { + case 1: + return new CreateTokenRequest("g", req.getScope(), req.getUsername(), req.getPassword(), req.getRefreshToken(), + req.getKerberosTicket()); + case 2: + return new CreateTokenRequest(req.getGrantType(), "s", req.getUsername(), req.getPassword(), req.getRefreshToken(), + req.getKerberosTicket()); + case 3: + return new CreateTokenRequest(req.getGrantType(), req.getScope(), "u", req.getPassword(), req.getRefreshToken(), + req.getKerberosTicket()); + case 4: + final char[] password = { 'p' }; + return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), password, req.getRefreshToken(), + req.getKerberosTicket()); + case 5: + final char[] kerberosTicket = { 'k' }; + return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), req.getPassword(), req.getRefreshToken(), + kerberosTicket); + case 6: + return new CreateTokenRequest(req.getGrantType(), req.getScope(), req.getUsername(), req.getPassword(), "r", + req.getKerberosTicket()); } throw new IllegalStateException("Bad random number"); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/ExecuteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/ExecuteWatchResponseTests.java deleted file mode 100644 index 3e0ef4c8a5e5f..0000000000000 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/ExecuteWatchResponseTests.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.watcher; - -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.ObjectPath; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.io.InputStream; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; -import static org.hamcrest.Matchers.is; - -public class ExecuteWatchResponseTests extends ESTestCase { - - public static final String WATCH_ID_VALUE = "my_watch"; - public static final String NODE_VALUE = "my_node"; - public static final String TRIGGER_TYPE_VALUE = "manual"; - public static final String STATE_VALUE = "executed"; - public static final String STATE_KEY = "state"; - public static final String TRIGGER_EVENT_KEY = "trigger_event"; - public static final String TRIGGER_EVENT_TYPE_KEY = "type"; - public static final String MESSAGES_KEY = "messages"; - public static final String NODE_KEY = "node"; - public static final String WATCH_ID_KEY = "watch_id"; - - public void testFromXContent() throws IOException { - xContentTester(this::createParser, - ExecuteWatchResponseTests::createTestInstance, - this::toXContent, - ExecuteWatchResponse::fromXContent) - .supportsUnknownFields(true) - .assertEqualsConsumer(this::assertEqualInstances) - .assertToXContentEquivalence(false) - .test(); - } - - private void assertEqualInstances(ExecuteWatchResponse expected, ExecuteWatchResponse actual) { - assertThat(expected.getRecordId(), is(actual.getRecordId())); - - // This may have extra json, so lets just assume that if all of the original fields from the creation are there, then its equal - // This is the same code that is in createTestInstance in this class. - Map actualMap = actual.getRecordAsMap(); - assertThat(ObjectPath.eval(WATCH_ID_KEY, actualMap), is(WATCH_ID_VALUE)); - assertThat(ObjectPath.eval(NODE_KEY, actualMap), is(NODE_VALUE)); - List messages = ObjectPath.eval(MESSAGES_KEY, actualMap); - assertThat(messages.size(), is(0)); - assertThat(ObjectPath.eval(TRIGGER_EVENT_KEY + "." 
+ TRIGGER_EVENT_TYPE_KEY, actualMap), is(TRIGGER_TYPE_VALUE)); - assertThat(ObjectPath.eval(STATE_KEY, actualMap), is(STATE_VALUE)); - } - - private XContentBuilder toXContent(BytesReference bytes, XContentBuilder builder) throws IOException { - // EMPTY is safe here because we never use namedObject - try (InputStream stream = bytes.streamInput(); - XContentParser parser = createParser(JsonXContent.jsonXContent, stream)) { - parser.nextToken(); - builder.generator().copyCurrentStructure(parser); - return builder; - } - } - - private XContentBuilder toXContent(ExecuteWatchResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - builder.field("_id", response.getRecordId()); - builder.field("watch_record"); - toXContent(response.getRecord(), builder); - return builder.endObject(); - } - - private static ExecuteWatchResponse createTestInstance() { - String id = "my_watch_0-2015-06-02T23:17:55.124Z"; - try { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - builder.field(WATCH_ID_KEY, WATCH_ID_VALUE); - builder.field(NODE_KEY, NODE_VALUE); - builder.startArray(MESSAGES_KEY); - builder.endArray(); - builder.startObject(TRIGGER_EVENT_KEY); - builder.field(TRIGGER_EVENT_TYPE_KEY, TRIGGER_TYPE_VALUE); - builder.endObject(); - builder.field(STATE_KEY, STATE_VALUE); - builder.endObject(); - BytesReference bytes = BytesReference.bytes(builder); - return new ExecuteWatchResponse(id, bytes); - } - catch (IOException e) { - throw new AssertionError(e); - } - } -} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java index 7d919ef5f68c1..b69ea90a49e4a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/GetWatchResponseTests.java @@ -19,17 +19,13 @@ package org.elasticsearch.client.watcher; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.time.DateUtils; -import org.elasticsearch.common.xcontent.DeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.client.AbstractHlrcStreamableXContentTestCase; import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; @@ -37,70 +33,17 @@ import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; import java.io.IOException; -import java.io.InputStream; import java.time.ZonedDateTime; -import java.util.Collections; import java.util.HashMap; import java.util.Map; -import java.util.function.Predicate; -public class GetWatchResponseTests extends - AbstractHlrcStreamableXContentTestCase { +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; - private static final String[] SHUFFLE_FIELDS_EXCEPTION = new 
String[] { "watch" }; +public class GetWatchResponseTests extends AbstractResponseTestCase { @Override - protected String[] getShuffleFieldsExceptions() { - return SHUFFLE_FIELDS_EXCEPTION; - } - - @Override - protected ToXContent.Params getToXContentParams() { - return new ToXContent.MapParams(Collections.singletonMap("hide_headers", "false")); - } - - @Override - protected Predicate getRandomFieldsExcludeFilter() { - return f -> f.contains("watch") || f.contains("actions") || f.contains("headers"); - } - - @Override - protected void assertEqualInstances(GetWatchResponse expectedInstance, GetWatchResponse newInstance) { - if (expectedInstance.isFound() && - expectedInstance.getSource().getContentType() != newInstance.getSource().getContentType()) { - /** - * The {@link GetWatchResponse#getContentType()} depends on the content type that - * was used to serialize the main object so we use the same content type than the - * expectedInstance to translate the watch of the newInstance. - */ - XContent from = XContentFactory.xContent(newInstance.getSource().getContentType()); - XContent to = XContentFactory.xContent(expectedInstance.getSource().getContentType()); - final BytesReference newSource; - // It is safe to use EMPTY here because this never uses namedObject - try (InputStream stream = newInstance.getSource().getBytes().streamInput(); - XContentParser parser = XContentFactory.xContent(from.type()).createParser(NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream)) { - parser.nextToken(); - XContentBuilder builder = XContentFactory.contentBuilder(to.type()); - builder.copyCurrentStructure(parser); - newSource = BytesReference.bytes(builder); - } catch (IOException e) { - throw new AssertionError(e); - } - newInstance = new GetWatchResponse(newInstance.getId(), newInstance.getVersion(), - newInstance.getSeqNo(), newInstance.getPrimaryTerm(), - newInstance.getStatus(), new XContentSource(newSource, expectedInstance.getSource().getContentType())); - } - super.assertEqualInstances(expectedInstance, newInstance); - } - - @Override - protected GetWatchResponse createBlankInstance() { - return new GetWatchResponse(); - } - - @Override - protected GetWatchResponse createTestInstance() { + protected GetWatchResponse createServerTestInstance() { String id = randomAlphaOfLength(10); if (LuceneTestCase.rarely()) { return new GetWatchResponse(id); @@ -113,6 +56,34 @@ protected GetWatchResponse createTestInstance() { return new GetWatchResponse(id, version, seqNo, primaryTerm, status, new XContentSource(source, XContentType.JSON)); } + @Override + protected org.elasticsearch.client.watcher.GetWatchResponse doParseToClientInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.watcher.GetWatchResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(GetWatchResponse serverTestInstance, org.elasticsearch.client.watcher.GetWatchResponse clientInstance) { + assertThat(clientInstance.getId(), equalTo(serverTestInstance.getId())); + assertThat(clientInstance.getSeqNo(), equalTo(serverTestInstance.getSeqNo())); + assertThat(clientInstance.getPrimaryTerm(), equalTo(serverTestInstance.getPrimaryTerm())); + assertThat(clientInstance.getVersion(), equalTo(serverTestInstance.getVersion())); + if (serverTestInstance.getStatus() != null) { + assertThat(convertWatchStatus(clientInstance.getStatus()), equalTo(serverTestInstance.getStatus())); + } else { + assertThat(clientInstance.getStatus(), nullValue()); + } + if 
(serverTestInstance.getSource() != null) { + assertThat(clientInstance.getSourceAsMap(), equalTo(serverTestInstance.getSource().getAsMap())); + } else { + assertThat(clientInstance.getSource(), nullValue()); + } + } + + @Override + protected ToXContent.Params getParams() { + return new ToXContent.MapParams(Map.of("hide_headers", "false")); + } + private static BytesReference simpleWatch() { try { XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); @@ -179,58 +150,45 @@ private static ActionStatus.Execution randomExecution() { } } - @Override - public org.elasticsearch.client.watcher.GetWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.watcher.GetWatchResponse.fromXContent(parser); - } - - @Override - public GetWatchResponse convertHlrcToInternal(org.elasticsearch.client.watcher.GetWatchResponse instance) { - if (instance.isFound()) { - return new GetWatchResponse(instance.getId(), instance.getVersion(), instance.getSeqNo(), instance.getPrimaryTerm(), - convertHlrcToInternal(instance.getStatus()), new XContentSource(instance.getSource(), instance.getContentType())); - } else { - return new GetWatchResponse(instance.getId()); - } - } - - private static WatchStatus convertHlrcToInternal(org.elasticsearch.client.watcher.WatchStatus status) { + private static WatchStatus convertWatchStatus(org.elasticsearch.client.watcher.WatchStatus status) { final Map actions = new HashMap<>(); for (Map.Entry entry : status.getActions().entrySet()) { - actions.put(entry.getKey(), convertHlrcToInternal(entry.getValue())); + actions.put(entry.getKey(), convertActionStatus(entry.getValue())); } return new WatchStatus(status.version(), - convertHlrcToInternal(status.state()), - status.getExecutionState() == null ? null : convertHlrcToInternal(status.getExecutionState()), + convertWatchStatusState(status.state()), + status.getExecutionState() == null ? null : convertWatchStatus(status.getExecutionState()), status.lastChecked(), status.lastMetCondition(), actions, status.getHeaders() ); } - private static ActionStatus convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus actionStatus) { - return new ActionStatus(convertHlrcToInternal(actionStatus.ackStatus()), - actionStatus.lastExecution() == null ? null : convertHlrcToInternal(actionStatus.lastExecution()), - actionStatus.lastSuccessfulExecution() == null ? null : convertHlrcToInternal(actionStatus.lastSuccessfulExecution()), - actionStatus.lastThrottle() == null ? null : convertHlrcToInternal(actionStatus.lastThrottle()) + private static ActionStatus convertActionStatus(org.elasticsearch.client.watcher.ActionStatus actionStatus) { + return new ActionStatus(convertAckStatus(actionStatus.ackStatus()), + actionStatus.lastExecution() == null ? null : convertActionStatusExecution(actionStatus.lastExecution()), + actionStatus.lastSuccessfulExecution() == null ? null : convertActionStatusExecution(actionStatus.lastSuccessfulExecution()), + actionStatus.lastThrottle() == null ? 
null : convertActionStatusThrottle(actionStatus.lastThrottle()) ); } - private static ActionStatus.AckStatus convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.AckStatus ackStatus) { - return new ActionStatus.AckStatus(ackStatus.timestamp(), convertHlrcToInternal(ackStatus.state())); + private static ActionStatus.AckStatus convertAckStatus(org.elasticsearch.client.watcher.ActionStatus.AckStatus ackStatus) { + return new ActionStatus.AckStatus(ackStatus.timestamp(), convertAckStatusState(ackStatus.state())); } - private static ActionStatus.AckStatus.State convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.AckStatus.State state) { + private static ActionStatus.AckStatus.State convertAckStatusState( + org.elasticsearch.client.watcher.ActionStatus.AckStatus.State state) { return ActionStatus.AckStatus.State.valueOf(state.name()); } - private static WatchStatus.State convertHlrcToInternal(org.elasticsearch.client.watcher.WatchStatus.State state) { + private static WatchStatus.State convertWatchStatusState(org.elasticsearch.client.watcher.WatchStatus.State state) { return new WatchStatus.State(state.isActive(), state.getTimestamp()); } - private static ExecutionState convertHlrcToInternal(org.elasticsearch.client.watcher.ExecutionState executionState) { + private static ExecutionState convertWatchStatus(org.elasticsearch.client.watcher.ExecutionState executionState) { return ExecutionState.valueOf(executionState.name()); } - private static ActionStatus.Execution convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.Execution execution) { + private static ActionStatus.Execution convertActionStatusExecution( + org.elasticsearch.client.watcher.ActionStatus.Execution execution) { if (execution.successful()) { return ActionStatus.Execution.successful(execution.timestamp()); } else { @@ -238,7 +196,7 @@ private static ActionStatus.Execution convertHlrcToInternal(org.elasticsearch.cl } } - private static ActionStatus.Throttle convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.Throttle throttle) { + private static ActionStatus.Throttle convertActionStatusThrottle(org.elasticsearch.client.watcher.ActionStatus.Throttle throttle) { return new ActionStatus.Throttle(throttle.timestamp(), throttle.reason()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java index eebf2c9cef184..493375c451745 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/DeleteWatchResponseTests.java @@ -18,17 +18,19 @@ */ package org.elasticsearch.client.watcher.hlrc; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.watcher.DeleteWatchResponse; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.client.AbstractHlrcXContentTestCase; import java.io.IOException; -public class DeleteWatchResponseTests extends AbstractHlrcXContentTestCase< +import static org.hamcrest.Matchers.equalTo; + +public class DeleteWatchResponseTests extends AbstractResponseTestCase< org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse, DeleteWatchResponse> { @Override - protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse createTestInstance() { + protected 
org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse createServerTestInstance() { String id = randomAlphaOfLength(10); long version = randomLongBetween(1, 10); boolean found = randomBoolean(); @@ -36,23 +38,15 @@ protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse createTes } @Override - protected org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse doParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse.fromXContent(parser); - } - - @Override - public DeleteWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { + protected DeleteWatchResponse doParseToClientInstance(XContentParser parser) throws IOException { return DeleteWatchResponse.fromXContent(parser); } @Override - public org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse convertHlrcToInternal(DeleteWatchResponse instance) { - return new org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse(instance.getId(), instance.getVersion(), - instance.isFound()); - } - - @Override - protected boolean supportsUnknownFields() { - return false; + protected void assertInstances(org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse serverTestInstance, + DeleteWatchResponse clientInstance) { + assertThat(clientInstance.getId(), equalTo(serverTestInstance.getId())); + assertThat(clientInstance.getVersion(), equalTo(serverTestInstance.getVersion())); + assertThat(clientInstance.isFound(), equalTo(serverTestInstance.isFound())); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java index ace75517a9333..c1492eb53020f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/ExecuteWatchResponseTests.java @@ -19,31 +19,23 @@ package org.elasticsearch.client.watcher.hlrc; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.client.AbstractHlrcXContentTestCase; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; import java.io.IOException; -public class ExecuteWatchResponseTests - extends AbstractHlrcXContentTestCase { +import static org.hamcrest.Matchers.equalTo; - @Override - public org.elasticsearch.client.watcher.ExecuteWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.watcher.ExecuteWatchResponse.fromXContent(parser); - } +public class ExecuteWatchResponseTests extends AbstractResponseTestCase< + ExecuteWatchResponse, org.elasticsearch.client.watcher.ExecuteWatchResponse> { @Override - public ExecuteWatchResponse convertHlrcToInternal(org.elasticsearch.client.watcher.ExecuteWatchResponse instance) { - return new ExecuteWatchResponse(instance.getRecordId(), instance.getRecord(), XContentType.JSON); - } - - @Override - protected ExecuteWatchResponse createTestInstance() { + protected ExecuteWatchResponse createServerTestInstance() { String id = 
"my_watch_0-2015-06-02T23:17:55.124Z"; try { XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -66,12 +58,14 @@ protected ExecuteWatchResponse createTestInstance() { } @Override - protected ExecuteWatchResponse doParseInstance(XContentParser parser) throws IOException { - return ExecuteWatchResponse.fromXContent(parser); + protected org.elasticsearch.client.watcher.ExecuteWatchResponse doParseToClientInstance(XContentParser parser) throws IOException { + return org.elasticsearch.client.watcher.ExecuteWatchResponse.fromXContent(parser); } @Override - protected boolean supportsUnknownFields() { - return false; + protected void assertInstances(ExecuteWatchResponse serverTestInstance, + org.elasticsearch.client.watcher.ExecuteWatchResponse clientInstance) { + assertThat(clientInstance.getRecordId(), equalTo(serverTestInstance.getRecordId())); + assertThat(clientInstance.getRecordAsMap(), equalTo(serverTestInstance.getRecordSource().getAsMap())); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java index 9b65618cafc46..a47de0d15fda6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/watcher/hlrc/PutWatchResponseTests.java @@ -18,17 +18,19 @@ */ package org.elasticsearch.client.watcher.hlrc; +import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.client.watcher.PutWatchResponse; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.client.AbstractHlrcXContentTestCase; import java.io.IOException; -public class PutWatchResponseTests extends AbstractHlrcXContentTestCase< +import static org.hamcrest.Matchers.equalTo; + +public class PutWatchResponseTests extends AbstractResponseTestCase< org.elasticsearch.protocol.xpack.watcher.PutWatchResponse, PutWatchResponse> { @Override - protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createTestInstance() { + protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createServerTestInstance() { String id = randomAlphaOfLength(10); long seqNo = randomNonNegativeLong(); long primaryTerm = randomLongBetween(1, 20); @@ -38,23 +40,17 @@ protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse createTestIn } @Override - protected org.elasticsearch.protocol.xpack.watcher.PutWatchResponse doParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.protocol.xpack.watcher.PutWatchResponse.fromXContent(parser); - } - - @Override - public PutWatchResponse doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.watcher.PutWatchResponse.fromXContent(parser); - } - - @Override - public org.elasticsearch.protocol.xpack.watcher.PutWatchResponse convertHlrcToInternal(PutWatchResponse instance) { - return new org.elasticsearch.protocol.xpack.watcher.PutWatchResponse(instance.getId(), instance.getVersion(), - instance.getSeqNo(), instance.getPrimaryTerm(), instance.isCreated()); + protected PutWatchResponse doParseToClientInstance(XContentParser parser) throws IOException { + return PutWatchResponse.fromXContent(parser); } @Override - protected boolean supportsUnknownFields() { - return false; + protected void assertInstances(org.elasticsearch.protocol.xpack.watcher.PutWatchResponse serverTestInstance, + 
PutWatchResponse clientInstance) { + assertThat(clientInstance.getId(), equalTo(serverTestInstance.getId())); + assertThat(clientInstance.getSeqNo(), equalTo(serverTestInstance.getSeqNo())); + assertThat(clientInstance.getPrimaryTerm(), equalTo(serverTestInstance.getPrimaryTerm())); + assertThat(clientInstance.getVersion(), equalTo(serverTestInstance.getVersion())); + assertThat(clientInstance.isCreated(), equalTo(serverTestInstance.isCreated())); } } diff --git a/distribution/build.gradle b/distribution/build.gradle index 1d081a2755f85..9606604036101 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -117,7 +117,7 @@ task buildTransportModules { void copyModule(Sync copyTask, Project module) { copyTask.configure { - dependsOn { module.bundlePlugin } + dependsOn "${module.path}:bundlePlugin" from({ zipTree(module.bundlePlugin.outputs.files.singleFile) }) { includeEmptyDirs false @@ -167,6 +167,7 @@ buildDefaultLog4jConfig.doLast(writeLog4jProperties) // copy log4j2.properties from modules that have it void copyLog4jProperties(Task buildTask, Project module) { + buildTask.dependsOn "${module.path}:bundlePlugin" buildTask.doFirst { FileTree tree = zipTree(module.bundlePlugin.outputs.files.singleFile) FileTree filtered = tree.matching { diff --git a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java index f68a731edf8f7..568ddfe97df16 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java @@ -51,10 +51,12 @@ import java.nio.file.Path; import java.security.GeneralSecurityException; import java.security.KeyStore; +import java.security.MessageDigest; import java.security.SecureRandom; import java.util.ArrayList; import java.util.Base64; import java.util.List; +import java.util.Locale; import java.util.Set; import static org.hamcrest.Matchers.containsString; @@ -126,6 +128,27 @@ public void testCannotReadStringFromClosedKeystore() throws Exception { assertThat(exception.getMessage(), containsString("closed")); } + public void testValueSHA256Digest() throws Exception { + final KeyStoreWrapper keystore = KeyStoreWrapper.create(); + final String stringSettingKeyName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT) + "1"; + final String stringSettingValue = randomAlphaOfLength(32); + keystore.setString(stringSettingKeyName, stringSettingValue.toCharArray()); + final String fileSettingKeyName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT) + "2"; + final byte[] fileSettingValue = randomByteArrayOfLength(32); + keystore.setFile(fileSettingKeyName, fileSettingValue); + + final byte[] stringSettingHash = MessageDigest.getInstance("SHA-256").digest(stringSettingValue.getBytes(StandardCharsets.UTF_8)); + assertThat(keystore.getSHA256Digest(stringSettingKeyName), equalTo(stringSettingHash)); + final byte[] fileSettingHash = MessageDigest.getInstance("SHA-256").digest(fileSettingValue); + assertThat(keystore.getSHA256Digest(fileSettingKeyName), equalTo(fileSettingHash)); + + keystore.close(); + + // value hashes accessible even when the keystore is closed + assertThat(keystore.getSHA256Digest(stringSettingKeyName), equalTo(stringSettingHash)); + assertThat(keystore.getSHA256Digest(fileSettingKeyName), equalTo(fileSettingHash)); + } + public void 
testUpgradeNoop() throws Exception { KeyStoreWrapper keystore = KeyStoreWrapper.create(); SecureString seed = keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()); diff --git a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc index ec2253b2c25f4..995d9d2c08963 100644 --- a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc @@ -32,6 +32,7 @@ include-tagged::{doc-tests-file}[{api}-request-options] <1> The page parameters `from` and `size`. `from` specifies the number of {dataframe-transforms} to skip. `size` specifies the maximum number of {dataframe-transforms} to get. Defaults to `0` and `100` respectively. +<2> Whether to ignore if a wildcard expression matches no transforms. include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc index cdc6254a4e443..8a3e1a96acb14 100644 --- a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc +++ b/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc @@ -22,6 +22,19 @@ include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- <1> Constructing a new GET Stats request referencing an existing {dataframe-transform} +==== Optional Arguments + +The following arguments are optional. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-options] +-------------------------------------------------- +<1> The page parameters `from` and `size`. `from` specifies the number of data frame transform stats to skip. +`size` specifies the maximum number of data frame transform stats to get. +Defaults to `0` and `100` respectively. +<2> Whether to ignore if a wildcard expression matches no transforms. + include::../execution.asciidoc[] @@ -39,4 +52,5 @@ include-tagged::{doc-tests-file}[{api}-response] <3> The running state of the transform indexer e.g `started`, `indexing`, etc. <4> The overall transform statistics recording the number of documents indexed etc. <5> The progress of the current run in the transform. Supplies the number of docs left until the next checkpoint -and the total number of docs expected. \ No newline at end of file +and the total number of docs expected. +<6> The assigned node information if the task is currently assigned to a node and running. \ No newline at end of file diff --git a/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc index 9b05687c00875..1de4af5c5d592 100644 --- a/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc @@ -32,6 +32,7 @@ include-tagged::{doc-tests-file}[{api}-request-options] -------------------------------------------------- <1> If true wait for the data frame task to stop before responding <2> Controls the amount of time to wait until the {dataframe-job} stops. +<3> Whether to ignore if a wildcard expression matches no transforms. 
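For readers without the tagged doc-test source at hand, a minimal sketch of how these optional arguments might be set follows; the transform id and the setter names (`setWaitForCompletion`, `setTimeout`, `setAllowNoMatch`) are assumptions inferred from the callouts above, not a verbatim copy of the tagged test code.

[source,java]
--------------------------------------------------
StopDataFrameTransformRequest request =
    new StopDataFrameTransformRequest("my-transform");  // hypothetical transform id
request.setWaitForCompletion(Boolean.TRUE);             // wait for the task to stop before responding
request.setTimeout(TimeValue.timeValueSeconds(30));     // how long to wait for the transform to stop
request.setAllowNoMatch(true);                          // do not fail if a wildcard matches no transforms
--------------------------------------------------
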
include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/indices/reload_analyzers.asciidoc b/docs/java-rest/high-level/indices/reload_analyzers.asciidoc new file mode 100644 index 0000000000000..29db206bf1402 --- /dev/null +++ b/docs/java-rest/high-level/indices/reload_analyzers.asciidoc @@ -0,0 +1,50 @@ +-- +:api: reload-analyzers +:request: ReloadAnalyzersRequest +:response: ReloadAnalyzersResponse +-- + +[id="{upid}-{api}"] +=== Reload Search Analyzers API + +[id="{upid}-{api}-request"] +==== Reload Search Analyzers Request + +An +{request}+ requires an `index` argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The index to reload + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Reload Search Analyzers Response + +The returned +{response}+ allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Shard statistics. Note that reloading does not happen on each shard of an +index, but once on each node the index has shards on. The reported shard count +can therefore differ from the number of index shards +<2> Reloading details of all indices the request was executed on +<3> Details can be retrieved by index name +<4> The reloaded index name +<5> The nodes the index was reloaded on +<6> The analyzer names that were reloaded diff --git a/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc index 243c075e18b03..3a06f268836f6 100644 --- a/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc +++ b/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc @@ -19,6 +19,7 @@ A +{request}+ object requires a {dataframe-analytics-config} id. include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- <1> Constructing a new stop request referencing an existing {dataframe-analytics-config} +<2> Optionally used to stop a failed task include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc b/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc new file mode 100644 index 0000000000000..e8eeb7b3c9417 --- /dev/null +++ b/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc @@ -0,0 +1,27 @@ +-- +:api: get-builtin-privileges +:request: GetBuiltinPrivilegesRequest +:response: GetBuiltinPrivilegesResponse +-- + +[id="{upid}-{api}"] +=== Get Builtin Privileges API + +include::../execution-no-req.asciidoc[] + +[id="{upid}-{api}-response"] +==== Get Builtin Privileges Response + +The returned +{response}+ contains the following properties + +`clusterPrivileges`:: +A `Set` of all _cluster_ privileges that are understood by this node. 
+ +`indexPrivileges`:: +A `Set` of all _index_ privileges that are understood by this node. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- + diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 21ebdfab65155..27f5f38136d2e 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -155,6 +155,7 @@ include::indices/get_index.asciidoc[] include::indices/freeze_index.asciidoc[] include::indices/unfreeze_index.asciidoc[] include::indices/delete_template.asciidoc[] +include::indices/reload_analyzers.asciidoc[] == Cluster APIs @@ -418,6 +419,7 @@ The Java High Level REST Client supports the following Security APIs: * <> * <> * <<{upid}-invalidate-token>> +* <<{upid}-get-builtin-privileges>> * <<{upid}-get-privileges>> * <<{upid}-put-privileges>> * <<{upid}-delete-privileges>> @@ -435,6 +437,7 @@ include::security/put-role.asciidoc[] include::security/get-roles.asciidoc[] include::security/delete-role.asciidoc[] include::security/delete-privileges.asciidoc[] +include::security/get-builtin-privileges.asciidoc[] include::security/get-privileges.asciidoc[] include::security/clear-roles-cache.asciidoc[] include::security/clear-realm-cache.asciidoc[] @@ -582,4 +585,4 @@ include::dataframe/put_data_frame.asciidoc[] include::dataframe/delete_data_frame.asciidoc[] include::dataframe/preview_data_frame.asciidoc[] include::dataframe/start_data_frame.asciidoc[] -include::dataframe/stop_data_frame.asciidoc[] \ No newline at end of file +include::dataframe/stop_data_frame.asciidoc[] diff --git a/docs/painless/painless-guide/painless-datetime.asciidoc b/docs/painless/painless-guide/painless-datetime.asciidoc index 5e98b39aaa713..45eb34b75726d 100644 --- a/docs/painless/painless-guide/painless-datetime.asciidoc +++ b/docs/painless/painless-guide/painless-datetime.asciidoc @@ -29,7 +29,7 @@ complex:: a datetime representation as a complex type (<>) that abstracts away internal details of how the datetime is stored and often provides utilities for modification and comparison; in Painless this is typically a -<> +<> Switching between different representations of datetimes is often necessary to achieve a script's objective(s). A typical pattern in a script is to switch a @@ -335,6 +335,43 @@ if (zdt1.isAfter(zdt2)) { } ---- +==== Datetime Zone + +Both string datetimes and complex datetimes have a timezone with a default of +`UTC`. Numeric datetimes do not have enough explicit information to +have a timezone, so `UTC` is always assumed. Use +<> (or fields) in +conjunction with a <> to change +the timezone for a complex datetime. Parse a string datetime into a complex +datetime to change the timezone, and then format the complex datetime back into +a desired string datetime. Note many complex datetimes are immutable so upon +modification a new complex datetime is created that requires +<> or immediate use. 
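Because a numeric datetime carries no timezone of its own, one way to attach a zone is to convert it to a complex datetime first. A minimal sketch (the epoch-millisecond value is arbitrary), shown ahead of the fuller examples below:

[source,Painless]
----
long milliSinceEpoch = 434931330000L;                     // numeric datetime, implicitly UTC
Instant instant = Instant.ofEpochMilli(milliSinceEpoch);  // convert to an intermediate Instant
ZonedDateTime utc = ZonedDateTime.ofInstant(instant, ZoneId.of('Z'));
ZonedDateTime pst =
    utc.withZoneSameInstant(ZoneId.of('America/Los_Angeles'));  // same instant, different zone
----
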
+ +===== Datetime Zone Examples + +* Modify the timezone for a complex datetime ++ +[source,Painless] +---- +ZonedDateTime utc = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z')); +ZonedDateTime pst = utc.withZoneSameInstant(ZoneId.of('America/Los_Angeles')); +---- ++ +* Modify the timezone for a string datetime ++ +[source,Painless] +---- +String gmtString = 'Thu, 13 Oct 1983 22:15:30 GMT'; +ZonedDateTime gmtZdt = ZonedDateTime.parse(gmtString, + DateTimeFormatter.RFC_1123_DATE_TIME); <1> +ZonedDateTime pstZdt = + gmtZdt.withZoneSameInstant(ZoneId.of('America/Los_Angeles')); +String pstString = pstZdt.format(DateTimeFormatter.RFC_1123_DATE_TIME); +---- +<1> Note the use of a built-in DateTimeFormatter. + ==== Datetime Input There are several common ways datetimes are used as input for a script @@ -372,7 +409,7 @@ through an input called `params`. + [source,Painless] ---- -long inputDatetime = params['input_datetime']; +long inputDateTime = params['input_datetime']; Instant instant = Instant.ofEpochMilli(inputDateTime); ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, ZoneId.of('Z')); ---- @@ -432,7 +469,7 @@ source document is most commonly accessible through an input called + [source,Painless] ---- -long inputDatetime = ctx['_source']['input_datetime']; <1> +long inputDateTime = ctx['_source']['input_datetime']; <1> Instant instant = Instant.ofEpochMilli(inputDateTime); ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, ZoneId.of('Z')); ---- @@ -513,10 +550,9 @@ String output = input.format(DateTimeFormatter.ISO_INSTANT); <1> + ** Assumptions: + -*** The fields `start_datetime` and `end_datetime` may *not* exist in all -indexes as part of the query -*** The fields `start_datetime` and `end_datetime` may *not* have values in all -indexed documents +*** The fields `start` and `end` may *not* exist in all indexes as part of the +query +*** The fields `start` and `end` may *not* have values in all indexed documents + ** Mappings: + @@ -527,10 +563,10 @@ indexed documents ... "properties": { ... - "start_datetime": { + "start": { "type": "date" }, - "end_datetime": { + "end": { "type": "date" } ... @@ -544,14 +580,13 @@ indexed documents + [source,Painless] ---- -if (doc.containsKey('start_datetime') && doc.containsKey('end_datetime')) { <1> +if (doc.containsKey('start') && doc.containsKey('end')) { <1> - if (doc['start_datetime'].size() > 0 && doc['end_datetime'].size() > 0) { <2> + if (doc['start'].size() > 0 && doc['end'].size() > 0) { <2> - def startDatetime = doc['start_datetime'].value; - def endDatetime = doc['end_datetime'].value; - long differenceInMillis = - ChronoUnit.MILLIS.between(startDateTime, endDateTime); + def start = doc['start'].value; + def end = doc['end'].value; + long differenceInMillis = ChronoUnit.MILLIS.between(start, end); // handle difference in times } else { @@ -564,6 +599,122 @@ if (doc.containsKey('start_datetime') && doc.containsKey('end_datetime')) { <1> <1> When a query's results span multiple indexes, some indexes may not contain a specific field. Use the `containsKey` method call on the `doc` input to ensure a field exists as part of the index for the current document. -<2> Some field's within a document may have no values. Use the `size` method +<2> Some fields within a document may have no values. Use the `size` method call on a field within the `doc` input to ensure that field has at least one value for the current document. + +==== Datetime Now + +Under most Painless contexts the current datetime, `now`, is not supported. 
+There are two primary reasons for this. The first is that scripts are often
+run once per document, so each time the script is run a different `now` is
+returned. The second is that scripts are often run in a distributed fashion
+without a way to appropriately synchronize `now`. Instead, pass in a
+user-defined parameter with either a string datetime or numeric datetime for
+`now`. A numeric datetime is preferred as there is no need to parse it for
+comparison.
+
+===== Datetime Now Examples
+
+* Use a numeric datetime as `now`
++
+** Assumptions:
++
+*** The field `input_datetime` exists in all indexes as part of the query
+*** All indexed documents contain the field `input_datetime`
++
+** Mappings:
++
+[source,JSON]
+----
+{
+    "mappings": {
+        ...
+        "properties": {
+            ...
+            "input_datetime": {
+                "type": "date"
+            }
+            ...
+        }
+        ...
+    }
+}
+----
++
+** Input:
++
+[source,JSON]
+----
+...
+"script": {
+    ...
+    "params": {
+        "now": <a numeric datetime>
+    }
+}
+...
+----
++
+** Script:
++
+[source,Painless]
+----
+long now = params['now'];
+def inputDateTime = doc['input_datetime'].value;
+long millisDateTime = inputDateTime.toInstant().toEpochMilli();
+long elapsedTime = now - millisDateTime;
+----
++
+* Use a string datetime as `now`
++
+** Assumptions:
++
+*** The field `input_datetime` exists in all indexes as part of the query
+*** All indexed documents contain the field `input_datetime`
++
+** Mappings:
++
+[source,JSON]
+----
+{
+    "mappings": {
+        ...
+        "properties": {
+            ...
+            "input_datetime": {
+                "type": "date"
+            }
+            ...
+        }
+        ...
+    }
+}
+----
++
+** Input:
++
+[source,JSON]
+----
+...
+"script": {
+    ...
+    "params": {
+        "now": "<a string datetime>"
+    }
+}
+...
+----
++
+** Script:
++
+[source,Painless]
+----
+String nowString = params['now'];
+ZonedDateTime nowZdt = ZonedDateTime.parse(nowString); <1>
+long now = nowZdt.toInstant().toEpochMilli();
+def inputDateTime = doc['input_datetime'].value;
+long millisDateTime = inputDateTime.toInstant().toEpochMilli();
+long elapsedTime = now - millisDateTime;
+----
+<1> Note that this parses the same string datetime every time the script runs. Use a
+numeric datetime to avoid a significant performance hit.
diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc
index b9fbddc65c125..9f186ef1ffbaa 100644
--- a/docs/reference/aggregations/bucket.asciidoc
+++ b/docs/reference/aggregations/bucket.asciidoc
@@ -55,6 +55,8 @@ include::bucket/parent-aggregation.asciidoc[]
 
 include::bucket/range-aggregation.asciidoc[]
 
+include::bucket/rare-terms-aggregation.asciidoc[]
+
 include::bucket/reverse-nested-aggregation.asciidoc[]
 
 include::bucket/sampler-aggregation.asciidoc[]
@@ -64,3 +66,4 @@ include::bucket/significantterms-aggregation.asciidoc[]
 include::bucket/significanttext-aggregation.asciidoc[]
 
 include::bucket/terms-aggregation.asciidoc[]
+
diff --git a/docs/reference/aggregations/bucket/rare-terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/rare-terms-aggregation.asciidoc
new file mode 100644
index 0000000000000..e2537b61aefda
--- /dev/null
+++ b/docs/reference/aggregations/bucket/rare-terms-aggregation.asciidoc
@@ -0,0 +1,357 @@
+[[search-aggregations-bucket-rare-terms-aggregation]]
+=== Rare Terms Aggregation
+
+A multi-bucket value source based aggregation which finds "rare" terms -- terms that are at the long-tail
+of the distribution and are not frequent. Conceptually, this is like a `terms` aggregation that is
+sorted by `_count` ascending. As noted in the <>,
+actually ordering a `terms` agg by count ascending has unbounded error.
Instead, you should use the `rare_terms` +aggregation + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT /products +{ + "mappings": { + "properties": { + "genre": { + "type": "keyword" + }, + "product": { + "type": "keyword" + } + } + } +} + +POST /products/_doc/_bulk?refresh +{"index":{"_id":0}} +{"genre": "rock", "product": "Product A"} +{"index":{"_id":1}} +{"genre": "rock"} +{"index":{"_id":2}} +{"genre": "rock"} +{"index":{"_id":3}} +{"genre": "jazz", "product": "Product Z"} +{"index":{"_id":4}} +{"genre": "jazz"} +{"index":{"_id":5}} +{"genre": "electronic"} +{"index":{"_id":6}} +{"genre": "electronic"} +{"index":{"_id":7}} +{"genre": "electronic"} +{"index":{"_id":8}} +{"genre": "electronic"} +{"index":{"_id":9}} +{"genre": "electronic"} +{"index":{"_id":10}} +{"genre": "swing"} + +------------------------------------------------- +// NOTCONSOLE +// TESTSETUP + +////////////////////////// + +==== Syntax + +A `rare_terms` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +{ + "rare_terms": { + "field": "the_field", + "max_doc_count": 1 + } +} +-------------------------------------------------- +// NOTCONSOLE + +.`rare_terms` Parameters +|=== +|Parameter Name |Description |Required |Default Value +|`field` |The field we wish to find rare terms in |Required | +|`max_doc_count` |The maximum number of documents a term should appear in. |Optional |`1` +|`precision` |The precision of the internal CuckooFilters. Smaller precision leads to +better approximation, but higher memory usage. Cannot be smaller than `0.00001` |Optional |`0.01` +|`include` |Terms that should be included in the aggregation|Optional | +|`exclude` |Terms that should be excluded from the aggregation|Optional | +|`missing` |The value that should be used if a document does not have the field being aggregated|Optional | +|=== + + +Example: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "genres" : { + "rare_terms" : { + "field" : "genre" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[s/_search/_search\?filter_path=aggregations/] + +Response: + +[source,js] +-------------------------------------------------- +{ + ... + "aggregations" : { + "genres" : { + "buckets" : [ + { + "key" : "swing", + "doc_count" : 1 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\.//] + +In this example, the only bucket that we see is the "swing" bucket, because it is the only term that appears in +one document. If we increase the `max_doc_count` to `2`, we'll see some more buckets: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "genres" : { + "rare_terms" : { + "field" : "genre", + "max_doc_count": 2 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[s/_search/_search\?filter_path=aggregations/] + +This now shows the "jazz" term which has a `doc_count` of 2": + +[source,js] +-------------------------------------------------- +{ + ... 
+ "aggregations" : { + "genres" : { + "buckets" : [ + { + "key" : "swing", + "doc_count" : 1 + }, + { + "key" : "jazz", + "doc_count" : 2 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\.//] + +[[search-aggregations-bucket-rare-terms-aggregation-max-doc-count]] +==== Maximum document count + +The `max_doc_count` parameter is used to control the upper bound of document counts that a term can have. There +is not a size limitation on the `rare_terms` agg like `terms` agg has. This means that terms +which match the `max_doc_count` criteria will be returned. The aggregation functions in this manner to avoid +the order-by-ascending issues that afflict the `terms` aggregation. + +This does, however, mean that a large number of results can be returned if chosen incorrectly. +To limit the danger of this setting, the maximum `max_doc_count` is 100. + +[[search-aggregations-bucket-rare-terms-aggregation-max-buckets]] +==== Max Bucket Limit + +The Rare Terms aggregation is more liable to trip the `search.max_buckets` soft limit than other aggregations due +to how it works. The `max_bucket` soft-limit is evaluated on a per-shard basis while the aggregation is collecting +results. It is possible for a term to be "rare" on a shard but become "not rare" once all the shard results are +merged together. This means that individual shards tend to collect more buckets than are truly rare, because +they only have their own local view. This list is ultimately pruned to the correct, smaller list of rare +terms on the coordinating node... but a shard may have already tripped the `max_buckets` soft limit and aborted +the request. + +When aggregating on fields that have potentially many "rare" terms, you may need to increase the `max_buckets` soft +limit. Alternatively, you might need to find a way to filter the results to return fewer rare values (smaller time +span, filter by category, etc), or re-evaluate your definition of "rare" (e.g. if something +appears 100,000 times, is it truly "rare"?) + +[[search-aggregations-bucket-rare-terms-aggregation-approximate-counts]] +==== Document counts are approximate + +The naive way to determine the "rare" terms in a dataset is to place all the values in a map, incrementing counts +as each document is visited, then return the bottom `n` rows. This does not scale beyond even modestly sized data +sets. A sharded approach where only the "top n" values are retained from each shard (ala the `terms` aggregation) +fails because the long-tail nature of the problem means it is impossible to find the "top n" bottom values without +simply collecting all the values from all shards. + +Instead, the Rare Terms aggregation uses a different approximate algorithm: + +1. Values are placed in a map the first time they are seen. +2. Each addition occurrence of the term increments a counter in the map +3. If the counter > the `max_doc_count` threshold, the term is removed from the map and placed in a +https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf[CuckooFilter] +4. The CuckooFilter is consulted on each term. If the value is inside the filter, it is known to be above the +threshold already and skipped. + +After execution, the map of values is the map of "rare" terms under the `max_doc_count` threshold. This map and CuckooFilter +are then merged with all other shards. If there are terms that are greater than the threshold (or appear in +a different shard's CuckooFilter) the term is removed from the merged list. 
The final map of values is returned +to the user as the "rare" terms. + +CuckooFilters have the possibility of returning false positives (they can say a value exists in their collection when +it actually does not). Since the CuckooFilter is being used to see if a term is over threshold, this means a false positive +from the CuckooFilter will mistakenly say a value is common when it is not (and thus exclude it from it final list of buckets). +Practically, this means the aggregations exhibits false-negative behavior since the filter is being used "in reverse" +of how people generally think of approximate set membership sketches. + +CuckooFilters are described in more detail in the paper: + +https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf[Fan, Bin, et al. "Cuckoo filter: Practically better than bloom."] +Proceedings of the 10th ACM International on Conference on emerging Networking Experiments and Technologies. ACM, 2014. + +==== Precision + +Although the internal CuckooFilter is approximate in nature, the false-negative rate can be controlled with a +`precision` parameter. This allows the user to trade more runtime memory for more accurate results. + +The default precision is `0.001`, and the smallest (e.g. most accurate and largest memory overhead) is `0.00001`. +Below are some charts which demonstrate how the accuracy of the aggregation is affected by precision and number +of distinct terms. + +The X-axis shows the number of distinct values the aggregation has seen, and the Y-axis shows the percent error. +Each line series represents one "rarity" condition (ranging from one rare item to 100,000 rare items). For example, +the orange "10" line means ten of the values were "rare" (`doc_count == 1`), out of 1-20m distinct values (where the +rest of the values had `doc_count > 1`) + +This first chart shows precision `0.01`: + +image:images/rare_terms/accuracy_01.png[] + +And precision `0.001` (the default): + +image:images/rare_terms/accuracy_001.png[] + +And finally `precision 0.0001`: + +image:images/rare_terms/accuracy_0001.png[] + +The default precision of `0.001` maintains an accuracy of < 2.5% for the tested conditions, and accuracy slowly +degrades in a controlled, linear fashion as the number of distinct values increases. + +The default precision of `0.001` has a memory profile of `1.748⁻⁶ * n` bytes, where `n` is the number +of distinct values the aggregation has seen (it can also be roughly eyeballed, e.g. 20 million unique values is about +30mb of memory). The memory usage is linear to the number of distinct values regardless of which precision is chosen, +the precision only affects the slope of the memory profile as seen in this chart: + +image:images/rare_terms/memory.png[] + +For comparison, an equivalent terms aggregation at 20 million buckets would be roughly +`20m * 69b == ~1.38gb` (with 69 bytes being a very optimistic estimate of an empty bucket cost, far lower than what +the circuit breaker accounts for). So although the `rare_terms` agg is relatively heavy, it is still orders of +magnitude smaller than the equivalent terms aggregation + +==== Filtering Values + +It is possible to filter the values for which buckets will be created. This can be done using the `include` and +`exclude` parameters which are based on regular expression strings or arrays of exact values. Additionally, +`include` clauses can filter using `partition` expressions. 
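+
+As an illustrative sketch (not a tested request), assuming the `include` object
+accepts the same `partition` and `num_partitions` fields that the `terms`
+aggregation documents, a partitioned request might look like this:
+
+[source,js]
+--------------------------------------------------
+GET /_search
+{
+    "aggs" : {
+        "genres" : {
+            "rare_terms" : {
+                "field" : "genre",
+                "include" : {
+                    "partition" : 0,
+                    "num_partitions" : 2 <1>
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+// NOTCONSOLE
+<1> The `partition` and `num_partitions` fields are assumed to mirror the
+`terms` aggregation's partition syntax; each request then only considers the
+terms that hash into the requested partition.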
+ +===== Filtering Values with regular expressions + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "genres" : { + "rare_terms" : { + "field" : "genre", + "include" : "swi*", + "exclude" : "electro*" + } + } + } +} +-------------------------------------------------- +// CONSOLE + +In the above example, buckets will be created for all the tags that starts with `swi`, except those starting +with `electro` (so the tag `swing` will be aggregated but not `electro_swing`). The `include` regular expression will determine what +values are "allowed" to be aggregated, while the `exclude` determines the values that should not be aggregated. When +both are defined, the `exclude` has precedence, meaning, the `include` is evaluated first and only then the `exclude`. + +The syntax is the same as <>. + +===== Filtering Values with exact values + +For matching based on exact values the `include` and `exclude` parameters can simply take an array of +strings that represent the terms as they are found in the index: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "genres" : { + "rare_terms" : { + "field" : "genre", + "include" : ["swing", "rock"], + "exclude" : ["jazz"] + } + } + } +} +-------------------------------------------------- +// CONSOLE + + +==== Missing value + +The `missing` parameter defines how documents that are missing a value should be treated. +By default they will be ignored but it is also possible to treat them as if they +had a value. + +[source,js] +-------------------------------------------------- +GET /_search +{ + "aggs" : { + "genres" : { + "rare_terms" : { + "field" : "genre", + "missing": "N/A" <1> + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> Documents without a value in the `tags` field will fall into the same bucket as documents that have the value `N/A`. + +==== Nested, RareTerms, and scoring sub-aggregations + +The RareTerms aggregation has to operate in `breadth_first` mode, since it needs to prune terms as doc count thresholds +are breached. This requirement means the RareTerms aggregation is incompatible with certain combinations of aggregations +that require `depth_first`. In particular, scoring sub-aggregations that are inside a `nested` force the entire aggregation tree to run +in `depth_first` mode. This will throw an exception since RareTerms is unable to process `depth_first`. + +As a concrete example, if `rare_terms` aggregation is the child of a `nested` aggregation, and one of the child aggregations of `rare_terms` +needs document scores (like a `top_hits` aggregation), this will throw an exception. \ No newline at end of file diff --git a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc index 2ff19cebe893e..34646a0413e36 100644 --- a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc @@ -4,8 +4,9 @@ A token filter which removes elisions. For example, "l'avion" (the plane) will tokenized as "avion" (plane). -Accepts `articles` parameter which is a set of stop words articles. Also accepts -`articles_case`, which indicates whether the filter treats those articles as +Requires either an `articles` parameter which is a set of stop word articles, or +`articles_path` which points to a text file containing the stop set. 
Also optionally +accepts `articles_case`, which indicates whether the filter treats those articles as case sensitive. For example: diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc index 139f7c3ab0ad0..f47e97d27ea7f 100644 --- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc @@ -43,6 +43,8 @@ Additional settings are: * `expand` (defaults to `true`). * `lenient` (defaults to `false`). If `true` ignores exceptions while parsing the synonym configuration. It is important to note that only those synonym rules which cannot get parsed are ignored. For instance consider the following request: + + [source,js] -------------------------------------------------- diff --git a/docs/reference/data-frames/apis/delete-transform.asciidoc b/docs/reference/data-frames/apis/delete-transform.asciidoc index 77c74b95c438c..d772bc3c15d89 100644 --- a/docs/reference/data-frames/apis/delete-transform.asciidoc +++ b/docs/reference/data-frames/apis/delete-transform.asciidoc @@ -12,31 +12,38 @@ beta[] Deletes an existing {dataframe-transform}. - -==== Request +[discrete] +[[delete-data-frame-transform-request]] +==== {api-request-title} `DELETE _data_frame/transforms/` -==== Description - -NOTE: Before you can delete the {dataframe-transform}, you must stop it. - -==== Path Parameters - -`data_frame_transform_id` (required):: - (string) Identifier for the {dataframe-transform}. - -==== Authorization +[discrete] +[[delete-data-frame-transform-prereqs]] +==== {api-prereq-title} -If the {es} {security-features} are enabled, you must have +* If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. For more information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -==== Examples +[discrete] +[[delete-data-frame-transform-desc]] +==== {api-description-title} + +NOTE: Before you can delete the {dataframe-transform}, you must stop it. + +[discrete] +[[delete-data-frame-transform-path-parms]] +==== {api-path-parms-title} + +`` (Required):: + (string) Identifier for the {dataframe-transform}. -The following example deletes the `ecommerce_transform` {dataframe-transform}: +[discrete] +[[delete-data-frame-transform-examples]] +==== {api-examples-title} [source,js] -------------------------------------------------- diff --git a/docs/reference/data-frames/apis/get-transform-stats.asciidoc b/docs/reference/data-frames/apis/get-transform-stats.asciidoc index 4c91c0cf4a6a2..889a109b8a376 100644 --- a/docs/reference/data-frames/apis/get-transform-stats.asciidoc +++ b/docs/reference/data-frames/apis/get-transform-stats.asciidoc @@ -12,8 +12,9 @@ beta[] Retrieves usage information for {dataframe-transforms}. - -==== Request +[discrete] +[[get-data-frame-transform-stats-request]] +==== {api-request-title} `GET _data_frame/transforms//_stats` @@ -26,39 +27,79 @@ Retrieves usage information for {dataframe-transforms}. `GET _data_frame/transforms/*/_stats` + -//===== Description +[discrete] +[[get-data-frame-transform-stats-prereqs]] +==== {api-prereq-title} -==== Path Parameters +* If the {es} {security-features} are enabled, you must have +`monitor_data_frame_transforms` cluster privileges to use this API. 
The built-in +`data_frame_transforms_user` role has these privileges. For more information, +see {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. -`data_frame_transform_id`:: +[discrete] +[[get-data-frame-transform-stats-desc]] +==== {api-description-title} + +You can get statistics for multiple {dataframe-transforms} in a single API +request by using a comma-separated list of identifiers or a wildcard expression. +You can get statistics for all {dataframe-transforms} by using `_all`, by +specifying `*` as the ``, or by omitting the +``. + +[discrete] +[[get-data-frame-transform-stats-path-parms]] +==== {api-path-parms-title} + +`` (Optional):: (string) Identifier for the {dataframe-transform}. It can be a {dataframe-transform} identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all {dataframe-transforms}. - -`from`:: - (integer) Skips the specified number of {dataframe-transforms}. The - default value is `0`. - -`size`:: - (integer) Specifies the maximum number of {dataframe-transforms} to obtain. The default value is `100`. - -==== Results - -The API returns the following information: + +[discrete] +[[get-data-frame-transform-stats-query-parms]] +==== {api-query-parms-title} + +`allow_no_match` (Optional):: + (boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {dataframe-transforms} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `transforms` array when +there are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- + +`from` (Optional):: + (integer) Skips the specified number of {dataframe-transforms}. The + default value is `0`. + +`size` (Optional):: + (integer) Specifies the maximum number of {dataframe-transforms} to obtain. + The default value is `100`. + +[discrete] +[[get-data-frame-transform-stats-response]] +==== {api-response-body-title} `transforms`:: (array) An array of statistics objects for {dataframe-transforms}, which are sorted by the `id` value in ascending order. + +[[get-data-frame-transform-stats-response-codes]] +==== {api-response-codes-title} -==== Authorization - -If the {es} {security-features} are enabled, you must have -`monitor_data_frame_transforms` cluster privileges to use this API. The built-in -`data_frame_transforms_user` role has these privileges. For more information, -see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +`404` (Missing resources):: + If `allow_no_match` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. +[discrete] +[[get-data-frame-transform-stats-example]] ==== Examples The following example skips for the first five {dataframe-transforms} and diff --git a/docs/reference/data-frames/apis/get-transform.asciidoc b/docs/reference/data-frames/apis/get-transform.asciidoc index c46bd99138e6b..bf7901c191e33 100644 --- a/docs/reference/data-frames/apis/get-transform.asciidoc +++ b/docs/reference/data-frames/apis/get-transform.asciidoc @@ -12,8 +12,9 @@ beta[] Retrieves configuration information for {dataframe-transforms}. 
- -==== Request +[discrete] +[[get-data-frame-transform-request]] +==== {api-request-title} `GET _data_frame/transforms/` + @@ -25,40 +26,80 @@ Retrieves configuration information for {dataframe-transforms}. `GET _data_frame/transforms/*` -//===== Description +[discrete] +[[get-data-frame-transform-prereqs]] +==== {api-prereq-title} -==== Path Parameters +* If the {es} {security-features} are enabled, you must have +`monitor_data_frame_transforms` cluster privileges to use this API. The built-in +`data_frame_transforms_user` role has these privileges. For more information, +see {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. -`data_frame_transform_id`:: +[discrete] +[[get-data-frame-transform-desc]] +==== {api-description-title} + +You can get information for multiple {dataframe-transforms} in a single API +request by using a comma-separated list of identifiers or a wildcard expression. +You can get information for all {dataframe-transforms} by using `_all`, by +specifying `*` as the ``, or by omitting the +``. + +[discrete] +[[get-data-frame-transform-path-parms]] +==== {api-path-parms-title} + +`` (Optional):: (string) Identifier for the {dataframe-transform}. It can be a {dataframe-transform} identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all {dataframe-transforms}. - -`from`:: - (integer) Skips the specified number of {dataframe-transforms}. The - default value is `0`. - -`size`:: - (integer) Specifies the maximum number of {dataframe-transforms} to obtain. The default value is `100`. - -==== Results - -The API returns the following information: + +[discrete] +[[get-data-frame-transform-query-parms]] +==== {api-query-parms-title} + +`allow_no_match` (Optional):: +(boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {dataframe-transforms} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `transforms` array when +there are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- + +`from` (Optional):: + (integer) Skips the specified number of {dataframe-transforms}. The + default value is `0`. + +`size` (Optional):: + (integer) Specifies the maximum number of {dataframe-transforms} to obtain. + The default value is `100`. + +[discrete] +[[get-data-frame-transform-response]] +==== {api-response-body-title} `transforms`:: (array) An array of transform resources, which are sorted by the `id` value in ascending order. + +[[get-data-frame-transform-response-codes]] +==== {api-response-codes-title} -==== Authorization - -If the {es} {security-features} are enabled, you must have -`monitor_data_frame_transforms` cluster privileges to use this API. The built-in -`data_frame_transforms_user` role has these privileges. For more information, -see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +`404` (Missing resources):: + If `allow_no_match` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. 
-==== Examples +[discrete] +[[get-data-frame-transform-example]] +==== {api-examples-title} The following example retrieves information about a maximum of ten transforms: diff --git a/docs/reference/data-frames/apis/preview-transform.asciidoc b/docs/reference/data-frames/apis/preview-transform.asciidoc index dd8fb6d74d379..a4338d0ef60b3 100644 --- a/docs/reference/data-frames/apis/preview-transform.asciidoc +++ b/docs/reference/data-frames/apis/preview-transform.asciidoc @@ -12,24 +12,17 @@ beta[] Previews a {dataframe-transform}. - -==== Request +[discrete] +[[preview-data-frame-transform-request]] +==== {api-request-title} `POST _data_frame/transforms/_preview` -//==== Description -//==== Path Parameters - -==== Request Body - -`source`:: The source index or index pattern. +[discrete] +[[preview-data-frame-transform-prereq]] +==== {api-prereq-title} -`pivot`:: Defines the pivot function `group by` fields and the aggregation to -reduce the data. - -==== Authorization - -If the {es} {security-features} are enabled, you must have +* If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. You must also have `read` and `view_index_metadata` privileges on the source index for the @@ -37,10 +30,19 @@ If the {es} {security-features} are enabled, you must have {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -==== Examples +[discrete] +[[preview-data-frame-transform-request-body]] +==== {api-request-body-title} + +`source` (Required):: + (object) The source index or index pattern. -The following example obtains a preview of a {dataframe-transform} on the {kib} -eCommerce sample data: +`pivot` (Required):: + (object) Defines the pivot function `group by` fields and the aggregation to + reduce the data. See <>. + +[discrete] +==== {api-examples-title} [source,js] -------------------------------------------------- @@ -88,7 +90,17 @@ The data that is returned for this example is as follows: "customer_id" : "12" } ... - ] + ], + "mappings": { + "properties": { + "max_price": { + "type": "double" + }, + "customer_id": { + "type": "keyword" + } + } + } } ---- // NOTCONSOLE diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index 93ce6db6df3ab..6910cb85a25f5 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -12,12 +12,27 @@ beta[] Instantiates a {dataframe-transform}. - -==== Request +[discrete] +[[put-data-frame-transform-request]] +==== {api-request-title} `PUT _data_frame/transforms/` -===== Description +[discrete] +[[put-data-frame-transform-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have +`manage_data_frame_transforms` cluster privileges to use this API. The built-in +`data_frame_transforms_admin` role has these privileges. You must also +have `read` and `view_index_metadata` privileges on the source index and `read`, +`create_index`, and `index` privileges on the destination index. For more +information, see {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + +[discrete] +[[put-data-frame-transform-desc]] +==== {api-description-title} IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. 
Do not put a {dataframe-transform} directly into any @@ -25,42 +40,37 @@ IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. If {es} {security-features} are enabled, do not give users any privileges on `.data-frame-internal*` indices. -==== Path Parameters +[discrete] +[[put-data-frame-transform-path-parms]] +==== {api-path-parms-title} -`data_frame_transform_id` (required):: +`` (Required):: (string) Identifier for the {dataframe-transform}. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. +[discrete] +[[put-data-frame-transform-request-body]] +==== {api-request-body-title} -==== Request Body - -`source` (required):: (object) The source configuration, consisting of `index` and optionally -a `query`. +`description` (Optional):: + (string) Free text description of the {dataframe-transform}. -`dest` (required):: (object) The destination configuration, consisting of `index` and optionally a -`pipeline` id. +`dest` (Required):: + (object) The destination configuration, which consists of `index` and + optionally a `pipeline` id. -`pivot`:: (object) Defines the pivot function `group by` fields and the aggregation to -reduce the data. See <>. - -`description`:: Optional free text description of the data frame transform - - -==== Authorization - -If the {es} {security-features} are enabled, you must have -`manage_data_frame_transforms` cluster privileges to use this API. The built-in -`data_frame_transforms_admin` role has these privileges. You must also -have `read` and `view_index_metadata` privileges on the source index and `read`, -`create_index`, and `index` privileges on the destination index. For more -information, see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +`pivot` (Optional):: + (object) Defines the pivot function `group by` fields and the aggregation to + reduce the data. See <>. -==== Examples +`source` (Required):: + (object) The source configuration, which consists of `index` and optionally + a `query`. -The following example creates a {dataframe-transform} for the {kib} eCommerce -sample data: +[discrete] +[[put-data-frame-transform-example]] +==== {api-examples-title} [source,js] -------------------------------------------------- diff --git a/docs/reference/data-frames/apis/start-transform.asciidoc b/docs/reference/data-frames/apis/start-transform.asciidoc index 7baefb34ee313..b76bcb0dd4796 100644 --- a/docs/reference/data-frames/apis/start-transform.asciidoc +++ b/docs/reference/data-frames/apis/start-transform.asciidoc @@ -12,32 +12,35 @@ beta[] Starts one or more {dataframe-transforms}. -==== Request +[discrete] +[[start-data-frame-transform-request]] +==== {api-request-title} `POST _data_frame/transforms//_start` -//==== Description +[discrete] +[[start-data-frame-transform-prereqs]] +==== {api-prereq-title} -==== Path Parameters - -`data_frame_transform_id` (required):: - (string) Identifier for the {dataframe-transform}. This identifier can contain - lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It - must start and end with alphanumeric characters. - -//==== Request Body -==== Authorization - -If the {es} {security-features} are enabled, you must have +* If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. 
You must also have `view_index_metadata` privileges on the source index for the {dataframe-transform}. For more information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -==== Examples +[discrete] +[[start-data-frame-transform-path-parms]] +==== {api-path-parms-title} + +`` (Required):: + (string) Identifier for the {dataframe-transform}. This identifier can contain + lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It + must start and end with alphanumeric characters. -The following example starts the `ecommerce_transform` {dataframe-transform}: +[discrete] +[[start-data-frame-transform-example]] +==== {api-examples-title} [source,js] -------------------------------------------------- diff --git a/docs/reference/data-frames/apis/stop-transform.asciidoc b/docs/reference/data-frames/apis/stop-transform.asciidoc index 58bff626f7da6..80c2654babe0d 100644 --- a/docs/reference/data-frames/apis/stop-transform.asciidoc +++ b/docs/reference/data-frames/apis/stop-transform.asciidoc @@ -12,7 +12,9 @@ beta[] Stops one or more {dataframe-transforms}. -==== Request +[discrete] +[[stop-data-frame-transform-request]] +==== {api-request-title} `POST _data_frame/transforms//_stop` + @@ -20,44 +22,81 @@ Stops one or more {dataframe-transforms}. `POST _data_frame/transforms/_all/_stop` +[discrete] +[[stop-data-frame-transform-prereq]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have +`manage_data_frame_transforms` cluster privileges to use this API. The built-in +`data_frame_transforms_admin` role has these privileges. For more information, +see {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + +[discrete] +[[stop-data-frame-transform-desc]] +==== {api-description-title} -==== Description You can stop multiple {dataframe-transforms} in a single API request by using a comma-separated list of {dataframe-transforms} or a wildcard expression. -All {dataframe-transforms} can be stopped by using `_all` or `*` as the ``. +All {dataframe-transforms} can be stopped by using `_all` or `*` as the +``. -==== Path Parameters +[discrete] +[[stop-data-frame-transform-path-parms]] +==== {api-path-parms-title} -`data_frame_transform_id` (required):: +`` (Required):: (string) Identifier for the {dataframe-transform}. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -==== Query Parameters - -`wait_for_completion`:: - (boolean) If set to true, causes the API to block until the indexer state completely stops. If set to false, the API returns immediately and the indexer will be stopped asynchronously in the background. Defaults to `false`. - - `timeout`:: - (time value) If `wait_for_completion=true`, the API blocks for (at maximum) - the specified duration while waiting for the transform to stop. If more than - `timeout` time has passed, the API throws a timeout exception. Even if a - timeout exception is thrown, the stop request is still processing and - eventually moves the transform to `STOPPED`. The timeout simply means the API - call itself timed out while waiting for the status change. Defaults to `30s` - -//==== Request Body -==== Authorization - -If the {es} {security-features} are enabled, you must have -`manage_data_frame_transforms` cluster privileges to use this API. 
The built-in -`data_frame_transforms_admin` role has these privileges. For more information, -see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. - -==== Examples - -The following example stops the `ecommerce_transform` {dataframe-transform}: +[discrete] +[[stop-data-frame-transform-query-parms]] +==== {api-query-parms-title} + +`allow_no_match` (Optional):: +(boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {dataframe-transforms} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns a successful acknowledgement message +when there are no matches. When there are only partial matches, the API stops +the appropriate {dataframe-transforms}. For example, if the request contains +`test-id1*,test-id2*` as the identifiers and there are no {dataframe-transforms} +that match `test-id2*`, the API nonetheless stops the {dataframe-transforms} +that match `test-id1*`. + +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- + +`timeout` (Optional):: + (time value) If `wait_for_completion=true`, the API blocks for (at maximum) + the specified duration while waiting for the transform to stop. If more than + `timeout` time has passed, the API throws a timeout exception. Even if a + timeout exception is thrown, the stop request is still processing and + eventually moves the transform to `STOPPED`. The timeout simply means the API + call itself timed out while waiting for the status change. Defaults to `30s` + +`wait_for_completion` (Optional):: + (boolean) If set to `true`, causes the API to block until the indexer state + completely stops. If set to `false`, the API returns immediately and the + indexer will be stopped asynchronously in the background. Defaults to `false`. + +[discrete] +[[stop-data-frame-transform-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_match` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. + +[discrete] +[[stop-data-frame-transform-example]] +==== {api-examples-title} [source,js] -------------------------------------------------- diff --git a/docs/reference/docs/data-replication.asciidoc b/docs/reference/docs/data-replication.asciidoc index b83858cecfd8d..28389fb05ba94 100644 --- a/docs/reference/docs/data-replication.asciidoc +++ b/docs/reference/docs/data-replication.asciidoc @@ -5,12 +5,12 @@ [float] === Introduction -Each index in Elasticsearch is <> +Each index in Elasticsearch is <> and each shard can have multiple copies. These copies are known as a _replication group_ and must be kept in sync when documents are added or removed. If we fail to do so, reading from one copy will result in very different results than reading from another. The process of keeping the shard copies in sync and serving reads from them is what we call the _data replication model_. 
-Elasticsearch’s data replication model is based on the _primary-backup model_ and is described very well in the +Elasticsearch’s data replication model is based on the _primary-backup model_ and is described very well in the https://www.microsoft.com/en-us/research/publication/pacifica-replication-in-log-based-distributed-storage-systems/[PacificA paper] of Microsoft Research. That model is based on having a single copy from the replication group that acts as the primary shard. The other copies are called _replica shards_. The primary serves as the main entry point for all indexing operations. It is in charge of @@ -23,7 +23,7 @@ it has for various interactions between write and read operations. [float] === Basic write model -Every indexing operation in Elasticsearch is first resolved to a replication group using <>, +Every indexing operation in Elasticsearch is first resolved to a replication group using <>, typically based on the document ID. Once the replication group has been determined, the operation is forwarded internally to the current _primary shard_ of the group. The primary shard is responsible for validating the operation and forwarding it to the other replicas. Since replicas can be offline, the primary @@ -50,7 +50,7 @@ configuration mistake could cause an operation to fail on a replica despite it b are infrequent but the primary has to respond to them. In the case that the primary itself fails, the node hosting the primary will send a message to the master about it. The indexing -operation will wait (up to 1 minute, by <>) for the master to promote one of the replicas to be a +operation will wait (up to 1 minute, by <>) for the master to promote one of the replicas to be a new primary. The operation will then be forwarded to the new primary for processing. Note that the master also monitors the health of the nodes and may decide to proactively demote a primary. This typically happens when the node holding the primary is isolated from the cluster by a networking issue. See <> for more details. @@ -60,8 +60,8 @@ when executing it on the replica shards. This may be caused by an actual failure issue preventing the operation from reaching the replica (or preventing the replica from responding). All of these share the same end result: a replica which is part of the in-sync replica set misses an operation that is about to be acknowledged. In order to avoid violating the invariant, the primary sends a message to the master requesting -that the problematic shard be removed from the in-sync replica set. Only once removal of the shard has been acknowledged -by the master does the primary acknowledge the operation. Note that the master will also instruct another node to start +that the problematic shard be removed from the in-sync replica set. Only once removal of the shard has been acknowledged +by the master does the primary acknowledge the operation. Note that the master will also instruct another node to start building a new shard copy in order to restore the system to a healthy state. [[demoted-primary]] @@ -72,13 +72,13 @@ will be rejected by the replicas. When the primary receives a response from the it is no longer the primary then it will reach out to the master and will learn that it has been replaced. The operation is then routed to the new primary. -.What happens if there are no replicas? +.What happens if there are no replicas? ************ This is a valid scenario that can happen due to index configuration or simply because all the replicas have failed. 
In that case the primary is processing operations without any external validation, which may seem problematic. On the other hand, the primary cannot fail other shards on its own but request the master to do -so on its behalf. This means that the master knows that the primary is the only single good copy. We are therefore guaranteed -that the master will not promote any other (out-of-date) shard copy to be a new primary and that any operation indexed +so on its behalf. This means that the master knows that the primary is the only single good copy. We are therefore guaranteed +that the master will not promote any other (out-of-date) shard copy to be a new primary and that any operation indexed into the primary will not be lost. Of course, since at that point we are running with only single copy of the data, physical hardware issues can cause data loss. See <> for some mitigation options. ************ @@ -91,7 +91,7 @@ take non-trivial CPU power. One of the beauties of the primary-backup model is t (with the exception of in-flight operations). As such, a single in-sync copy is sufficient to serve read requests. When a read request is received by a node, that node is responsible for forwarding it to the nodes that hold the relevant shards, -collating the responses, and responding to the client. We call that node the _coordinating node_ for that request. The basic flow +collating the responses, and responding to the client. We call that node the _coordinating node_ for that request. The basic flow is as follows: . Resolve the read requests to the relevant shards. Note that since most searches will be sent to one or more indices, @@ -153,8 +153,8 @@ Dirty reads:: An isolated primary can expose writes that will not be acknowledge [float] === The Tip of the Iceberg -This document provides a high level overview of how Elasticsearch deals with data. Of course, there is much much more -going on under the hood. Things like primary terms, cluster state publishing, and master election all play a role in +This document provides a high level overview of how Elasticsearch deals with data. Of course, there is much much more +going on under the hood. Things like primary terms, cluster state publishing, and master election all play a role in keeping this system behaving correctly. This document also doesn't cover known and important bugs (both closed and open). We recognize that https://github.com/elastic/elasticsearch/issues?q=label%3Aresiliency[GitHub is hard to keep up with]. To help people stay on top of those, we maintain a dedicated https://www.elastic.co/guide/en/elasticsearch/resiliency/current/index.html[resiliency page] diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 1f3d8f879dc7c..77b380dd1dfed 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -1,107 +1,33 @@ [[getting-started]] -= Getting started += Getting started with {es} [partintro] -- -TIP: The fastest way to get started with {es} is to https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day trial of Elasticsearch Service] in the cloud. +Ready to take {es} for a test drive and see for yourself how you can use the +REST APIs to store, search, and analyze data? -Elasticsearch is a highly scalable open-source full-text search and analytics engine. It allows you to store, search, and analyze big volumes of data quickly and in near real time. 
It is generally used as the underlying engine/technology that powers applications that have complex search features and requirements. +Follow this getting started tutorial to: -Here are a few sample use-cases that Elasticsearch could be used for: +. Get an {es} instance up and running +. Index some sample documents +. Search for documents using the {es} query language +. Analyze the results using bucket and metrics aggregations -* You run an online web store where you allow your customers to search for products that you sell. In this case, you can use Elasticsearch to store your entire product catalog and inventory and provide search and autocomplete suggestions for them. -* You want to collect log or transaction data and you want to analyze and mine this data to look for trends, statistics, summarizations, or anomalies. In this case, you can use Logstash (part of the Elasticsearch/Logstash/Kibana stack) to collect, aggregate, and parse your data, and then have Logstash feed this data into Elasticsearch. Once the data is in Elasticsearch, you can run searches and aggregations to mine any information that is of interest to you. -* You run a price alerting platform which allows price-savvy customers to specify a rule like "I am interested in buying a specific electronic gadget and I want to be notified if the price of gadget falls below $X from any vendor within the next month". In this case you can scrape vendor prices, push them into Elasticsearch and use its reverse-search (Percolator) capability to match price movements against customer queries and eventually push the alerts out to the customer once matches are found. -* You have analytics/business-intelligence needs and want to quickly investigate, analyze, visualize, and ask ad-hoc questions on a lot of data (think millions or billions of records). In this case, you can use Elasticsearch to store your data and then use Kibana (part of the Elasticsearch/Logstash/Kibana stack) to build custom dashboards that can visualize aspects of your data that are important to you. Additionally, you can use the Elasticsearch aggregations functionality to perform complex business intelligence queries against your data. -For the rest of this tutorial, you will be guided through the process of getting Elasticsearch up and running, taking a peek inside it, and performing basic operations like indexing, searching, and modifying your data. At the end of this tutorial, you should have a good idea of what Elasticsearch is, how it works, and hopefully be inspired to see how you can use it to either build sophisticated search applications or to mine intelligence from your data. --- - -[[getting-started-concepts]] -== Basic Concepts - -There are a few concepts that are core to Elasticsearch. Understanding these concepts from the outset will tremendously help ease the learning process. - -[float] -=== Near Realtime (NRT) - -Elasticsearch is a near-realtime search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable. - -[float] -=== Cluster - -A cluster is a collection of one or more nodes (servers) that together holds your entire data and provides federated indexing and search capabilities across all nodes. A cluster is identified by a unique name which by default is "elasticsearch". This name is important because a node can only be part of a cluster if the node is set up to join the cluster by its name. 
- -Make sure that you don't reuse the same cluster names in different -environments, otherwise you might end up with nodes joining the wrong cluster. -For instance you could use `logging-dev`, `logging-stage`, and `logging-prod` -for the development, staging, and production clusters. - -Note that it is valid and perfectly fine to have a cluster with only a single node in it. Furthermore, you may also have multiple independent clusters each with its own unique cluster name. - -[float] -=== Node - -A node is a single server that is part of your cluster, stores your data, and participates in the cluster's indexing and search -capabilities. Just like a cluster, a node is identified by a name which by default is a random Universally Unique IDentifier (UUID) that is assigned to the node at startup. You can define any node name you want if you do not want the default. This name is important for administration purposes where you want to identify which servers in your network correspond to which nodes in your Elasticsearch cluster. - -A node can be configured to join a specific cluster by the cluster name. By default, each node is set up to join a cluster named `elasticsearch` which means that if you start up a number of nodes on your network and--assuming they can discover each other--they will all automatically form and join a single cluster named `elasticsearch`. - -In a single cluster, you can have as many nodes as you want. Furthermore, if there are no other Elasticsearch nodes currently running on your network, starting a single node will by default form a new single-node cluster named `elasticsearch`. - -[float] -=== Index - -An index is a collection of documents that have somewhat similar characteristics. For example, you can have an index for customer data, another index for a product catalog, and yet another index for order data. An index is identified by a name (that must be all lowercase) and this name is used to refer to the index when performing indexing, search, update, and delete operations against the documents in it. - -In a single cluster, you can define as many indexes as you want. - -[float] -=== Type +Need more context? -deprecated[6.0.0,See <>] +Check out the <> to learn the lingo and understand the basics of +how {es} works. If you're already familiar with {es} and want to see how it works +with the rest of the stack, you might want to jump to the +{stack-gs}/get-started-elastic-stack.html[Elastic Stack +Tutorial] to see how to set up a system monitoring solution with {es}, {kib}, +{beats}, and {ls}. -A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, e.g. one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <> for more. - -[float] -=== Document - -A document is a basic unit of information that can be indexed. For example, you can have a document for a single customer, another document for a single product, and yet another for a single order. This document is expressed in http://json.org/[JSON] (JavaScript Object Notation) which is a ubiquitous internet data interchange format. Within an index, you can store as many documents as you want. - -[[getting-started-shards-and-replicas]] -[float] -=== Shards & Replicas - -An index can potentially store a large amount of data that can exceed the hardware limits of a single node. 
For example, a single index of a billion documents taking up 1TB of disk space may not fit on the disk of a single node or may be too slow to serve search requests from a single node alone. - -To solve this problem, Elasticsearch provides the ability to subdivide your index into multiple pieces called shards. When you create an index, you can simply define the number of shards that you want. Each shard is in itself a fully-functional and independent "index" that can be hosted on any node in the cluster. - -Sharding is important for two primary reasons: - -* It allows you to horizontally split/scale your content volume -* It allows you to distribute and parallelize operations across shards (potentially on multiple nodes) thus increasing performance/throughput - - -The mechanics of how a shard is distributed and also how its documents are aggregated back into search requests are completely managed by Elasticsearch and is transparent to you as the user. - -In a network/cloud environment where failures can be expected anytime, it is very useful and highly recommended to have a failover mechanism in case a shard/node somehow goes offline or disappears for whatever reason. To this end, Elasticsearch allows you to make one or more copies of your index's shards into what are called replica shards, or replicas for short. - -Replication is important for two primary reasons: - -* It provides high availability in case a shard/node fails. For this reason, it is important to note that a replica shard is never allocated on the same node as the original/primary shard that it was copied from. -* It allows you to scale out your search volume/throughput since searches can be executed on all replicas in parallel. - - -To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards). - -The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may also change the number of replicas dynamically anytime. You can change the number of shards for an existing index using the {ref}/indices-shrink-index.html[`_shrink`] and {ref}/indices-split-index.html[`_split`] APIs, however this is not a trivial task and pre-planning for the correct number of shards is the optimal approach. - -By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index. - -NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. -You can monitor shard sizes using the {ref}/cat-shards.html[`_cat/shards`] API. - -With that out of the way, let's get started with the fun part... +TIP: The fastest way to get started with {es} is to +https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day +trial of Elasticsearch Service] in the cloud. 
+-- [[getting-started-install]] == Installation diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index 00578ce8c050f..4949c43e6ce65 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -120,9 +120,9 @@ configuring allocation rules is optional. When configuring allocation rules, setting number of replicas is optional. Although this action can be treated as two separate index settings updates, both can be configured at once. -Read more about index replicas <>. -Read more about shard allocation filtering in -the <>. +For more information about how {es} uses replicas for scaling, see +<>. See <> for more information about +controlling where Elasticsearch allocates shards of a particular index. [[ilm-allocate-options]] .Allocate Options diff --git a/docs/reference/images/rare_terms/accuracy_0001.png b/docs/reference/images/rare_terms/accuracy_0001.png new file mode 100644 index 0000000000000..0c13a3938cde2 Binary files /dev/null and b/docs/reference/images/rare_terms/accuracy_0001.png differ diff --git a/docs/reference/images/rare_terms/accuracy_001.png b/docs/reference/images/rare_terms/accuracy_001.png new file mode 100644 index 0000000000000..2aa1be316c382 Binary files /dev/null and b/docs/reference/images/rare_terms/accuracy_001.png differ diff --git a/docs/reference/images/rare_terms/accuracy_01.png b/docs/reference/images/rare_terms/accuracy_01.png new file mode 100644 index 0000000000000..7182b7d3c537e Binary files /dev/null and b/docs/reference/images/rare_terms/accuracy_01.png differ diff --git a/docs/reference/images/rare_terms/memory.png b/docs/reference/images/rare_terms/memory.png new file mode 100644 index 0000000000000..e0de5c2163913 Binary files /dev/null and b/docs/reference/images/rare_terms/memory.png differ diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 7bb29cb06730c..f9d4b5a4e0528 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -65,8 +65,6 @@ include::frozen-indices.asciidoc[] include::administering.asciidoc[] -include::rest-api/index.asciidoc[] - include::commands/index.asciidoc[] :edit_url: @@ -76,6 +74,8 @@ include::testing.asciidoc[] include::glossary.asciidoc[] +include::rest-api/index.asciidoc[] + include::release-notes/highlights.asciidoc[] include::migration/index.asciidoc[] diff --git a/docs/reference/indices/apis/reload-analyzers.asciidoc b/docs/reference/indices/apis/reload-analyzers.asciidoc new file mode 100644 index 0000000000000..657f6556df4b2 --- /dev/null +++ b/docs/reference/indices/apis/reload-analyzers.asciidoc @@ -0,0 +1,105 @@ +[role="xpack"] +[testenv="basic"] +[[indices-reload-analyzers]] +== Reload Search Analyzers + +experimental[] + +Reloads search analyzers and their resources. 
+ +Synonym filters (both `synonym` and `synonym_graph`) can be declared as +updateable if they are only used in <> +with the `updateable` flag: + +[source,js] +-------------------------------------------------- +PUT /my_index +{ + "settings": { + "index" : { + "analysis" : { + "analyzer" : { + "my_synonyms" : { + "tokenizer" : "whitespace", + "filter" : ["synonym"] + } + }, + "filter" : { + "synonym" : { + "type" : "synonym", + "synonyms_path" : "analysis/synonym.txt", + "updateable" : true <1> + } + } + } + } + }, + "mappings": { + "properties": { + "text": { + "type": "text", + "analyzer" : "standard", + "search_analyzer": "my_synonyms" <2> + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> Mark the synonym filter as updateable. +<2> Synonym analyzer is usable as a search_analyzer. + +NOTE: Trying to use the above analyzer as an index analyzer will result in an error. + +Using the <>, you can trigger reloading of the +synonym definition. The contents of the configured synonyms file will be reloaded and the +synonyms definition the filter uses will be updated. + +The `_reload_search_analyzers` API can be run on one or more indices and will trigger +reloading of the synonyms from the configured file. + +NOTE: Reloading will happen on every node the index has shards on, so it's important +to update the synonym file contents on every data node (even the ones that don't currently +hold shard copies; shards might be relocated there in the future) before calling +reload to ensure the new state of the file is reflected everywhere in the cluster. + +[source,js] +-------------------------------------------------- +POST /my_index/_reload_search_analyzers +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The reload request returns information about the nodes it was executed on and the +analyzers that were reloaded: + +[source,js] +-------------------------------------------------- +{ + "_shards" : { + "total" : 2, + "successful" : 2, + "failed" : 0 + }, + "reload_details" : [ + { + "index" : "my_index", + "reloaded_analyzers" : [ + "my_synonyms" + ], + "reloaded_node_ids" : [ + "mfdqTXn_T7SGr2Ho2KT8uw" + ] + } + ] +} +-------------------------------------------------- +// TEST[continued] +// TESTRESPONSE[s/"total" : 2/"total" : $body._shards.total/] +// TESTRESPONSE[s/"successful" : 2/"successful" : $body._shards.successful/] +// TESTRESPONSE[s/mfdqTXn_T7SGr2Ho2KT8uw/$body.reload_details.0.reloaded_node_ids.0/] + +NOTE: Reloading does not happen on each shard of an index, but once on each node +the index has shards on. The total shard count can therefore differ from the number +of index shards. diff --git a/docs/reference/indices/open-close.asciidoc b/docs/reference/indices/open-close.asciidoc index 6d0866d303b88..4ba434ecbbb6e 100644 --- a/docs/reference/indices/open-close.asciidoc +++ b/docs/reference/indices/open-close.asciidoc @@ -2,22 +2,68 @@ == Open / Close Index API The open and close index APIs allow to close an index, and later on -opening it. A closed index has almost no overhead on the cluster (except -for maintaining its metadata), and is blocked for read/write operations. -A closed index can be opened which will then go through the normal -recovery process. +opening it. -The REST endpoint is `/{index}/_close` and `/{index}/_open`. For -example: +A closed index is blocked for read/write operations and does not allow +all operations that opened indices allow. 
It is not possible to index +documents or to search for documents in a closed index. This allows +closed indices to not have to maintain internal data structures for +indexing or searching documents, resulting in a smaller overhead on +the cluster. + +When opening or closing an index, the master is responsible for +restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. The +data of opened/closed indices is automatically replicated by the +cluster to ensure that enough shard copies are safely kept around +at all times. + +The REST endpoint is `/{index}/_close` and `/{index}/_open`. + +The following example shows how to close an index: [source,js] -------------------------------------------------- POST /my_index/_close +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_index\n/] + +This will return the following response: + +[source,js] +-------------------------------------------------- +{ + "acknowledged" : true, + "shards_acknowledged" : true, + "indices" : { + "my_index" : { + "closed" : true + } + } +} +-------------------------------------------------- +// TESTRESPONSE + +A closed index can be reopened like this: +[source,js] +-------------------------------------------------- POST /my_index/_open -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT my_index\n/] +// TEST[s/^/PUT my_index\nPOST my_index\/_close\n/] + +which will yield the following response: + +[source,js] +-------------------------------------------------- +{ + "acknowledged" : true, + "shards_acknowledged" : true +} +-------------------------------------------------- +// TESTRESPONSE It is possible to open and close multiple indices. An error will be thrown if the request explicitly refers to a missing index. This behaviour can be @@ -36,6 +82,6 @@ API by setting `cluster.indices.close.enable` to `false`. The default is `true`. [float] === Wait For Active Shards -Because opening an index allocates its shards, the +Because opening or closing an index allocates its shards, the <> setting on -index creation applies to the index opening action as well. +index creation applies to the `_open` and `_close` index actions as well. diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index 6fa2e8c796df6..a945a56c1942b 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -23,7 +23,30 @@ another processor that renames a field. The <> then the configured pipelines. To use a pipeline, simply specify the `pipeline` parameter on an index or bulk request. This -way, the ingest node knows which pipeline to use. For example: +way, the ingest node knows which pipeline to use. 
+ +For example: +Create a pipeline + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/my_pipeline_id +{ + "description" : "describe pipeline", + "processors" : [ + { + "set" : { + "field": "foo", + "value": "new" + } + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST + +Index with defined pipeline [source,js] -------------------------------------------------- @@ -33,7 +56,27 @@ PUT my-index/_doc/my-id?pipeline=my_pipeline_id } -------------------------------------------------- // CONSOLE -// TEST[catch:bad_request] +// TEST[continued] + +Response: +[source,js] +-------------------------------------------------- +{ + "_index" : "my-index", + "_type" : "_doc", + "_id" : "my-id", + "_version" : 1, + "result" : "created", + "_shards" : { + "total" : 2, + "successful" : 2, + "failed" : 0 + }, + "_seq_no" : 0, + "_primary_term" : 1 +} +-------------------------------------------------- +// TESTRESPONSE[s/"successful" : 2/"successful" : 1/] An index may also declare a <> that will be used in the absence of the `pipeline` parameter. diff --git a/docs/reference/mapping/params/enabled.asciidoc b/docs/reference/mapping/params/enabled.asciidoc index 7193c6aa9f6e3..edbfb1f77d71e 100644 --- a/docs/reference/mapping/params/enabled.asciidoc +++ b/docs/reference/mapping/params/enabled.asciidoc @@ -89,8 +89,8 @@ GET my_index/_mapping <3> <2> The document can be retrieved. <3> Checking the mapping reveals that no fields have been added. -TIP: The `enabled` setting can be updated on existing fields -using the <>. +The `enabled` setting for existing fields and the top-level mapping +definition cannot be updated. Note that because Elasticsearch completely skips parsing the field contents, it is possible to add non-object data to a disabled field: diff --git a/docs/reference/mapping/params/ignore-above.asciidoc b/docs/reference/mapping/params/ignore-above.asciidoc index daf5c92bcf34d..33c0eaf339f24 100644 --- a/docs/reference/mapping/params/ignore-above.asciidoc +++ b/docs/reference/mapping/params/ignore-above.asciidoc @@ -30,7 +30,7 @@ PUT my_index/_doc/2 <3> "message": "Syntax error with some long stacktrace" } -GET _search <4> +GET my_index/_search <4> { "aggs": { "messages": { diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index 47952d61c4954..4aa178d443f55 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -258,7 +258,7 @@ Elasticsearch 6.x:: * The `_default_` mapping type is deprecated. -* In 6.7, the index creation, index template, and mapping APIs support a query +* In 6.8, the index creation, index template, and mapping APIs support a query string parameter (`include_type_name`) which indicates whether requests and responses should include a type name. It defaults to `true`, and should be set to an explicit value to prepare to upgrade to 7.0. Not setting `include_type_name` @@ -442,12 +442,12 @@ documents to it using typeless `index` calls, and load documents with typeless Index creation, index template, and mapping APIs support a new `include_type_name` URL parameter that specifies whether mapping definitions in requests and responses -should contain the type name. The parameter defaults to `true` in version 6.7 to +should contain the type name. The parameter defaults to `true` in version 6.8 to match the pre-7.0 behavior of using type names in mappings. 
It defaults to `false` in version 7.0 and will be removed in version 8.0. -It should be set explicitly in 6.7 to prepare to upgrade to 7.0. To avoid deprecation -warnings in 6.7, the parameter can be set to either `true` or `false`. In 7.0, setting +It should be set explicitly in 6.8 to prepare to upgrade to 7.0. To avoid deprecation +warnings in 6.8, the parameter can be set to either `true` or `false`. In 7.0, setting `include_type_name` at all will result in a deprecation warning. See some examples of interactions with Elasticsearch with this option set to `false`: @@ -717,12 +717,12 @@ indices. [float] ==== Mixed-version clusters -In a cluster composed of both 6.7 and 7.0 nodes, the parameter +In a cluster composed of both 6.8 and 7.0 nodes, the parameter `include_type_name` should be specified in indices APIs like index creation. This is because the parameter has a different default between -6.7 and 7.0, so the same mapping definition will not be valid for both +6.8 and 7.0, so the same mapping definition will not be valid for both node versions. Typeless document APIs such as `bulk` and `update` are only available as of -7.0, and will not work with 6.7 nodes. This also holds true for the typeless +7.0, and will not work with 6.8 nodes. This also holds true for the typeless versions of queries that perform document lookups, such as `terms`. diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index c0db156dc3a1c..bdfcf1128a061 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -42,8 +42,6 @@ string:: <> and <> <>:: Defines parent/child relation for documents within the same index -<>:: Defines an alias to an existing field. - <>:: Record numeric feature to boost hits at query time. <>:: Record numeric features to boost hits at query time. @@ -54,6 +52,11 @@ string:: <> and <> <>:: A text-like field optimized for queries to implement as-you-type completion +<>:: Defines an alias to an existing field. + +<>:: Allows an entire JSON object to be indexed as a single field. + + [float] === Multi-fields @@ -82,6 +85,8 @@ include::types/date.asciidoc[] include::types/date_nanos.asciidoc[] +include::types/flattened.asciidoc[] + include::types/geo-point.asciidoc[] include::types/geo-shape.asciidoc[] diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index f79bdde9cc4dd..7ea04b42330c2 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -7,9 +7,7 @@ experimental[] A `dense_vector` field stores dense vectors of float values. The maximum number of dimensions that can be in a vector should -not exceed 1024. The number of dimensions can be -different across documents. A `dense_vector` field is -a single-valued field. +not exceed 1024. A `dense_vector` field is a single-valued field. These vectors can be used for <>. For example, a document score can represent a distance between @@ -24,7 +22,8 @@ PUT my_index "mappings": { "properties": { "my_vector": { - "type": "dense_vector" + "type": "dense_vector", + "dims": 3 <1> }, "my_text" : { "type" : "keyword" @@ -42,13 +41,14 @@ PUT my_index/_doc/1 PUT my_index/_doc/2 { "my_text" : "text2", - "my_vector" : [-0.5, 10, 10, 4] + "my_vector" : [-0.5, 10, 10] } -------------------------------------------------- // CONSOLE +<1> dims—the number of dimensions in the vector, required parameter. 
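The example above only stores the vectors. To illustrate the document-scoring use case mentioned earlier, a query along the following lines could rank the two documents by similarity to a query vector. This is only a sketch: it assumes the experimental `script_score` query and the `cosineSimilarity` Painless vector function are available, and their exact signatures may differ between releases.

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "query": {
    "script_score": {
      "query": { "match_all": {} },
      "script": {
        "source": "cosineSimilarity(params.query_vector, doc['my_vector']) + 1.0", <1>
        "params": {
          "query_vector": [0.5, 10, 6] <2>
        }
      }
    }
  }
}
--------------------------------------------------
<1> Adding `1.0` keeps the resulting score positive, because document scores must not be negative.
<2> The query vector must have the same number of dimensions (`dims`) as the indexed vectors.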
+ + Internally, each document's dense vector is encoded as a binary doc value. Its size in bytes is equal to -`4 * NUMBER_OF_DIMENSIONS`, where `NUMBER_OF_DIMENSIONS` - -number of the vector's dimensions. \ No newline at end of file +`4 * dims`, where `dims` is the number of the vector's dimensions. \ No newline at end of file diff --git a/docs/reference/mapping/types/flattened.asciidoc b/docs/reference/mapping/types/flattened.asciidoc new file mode 100644 index 0000000000000..80fd72c3dcc1f --- /dev/null +++ b/docs/reference/mapping/types/flattened.asciidoc @@ -0,0 +1,188 @@ +[role="xpack"] +[testenv="basic"] + +[[flattened]] +=== Flattened datatype + +By default, each subfield in an object is mapped and indexed separately. If +the names or types of the subfields are not known in advance, then they are +<>. + +The `flattened` type provides an alternative approach, where the entire +object is mapped as a single field. Given an object, the `flattened` +mapping will parse out its leaf values and index them into one field as +keywords. The object's contents can then be searched through simple queries +and aggregations. + +This data type can be useful for indexing objects with a large or unknown +number of unique keys. Only one field mapping is created for the whole JSON +object, which can help prevent a <> +from having too many distinct field mappings. + +On the other hand, flattened object fields present a trade-off in terms of +search functionality. Only basic queries are allowed, with no support for +numeric range queries or highlighting. Further information on the limitations +can be found in the <> section. + +NOTE: The `flattened` mapping type should **not** be used for indexing all +document content, as it treats all values as keywords and does not provide full +search functionality. The default approach, where each subfield has its own +entry in the mappings, works well in the majority of cases. + +A flattened object field can be created as follows: +[source,js] +-------------------------------- +PUT bug_reports +{ + "mappings": { + "properties": { + "title": { + "type": "text" + }, + "labels": { + "type": "flattened" + } + } + } +} + +POST bug_reports/_doc/1 +{ + "title": "Results are not sorted correctly.", + "labels": { + "priority": "urgent", + "release": ["v1.2.5", "v1.3.0"], + "timestamp": { + "created": 1541458026, + "closed": 1541457010 + } + } +} +-------------------------------- +// CONSOLE +// TESTSETUP + +During indexing, tokens are created for each leaf value in the JSON object. The +values are indexed as string keywords, without analysis or special handling for +numbers or dates. + +Querying the top-level `flattened` field searches all leaf values in the +object: + +[source,js] +-------------------------------- +POST bug_reports/_search +{ + "query": { + "term": {"labels": "urgent"} + } +} +-------------------------------- +// CONSOLE + +To query on a specific key in the flattened object, object dot notation is used: +[source,js] +-------------------------------- +POST bug_reports/_search +{ + "query": { + "term": {"labels.release": "v1.3.0"} + } +} +-------------------------------- +// CONSOLE + +[[supported-operations]] +==== Supported operations + +Because of the similarities in the way values are indexed, `flattened` +fields share much of the same mapping and search functionality as +<> fields. 
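As a small illustration of that overlap, keyword-style aggregations can be run directly on the field. For example, a `terms` aggregation over the `bug_reports` index defined above might look like the following sketch, with every leaf value of `labels` treated as a plain keyword bucket:

[source,js]
--------------------------------------------------
POST bug_reports/_search
{
  "size": 0,
  "aggs": {
    "label_values": {
      "terms": { "field": "labels" } <1>
    }
  }
}
--------------------------------------------------
<1> Buckets are created for each distinct leaf value in the `labels` object, compared lexicographically as keywords.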
+ +Currently, flattened object fields can be used with the following query types: + +- `term`, `terms`, and `terms_set` +- `prefix` +- `range` +- `match` and `multi_match` +- `query_string` and `simple_query_string` +- `exists` + +When querying, it is not possible to refer to field keys using wildcards, as in +`{ "term": {"labels.time*": 1541457010}}`. Note that all queries, including +`range`, treat the values as string keywords. Highlighting is not supported on +`flattened` fields. + +It is possible to sort on a flattened object field, as well as perform simple +keyword-style aggregations such as `terms`. As with queries, there is no +special support for numerics -- all values in the JSON object are treated as +keywords. When sorting, this implies that values are compared +lexicographically. + +Flattened object fields currently cannot be stored. It is not possible to +specify the <> parameter in the mapping. + +[[flattened-params]] +==== Parameters for flattened object fields + +The following mapping parameters are accepted: + +[horizontal] + +<>:: + + Mapping field-level query time boosting. Accepts a floating point number, + defaults to `1.0`. + +`depth_limit`:: + + The maximum allowed depth of the flattened object field, in terms of nested + inner objects. If a flattened object field exceeds this limit, then an + error will be thrown. Defaults to `20`. + +<>:: + + Should the field be stored on disk in a column-stride fashion, so that it + can later be used for sorting, aggregations, or scripting? Accepts `true` + (default) or `false`. + +<>:: + + Should global ordinals be loaded eagerly on refresh? Accepts `true` or + `false` (default). Enabling this is a good idea on fields that are + frequently used for terms aggregations. + +<>:: + + Leaf values longer than this limit will not be indexed. By default, there + is no limit and all values will be indexed. Note that this limit applies + to the leaf values within the flattened object field, and not the length of + the entire field. + +<>:: + + Determines if the field should be searchable. Accepts `true` (default) or + `false`. + +<>:: + + What information should be stored in the index for scoring purposes. + Defaults to `docs` but can also be set to `freqs` to take term frequency + into account when computing scores. + +<>:: + + A string value which is substituted for any explicit `null` values within + the flattened object field. Defaults to `null`, which means null fields are + treated as if they were missing. + +<>:: + + Which scoring algorithm or _similarity_ should be used. Defaults + to `BM25`. + +`split_queries_on_whitespace`:: + + Whether <> should split the input on + whitespace when building a query for this field. Accepts `true` or `false` + (default). diff --git a/docs/reference/migration/migrate_8_0/reindex.asciidoc b/docs/reference/migration/migrate_8_0/reindex.asciidoc index ef4f5aed147ca..912f0f9dbf11b 100644 --- a/docs/reference/migration/migrate_8_0/reindex.asciidoc +++ b/docs/reference/migration/migrate_8_0/reindex.asciidoc @@ -12,4 +12,18 @@ Instead, please specify the index-name without any encoding. [float] ==== Removal of types -The `/{index}/{type}/_delete_by_query` and `/{index}/{type}/_update_by_query` REST endpoints have been removed in favour of `/{index}/_delete_by_query` and `/{index}/_update_by_query`, since indexes no longer contain types, these typed endpoints are obsolete. 
\ No newline at end of file +The `/{index}/{type}/_delete_by_query` and `/{index}/{type}/_update_by_query` REST endpoints have been removed in favour of `/{index}/_delete_by_query` and `/{index}/_update_by_query`, since indexes no longer contain types, these typed endpoints are obsolete. + +[float] +==== Removal of size parameter + +Previously, a `_reindex` request had two different size specifications in the body: + +- Outer level, determining the maximum number of documents to process +- Inside the `source` element, determining the scroll/batch size. + +The outer level `size` parameter has now been renamed to `max_docs` to +avoid confusion and clarify its semantics. + +Similarly, the `size` parameter has been renamed to `max_docs` for +`_delete_by_query` and `_update_by_query` to keep the 3 interfaces consistent. \ No newline at end of file diff --git a/docs/reference/migration/migrate_8_0/search.asciidoc b/docs/reference/migration/migrate_8_0/search.asciidoc index 6fba2970f593e..97796a10fca22 100644 --- a/docs/reference/migration/migrate_8_0/search.asciidoc +++ b/docs/reference/migration/migrate_8_0/search.asciidoc @@ -20,3 +20,8 @@ The same functionality can be achieved by the `match` query if the total number The `cutoff_frequency` parameter was deprecated in 7.x and has been removed in 8.0 from `match` and `multi_match` queries. The same functionality can be achieved without any configuration provided that the total number of hits is not tracked. + +[float] +===== Removal of sort parameters + +The `nested_filter` and `nested_path` options, deprecated in 6.x, have been removed in favor of the `nested` context. diff --git a/docs/reference/migration/migrate_8_0/security.asciidoc b/docs/reference/migration/migrate_8_0/security.asciidoc index a7cacef8ff017..f3ee4fd97ef85 100644 --- a/docs/reference/migration/migrate_8_0/security.asciidoc +++ b/docs/reference/migration/migrate_8_0/security.asciidoc @@ -33,3 +33,11 @@ The `elasticsearch-migrate` tool provided a way to convert file realm users and roles into the native realm. It has been deprecated since 7.2.0. Users and roles should now be created in the native realm directly. + +[float] +[[separating-node-and-client-traffic]] +==== The `transport.profiles.*.xpack.security.type` setting has been removed + +The `transport.profiles.*.xpack.security.type` setting has been removed since +the Transport Client has been removed and therefore all client traffic now uses +the HTTP transport. Transport profiles using this setting should be removed. diff --git a/docs/reference/ml/apis/close-job.asciidoc b/docs/reference/ml/apis/close-job.asciidoc index c5f9b5fc2444a..2a38648e48fe5 100644 --- a/docs/reference/ml/apis/close-job.asciidoc +++ b/docs/reference/ml/apis/close-job.asciidoc @@ -12,8 +12,8 @@ A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. - -==== Request +[[ml-close-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_close` + @@ -21,8 +21,15 @@ operations, but you can still explore and navigate results. `POST _ml/anomaly_detectors/_all/_close` + +[[ml-close-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. 
-==== Description +[[ml-close-job-desc]] +==== {api-description-title} You can close multiple jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs @@ -47,32 +54,26 @@ after the close job API returns. The `force` query parameter should only be use situations where the job has already failed, or where you are not interested in results the job might have recently produced or might produce in the future. +[[ml-close-job-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`job_id`:: +`` (Required):: (string) Identifier for the job. It can be a job identifier, a group name, or a wildcard expression. +[[ml-close-job-query-parms]] +==== {api-query-parms-title} -==== Query Parameters - -`force`:: +`force` (Optional):: (boolean) Use to close a failed job, or to forcefully close a job which has not responded to its initial close request. -`timeout`:: +`timeout` (Optional):: (time units) Controls the time to wait until a job has closed. The default value is 30 minutes. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-close-job-example]] +==== {api-examples-title} The following example closes the `total-requests` job: diff --git a/docs/reference/ml/apis/delete-calendar-event.asciidoc b/docs/reference/ml/apis/delete-calendar-event.asciidoc index 68f7a0738375d..0aa9ce5cc8d92 100644 --- a/docs/reference/ml/apis/delete-calendar-event.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-event.asciidoc @@ -8,34 +8,37 @@ Deletes scheduled events from a calendar. - -==== Request +[[ml-delete-calendar-event-request]] +==== {api-request-title} `DELETE _ml/calendars//events/` +[[ml-delete-calendar-event-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Description +[[ml-delete-calendar-event-desc]] +==== {api-description-title} This API removes individual events from a calendar. To remove all scheduled events and delete the calendar, see the <>. -==== Path Parameters +[[ml-delete-calendar-event-path-parms]] +==== {api-path-parms-title} -`calendar_id`(required):: +`` (Required):: (string) Identifier for the calendar. -`event_id` (required):: +`` (Required):: (string) Identifier for the scheduled event. You can obtain this identifier by using the <>. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - -==== Examples +[[ml-delete-calendar-event-example]] +==== {api-examples-title} The following example deletes a scheduled event from the `planned-outages` calendar: diff --git a/docs/reference/ml/apis/delete-calendar-job.asciidoc b/docs/reference/ml/apis/delete-calendar-job.asciidoc index 118a706d29460..a555b3d3b922b 100644 --- a/docs/reference/ml/apis/delete-calendar-job.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-job.asciidoc @@ -8,28 +8,30 @@ Deletes jobs from a calendar. - -==== Request +[[ml-delete-calendar-job-request]] +==== {api-request-title} `DELETE _ml/calendars//jobs/` +[[ml-delete-calendar-job-prereqs]] +==== {api-prereq-title} -==== Path Parameters - -`calendar_id`(required):: - (string) Identifier for the calendar. 
+* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -`job_id` (required):: - (string) An identifier for the job. It can be a job identifier, a group name, or a - comma-separated list of jobs or groups. +[[ml-delete-calendar-job-path-parms]] +==== {api-path-parms-title} +`` (Required):: + (string) Identifier for the calendar. -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. +`` (Required):: + (string) An identifier for the job. It can be a job identifier, a group name, + or a comma-separated list of jobs or groups. -==== Examples +[[ml-delete-calendar-job-example]] +==== {api-examples-title} The following example removes the association between the `planned-outages` calendar and `total-requests` job: diff --git a/docs/reference/ml/apis/delete-calendar.asciidoc b/docs/reference/ml/apis/delete-calendar.asciidoc index 2707f3175e04c..065c117c49c63 100644 --- a/docs/reference/ml/apis/delete-calendar.asciidoc +++ b/docs/reference/ml/apis/delete-calendar.asciidoc @@ -8,31 +8,32 @@ Deletes a calendar. - -==== Request +[[ml-delete-calendar-request]] +==== {api-request-title} `DELETE _ml/calendars/` +[[ml-delete-calendar-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Description +[[ml-delete-calendar-desc]] +==== {api-description-title} This API removes all scheduled events from the calendar then deletes the calendar. +[[ml-delete-calendar-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`calendar_id` (required):: +`` (Required):: (string) Identifier for the calendar. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-delete-calendar-example]] +==== {api-examples-title} The following example deletes the `planned-outages` calendar: diff --git a/docs/reference/ml/apis/delete-datafeed.asciidoc b/docs/reference/ml/apis/delete-datafeed.asciidoc index d2a7845be7584..23917bf9e3365 100644 --- a/docs/reference/ml/apis/delete-datafeed.asciidoc +++ b/docs/reference/ml/apis/delete-datafeed.asciidoc @@ -10,38 +10,39 @@ Deletes an existing {dfeed}. - -==== Request +[[ml-delete-datafeed-request]] +==== {api-request-title} `DELETE _ml/datafeeds/` +[[ml-delete-datafeed-prereqs]] +==== {api-prereq-title} -==== Description - -NOTE: Unless the `force` parameter is used, the {dfeed} must be stopped before it can be deleted. - - -==== Path Parameters - -`feed_id` (required):: - (string) Identifier for the {dfeed} - +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -===== Query Parameters +[[ml-delete-datafeed-desc]] +==== {api-description-title} -`force`:: - (boolean) Use to forcefully delete a started {dfeed}; this method is quicker than - stopping and deleting the {dfeed}. +NOTE: Unless you use the `force` parameter, you must stop the {dfeed} before you +can delete it. 
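As that note suggests, the usual sequence is to stop the {dfeed} and then delete it. A minimal sketch, using the `datafeed-total-requests` {dfeed} from the example further below:

[source,js]
--------------------------------------------------
POST _ml/datafeeds/datafeed-total-requests/_stop

DELETE _ml/datafeeds/datafeed-total-requests
--------------------------------------------------

Alternatively, the `force` query parameter described below deletes a started {dfeed} without stopping it first.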
+[[ml-delete-datafeed-path-parms]] +==== {api-path-parms-title} -===== Authorization +`` (Required):: + (string) Identifier for the {dfeed}. -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. +[[ml-delete-datafeed-query-parms]] +==== {api-query-parms-title} +`force` (Optional):: + (boolean) Use to forcefully delete a started {dfeed}; this method is quicker + than stopping and deleting the {dfeed}. -==== Examples +[[ml-delete-datafeed-example]] +==== {api-examples-title} The following example deletes the `datafeed-total-requests` {dfeed}: diff --git a/docs/reference/ml/apis/delete-expired-data.asciidoc b/docs/reference/ml/apis/delete-expired-data.asciidoc index 8814a1686736e..ada9ec1c8c34e 100644 --- a/docs/reference/ml/apis/delete-expired-data.asciidoc +++ b/docs/reference/ml/apis/delete-expired-data.asciidoc @@ -8,25 +8,27 @@ Deletes expired and unused machine learning data. -==== Request +[[ml-delete-expired-data-request]] +==== {api-request-title} `DELETE _ml/_delete_expired_data` -==== Description +[[ml-delete-expired-data-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-delete-expired-data-desc]] +==== {api-description-title} Deletes all job results, model snapshots and forecast data that have exceeded their `retention days` period. Machine learning state documents that are not associated with any job are also deleted. -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security Privileges] and -{stack-ov}/built-in-roles.html[Built-in Roles]. - - -==== Examples +[[ml-delete-expired-data-example]] +==== {api-examples-title} The endpoint takes no arguments: diff --git a/docs/reference/ml/apis/delete-filter.asciidoc b/docs/reference/ml/apis/delete-filter.asciidoc index b2cbb7ef2832d..1962db29ad74e 100644 --- a/docs/reference/ml/apis/delete-filter.asciidoc +++ b/docs/reference/ml/apis/delete-filter.asciidoc @@ -8,32 +8,33 @@ Deletes a filter. - -==== Request +[[ml-delete-filter-request]] +==== {api-request-title} `DELETE _ml/filters/` +[[ml-delete-filter-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Description +[[ml-delete-filter-desc]] +==== {api-description-title} This API deletes a {stack-ov}/ml-rules.html[filter]. If a {ml} job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. +[[ml-delete-filter-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`filter_id` (required):: +`` (Required):: (string) Identifier for the filter. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. 
- - -==== Examples +[[ml-delete-filter-example]] +==== {api-examples-title} The following example deletes the `safe_domains` filter: diff --git a/docs/reference/ml/apis/delete-forecast.asciidoc b/docs/reference/ml/apis/delete-forecast.asciidoc index 133b9105e478a..aac054217fced 100644 --- a/docs/reference/ml/apis/delete-forecast.asciidoc +++ b/docs/reference/ml/apis/delete-forecast.asciidoc @@ -8,7 +8,8 @@ Deletes forecasts from a {ml} job. -==== Request +[[ml-delete-forecast-request]] +==== {api-request-title} `DELETE _ml/anomaly_detectors//_forecast` + @@ -16,48 +17,54 @@ Deletes forecasts from a {ml} job. `DELETE _ml/anomaly_detectors//_forecast/_all` +[[ml-delete-forecast-prereqs]] +==== {api-prereq-title} -==== Description +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -By default, forecasts are retained for 14 days. You can specify a different -retention period with the `expires_in` parameter in the <>. The delete forecast API enables you to delete one or more forecasts before they expire. +[[ml-delete-forecast-desc]] +==== {api-description-title} -NOTE: When you delete a job its associated forecasts are deleted. +By default, forecasts are retained for 14 days. You can specify a different +retention period with the `expires_in` parameter in the +<>. The delete forecast API enables you to delete +one or more forecasts before they expire. -For more information, see {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the Future]. +NOTE: When you delete a job, its associated forecasts are deleted. +For more information, see +{stack-ov}/ml-overview.html#ml-forecasting[Forecasting the future]. -==== Path Parameters +[[ml-delete-forecast-path-parms]] +==== {api-path-parms-title} -`job_id` (required):: +`` (Required):: (string) Identifier for the job. -`forecast_id`:: +`forecast_id` (Optional):: (string) A comma-separated list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all`, the API deletes all forecasts from the job. -==== Request Parameters +[[ml-delete-forecast-query-parms]] +==== {api-query-parms-title} -`allow_no_forecasts`:: +`allow_no_forecasts` (Optional):: (boolean) Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. The default value is `true`. -`timeout`:: +`timeout` (Optional):: (time units) Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. The default value is `30s`. For more information about time units, see <>. - - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. -==== Examples +[[ml-delete-forecast-example]] +==== {api-examples-title} The following example deletes all forecasts from the `total-requests` job: diff --git a/docs/reference/ml/apis/delete-job.asciidoc b/docs/reference/ml/apis/delete-job.asciidoc index a52c434f93cfe..efd172ef5fb60 100644 --- a/docs/reference/ml/apis/delete-job.asciidoc +++ b/docs/reference/ml/apis/delete-job.asciidoc @@ -8,13 +8,20 @@ Deletes an existing anomaly detection job. 
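For instance, combining the `force` and `wait_for_completion` query parameters described below, a forced, non-blocking deletion of the `total-requests` job from the later example might be sketched as:

[source,js]
--------------------------------------------------
DELETE _ml/anomaly_detectors/total-requests?force=true&wait_for_completion=false
--------------------------------------------------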
- -==== Request +[[ml-delete-job-request]] +==== {api-request-title} `DELETE _ml/anomaly_detectors/` +[[ml-delete-job-prereqs]] +==== {api-prereq-title} + +* If {es} {security-features} are enabled, you must have `manage_ml` or `manage` +cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Description +[[ml-delete-job-desc]] +==== {api-description-title} All job configuration, model state and results are deleted. @@ -30,29 +37,25 @@ is used the job must be closed before it can be deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. -==== Path Parameters +[[ml-delete-job-path-parms]] +==== {api-path-parms-title} -`job_id` (required):: - (string) Identifier for the job +`` (Required):: + (string) Identifier for the job. -===== Query Parameters +[[ml-delete-job-query-parms]] +==== {api-query-parms-title} -`force`:: +`force` (Optional):: (boolean) Use to forcefully delete an opened job; this method is quicker than closing and deleting the job. -`wait_for_completion`:: +`wait_for_completion` (Optional):: (boolean) Specifies whether the request should return immediately or wait until the job deletion completes. Defaults to `true`. -==== Authorization - -If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` -cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-delete-job-example]] +==== {api-examples-title} The following example deletes the `total-requests` job: diff --git a/docs/reference/ml/apis/delete-snapshot.asciidoc b/docs/reference/ml/apis/delete-snapshot.asciidoc index 18092ff8e89c1..0e696f2a01139 100644 --- a/docs/reference/ml/apis/delete-snapshot.asciidoc +++ b/docs/reference/ml/apis/delete-snapshot.asciidoc @@ -8,34 +8,36 @@ Deletes an existing model snapshot. - -==== Request +[[ml-delete-snapshot-request]] +==== {api-request-title} `DELETE _ml/anomaly_detectors//model_snapshots/` +[[ml-delete-snapshot-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Description +[[ml-delete-snapshot-desc]] +==== {api-description-title} IMPORTANT: You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. -==== Path Parameters - -`job_id` (required):: - (string) Identifier for the job - -`snapshot_id` (required):: - (string) Identifier for the model snapshot - - -==== Authorization +[[ml-delete-snapshot-path-parms]] +==== {api-path-parms-title} -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. +`` (Required):: + (string) Identifier for the job. +`` (Required):: + (string) Identifier for the model snapshot. 
-==== Examples +[[ml-delete-snapshot-example]] +==== {api-examples-title} The following example deletes the `1491948163` snapshot: diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index f3029635bf44e..212e80c7e1bd2 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -11,12 +11,20 @@ experimental[] Finds the structure of a text file. The text file must contain data that is suitable to be ingested into {es}. -==== Request +[[ml-find-file-structure-request]] +==== {api-request-title} `POST _ml/find_file_structure` +[[ml-find-file-structure-prereqs]] +==== {api-prereq-title} -==== Description +* If the {es} {security-features} are enabled, you must have `monitor_ml` or +`monitor` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-find-file-structure-desc]] +==== {api-description-title} This API provides a starting point for ingesting data into {es} in a format that is suitable for subsequent use with other {ml} functionality. @@ -47,38 +55,39 @@ specify the `explain` query parameter. It causes an `explanation` to appear in the response, which should help in determining why the returned structure was chosen. -==== Query Parameters +[[ml-find-file-structure-query-parms]] +==== {api-query-parms-title} -`charset`:: +`charset` (Optional):: (string) The file's character set. It must be a character set that is supported by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. -`column_names`:: +`column_names` (Optional):: (string) If you have set `format` to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the file. If the file does not have a header role, columns are named "column1", "column2", "column3", etc. -`delimiter`:: +`delimiter` (Optional):: (string) If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. If this parameter is not specified, the structure finder considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). -`explain`:: +`explain` (Optional):: (boolean) If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. The default value is `false`. -`format`:: +`format` (Optional):: (string) The high level structure of the file. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. If this parameter is not specified, the structure finder chooses one. -`grok_pattern`:: +`grok_pattern` (Optional):: (string) If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the file. The name of the timestamp field in the Grok pattern must match what is specified @@ -86,20 +95,20 @@ chosen. name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. 
-`has_header_row`:: +`has_header_row` (Optional):: (boolean) If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the file. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the file to other rows. -`line_merge_size_limit`:: +`line_merge_size_limit` (Optional):: (unsigned integer) The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured files. The default is 10000. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. -`lines_to_sample`:: +`lines_to_sample` (Optional):: (unsigned integer) The number of lines to include in the structural analysis, starting from the beginning of the file. The minimum is 2; the default is 1000. If the value of this parameter is greater than the number of lines in @@ -115,7 +124,7 @@ efficient to upload a sample file with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. -- -`quote`:: +`quote` (Optional):: (string) If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not @@ -123,18 +132,18 @@ to request analysis of 100000 lines to achieve some variety. format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. -`should_trim_fields`:: +`should_trim_fields` (Optional):: (boolean) If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`. -`timeout`:: +`timeout` (Optional):: (time) Sets the maximum amount of time that the structure analysis make take. If the analysis is still running when the timeout expires then it will be aborted. The default value is 25 seconds. -`timestamp_field`:: +`timestamp_field` (Optional):: (string) The name of the field that contains the primary timestamp of each record in the file. In particular, if the file were ingested into an index, this is the field that would be used to populate the `@timestamp` field. + @@ -153,7 +162,7 @@ field (if any) is the primary timestamp field. For structured file formats, it is not compulsory to have a timestamp in the file. -- -`timestamp_format`:: +`timestamp_format` (Optional):: (string) The Java time format of the timestamp field in the file. + + -- @@ -197,22 +206,16 @@ format from a built-in set. -- -==== Request Body +[[ml-find-file-structure-request-body]] +==== {api-request-body-title} The text file that you want to analyze. It must contain data that is suitable to be ingested into {es}. It does not need to be in JSON format and it does not need to be UTF-8 encoded. The size is limited to the {es} HTTP receive buffer size, which defaults to 100 Mb. - -==== Authorization - -You must have `monitor_ml`, or `monitor` cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. 
- - [[ml-find-file-structure-examples]] -==== Examples +==== {api-examples-title} Suppose you have a newline-delimited JSON file that contains information about some books. You can send the contents to the `find_file_structure` endpoint: diff --git a/docs/reference/ml/apis/flush-job.asciidoc b/docs/reference/ml/apis/flush-job.asciidoc index e2793b2c1a174..590f866ca1799 100644 --- a/docs/reference/ml/apis/flush-job.asciidoc +++ b/docs/reference/ml/apis/flush-job.asciidoc @@ -8,13 +8,20 @@ Forces any buffered data to be processed by the job. - -==== Request +[[ml-flush-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_flush` +[[ml-flush-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Description +[[ml-flush-job-desc]] +==== {api-description-title} The flush jobs API is only applicable when sending data for analysis using the <>. Depending on the content of the buffer, then it @@ -26,44 +33,38 @@ remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data. +[[ml-flush-job-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`job_id` (required):: -(string) Identifier for the job - +`` (Required):: +(string) Identifier for the job. -==== Query Parameters +[[ml-flush-job-query-parms]] +==== {api-query-parms-title} -`advance_time`:: +`advance_time` (Optional):: (string) Specifies to advance to a particular time value. Results are generated and the model is updated for data from the specified time interval. -`calc_interim`:: +`calc_interim` (Optional):: (boolean) If true, calculates the interim results for the most recent bucket or all buckets within the latency period. -`end`:: +`end` (Optional):: (string) When used in conjunction with `calc_interim`, specifies the range of buckets on which to calculate interim results. -`skip_time`:: +`skip_time` (Optional):: (string) Specifies to skip to a particular time value. Results are not generated and the model is not updated for data from the specified time interval. -`start`:: +`start` (Optional):: (string) When used in conjunction with `calc_interim`, specifies the range of buckets on which to calculate interim results. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-flush-job-example]] +==== {api-examples-title} The following example flushes the `total-requests` job: diff --git a/docs/reference/ml/apis/forecast.asciidoc b/docs/reference/ml/apis/forecast.asciidoc index 71a7e1db2b185..d137b2e1be3ce 100644 --- a/docs/reference/ml/apis/forecast.asciidoc +++ b/docs/reference/ml/apis/forecast.asciidoc @@ -8,14 +8,22 @@ Predicts the future behavior of a time series by using its historical behavior. -==== Request +[[ml-forecast-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_forecast` +[[ml-forecast-prereqs]] +==== {api-prereq-title} -==== Description +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -See {xpack-ref}/ml-overview.html#ml-forecasting[Forecasting the Future]. 
+[[ml-forecast-desc]] +==== {api-description-title} + +See {stack-ov}/ml-overview.html#ml-forecasting[Forecasting the future]. [NOTE] =============================== @@ -25,33 +33,29 @@ forecast. For more information about this property, see <>. * The job must be open when you create a forecast. Otherwise, an error occurs. =============================== -==== Path Parameters +[[ml-forecast-path-parms]] +==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. +[[ml-forecast-request-body]] +==== {api-request-body-title} -==== Request Parameters - -`duration`:: +`duration` (Optional):: (time units) A period of time that indicates how far into the future to forecast. For example, `30d` corresponds to 30 days. The default value is 1 day. The forecast starts at the last record that was processed. For more information about time units, see <>. -`expires_in`:: +`expires_in` (Optional):: (time units) The period of time that forecast results are retained. After a forecast expires, the results are deleted. The default value is 14 days. If set to a value of `0`, the forecast is never automatically deleted. For more information about time units, see <>. -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-forecast-example]] +==== {api-examples-title} The following example requests a 10 day forecast for the `total-requests` job: diff --git a/docs/reference/ml/apis/get-bucket.asciidoc b/docs/reference/ml/apis/get-bucket.asciidoc index 39c548dd64e8d..2a73d0f5d3538 100644 --- a/docs/reference/ml/apis/get-bucket.asciidoc +++ b/docs/reference/ml/apis/get-bucket.asciidoc @@ -8,64 +8,75 @@ Retrieves job results for one or more buckets. - -==== Request +[[ml-get-bucket-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//results/buckets` + `GET _ml/anomaly_detectors//results/buckets/` +[[ml-get-bucket-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also +need `read` index privilege on the index that stores the results. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. For more information, see +{stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. -==== Description +[[ml-get-bucket-desc]] +==== {api-description-title} The get buckets API presents a chronological view of the records, grouped by bucket. +[[ml-get-bucket-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`job_id`:: +`` (Required):: (string) Identifier for the job -`timestamp`:: +`` (Optional):: (string) The timestamp of a single bucket result. - If you do not specify this optional parameter, the API returns information + If you do not specify this parameter, the API returns information about all buckets. +[[ml-get-bucket-request-body]] +==== {api-request-body-title} -==== Request Body - -`anomaly_score`:: +`anomaly_score` (Optional):: (double) Returns buckets with anomaly scores greater or equal than this value. -`desc`:: +`desc` (Optional):: (boolean) If true, the buckets are sorted in descending order. -`end`:: +`end` (Optional):: (string) Returns buckets with timestamps earlier than this time. 
-`exclude_interim`:: +`exclude_interim` (Optional):: (boolean) If true, the output excludes interim results. By default, interim results are included. -`expand`:: +`expand` (Optional):: (boolean) If true, the output includes anomaly records. -`page`:: +`page` (Optional):: `from`::: (integer) Skips the specified number of buckets. `size`::: (integer) Specifies the maximum number of buckets to obtain. -`sort`:: +`sort` (Optional):: (string) Specifies the sort field for the requested buckets. By default, the buckets are sorted by the `timestamp` field. -`start`:: +`start` (Optional):: (string) Returns buckets with timestamps after this time. - -===== Results +[[ml-get-bucket-results]] +==== {api-response-body-title} The API returns the following information: @@ -73,18 +84,8 @@ The API returns the following information: (array) An array of bucket objects. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. You also need `read` index privilege on the index -that stores the results. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges] and -{xpack-ref}/built-in-roles.html[Built-in Roles]. - - -==== Examples +[[ml-get-bucket-example]] +==== {api-examples-title} The following example gets bucket information for the `it-ops-kpi` job: diff --git a/docs/reference/ml/apis/get-calendar-event.asciidoc b/docs/reference/ml/apis/get-calendar-event.asciidoc index a890f67db0d23..173a249488684 100644 --- a/docs/reference/ml/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/apis/get-calendar-event.asciidoc @@ -9,39 +9,49 @@ Retrieves information about the scheduled events in calendars. - -==== Request +[[ml-get-calendar-event-request]] +==== {api-request-title} `GET _ml/calendars//events` + `GET _ml/calendars/_all/events` +[[ml-get-calendar-event-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -===== Description +[[ml-get-calendar-event-desc]] +==== {api-description-title} You can get scheduled event information for a single calendar or for all calendars by using `_all`. -==== Path Parameters +[[ml-get-calendar-event-path-parms]] +==== {api-path-parms-title} -`calendar_id` (required):: +`` (Required):: (string) Identifier for the calendar. -==== Request Body +[[ml-get-calendar-event-request-body]] +==== {api-request-body-title} -`end`:: +`end` (Optional):: (string) Specifies to get events with timestamps earlier than this time. -`from`:: +`from` (Optional):: (integer) Skips the specified number of events. -`size`:: +`size` (Optional):: (integer) Specifies the maximum number of events to obtain. -`start`:: +`start` (Optional):: (string) Specifies to get events with timestamps after this time. -==== Results +[[ml-get-calendar-event-results]] +==== {api-response-body-title} The API returns the following information: @@ -49,15 +59,8 @@ The API returns the following information: (array) An array of scheduled event resources. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. 
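Because `_all` retrieves scheduled events across every calendar, the endpoint can be combined with the paging parameters listed above. A minimal sketch, with illustrative `from` and `size` values:

[source,js]
----
GET _ml/calendars/_all/events
{
  "from": 0,
  "size": 10
}
----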
- - -==== Examples +[[ml-get-calendar-event-example]] +==== {api-examples-title} The following example gets information about the scheduled events in the `planned-outages` calendar: diff --git a/docs/reference/ml/apis/get-calendar.asciidoc b/docs/reference/ml/apis/get-calendar.asciidoc index 09e429b1f6de3..3d55f825bdb86 100644 --- a/docs/reference/ml/apis/get-calendar.asciidoc +++ b/docs/reference/ml/apis/get-calendar.asciidoc @@ -8,37 +8,44 @@ Retrieves configuration information for calendars. - -==== Request +[[ml-get-calendar-request]] +==== {api-request-title} `GET _ml/calendars/` + `GET _ml/calendars/_all` +[[ml-get-calendar-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -===== Description +[[ml-get-calendar-desc]] +==== {api-description-title} You can get information for a single calendar or for all calendars by using `_all`. +[[ml-get-calendar-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`calendar_id`:: +`` (Required):: (string) Identifier for the calendar. +[[ml-get-calendar-request-body]] +==== {api-request-body-title} -==== Request Body - -`page`:: +`page` (Optional):: `from`::: (integer) Skips the specified number of calendars. -`size`::: +`size` (Optional)::: (integer) Specifies the maximum number of calendars to obtain. - -==== Results +[[ml-get-calendar-results]] +==== {api-response-body-title} The API returns the following information: @@ -46,15 +53,8 @@ The API returns the following information: (array) An array of calendar resources. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-get-calendar-example]] +==== {api-examples-title} The following example gets configuration information for the `planned-outages` calendar: diff --git a/docs/reference/ml/apis/get-category.asciidoc b/docs/reference/ml/apis/get-category.asciidoc index 1fbfda20eccc0..6301eaf13a538 100644 --- a/docs/reference/ml/apis/get-category.asciidoc +++ b/docs/reference/ml/apis/get-category.asciidoc @@ -8,38 +8,50 @@ Retrieves job results for one or more categories. - -==== Request +[[ml-get-category-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//results/categories` + `GET _ml/anomaly_detectors//results/categories/` -==== Description +[[ml-get-category-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also +need `read` index privilege on the index that stores the results. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. See {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + +[[ml-get-category-desc]] +==== {api-description-title} For more information about categories, see -{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. +{stack-ov}/ml-configuring-categories.html[Categorizing log messages]. -==== Path Parameters +[[ml-get-category-path-parms]] +==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. -`category_id`:: - (long) Identifier for the category. 
If you do not specify this optional parameter, +`` (Optional):: + (long) Identifier for the category. If you do not specify this parameter, the API returns information about all categories in the job. +[[ml-get-category-request-body]] +==== {api-request-body-title} -==== Request Body - -`page`:: +`page` (Optional):: `from`::: (integer) Skips the specified number of categories. `size`::: (integer) Specifies the maximum number of categories to obtain. - -==== Results +[[ml-get-category-results]] +==== {api-response-body-title} The API returns the following information: @@ -47,18 +59,8 @@ The API returns the following information: (array) An array of category objects. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. You also need `read` index privilege on the index -that stores the results. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges] and -{xpack-ref}/built-in-roles.html[Built-in Roles]. - - -==== Examples +[[ml-get-category-example]] +==== {api-examples-title} The following example gets information about one category for the `esxi_log` job: diff --git a/docs/reference/ml/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/apis/get-datafeed-stats.asciidoc index d1b842509b408..6ce99785912af 100644 --- a/docs/reference/ml/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/apis/get-datafeed-stats.asciidoc @@ -10,9 +10,8 @@ Retrieves usage information for {dfeeds}. - -==== Request - +[[ml-get-datafeed-stats-request]] +==== {api-request-title} `GET _ml/datafeeds//_stats` + @@ -20,11 +19,17 @@ Retrieves usage information for {dfeeds}. `GET _ml/datafeeds/_stats` + -`GET _ml/datafeeds/_all/_stats` + +`GET _ml/datafeeds/_all/_stats` +[[ml-get-datafeed-stats-prereqs]] +==== {api-prereq-title} +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Description +[[ml-get-datafeed-stats-desc]] +==== {api-description-title} You can get statistics for multiple {dfeeds} in a single API request by using a comma-separated list of {dfeeds} or a wildcard expression. You can get @@ -36,15 +41,16 @@ If the {dfeed} is stopped, the only information you receive is the IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. -==== Path Parameters +[[ml-get-datafeed-stats-path-parms]] +==== {api-path-parms-title} -`feed_id`:: +`` (Optional):: (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. If you do not specify one of these options, the API returns statistics for all {dfeeds}. - -==== Results +[[ml-get-datafeed-stats-results]] +==== {api-response-body-title} The API returns the following information: @@ -52,15 +58,8 @@ The API returns the following information: (array) An array of {dfeed} count objects. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. 
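Since the {dfeed} identifier in the path accepts wildcard expressions, statistics for a whole family of {dfeeds} can be fetched in one call. A minimal sketch, using a hypothetical `datafeed-total-*` pattern:

[source,js]
----
GET _ml/datafeeds/datafeed-total-*/_stats
----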
- - -==== Examples +[[ml-get-datafeed-stats-example]] +==== {api-examples-title} The following example gets usage information for the `datafeed-total-requests` {dfeed}: diff --git a/docs/reference/ml/apis/get-datafeed.asciidoc b/docs/reference/ml/apis/get-datafeed.asciidoc index 2c9ef7e3aec74..abc79ae5c7d71 100644 --- a/docs/reference/ml/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/apis/get-datafeed.asciidoc @@ -10,8 +10,8 @@ Retrieves configuration information for {dfeeds}. -==== Request - +[[ml-get-datafeed-request]] +==== {api-request-title} `GET _ml/datafeeds/` + @@ -19,10 +19,17 @@ Retrieves configuration information for {dfeeds}. `GET _ml/datafeeds/` + -`GET _ml/datafeeds/_all` + +`GET _ml/datafeeds/_all` + +[[ml-get-datafeed-prereqs]] +==== {api-prereq-title} +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -===== Description +[[ml-get-datafeed-desc]] +==== {api-description-title} You can get information for multiple {dfeeds} in a single API request by using a comma-separated list of {dfeeds} or a wildcard expression. You can get @@ -31,15 +38,16 @@ information for all {dfeeds} by using `_all`, by specifying `*` as the IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. -==== Path Parameters +[[ml-get-datafeed-path-parms]] +==== {api-path-parms-title} -`feed_id`:: +`` (Optional):: (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all {dfeeds}. - -==== Results +[[ml-get-datafeed-results]] +==== {api-response-body-title} The API returns the following information: @@ -47,15 +55,8 @@ The API returns the following information: (array) An array of {dfeed} objects. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-get-datafeed-example]] +==== {api-examples-title} The following example gets configuration information for the `datafeed-total-requests` {dfeed}: diff --git a/docs/reference/ml/apis/get-filter.asciidoc b/docs/reference/ml/apis/get-filter.asciidoc index 53dfab2530a10..ad5fee343f6d5 100644 --- a/docs/reference/ml/apis/get-filter.asciidoc +++ b/docs/reference/ml/apis/get-filter.asciidoc @@ -8,36 +8,43 @@ Retrieves filters. - -==== Request +[[ml-get-filter-request]] +==== {api-request-title} `GET _ml/filters/` + `GET _ml/filters/` +[[ml-get-filter-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -===== Description +[[ml-get-filter-desc]] +==== {api-description-title} You can get a single filter or all filters. For more information, see {stack-ov}/ml-rules.html[Machine learning custom rules]. +[[ml-get-filter-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`filter_id`:: +`` (Optional):: (string) Identifier for the filter. +[[ml-get-filter-query-parms]] +==== {api-query-parms-title} -==== Querystring Parameters - -`from`::: +`from` (Optional)::: (integer) Skips the specified number of filters. 
-`size`::: +`size` (Optional)::: (integer) Specifies the maximum number of filters to obtain. - -==== Results +[[ml-get-filter-results]] +==== {api-response-body-title} The API returns the following information: @@ -45,15 +52,8 @@ The API returns the following information: (array) An array of filter resources. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-get-filter-example]] +==== {api-examples-title} The following example gets configuration information for the `safe_domains` filter: diff --git a/docs/reference/ml/apis/get-influencer.asciidoc b/docs/reference/ml/apis/get-influencer.asciidoc index 7425a734ed441..8d7ca889a264f 100644 --- a/docs/reference/ml/apis/get-influencer.asciidoc +++ b/docs/reference/ml/apis/get-influencer.asciidoc @@ -8,48 +8,58 @@ Retrieves job results for one or more influencers. - -==== Request +[[ml-get-influencer-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//results/influencers` -//===== Description +[[ml-get-influencer-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also +need `read` index privilege on the index that stores the results. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. See {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. -==== Path Parameters +[[ml-get-influencer-path-parms]] +==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. -==== Request Body +[[ml-get-influencer-request-body]] +==== {api-request-body-title} -`desc`:: +`desc` (Optional):: (boolean) If true, the results are sorted in descending order. -`end`:: +`end` (Optional):: (string) Returns influencers with timestamps earlier than this time. -`exclude_interim`:: +`exclude_interim` (Optional):: (boolean) If true, the output excludes interim results. By default, interim results are included. -`influencer_score`:: +`influencer_score` (Optional):: (double) Returns influencers with anomaly scores greater or equal than this value. -`page`:: +`page` (Optional):: `from`::: (integer) Skips the specified number of influencers. `size`::: (integer) Specifies the maximum number of influencers to obtain. -`sort`:: +`sort` (Optional):: (string) Specifies the sort field for the requested influencers. By default the influencers are sorted by the `influencer_score` value. -`start`:: +`start` (Optional):: (string) Returns influencers with timestamps after this time. - -==== Results +[[ml-get-influencer-results]] +==== {api-response-body-title} The API returns the following information: @@ -57,19 +67,8 @@ The API returns the following information: (array) An array of influencer objects. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. You also need `read` index privilege on the index -that stores the results. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges] and -{xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. 
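To show how the sorting and score-threshold parameters combine, here is a minimal sketch that assumes results exist for the `it_ops_new_kpi` job and uses an illustrative threshold of 50:

[source,js]
----
GET _ml/anomaly_detectors/it_ops_new_kpi/results/influencers
{
  "sort": "influencer_score",
  "desc": true,
  "influencer_score": 50
}
----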
- - -==== Examples +[[ml-get-influencer-example]] +==== {api-examples-title} The following example gets influencer information for the `it_ops_new_kpi` job: diff --git a/docs/reference/ml/apis/get-job-stats.asciidoc b/docs/reference/ml/apis/get-job-stats.asciidoc index f3a3207c1a0f3..8a705d7ff9ed1 100644 --- a/docs/reference/ml/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/apis/get-job-stats.asciidoc @@ -8,10 +8,8 @@ Retrieves usage information for jobs. - -==== Request - - +[[ml-get-job-stats-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//_stats` @@ -19,10 +17,17 @@ Retrieves usage information for jobs. `GET _ml/anomaly_detectors/_stats` + -`GET _ml/anomaly_detectors/_all/_stats` + +`GET _ml/anomaly_detectors/_all/_stats` +[[ml-get-job-stats-prereqs]] +==== {api-prereq-title} -===== Description +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-get-job-stats-desc]] +==== {api-description-title} You can get statistics for multiple jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can @@ -31,16 +36,16 @@ get statistics for all jobs by using `_all`, by specifying `*` as the IMPORTANT: This API returns a maximum of 10,000 jobs. +[[ml-get-job-stats-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`job_id`:: +`` (Optional):: (string) An identifier for the job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns statistics for all jobs. - -==== Results +[[ml-get-job-stats-results]] +==== {api-response-body-title} The API returns the following information: @@ -48,15 +53,8 @@ The API returns the following information: (array) An array of job statistics objects. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-get-job-stats-example]] +==== {api-examples-title} The following example gets usage information for the `farequote` job: diff --git a/docs/reference/ml/apis/get-job.asciidoc b/docs/reference/ml/apis/get-job.asciidoc index 4eb7eaf5a7f07..176ca09fc56c5 100644 --- a/docs/reference/ml/apis/get-job.asciidoc +++ b/docs/reference/ml/apis/get-job.asciidoc @@ -8,8 +8,8 @@ Retrieves configuration information for jobs. - -==== Request +[[ml-get-job-request]] +==== {api-request-title} `GET _ml/anomaly_detectors/` + @@ -19,8 +19,15 @@ Retrieves configuration information for jobs. `GET _ml/anomaly_detectors/_all` +[[ml-get-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -===== Description +[[ml-get-job-desc]] +==== {api-description-title} You can get information for multiple jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can @@ -29,15 +36,16 @@ get information for all jobs by using `_all`, by specifying `*` as the IMPORTANT: This API returns a maximum of 10,000 jobs. 
+[[ml-get-job-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`job_id`:: +`<job_id>` (Optional):: (string) Identifier for the job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all jobs. -==== Results +[[ml-get-job-results]] +==== {api-response-body-title} The API returns the following information: @@ -45,15 +53,8 @@ The API returns the following information: (array) An array of job resources. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-get-job-example]] +==== {api-examples-title} The following example gets configuration information for the `total-requests` job: diff --git a/docs/reference/ml/apis/get-ml-info.asciidoc b/docs/reference/ml/apis/get-ml-info.asciidoc index 41b680e1327c0..2c486741ffd42 100644 --- a/docs/reference/ml/apis/get-ml-info.asciidoc +++ b/docs/reference/ml/apis/get-ml-info.asciidoc @@ -10,28 +10,30 @@ Returns defaults and limits used by machine learning. -==== Request +[[get-ml-info-request]] +==== {api-request-title} `GET _ml/info` -==== Description +[[get-ml-info-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. See {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + +[[get-ml-info-desc]] +==== {api-description-title} This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{stack-ov}/security-privileges.html[Security Privileges] and -{stack-ov}/built-in-roles.html[Built-in Roles]. - - -==== Examples +[[get-ml-info-example]] +==== {api-examples-title} The endpoint takes no arguments: diff --git a/docs/reference/ml/apis/get-overall-buckets.asciidoc b/docs/reference/ml/apis/get-overall-buckets.asciidoc index d8592e6516bbb..4d8287f9a54f7 100644 --- a/docs/reference/ml/apis/get-overall-buckets.asciidoc +++ b/docs/reference/ml/apis/get-overall-buckets.asciidoc @@ -9,7 +9,8 @@ Retrieves overall bucket results that summarize the bucket results of multiple jobs. -==== Request +[[ml-get-overall-buckets-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//results/overall_buckets` + @@ -17,7 +18,18 @@ bucket results of multiple jobs. `GET _ml/anomaly_detectors/_all/results/overall_buckets` -==== Description +[[ml-get-overall-buckets-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also +need `read` index privilege on the index that stores the results. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges.
See {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + +[[ml-get-overall-buckets-desc]] +==== {api-description-title} You can summarize the bucket results for all jobs by using `_all` or by specifying `*` as the ``. @@ -41,43 +53,46 @@ to request overall buckets that span longer than the largest job's `bucket_span` When set, the `overall_score` will be the max `overall_score` of the corresponding overall buckets with a span equal to the largest job's `bucket_span`. -==== Path Parameters +[[ml-get-overall-buckets-path-parms]] +==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. -==== Request Body +[[ml-get-overall-buckets-request-body]] +==== {api-request-body-title} -`allow_no_jobs`:: +`allow_no_jobs` (Optional):: (boolean) If `false` and the `job_id` does not match any job an error will be returned. The default value is `true`. -`bucket_span`:: +`bucket_span` (Optional):: (string) The span of the overall buckets. Must be greater or equal to the largest job's `bucket_span`. Defaults to the largest job's `bucket_span`. -`end`:: +`end` (Optional):: (string) Returns overall buckets with timestamps earlier than this time. -`exclude_interim`:: +`exclude_interim` (Optional):: (boolean) If `true`, the output excludes interim overall buckets. Overall buckets are interim if any of the job buckets within the overall bucket interval are interim. By default, interim results are included. -`overall_score`:: - (double) Returns overall buckets with overall scores greater or equal than this value. +`overall_score` (Optional):: + (double) Returns overall buckets with overall scores greater or equal than + this value. -`start`:: +`start` (Optional):: (string) Returns overall buckets with timestamps after this time. -`top_n`:: +`top_n` (Optional):: (integer) The number of top job bucket scores to be used in the `overall_score` calculation. The default value is `1`. - -===== Results +[[ml-get-overall-buckets-results]] +==== {api-response-body-title} The API returns the following information: @@ -85,18 +100,8 @@ The API returns the following information: (array) An array of overall bucket objects. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. You also need `read` index privilege on the index -that stores the results. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges] and -{xpack-ref}/built-in-roles.html[Built-in Roles]. - - -==== Examples +[[ml-get-overall-buckets-example]] +==== {api-examples-title} The following example gets overall buckets for jobs with IDs matching `job-*`: diff --git a/docs/reference/ml/apis/get-record.asciidoc b/docs/reference/ml/apis/get-record.asciidoc index afc7d2733c872..0acc3e0e49fce 100644 --- a/docs/reference/ml/apis/get-record.asciidoc +++ b/docs/reference/ml/apis/get-record.asciidoc @@ -8,49 +8,58 @@ Retrieves anomaly records for a job. 
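A minimal sketch of the overall buckets parameters described above, using the `job-*` wildcard and an illustrative score threshold:

[source,js]
----
GET _ml/anomaly_detectors/job-*/results/overall_buckets
{
  "top_n": 2,
  "overall_score": 50.0
}
----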
- -==== Request +[[ml-get-record-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//results/records` -//===== Description +[[ml-get-record-prereqs]] +==== {api-prereq-title} -==== Path Parameters +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also +need `read` index privilege on the index that stores the results. The +`machine_learning_admin` and `machine_learning_user` roles provide these +privileges. See {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. -`job_id`:: - (string) Identifier for the job. +[[ml-get-record-path-parms]] +==== {api-path-parms-title} +`job_id` (Required):: + (string) Identifier for the job. -==== Request Body +[[ml-get-record-request-body]] +==== {api-request-body-title} -`desc`:: +`desc` (Optional):: (boolean) If true, the results are sorted in descending order. -`end`:: +`end` (Optional):: (string) Returns records with timestamps earlier than this time. -`exclude_interim`:: +`exclude_interim` (Optional):: (boolean) If true, the output excludes interim results. By default, interim results are included. -`page`:: +`page` (Optional):: `from`::: (integer) Skips the specified number of records. `size`::: (integer) Specifies the maximum number of records to obtain. -`record_score`:: +`record_score` (Optional):: (double) Returns records with anomaly scores greater or equal than this value. -`sort`:: +`sort` (Optional):: (string) Specifies the sort field for the requested records. By default, the records are sorted by the `anomaly_score` value. -`start`:: +`start` (Optional):: (string) Returns records with timestamps after this time. - -==== Results +[[ml-get-record-results]] +==== {api-response-body-title} The API returns the following information: @@ -58,19 +67,8 @@ The API returns the following information: (array) An array of record objects. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. You also need `read` index privilege on the index -that stores the results. The `machine_learning_admin` and `machine_learning_user` -roles provide these privileges. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges] and -{xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. - - -==== Examples +[[ml-get-record-example]] +==== {api-examples-title} The following example gets record information for the `it-ops-kpi` job: diff --git a/docs/reference/ml/apis/get-snapshot.asciidoc b/docs/reference/ml/apis/get-snapshot.asciidoc index 4935a6e2d238f..ea1b15df33f33 100644 --- a/docs/reference/ml/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/apis/get-snapshot.asciidoc @@ -8,47 +8,54 @@ Retrieves information about model snapshots. - -==== Request +[[ml-get-snapshot-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//model_snapshots` + `GET _ml/anomaly_detectors//model_snapshots/` -//===== Description +[[ml-get-snapshot-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Path Parameters +[[ml-get-snapshot-path-parms]] +==== {api-path-parms-title} -`job_id`:: +`` (Required):: (string) Identifier for the job. 
-`snapshot_id`:: +`` (Optional):: (string) Identifier for the model snapshot. If you do not specify this optional parameter, the API returns information about all model snapshots. -==== Request Body +[[ml-get-snapshot-request-body]] +==== {api-request-body-title} -`desc`:: +`desc` (Optional):: (boolean) If true, the results are sorted in descending order. -`end`:: +`end` (Optional):: (date) Returns snapshots with timestamps earlier than this time. -`from`:: +`from` (Optional):: (integer) Skips the specified number of snapshots. -`size`:: +`size` (Optional):: (integer) Specifies the maximum number of snapshots to obtain. -`sort`:: +`sort` (Optional):: (string) Specifies the sort field for the requested snapshots. By default, the snapshots are sorted by their timestamp. -`start`:: +`start` (Optional):: (string) Returns snapshots with timestamps after this time. - -==== Results +[[ml-get-snapshot-results]] +==== {api-response-body-title} The API returns the following information: @@ -56,16 +63,8 @@ The API returns the following information: (array) An array of model snapshot objects. For more information, see <>. - -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. -//<>. - - -==== Examples +[[ml-get-snapshot-example]] +==== {api-examples-title} The following example gets model snapshot information for the `it_ops_new_logs` job: diff --git a/docs/reference/ml/apis/open-job.asciidoc b/docs/reference/ml/apis/open-job.asciidoc index 08c7b97d9c050..84000cb89b0de 100644 --- a/docs/reference/ml/apis/open-job.asciidoc +++ b/docs/reference/ml/apis/open-job.asciidoc @@ -10,41 +10,42 @@ Opens one or more jobs. A job must be opened in order for it to be ready to receive and analyze data. A job can be opened and closed multiple times throughout its lifecycle. - -==== Request +[[ml-open-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors/{job_id}/_open` +[[ml-open-job-prereqs]] +==== {api-prereq-title} -==== Description - -When you open a new job, it starts with an empty model. +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -When you open an existing job, the most recent model state is automatically loaded. -The job is ready to resume its analysis from where it left off, once new data is received. +[[ml-open-job-desc]] +==== {api-description-title} +When you open a new job, it starts with an empty model. -==== Path Parameters +When you open an existing job, the most recent model state is automatically +loaded. The job is ready to resume its analysis from where it left off, once new +data is received. -`job_id` (required):: -(string) Identifier for the job +[[ml-open-job-path-parms]] +==== {api-path-parms-title} +`` (Required):: + (string) Identifier for the job -==== Request Body +[[ml-open-job-request-body]] +==== {api-request-body-title} -`timeout`:: +`timeout` (Optional):: (time) Controls the time to wait until a job has opened. The default value is 30 minutes. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. 
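A minimal sketch of opening a job with the optional `timeout`, assuming the `total-requests` job exists:

[source,js]
----
POST _ml/anomaly_detectors/total-requests/_open
{
  "timeout": "35m"
}
----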
- - -==== Examples +[[ml-open-job-example]] +==== {api-examples-title} The following example opens the `total-requests` job and sets an optional property: diff --git a/docs/reference/ml/apis/post-calendar-event.asciidoc b/docs/reference/ml/apis/post-calendar-event.asciidoc index 5d122a5d6d1a8..88d771f3b7f18 100644 --- a/docs/reference/ml/apis/post-calendar-event.asciidoc +++ b/docs/reference/ml/apis/post-calendar-event.asciidoc @@ -8,38 +8,40 @@ Posts scheduled events in a calendar. -==== Request +[[ml-post-calendar-event-request]] +==== {api-request-title} `POST _ml/calendars//events` +[[ml-post-calendar-event-prereqs]] +==== {api-prereq-title} -==== Description +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -This API accepts a list of {xpack-ref}/ml-calendars.html[scheduled events], each +[[ml-post-calendar-event-desc]] +==== {api-description-title} + +This API accepts a list of {stack-ov}/ml-calendars.html[scheduled events], each of which must have a start time, end time, and description. -==== Path Parameters +[[ml-post-calendar-event-path-parms]] +==== {api-path-parms-title} -`calendar_id` (required):: +`` (Required):: (string) Identifier for the calendar. +[[ml-post-calendar-event-request-body]] +==== {api-request-body-title} -==== Request Body - -`events`:: - (array) A list of one of more scheduled events. The event's start and end times - may be specified as integer milliseconds since the epoch or as a string in ISO 8601 - format. See <>. - - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - +`events` (Required):: + (array) A list of one or more scheduled events. The event's start and end + times may be specified as integer milliseconds since the epoch or as a string + in ISO 8601 format. See <>. -==== Examples +[[ml-post-calendar-event-example]] +==== {api-examples-title} You can add scheduled events to the `planned-outages` calendar as follows: diff --git a/docs/reference/ml/apis/post-data.asciidoc b/docs/reference/ml/apis/post-data.asciidoc index 2df0df69e9030..3c2d0e49fde93 100644 --- a/docs/reference/ml/apis/post-data.asciidoc +++ b/docs/reference/ml/apis/post-data.asciidoc @@ -8,13 +8,20 @@ Sends data to an anomaly detection job for analysis. - -==== Request +[[ml-post-data-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_data` +[[ml-post-data-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Description +[[ml-post-data-desc]] +==== {api-description-title} The job must have a state of `open` to receive and process the data. @@ -42,39 +49,32 @@ IMPORTANT: For each job, data can only be accepted from a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. +[[ml-post-data-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`job_id` (required):: - (string) Identifier for the job - - -==== Query Parameters +`` (Required):: + (string) Identifier for the job.
-`reset_start`:: - (string) Specifies the start of the bucket resetting range +[[ml-post-data-query-parms]] +==== {api-query-parms-title} -`reset_end`:: - (string) Specifies the end of the bucket resetting range +`reset_start` (Optional):: + (string) Specifies the start of the bucket resetting range. +`reset_end` (Optional):: + (string) Specifies the end of the bucket resetting range. -==== Request Body +[[ml-post-data-request-body]] +==== {api-request-body-title} A sequence of one or more JSON documents containing the data to be analyzed. Only whitespace characters are permitted in between the documents. +[[ml-post-data-example]] +==== {api-examples-title} -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. -//<>. - - -==== Examples - -The following example posts data from the it_ops_new_kpi.json file to the `it_ops_new_kpi` job: +The following example posts data from the `it_ops_new_kpi.json` file to the +`it_ops_new_kpi` job: [source,js] -------------------------------------------------- @@ -83,8 +83,8 @@ $ curl -s -H "Content-type: application/json" --data-binary @it_ops_new_kpi.json -------------------------------------------------- -When the data is sent, you receive information about the operational progress of the job. -For example: +When the data is sent, you receive information about the operational progress of +the job. For example: [source,js] ---- diff --git a/docs/reference/ml/apis/preview-datafeed.asciidoc b/docs/reference/ml/apis/preview-datafeed.asciidoc index 83af6a78057cc..4ca3ebcd10e40 100644 --- a/docs/reference/ml/apis/preview-datafeed.asciidoc +++ b/docs/reference/ml/apis/preview-datafeed.asciidoc @@ -10,45 +10,41 @@ Previews a {dfeed}. - -==== Request +[[ml-preview-datafeed-request]] +==== {api-request-title} `GET _ml/datafeeds//_preview` +[[ml-preview-datafeed-prereqs]] +==== {api-prereq-title} + +* If {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, +`manage_ml`, or `manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Description +[[ml-preview-datafeed-desc]] +==== {api-description-title} The preview {dfeeds} API returns the first "page" of results from the `search` that is created by using the current {dfeed} settings. This preview shows the structure of the data that will be passed to the anomaly detection engine. - -==== Path Parameters - -`datafeed_id` (required):: - (string) Identifier for the {dfeed} - - -==== Authorization - -If {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, -`manage_ml`, or `manage` cluster privileges to use this API. For more -information, see -{stack-ov}/security-privileges.html[Security Privileges]. - - -==== Security Integration - -When {es} {security-features} are enabled, the {dfeed} query is previewed using -the credentials of the user calling the preview {dfeed} API. When the {dfeed} -is started it runs the query using the roles of the last user to -create or update it. If the two sets of roles differ then the preview may -not accurately reflect what the {dfeed} will return when started. To avoid +IMPORTANT: When {es} {security-features} are enabled, the {dfeed} query is +previewed using the credentials of the user calling the preview {dfeed} API. +When the {dfeed} is started it runs the query using the roles of the last user +to create or update it. 
If the two sets of roles differ then the preview may +not accurately reflect what the {dfeed} will return when started. To avoid such problems, the same user that creates/updates the {dfeed} should preview it to ensure it is returning the expected data. +[[ml-preview-datafeed-path-parms]] +==== {api-path-parms-title} + +`` (Required):: + (string) Identifier for the {dfeed}. -==== Examples +[[ml-preview-datafeed-example]] +==== {api-examples-title} The following example obtains a preview of the `datafeed-farequote` {dfeed}: diff --git a/docs/reference/ml/apis/put-calendar-job.asciidoc b/docs/reference/ml/apis/put-calendar-job.asciidoc index cafc5f670627c..0a1ee2fcc6de0 100644 --- a/docs/reference/ml/apis/put-calendar-job.asciidoc +++ b/docs/reference/ml/apis/put-calendar-job.asciidoc @@ -8,28 +8,30 @@ Adds a job to a calendar. -==== Request +[[ml-put-calendar-job-request]] +==== {api-request-title} `PUT _ml/calendars//jobs/` +[[ml-put-calendar-job-prereqs]] +==== {api-prereq-title} -==== Path Parameters +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -`calendar_id` (required):: - (string) Identifier for the calendar. - -`job_id` (required):: - (string) An identifier for the job. It can be a job identifier, a group name, or a - comma-separated list of jobs or groups. +[[ml-put-calendar-job-path-parms]] +==== {api-path-parms-title} -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +`` (Required):: + (string) Identifier for the calendar. +`` (Required):: + (string) An identifier for the job. It can be a job identifier, a group name, + or a comma-separated list of jobs or groups. -==== Examples +[[ml-put-calendar-job-example]] +==== {api-examples-title} The following example associates the `planned-outages` calendar with the `total-requests` job: diff --git a/docs/reference/ml/apis/put-calendar.asciidoc b/docs/reference/ml/apis/put-calendar.asciidoc index 9b1e781e3cc06..f98dd541d6753 100644 --- a/docs/reference/ml/apis/put-calendar.asciidoc +++ b/docs/reference/ml/apis/put-calendar.asciidoc @@ -8,35 +8,38 @@ Instantiates a calendar. -==== Request +[[ml-put-calendar-request]] +==== {api-request-title} `PUT _ml/calendars/` -===== Description +[[ml-put-calendar-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-put-calendar-desc]] +==== {api-description-title} For more information, see -{xpack-ref}/ml-calendars.html[Calendars and Scheduled Events]. +{stack-ov}/ml-calendars.html[Calendars and Scheduled Events]. -==== Path Parameters +[[ml-put-calendar-path-parms]] +==== {api-path-parms-title} -`calendar_id` (required):: +`` (Required):: (string) Identifier for the calendar. +[[ml-put-calendar-request-body]] +==== {api-request-body-title} -==== Request Body - -`description`:: +`description` (Optional):: (string) A description of the calendar. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. 
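A sketch that creates a calendar with the optional `description` and then attaches a job to it; the description text is illustrative:

[source,js]
----
PUT _ml/calendars/planned-outages
{
  "description": "Planned outages for the data center"
}

PUT _ml/calendars/planned-outages/jobs/total-requests
----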
- - -==== Examples +[[ml-put-calendar-example]] +==== {api-examples-title} The following example creates the `planned-outages` calendar: diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index 2e0f6700191cd..6c4578abb1671 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -10,102 +10,100 @@ Instantiates a {dfeed}. - -==== Request +[[ml-put-datafeed-request]] +==== {api-request-title} `PUT _ml/datafeeds/` +[[ml-put-datafeed-prereqs]] +==== {api-prereq-title} + +* If {es} {security-features} are enabled, you must have `manage_ml` or `manage` +cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Description +[[ml-put-datafeed-desc]] +==== {api-description-title} You must create a job before you create a {dfeed}. You can associate only one {dfeed} to each job. -IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {dfeed} - directly to the `.ml-config` index using the Elasticsearch index API. - If {es} {security-features} are enabled, do not give users `write` - privileges on the `.ml-config` index. - - -==== Path Parameters - -`feed_id` (required):: +[IMPORTANT] +==== +* You must use {kib} or this API to create a {dfeed}. Do not put a +{dfeed} directly to the `.ml-config` index using the {es} index API. If {es} +{security-features} are enabled, do not give users `write` privileges on the +`.ml-config` index. +* When {es} {security-features} are enabled, your {dfeed} remembers which roles +the user who created it had at the time of creation and runs the query using +those same roles. +==== + +[[ml-put-datafeed-path-parms]] +==== {api-path-parms-title} + +`` (Required):: (string) A numerical character string that uniquely identifies the {dfeed}. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. +[[ml-put-datafeed-request-body]] +==== {api-request-body-title} -==== Request Body - -`aggregations`:: +`aggregations` (Optional):: (object) If set, the {dfeed} performs aggregation searches. For more information, see <>. -`chunking_config`:: +`chunking_config` (Optional):: (object) Specifies how data searches are split into time chunks. See <>. -`delayed_data_check_config`:: +`delayed_data_check_config` (Optional):: (object) Specifies whether the data feed checks for missing data and the size of the window. See <>. -`frequency`:: +`frequency` (Optional):: (time units) The interval at which scheduled queries are made while the {dfeed} runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. -`indices` (required):: +`indices` (Required):: (array) An array of index names. Wildcards are supported. For example: `["it_ops_metrics", "server*"]`. -`job_id` (required):: +`job_id` (Required):: (string) A numerical character string that uniquely identifies the job. -`query`:: +`query` (Optional):: (object) The {es} query domain-specific language (DSL). This value corresponds to the query object in an {es} search POST body. All the options that are supported by {Es} can be used, as this object is passed verbatim to {es}. By default, this property has the following value: `{"match_all": {"boost": 1}}`. 
-`query_delay`:: +`query_delay` (Optional):: (time units) The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in {es} until 10:06 a.m., set this property to 120 seconds. The default value is `60s`. -`script_fields`:: +`script_fields` (Optional):: (object) Specifies scripts that evaluate custom expressions and returns script fields to the {dfeed}. The <> in a job can contain - functions that use these script fields. - For more information, + functions that use these script fields. For more information, see {ref}/search-request-script-fields.html[Script Fields]. -`scroll_size`:: +`scroll_size` (Optional):: (unsigned integer) The `size` parameter that is used in {es} searches. The default value is `1000`. For more information about these properties, see <>. - -==== Authorization - -If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` -cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - - -==== Security integration - -When {es} {security-features} are enabled, your {dfeed} remembers which roles the -user who created it had at the time of creation and runs the query using those -same roles. - - -==== Examples +[[ml-put-datafeed-example]] +==== {api-examples-title} The following example creates the `datafeed-total-requests` {dfeed}: diff --git a/docs/reference/ml/apis/put-filter.asciidoc b/docs/reference/ml/apis/put-filter.asciidoc index abe52dfb13b25..ad0d6d34ea81d 100644 --- a/docs/reference/ml/apis/put-filter.asciidoc +++ b/docs/reference/ml/apis/put-filter.asciidoc @@ -8,42 +8,45 @@ Instantiates a filter. -==== Request +[[ml-put-filter-request]] +==== {api-request-title} `PUT _ml/filters/` -===== Description +[[ml-put-filter-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-put-filter-desc]] +==== {api-description-title} A {stack-ov}/ml-rules.html[filter] contains a list of strings. It can be used by one or more jobs. Specifically, filters are referenced in the `custom_rules` property of <>. -==== Path Parameters +[[ml-put-filter-path-parms]] +==== {api-path-parms-title} -`filter_id` (required):: +`` (Required):: (string) Identifier for the filter. +[[ml-put-filter-request-body]] +==== {api-request-body-title} -==== Request Body - -`description`:: +`description` (Optional):: (string) A description of the filter. -`items`:: +`items` (Required):: (array of strings) The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-put-filter-example]] +==== {api-examples-title} The following example creates the `safe_domains` filter: diff --git a/docs/reference/ml/apis/put-job.asciidoc b/docs/reference/ml/apis/put-job.asciidoc index e3d80c276dc55..c60de488180d9 100644 --- a/docs/reference/ml/apis/put-job.asciidoc +++ b/docs/reference/ml/apis/put-job.asciidoc @@ -8,84 +8,87 @@ Instantiates a job. 
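A minimal sketch of a filter with the optional `description` and the wildcard syntax in `items`; the domain values are illustrative:

[source,js]
----
PUT _ml/filters/safe_domains
{
  "description": "A list of safe domains",
  "items": ["*.google.com", "wikipedia.org"]
}
----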
-==== Request +[[ml-put-job-request]] +==== {api-request-title} `PUT _ml/anomaly_detectors/` -===== Description +[[ml-put-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-put-job-desc]] +==== {api-description-title} IMPORTANT: You must use {kib} or this API to create a {ml} job. Do not put a job directly to the `.ml-config` index using the Elasticsearch index API. If {es} {security-features} are enabled, do not give users `write` privileges on the `.ml-config` index. +[[ml-put-job-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`job_id` (required):: +`` (Required):: (string) Identifier for the job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. +[[ml-put-job-request-body]] +==== {api-request-body-title} -==== Request Body - -`analysis_config`:: +`analysis_config` (Required):: (object) The analysis configuration, which specifies how to analyze the data. See <>. -`analysis_limits`:: +`analysis_limits` (Optional):: (object) Specifies runtime limits for the job. See <>. -`background_persist_interval`:: +`background_persist_interval` (Optional):: (time units) Advanced configuration option. The time between each periodic persistence of the model. See <>. -`custom_settings`:: +`custom_settings` (Optional):: (object) Advanced configuration option. Contains custom meta data about the job. See <>. -`data_description` (required):: +`data_description` (Required):: (object) Describes the format of the input data. This object is required, but it can be empty (`{}`). See <>. -`description`:: +`description` (Optional):: (string) A description of the job. -`groups`:: +`groups` (Optional):: (array of strings) A list of job groups. See <>. -`model_plot_config`:: +`model_plot_config` (Optional):: (object) Advanced configuration option. Specifies to store model information along with the results. This adds overhead to the performance of the system and is not feasible for jobs with many entities, see <>. -`model_snapshot_retention_days`:: +`model_snapshot_retention_days` (Optional):: (long) The time in days that model snapshots are retained for the job. Older snapshots are deleted. The default value is `1`, which means snapshots are retained for one day (twenty-four hours). -`renormalization_window_days`:: +`renormalization_window_days` (Optional):: (long) Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. See <>. -`results_index_name`:: +`results_index_name` (Optional):: (string) A text string that affects the name of the {ml} results index. The default value is `shared`, which generates an index named `.ml-anomalies-shared`. -`results_retention_days`:: +`results_retention_days` (Optional):: (long) Advanced configuration option. The number of days for which job results are retained. See <>. -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. 
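A minimal sketch of a job creation request. The contents of `analysis_config` and `data_description` shown here (a `bucket_span`, a single `sum` detector, and a `time_field`) are illustrative placeholders; the full set of fields is defined in the analysis configuration and data description resources rather than on this page:

[source,js]
----
PUT _ml/anomaly_detectors/total-requests
{
  "description": "Total sum of requests",
  "analysis_config": {
    "bucket_span": "10m",
    "detectors": [
      {
        "detector_description": "Sum of total",
        "function": "sum",
        "field_name": "total"
      }
    ]
  },
  "data_description": {
    "time_field": "timestamp"
  }
}
----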
- - -==== Examples +[[ml-put-job-example]] +==== {api-examples-title} The following example creates the `total-requests` job: diff --git a/docs/reference/ml/apis/revert-snapshot.asciidoc b/docs/reference/ml/apis/revert-snapshot.asciidoc index b560f7b041206..86d3d4c14a93c 100644 --- a/docs/reference/ml/apis/revert-snapshot.asciidoc +++ b/docs/reference/ml/apis/revert-snapshot.asciidoc @@ -8,12 +8,20 @@ Reverts to a specific snapshot. -==== Request +[[ml-revert-snapshot-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//model_snapshots//_revert` +[[ml-revert-snapshot-prereqs]] +==== {api-prereq-title} -==== Description +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-revert-snapshot-desc]] +==== {api-description-title} The {ml} feature in {xpack} reacts quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models @@ -25,18 +33,19 @@ Friday or a critical system failure. IMPORTANT: Before you revert to a saved snapshot, you must close the job. +[[ml-revert-snapshot-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`job_id` (required):: - (string) Identifier for the job +`` (Required):: + (string) Identifier for the job. -`snapshot_id` (required):: - (string) Identifier for the model snapshot +`` (Required):: + (string) Identifier for the model snapshot. -==== Request Body +[[ml-revert-snapshot-request-body]] +==== {api-request-body-title} -`delete_intervening_results`:: +`delete_intervening_results` (Optional):: (boolean) If true, deletes the results in the time period between the latest results and the time of the reverted snapshot. It also resets the model to accept records for this time period. The default value is false. @@ -45,15 +54,8 @@ NOTE: If you choose not to delete intervening results when reverting a snapshot, the job will not accept input data that is older than the current time. If you want to resend data, then delete the intervening results. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-revert-snapshot-example]] +==== {api-examples-title} The following example reverts to the `1491856080` snapshot for the `it_ops_new_kpi` job: diff --git a/docs/reference/ml/apis/set-upgrade-mode.asciidoc b/docs/reference/ml/apis/set-upgrade-mode.asciidoc index 5434d70d4e61e..6a00656430c66 100644 --- a/docs/reference/ml/apis/set-upgrade-mode.asciidoc +++ b/docs/reference/ml/apis/set-upgrade-mode.asciidoc @@ -9,7 +9,8 @@ Sets a cluster wide upgrade_mode setting that prepares {ml} indices for an upgrade. -==== Request +[[ml-set-upgrade-mode-request]] +==== {api-request-title} ////////////////////////// [source,js] @@ -25,7 +26,15 @@ POST /_ml/set_upgrade_mode?enabled=false&timeout=10m `POST _ml/set_upgrade_mode` -==== Description +[[ml-set-upgrade-mode-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-set-upgrade-mode-desc]] +==== {api-description-title} When upgrading your cluster, in some circumstances you must restart your nodes and reindex your {ml} indices. In those circumstances, there must be no {ml} jobs running. 
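A sketch of reverting to a snapshot while also deleting the intervening results; the job and snapshot identifiers are illustrative:

[source,js]
----
POST _ml/anomaly_detectors/it_ops_new_kpi/model_snapshots/1491856080/_revert
{
  "delete_intervening_results": true
}
----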
@@ -37,7 +46,6 @@ though stopping jobs is not a requirement in that case. For more information, see {stack-ref}/upgrading-elastic-stack.html[Upgrading the {stack}]. - When `enabled=true` this API temporarily halts all job and {dfeed} tasks and prohibits new job and {dfeed} tasks from starting. @@ -50,23 +58,18 @@ You can see the current value for the `upgrade_mode` setting by using the IMPORTANT: No new {ml} jobs can be opened while the `upgrade_mode` setting is `true`. -==== Query Parameters +[[ml-set-upgrade-mode-query-parms]] +==== {api-query-parms-title} -`enabled`:: +`enabled` (Optional):: (boolean) When `true`, this enables `upgrade_mode`. Defaults to `false` -`timeout`:: +`timeout` (Optional):: (time) The time to wait for the request to be completed. The default value is 30 seconds. -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. - - -==== Examples +[[ml-set-upgrade-mode-example]] +==== {api-examples-title} The following example enables `upgrade_mode` for the cluster: diff --git a/docs/reference/ml/apis/start-datafeed.asciidoc b/docs/reference/ml/apis/start-datafeed.asciidoc index aee237b72c837..05cf0766e9522 100644 --- a/docs/reference/ml/apis/start-datafeed.asciidoc +++ b/docs/reference/ml/apis/start-datafeed.asciidoc @@ -12,11 +12,20 @@ Starts one or more {dfeeds}. A {dfeed} must be started in order to retrieve data from {es}. A {dfeed} can be started and stopped multiple times throughout its lifecycle. -==== Request +[[ml-start-datafeed-request]] +==== {api-request-title} `POST _ml/datafeeds//_start` -==== Description +[[ml-start-datafeed-prereqs]] +==== {api-prereq-title} + +* If {es} {security-features} are enabled, you must have `manage_ml` or `manage` +cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-start-datafeed-desc]] +==== {api-description-title} NOTE: Before you can start a {dfeed}, the job must be open. Otherwise, an error occurs. @@ -56,42 +65,33 @@ If you specify a `start` value that is earlier than the timestamp of the latest processed record, the {dfeed} continues from 1 millisecond after the timestamp of the latest processed record. +IMPORTANT: When {es} {security-features} are enabled, your {dfeed} remembers +which roles the last user to create or update it had at the time of +creation/update and runs the query using those same roles. -==== Path Parameters +[[ml-start-datafeed-path-parms]] +==== {api-path-parms-title} -`feed_id` (required):: -(string) Identifier for the {dfeed} +`` (Required):: + (string) Identifier for the {dfeed}. -==== Request Body +[[ml-start-datafeed-request-body]] +==== {api-request-body-title} -`end`:: +`end` (Optional):: (string) The time that the {dfeed} should end. This value is exclusive. The default value is an empty string. -`start`:: +`start` (Optional):: (string) The time that the {dfeed} should begin. This value is inclusive. The default value is an empty string. -`timeout`:: +`timeout` (Optional):: (time) Controls the amount of time to wait until a {dfeed} starts. The default value is 20 seconds. - -==== Authorization - -If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` -cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. 
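To make the `start` and `timeout` request body parameters above concrete, a hedged sketch of a start {dfeed} request follows; the {dfeed} name and the timestamp are illustrative placeholders only.

[source,js]
----
POST _ml/datafeeds/datafeed-example/_start
{
  "start" : "2019-04-07T18:22:16Z",
  "timeout" : "30s"
}
----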
- - -==== Security integration - -When {es} {security-features} are enabled, your {dfeed} remembers which roles the -last user to create or update it had at the time of creation/update and runs the -query using those same roles. - - -==== Examples +[[ml-start-datafeed-example]] +==== {api-examples-title} The following example starts the `datafeed-it-ops-kpi` {dfeed}: diff --git a/docs/reference/ml/apis/stop-datafeed.asciidoc b/docs/reference/ml/apis/stop-datafeed.asciidoc index 1489137b9db07..bdac8d51fab04 100644 --- a/docs/reference/ml/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/apis/stop-datafeed.asciidoc @@ -10,10 +10,8 @@ Stops one or more {dfeeds}. -A {dfeed} that is stopped ceases to retrieve data from {es}. -A {dfeed} can be started and stopped multiple times throughout its lifecycle. - -==== Request +[[ml-stop-datafeed-request]] +==== {api-request-title} `POST _ml/datafeeds//_stop` + @@ -21,39 +19,42 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle. `POST _ml/datafeeds/_all/_stop` +[[ml-stop-datafeed-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -===== Description +[[ml-stop-datafeed-desc]] +==== {api-description-title} + +A {dfeed} that is stopped ceases to retrieve data from {es}. +A {dfeed} can be started and stopped multiple times throughout its lifecycle. You can stop multiple {dfeeds} in a single API request by using a comma-separated list of {dfeeds} or a wildcard expression. You can close all {dfeeds} by using `_all` or by specifying `*` as the ``. +[[ml-stop-datafeed-path-parms]] +==== {api-path-parms-title} -==== Path Parameters - -`feed_id`:: +`` (Required):: (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. +[[ml-stop-datafeed-request-body]] +==== {api-request-body-title} -==== Request Body - -`force`:: +`force` (Optional):: (boolean) If true, the {dfeed} is stopped forcefully. -`timeout`:: +`timeout` (Optional):: (time) Controls the amount of time to wait until a {dfeed} stops. The default value is 20 seconds. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-stop-datafeed-example]] +==== {api-examples-title} The following example stops the `datafeed-total-requests` {dfeed}: diff --git a/docs/reference/ml/apis/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc index 63878913c7f1a..b57088673d845 100644 --- a/docs/reference/ml/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -10,62 +10,77 @@ Updates certain properties of a {dfeed}. -==== Request +[[ml-update-datafeed-request]] +==== {api-request-title} `POST _ml/datafeeds//_update` -===== Description +[[ml-update-datafeed-prereqs]] +==== {api-prereq-title} -NOTE: If you update the `delayed_data_check_config` property, you must stop and +* If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` +cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-update-datafeed-desc]] +==== {api-description-title} + +If you update the `delayed_data_check_config` property, you must stop and start the {dfeed} for the change to be applied. 
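Because a `delayed_data_check_config` change only takes effect after a restart, the typical sequence is to stop the {dfeed} (using the stop {dfeed} API described above), apply the update, and start it again. A hedged sketch, in which the {dfeed} name and the check window size are placeholder assumptions:

[source,js]
----
POST _ml/datafeeds/datafeed-example/_stop

POST _ml/datafeeds/datafeed-example/_update
{
  "delayed_data_check_config" : {
    "enabled" : true,
    "check_window" : "2h"
  }
}

POST _ml/datafeeds/datafeed-example/_start
----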
-==== Path Parameters +IMPORTANT: When {es} {security-features} are enabled, your {dfeed} remembers +which roles the user who updated it had at the time of update and runs the query +using those same roles. + +[[ml-update-datafeed-path-parms]] +==== {api-path-parms-title} -`feed_id` (required):: - (string) Identifier for the {dfeed} +`` (Required):: + (string) Identifier for the {dfeed}. -==== Request Body +[[ml-update-datafeed-request-body]] +==== {api-request-body-title} The following properties can be updated after the {dfeed} is created: -`aggregations`:: +`aggregations` (Optional):: (object) If set, the {dfeed} performs aggregation searches. For more information, see <>. -`chunking_config`:: +`chunking_config` (Optional):: (object) Specifies how data searches are split into time chunks. See <>. -`delayed_data_check_config`:: +`delayed_data_check_config` (Optional):: (object) Specifies whether the data feed checks for missing data and the size of the window. See <>. -`frequency`:: +`frequency` (Optional):: (time units) The interval at which scheduled queries are made while the {dfeed} runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. -`indices`:: +`indices` (Optional):: (array) An array of index names. Wildcards are supported. For example: `["it_ops_metrics", "server*"]`. -`job_id`:: +`job_id` (Optional):: (string) A numerical character string that uniquely identifies the job. -`query`:: +`query` (Optional):: (object) The {es} query domain-specific language (DSL). This value corresponds to the query object in an {es} search POST body. All the options that are supported by {es} can be used, as this object is passed verbatim to {es}. By default, this property has the following value: `{"match_all": {"boost": 1}}`. -`query_delay`:: +`query_delay` (Optional):: (time units) The number of seconds behind real-time that data is queried. For example, if data from 10:04 a.m. might not be searchable in {es} until 10:06 a.m., set this property to 120 seconds. The default value is `60s`. -`script_fields`:: +`script_fields` (Optional):: (object) Specifies scripts that evaluate custom expressions and returns script fields to the {dfeed}. The <> in a job can contain @@ -73,29 +88,15 @@ The following properties can be updated after the {dfeed} is created: For more information, see {ref}/search-request-script-fields.html[Script Fields]. -`scroll_size`:: +`scroll_size` (Optional):: (unsigned integer) The `size` parameter that is used in {es} searches. The default value is `1000`. For more information about these properties, see <>. - -==== Authorization - -If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` -cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. - - -==== Security Integration - -When {es} {security-features} are enabled, your {dfeed} remembers which roles the -user who updated it had at the time of update and runs the query using those -same roles. 
- - -==== Examples +[[ml-update-datafeed-example]] +==== {api-examples-title} The following example updates the query for the `datafeed-total-requests` {dfeed} so that only log entries of error level are analyzed: diff --git a/docs/reference/ml/apis/update-filter.asciidoc b/docs/reference/ml/apis/update-filter.asciidoc index 45c294a0b8bc6..df8f3056d12cc 100644 --- a/docs/reference/ml/apis/update-filter.asciidoc +++ b/docs/reference/ml/apis/update-filter.asciidoc @@ -8,40 +8,41 @@ Updates the description of a filter, adds items, or removes items. -==== Request +[[ml-update-filter-request]] +==== {api-request-title} `POST _ml/filters//_update` -//==== Description +[[ml-update-filter-prereqs]] +==== {api-prereq-title} -==== Path Parameters +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -`filter_id` (required):: - (string) Identifier for the filter. +[[ml-update-filter-path-parms]] +==== {api-path-parms-title} +`` (Required):: + (string) Identifier for the filter. -==== Request Body +[[ml-update-filter-request-body]] +==== {api-request-body-title} -`description`:: +`description` (Optional):: (string) A description for the filter. See <>. -`add_items`:: +`add_items` (Optional):: (array of strings) The items to add to the filter. -`remove_items`:: +`remove_items` (Optional):: (array of strings) The items to remove from the filter. +[[ml-update-filter-example]] +==== {api-examples-title} -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples - -You can change the description, add and remove items to the `safe_domains` filter as follows: +You can change the description, add and remove items to the `safe_domains` +filter as follows: [source,js] -------------------------------------------------- diff --git a/docs/reference/ml/apis/update-job.asciidoc b/docs/reference/ml/apis/update-job.asciidoc index 3382e7fe34675..e78bda613d801 100644 --- a/docs/reference/ml/apis/update-job.asciidoc +++ b/docs/reference/ml/apis/update-job.asciidoc @@ -8,17 +8,27 @@ Updates certain properties of a job. -==== Request +[[ml-update-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_update` +[[ml-update-job-prereqs]] +==== {api-prereq-title} -==== Path Parameters +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -`job_id` (required):: - (string) Identifier for the job -==== Request Body +[[ml-update-job-path-parms]] +==== {api-path-parms-title} + +`` (Required):: + (string) Identifier for the job. + +[[ml-update-job-request-body]] +==== {api-request-body-title} The following properties can be updated after the job is created: @@ -86,14 +96,8 @@ A detector update object has the following properties: No other detector property can be updated. -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. 
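As a quick sketch of the update job API, a request that changes only a couple of the updatable properties might look like the following; the job name and values are placeholder assumptions.

[source,js]
----
POST _ml/anomaly_detectors/example-job/_update
{
  "description" : "An updated job description",
  "model_snapshot_retention_days" : 7
}
----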
- - -==== Examples +[[ml-update-job-example]] +==== {api-examples-title} The following example updates the `total-requests` job: diff --git a/docs/reference/ml/apis/update-snapshot.asciidoc b/docs/reference/ml/apis/update-snapshot.asciidoc index ffd38f590b1e2..1fe2ed5384bc0 100644 --- a/docs/reference/ml/apis/update-snapshot.asciidoc +++ b/docs/reference/ml/apis/update-snapshot.asciidoc @@ -8,45 +8,45 @@ Updates certain properties of a snapshot. -==== Request +[[ml-update-snapshot-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//model_snapshots//_update` +[[ml-update-snapshot-prereqs]] +==== {api-prereq-title} -//==== Description +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. -==== Path Parameters -`job_id` (required):: - (string) Identifier for the job +[[ml-update-snapshot-path-parms]] +==== {api-path-parms-title} -`snapshot_id` (required):: - (string) Identifier for the model snapshot +`` (Required):: + (string) Identifier for the job. -==== Request Body +`` (Required):: + (string) Identifier for the model snapshot. + +[[ml-update-snapshot-request-body]] +==== {api-request-body-title} The following properties can be updated after the model snapshot is created: -`description`:: - (string) An optional description of the model snapshot. For example, +`description` (Optional):: + (string) A description of the model snapshot. For example, "Before black friday". -`retain`:: +`retain` (Optional):: (boolean) If true, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. Note that this snapshot will still be deleted when the job is deleted. The default value is false. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. -//<>. - - -==== Examples +[[ml-update-snapshot-example]] +==== {api-examples-title} The following example updates the snapshot identified as `1491852978`: diff --git a/docs/reference/ml/apis/validate-detector.asciidoc b/docs/reference/ml/apis/validate-detector.asciidoc index 0f9fe9902e36e..2e5896b95cc93 100644 --- a/docs/reference/ml/apis/validate-detector.asciidoc +++ b/docs/reference/ml/apis/validate-detector.asciidoc @@ -8,30 +8,32 @@ Validates detector configuration information. -==== Request +[[ml-valid-detector-request]] +==== {api-request-title} `POST _ml/anomaly_detectors/_validate/detector` -==== Description +[[ml-valid-detector-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-valid-detector-desc]] +==== {api-description-title} The validate detectors API enables you validate the detector configuration before you create a job. - -==== Request Body +[[ml-valid-detector-request-body]] +==== {api-request-body-title} For a list of the properties that you can specify in the body of this API, see <>. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. 
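For the validate detectors API above, the request body is a single detector configuration object. A hedged sketch follows; the `responsetime` and `airline` field names are assumptions for illustration only.

[source,js]
----
POST _ml/anomaly_detectors/_validate/detector
{
  "function" : "metric",
  "field_name" : "responsetime",
  "by_field_name" : "airline"
}
----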
- - -==== Examples +[[ml-valid-detector-example]] +==== {api-examples-title} The following example validates detector configuration information: diff --git a/docs/reference/ml/apis/validate-job.asciidoc b/docs/reference/ml/apis/validate-job.asciidoc index 5fbfb62dd28a6..faa7cab2f3928 100644 --- a/docs/reference/ml/apis/validate-job.asciidoc +++ b/docs/reference/ml/apis/validate-job.asciidoc @@ -8,30 +8,32 @@ Validates job configuration information. -==== Request +[[ml-valid-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors/_validate` -==== Description +[[ml-valid-job-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have `manage_ml` or +`manage` cluster privileges to use this API. See +{stack-ov}/security-privileges.html[Security privileges]. + +[[ml-valid-job-desc]] +==== {api-description-title} The validate jobs API enables you validate the job configuration before you create the job. - -==== Request Body +[[ml-valid-job-request-body]] +==== {api-request-body-title} For a list of the properties that you can specify in the body of this API, see <>. - -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - - -==== Examples +[[ml-valid-job-example]] +==== {api-examples-title} The following example validates job configuration information: diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index 7f47031988fd0..a25ab22260d3d 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -108,18 +108,20 @@ To create a dedicated master-eligible node in the {default-dist}, set: [source,yaml]
-------------------
node.master: true <1>
-node.data: false <2>
-node.ingest: false <3>
-node.ml: false <4>
-xpack.ml.enabled: true <5>
-cluster.remote.connect: false <6>
+node.voting_only: false <2>
+node.data: false <3>
+node.ingest: false <4>
+node.ml: false <5>
+xpack.ml.enabled: true <6>
+cluster.remote.connect: false <7>
-------------------
<1> The `node.master` role is enabled by default.
-<2> Disable the `node.data` role (enabled by default).
-<3> Disable the `node.ingest` role (enabled by default).
-<4> Disable the `node.ml` role (enabled by default).
-<5> The `xpack.ml.enabled` setting is enabled by default.
-<6> Disable {ccs} (enabled by default).
+<2> The `node.voting_only` role is disabled by default.
+<3> Disable the `node.data` role (enabled by default).
+<4> Disable the `node.ingest` role (enabled by default).
+<5> Disable the `node.ml` role (enabled by default).
+<6> The `xpack.ml.enabled` setting is enabled by default.
+<7> Disable {ccs} (enabled by default).
 To create a dedicated master-eligible node in the {oss-dist}, set: @@ -176,6 +178,30 @@ reasonably fast persistent storage and a reliable and low-latency network connection to the rest of the cluster, since they are on the critical path for <>. +Voting-only master-eligible nodes may also fill other roles in your cluster. +For instance, a node may be both a data node and a voting-only master-eligible +node. A _dedicated_ voting-only master-eligible node is a voting-only +master-eligible node that fills no other roles in the cluster.
To create a +dedicated voting-only master-eligible node in the {default-dist}, set: + +[source,yaml] +------------------- +node.master: true <1> +node.voting_only: true <2> +node.data: false <3> +node.ingest: false <4> +node.ml: false <5> +xpack.ml.enabled: true <6> +cluster.remote.connect: false <7> +------------------- +<1> The `node.master` role is enabled by default. +<2> Enable the `node.voting_only` role (disabled by default). +<3> Disable the `node.data` role (enabled by default). +<4> Disable the `node.ingest` role (enabled by default). +<5> Disable the `node.ml` role (enabled by default). +<6> The `xpack.ml.enabled` setting is enabled by default. +<7> Disable {ccs} (enabled by default). + [float] [[data-node]] === Data Node @@ -192,16 +218,18 @@ To create a dedicated data node in the {default-dist}, set: [source,yaml] ------------------- node.master: false <1> -node.data: true <2> -node.ingest: false <3> -node.ml: false <4> -cluster.remote.connect: false <5> +node.voting_only: false <2> +node.data: true <3> +node.ingest: false <4> +node.ml: false <5> +cluster.remote.connect: false <6> ------------------- <1> Disable the `node.master` role (enabled by default). -<2> The `node.data` role is enabled by default. -<3> Disable the `node.ingest` role (enabled by default). -<4> Disable the `node.ml` role (enabled by default). -<5> Disable {ccs} (enabled by default). +<2> The `node.voting_only` role is disabled by default. +<3> The `node.data` role is enabled by default. +<4> Disable the `node.ingest` role (enabled by default). +<5> Disable the `node.ml` role (enabled by default). +<6> Disable {ccs} (enabled by default). To create a dedicated data node in the {oss-dist}, set: [source,yaml] @@ -230,16 +258,18 @@ To create a dedicated ingest node in the {default-dist}, set: [source,yaml] ------------------- node.master: false <1> -node.data: false <2> -node.ingest: true <3> -node.ml: false <4> -cluster.remote.connect: false <5> +node.voting_only: false <2> +node.data: false <3> +node.ingest: true <4> +node.ml: false <5> +cluster.remote.connect: false <6> ------------------- <1> Disable the `node.master` role (enabled by default). -<2> Disable the `node.data` role (enabled by default). -<3> The `node.ingest` role is enabled by default. -<4> Disable the `node.ml` role (enabled by default). -<5> Disable {ccs} (enabled by default). +<2> The `node.voting_only` role is disabled by default. +<3> Disable the `node.data` role (enabled by default). +<4> The `node.ingest` role is enabled by default. +<5> Disable the `node.ml` role (enabled by default). +<6> Disable {ccs} (enabled by default). To create a dedicated ingest node in the {oss-dist}, set: @@ -281,16 +311,18 @@ To create a dedicated coordinating node in the {default-dist}, set: [source,yaml] ------------------- node.master: false <1> -node.data: false <2> -node.ingest: false <3> -node.ml: false <4> -cluster.remote.connect: false <5> +node.voting_only: false <2> +node.data: false <3> +node.ingest: false <4> +node.ml: false <5> +cluster.remote.connect: false <6> ------------------- <1> Disable the `node.master` role (enabled by default). -<2> Disable the `node.data` role (enabled by default). -<3> Disable the `node.ingest` role (enabled by default). -<4> Disable the `node.ml` role (enabled by default). -<5> Disable {ccs} (enabled by default). +<2> The `node.voting_only` role is disabled by default. +<3> Disable the `node.data` role (enabled by default). +<4> Disable the `node.ingest` role (enabled by default). 
+<5> Disable the `node.ml` role (enabled by default). +<6> Disable {ccs} (enabled by default). To create a dedicated coordinating node in the {oss-dist}, set: @@ -325,18 +357,20 @@ To create a dedicated {ml} node in the {default-dist}, set: [source,yaml] ------------------- node.master: false <1> -node.data: false <2> -node.ingest: false <3> -node.ml: true <4> -xpack.ml.enabled: true <5> -cluster.remote.connect: false <6> +node.voting_only: false <2> +node.data: false <3> +node.ingest: false <4> +node.ml: true <5> +xpack.ml.enabled: true <6> +cluster.remote.connect: false <7> ------------------- <1> Disable the `node.master` role (enabled by default). -<2> Disable the `node.data` role (enabled by default). -<3> Disable the `node.ingest` role (enabled by default). -<4> The `node.ml` role is enabled by default. -<5> The `xpack.ml.enabled` setting is enabled by default. -<6> Disable {ccs} (enabled by default). +<2> The `node.voting_only` role is disabled by default. +<3> Disable the `node.data` role (enabled by default). +<4> Disable the `node.ingest` role (enabled by default). +<5> The `node.ml` role is enabled by default. +<6> The `xpack.ml.enabled` setting is enabled by default. +<7> Disable {ccs} (enabled by default). [float] [[change-node-role]] diff --git a/docs/reference/query-dsl/boosting-query.asciidoc b/docs/reference/query-dsl/boosting-query.asciidoc index 5cd12ce1f00b7..c57235e71606d 100644 --- a/docs/reference/query-dsl/boosting-query.asciidoc +++ b/docs/reference/query-dsl/boosting-query.asciidoc @@ -1,36 +1,60 @@ [[query-dsl-boosting-query]] === Boosting Query -The `boosting` query can be used to effectively demote results that -match a given query. Unlike the "NOT" clause in bool query, this still -selects documents that contain undesirable terms, but reduces their -overall score. +Returns documents matching a `positive` query while reducing the +<> of documents that also match a +`negative` query. -It accepts a `positive` query and a `negative` query. -Only documents that match the `positive` query will be included -in the results list, but documents that also match the `negative` query -will be downgraded by multiplying the original `_score` of the document -with the `negative_boost`. +You can use the `boosting` query to demote certain documents without +excluding them from the search results. + +[[boosting-query-ex-request]] +==== Example request [source,js] --------------------------------------------------- +---- GET /_search { "query": { "boosting" : { "positive" : { "term" : { - "field1" : "value1" + "text" : "apple" } }, "negative" : { "term" : { - "field2" : "value2" + "text" : "pie tart fruit crumble tree" } }, - "negative_boost" : 0.2 + "negative_boost" : 0.5 } } } --------------------------------------------------- +---- // CONSOLE + +[[boosting-top-level-params]] +==== Top-level parameters for `boosting` + +`positive` (Required):: +Query you wish to run. Any returned documents must match this query. + +`negative` (Required):: ++ +-- +Query used to decrease the <> of matching +documents. + +If a returned document matches the `positive` query and this query, the +`boosting` query calculates the final <> +for the document as follows: + +. Take the original relevance score from the `positive` query. +. Multiply the score by the `negative_boost` value. +-- + +`negative_boost` (Required):: +Floating point number between `0` and `1.0` used to decrease the +<> of documents matching the `negative` +query. 
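As a purely illustrative worked example of the scoring steps above: with the `negative_boost` of `0.5` shown in the example request, a document that scores `1.0` on the `positive` query and also matches the `negative` query ends up with a final relevance score of `1.0 * 0.5 = 0.5`; the numbers are placeholders, not values from this change.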
\ No newline at end of file diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc index 3049cb363173a..7353ca137f3e1 100644 --- a/docs/reference/query-dsl/intervals-query.asciidoc +++ b/docs/reference/query-dsl/intervals-query.asciidoc @@ -82,6 +82,53 @@ to search across multiple fields as if they were all the same field; for example you could index the same text into stemmed and unstemmed fields, and search for stemmed tokens near unstemmed ones. +[[intervals-prefix]] +==== `prefix` + +The `prefix` rule finds terms that start with a specified prefix. The prefix will +expand to match at most 128 terms; if there are more matching terms in the index, +then an error will be returned. To avoid this limit, enable the +<> option on the field being searched. + +[horizontal] +`prefix`:: +Match terms starting with this prefix +`analyzer`:: +Which analyzer should be used to normalize the `prefix`. By default, the +search analyzer of the top-level field will be used. +`use_field`:: +If specified, then match intervals from this field rather than the top-level field. +The `prefix` will be normalized using the search analyzer from this field, unless +`analyzer` is specified separately. + +[[intervals-wildcard]] +==== `wildcard` + +The `wildcard` rule finds terms that match a wildcard pattern. The pattern will +expand to match at most 128 terms; if there are more matching terms in the index, +then an error will be returned. + +[horizontal] +`pattern`:: +Find terms matching this pattern ++ +-- +This parameter supports two wildcard operators: + +* `?`, which matches any single character +* `*`, which can match zero or more characters, including an empty one + +WARNING: Avoid beginning patterns with `*` or `?`. This can increase +the iterations needed to find matching terms and slow search performance. +-- +`analyzer`:: +Which analyzer should be used to normalize the `pattern`. By default, the +search analyzer of the top-level field will be used. +`use_field`:: +If specified, then match intervals from this field rather than the top-level field. +The `pattern` will be normalized using the search analyzer from this field, unless +`analyzer` is specified separately. + [[intervals-all_of]] ==== `all_of` diff --git a/docs/reference/query-dsl/range-query.asciidoc b/docs/reference/query-dsl/range-query.asciidoc index 61c46996949f1..27db882fe1dd3 100644 --- a/docs/reference/query-dsl/range-query.asciidoc +++ b/docs/reference/query-dsl/range-query.asciidoc @@ -1,14 +1,16 @@ [[query-dsl-range-query]] === Range Query -Matches documents with fields that have terms within a certain range. -The type of the Lucene query depends on the field type, for `string` -fields, the `TermRangeQuery`, while for number/date fields, the query is -a `NumericRangeQuery`. The following example returns all documents where -`age` is between `10` and `20`: +Returns documents that contain terms within a provided range. + +[[range-query-ex-request]] +==== Example request + +The following search returns documents where the `age` field contains a term +between `10` and `20`. [source,js] --------------------------------------------------- +---- GET _search { "query": { @@ -21,147 +23,209 @@ GET _search } } } --------------------------------------------------- +---- // CONSOLE -The `range` query accepts the following parameters: +[[range-query-top-level-params]] +==== Top-level parameters for `range` + +``:: ++ +-- +Field you wish to search. 
+-- + +[[range-query-field-params]] +==== Parameters for `` + +`gt`:: +Greater than. Optional. + +`gte`:: +Greater than or equal to. Optional. + +`lt`:: +Less than. Optional. + +`lte`:: +Less than or equal to. Optional. + +`format`:: ++ +-- +Date format used to convert `date` values in the query. + +By default, {es} uses the <> provided in the +``'s mapping. This value overrides that mapping format. -[horizontal] -`gte`:: Greater-than or equal to -`gt`:: Greater-than -`lte`:: Less-than or equal to -`lt`:: Less-than -`boost`:: Sets the boost value of the query, defaults to `1.0` +For valid syntax, see <>. Optional. +[WARNING] +==== +If a `format` and `date` value are incomplete, {es} replaces any missing year, +month, or date component with the start of +https://en.wikipedia.org/wiki/Unix_time[Unix time], which is January 1st, 1970. + +For example, if the `format` value is `dd`, {es} converts a `gte` value of `10` +to `1970-01-10T00:00:00.000Z`. +==== + +-- + +[[querying-range-fields]] +`relation`:: ++ +-- +Indicates how the range query matches values for `range` fields. Optional. Valid +values are: + +`INTERSECTS` (Default):: +Matches documents with a range field value that intersects the query's range. + +`CONTAINS`:: +Matches documents with a range field value that entirely contains the query's range. + +`WITHIN`:: +Matches documents with a range field value entirely within the query's range. +-- + +`time_zone`:: ++ +-- +https://en.wikipedia.org/wiki/List_of_UTC_time_offsets[Coordinated Universal +Time (UTC) offset] or +https://en.wikipedia.org/wiki/List_of_tz_database_time_zones[IANA time zone] +used to convert `date` values in the query to UTC. Optional. + +Valid values are ISO 8601 UTC offsets, such as `+01:00` or -`08:00`, and IANA +time zone IDs, such as `America/Los_Angeles`. + +For an example query using the `time_zone` parameter, see +<>. + +[NOTE] +==== +The `time_zone` parameter does **not** affect the <> value +of `now`. `now` is always the current system time in UTC. + +However, the `time_zone` parameter does convert dates calculated using `now` and +<>. For example, the `time_zone` parameter will +convert a value of `now/d`. +==== +-- + +`boost`:: ++ +-- +Floating point number used to decrease or increase the +<> of a query. Default is `1.0`. +Optional. + +You can use the `boost` parameter to adjust relevance scores for searches +containing two or more queries. + +Boost values are relative to the default value of `1.0`. A boost value between +`0` and `1.0` decreases the relevance score. A value greater than `1.0` +increases the relevance score. +-- + +[[range-query-notes]] +==== Notes [[ranges-on-dates]] -==== Ranges on date fields +===== Using the `range` query with `date` fields + +When the `` parameter is a <> field datatype, you can use +<> with the following parameters: -When running `range` queries on fields of type <>, ranges can be -specified using <>: +* `gt` +* `gte` +* `lt` +* `lte` + +For example, the following search returns documents where the `timestamp` field +contains a date between today and yesterday. [source,js] --------------------------------------------------- +---- GET _search { "query": { "range" : { - "date" : { + "timestamp" : { "gte" : "now-1d/d", "lt" : "now/d" } } } } --------------------------------------------------- +---- // CONSOLE -===== Date math and rounding - -When using <> to round dates to the nearest day, month, -hour, etc, the rounded dates depend on whether the ends of the ranges are -inclusive or exclusive. 
-Rounding up moves to the last millisecond of the rounding scope, and rounding -down to the first millisecond of the rounding scope. For example: +[[range-query-date-math-rounding]] +====== Date math and rounding +{es} rounds <> values in parameters as follows: -[horizontal] `gt`:: ++ +-- +Rounds up to the latest millisecond. - Greater than the date rounded up: `2014-11-18||/M` becomes - `2014-11-30T23:59:59.999`, ie excluding the entire month. +For example, `2014-11-18||/M` rounds up to `2014-11-30T23:59:59.999`, excluding +the entire month. +-- `gte`:: ++ +-- +Rounds down to the first millisecond. - Greater than or equal to the date rounded down: `2014-11-18||/M` becomes - `2014-11-01`, ie including the entire month. +For example, `2014-11-18||/M` rounds down to `2014-11-01`, including +the entire month. +-- `lt`:: ++ +-- +Rounds down to the first millisecond. - Less than the date rounded down: `2014-11-18||/M` becomes `2014-11-01`, ie - excluding the entire month. +For example, `2014-11-18||/M` rounds down to `2014-11-01`, excluding +the entire month. +-- `lte`:: ++ +-- +Rounds up to the latest millisecond. - Less than or equal to the date rounded up: `2014-11-18||/M` becomes - `2014-11-30T23:59:59.999`, ie including the entire month. +For example, `2014-11-18||/M` rounds up to `2014-11-30T23:59:59.999`, including +the entire month. +-- -===== Date format in range queries +[[range-query-time-zone]] +===== Example query using `time_zone` parameter -Formatted dates will be parsed using the <> -specified on the <> field by default, but it can be overridden by -passing the `format` parameter to the `range` query: +You can use the `time_zone` parameter to convert `date` values to UTC using a +UTC offset. For example: [source,js] --------------------------------------------------- -GET _search -{ - "query": { - "range" : { - "born" : { - "gte": "01/01/2012", - "lte": "2013", - "format": "dd/MM/yyyy||yyyy" - } - } - } -} --------------------------------------------------- -// CONSOLE - -Note that if the date misses some of the year, month and day coordinates, the -missing parts are filled with the start of -https://en.wikipedia.org/wiki/Unix_time[unix time], which is January 1st, 1970. -This means, that when e.g. specifying `dd` as the format, a value like `"gte" : 10` -will translate to `1970-01-10T00:00:00.000Z`. - -===== Time zone in range queries - -Dates can be converted from another timezone to UTC either by specifying the -time zone in the date value itself (if the <> -accepts it), or it can be specified as the `time_zone` parameter: - -[source,js] --------------------------------------------------- +---- GET _search { "query": { "range" : { "timestamp" : { - "gte": "2015-01-01 00:00:00", <1> - "lte": "now", <2> - "time_zone": "+01:00" + "time_zone": "+01:00", <1> + "gte": "2015-01-01 00:00:00", <2> + "lte": "now" <3> } } } } --------------------------------------------------- +---- // CONSOLE -<1> This date will be converted to `2014-12-31T23:00:00 UTC`. -<2> `now` is not affected by the `time_zone` parameter, its always the current system time (in UTC). -However, when using <> (e.g. down to the nearest day using `now/d`), -the provided `time_zone` will be considered. - - -[[querying-range-fields]] -==== Querying range fields - -`range` queries can be used on fields of type <>, allowing to -match a range specified in the query with a range field value in the document.
-The `relation` parameter controls how these two ranges are matched: - -[horizontal] -`WITHIN`:: - - Matches documents who's range field is entirely within the query's range. - -`CONTAINS`:: - - Matches documents who's range field entirely contains the query's range. - -`INTERSECTS`:: - - Matches documents who's range field intersects the query's range. - This is the default value when querying range fields. - -For examples, see <> mapping type. +<1> Indicates that `date` values use a UTC offset of `+01:00`. +<2> With a UTC offset of `+01:00`, {es} converts this date to +`2014-12-31T23:00:00 UTC`. +<3> The `time_zone` parameter does not affect the `now` value. \ No newline at end of file diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index 42e0ec083d560..401d323f6fff4 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -199,8 +199,7 @@ a vector function is executed, 0 is returned as a result for this document. NOTE: If a document's dense vector field has a number of dimensions -different from the query's vector, 0 is used for missing dimensions -in the calculations of vector functions. +different from the query's vector, an error will be thrown. [[random-score-function]] diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 3ebfb672e205f..0f097e494bfda 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -1,121 +1,231 @@ [[query-dsl-terms-set-query]] === Terms Set Query -Returns any documents that match with at least one or more of the -provided terms. The terms are not analyzed and thus must match exactly. -The number of terms that must match varies per document and is either -controlled by a minimum should match field or computed per document in -a minimum should match script. +Returns documents that contain a minimum number of *exact* terms in a provided +field. -The field that controls the number of required terms that must match must -be a number field: +The `terms_set` query is the same as the <>, except you can define the number of matching terms required to +return a document. For example: + +* A field, `programming_languages`, contains a list of known programming +languages, such as `c++`, `java`, or `php` for job candidates. You can use the +`terms_set` query to return documents that match at least two of these +languages. + +* A field, `permissions`, contains a list of possible user permissions for an +application. You can use the `terms_set` query to return documents that +match a subset of these permissions. + +[[terms-set-query-ex-request]] +==== Example request + +[[terms-set-query-ex-request-index-setup]] +===== Index setup +In most cases, you'll need to include a <> field mapping in +your index to use the `terms_set` query. This numeric field contains the +number of matching terms required to return a document. + +To see how you can set up an index for the `terms_set` query, try the +following example. + +. Create an index, `job-candidates`, with the following field mappings: ++ +-- + +* `name`, a <> field. This field contains the name of the +job candidate. + +* `programming_languages`, a <> field. This field contains +programming languages known by the job candidate. + +* `required_matches`, a <> `long` field. This field contains +the number of matching terms required to return a document. 
[source,js] --------------------------------------------------- -PUT /my-index +---- +PUT /job-candidates { "mappings": { "properties": { + "name": { + "type": "keyword" + }, + "programming_languages": { + "type": "keyword" + }, "required_matches": { "type": "long" } } } } +---- +// CONSOLE +// TESTSETUP + +-- -PUT /my-index/_doc/1?refresh +. Index a document with an ID of `1` and the following values: ++ +-- + +* `Jane Smith` in the `name` field. + +* `["c++", "java"]` in the `programming_languages` field. + +* `2` in the `required_matches` field. + +Include the `?refresh` parameter so the document is immediately available for +search. + +[source,js] +---- +PUT /job-candidates/_doc/1?refresh { - "codes": ["ghi", "jkl"], + "name": "Jane Smith", + "programming_languages": ["c++", "java"], "required_matches": 2 } +---- +// CONSOLE + +-- + +. Index another document with an ID of `2` and the following values: ++ +-- + +* `Jason Response` in the `name` field. + +* `["java", "php"]` in the `programming_languages` field. + +* `2` in the `required_matches` field. -PUT /my-index/_doc/2?refresh +[source,js] +---- +PUT /job-candidates/_doc/2?refresh { - "codes": ["def", "ghi"], + "name": "Jason Response", + "programming_languages": ["java", "php"], "required_matches": 2 } --------------------------------------------------- +---- // CONSOLE -// TESTSETUP -An example that uses the minimum should match field: +-- + +You can now use the `required_matches` field value as the number of +matching terms required to return a document in the `terms_set` query. + +[[terms-set-query-ex-request-query]] +===== Example query + +The following search returns documents where the `programming_languages` field +contains at least two of the following terms: + +* `c++` +* `java` +* `php` + +The `minimum_should_match_field` is `required_matches`. This means the +number of matching terms required is `2`, the value of the `required_matches` +field. [source,js] --------------------------------------------------- -GET /my-index/_search +---- +GET /job-candidates/_search { "query": { "terms_set": { - "codes" : { - "terms" : ["abc", "def", "ghi"], + "programming_languages": { + "terms": ["c++", "java", "php"], "minimum_should_match_field": "required_matches" } } } } --------------------------------------------------- +---- // CONSOLE -Response: +[[terms-set-top-level-params]] +==== Top-level parameters for `terms_set` -[source,js] --------------------------------------------------- -{ - "took": 13, - "timed_out": false, - "_shards": { - "total": 1, - "successful": 1, - "skipped" : 0, - "failed": 0 - }, - "hits": { - "total" : { - "value": 1, - "relation": "eq" - }, - "max_score": 0.87546873, - "hits": [ - { - "_index": "my-index", - "_type": "_doc", - "_id": "2", - "_score": 0.87546873, - "_source": { - "codes": ["def", "ghi"], - "required_matches": 2 - } - } - ] - } -} --------------------------------------------------- -// TESTRESPONSE[s/"took": 13,/"took": "$body.took",/] +``:: +Field you wish to search. + +[[terms-set-field-params]] +==== Parameters for `` -Scripts can also be used to control how many terms are required to match -in a more dynamic way. For example a create date or a popularity field -can be used as basis for the number of required terms to match. +`terms`:: ++ +-- +Array of terms you wish to find in the provided ``. To return a document, +a required number of terms must exactly match the field values, including +whitespace and capitalization. 
-Also the `params.num_terms` parameter is available in the script to indicate the -number of terms that have been specified. +The required number of matching terms is defined in the +`minimum_should_match_field` or `minimum_should_match_script` parameter. +-- -An example that always limits the number of required terms to match to never -become larger than the number of terms specified: +`minimum_should_match_field`:: +<> field containing the number of matching terms +required to return a document. + +`minimum_should_match_script`:: ++ +-- +Custom script containing the number of matching terms required to return a +document. + +For parameters and valid values, see <>. + +For an example query using the `minimum_should_match_script` parameter, see +<>. +-- + +[[terms-set-query-notes]] +==== Notes + +[[terms-set-query-script]] +===== How to use the `minimum_should_match_script` parameter +You can use `minimum_should_match_script` to define the required number of +matching terms using a script. This is useful if you need to set the number of +required terms dynamically. + +[[terms-set-query-script-ex]] +====== Example query using `minimum_should_match_script` + +The following search returns documents where the `programming_languages` field +contains at least two of the following terms: + +* `c++` +* `java` +* `php` + +The `source` parameter of this query indicates: + +* The required number of terms to match cannot exceed `params.num_terms`, the +number of terms provided in the `terms` field. +* The required number of terms to match is `2`, the value of the +`required_matches` field. [source,js] --------------------------------------------------- -GET /my-index/_search +---- +GET /job-candidates/_search { "query": { "terms_set": { - "codes" : { - "terms" : ["abc", "def", "ghi"], + "programming_languages": { + "terms": ["c++", "java", "php"], "minimum_should_match_script": { "source": "Math.min(params.num_terms, doc['required_matches'].value)" - } + }, + "boost": 1.0 } } } } --------------------------------------------------- -// CONSOLE +---- +// CONSOLE \ No newline at end of file diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 579378b5e28fd..4d15ee7e25503 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -615,4 +615,9 @@ the steps needed to migrate. The `common` terms query is deprecated. Use the <> instead. The `match` query skips blocks of documents efficiently, -without any configuration, if the total number of hits is not tracked. \ No newline at end of file +without any configuration, if the total number of hits is not tracked. + +[role="exclude",id="xpack-api"] +=== X-Pack APIs + +{es} {xpack} APIs are now documented in <>. \ No newline at end of file diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index a5b93747dfa1a..9f70c2a3cef65 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -1,17 +1,21 @@ -[role="xpack"] -[[xpack-api]] -= {xpack} APIs +[[rest-apis]] += REST APIs [partintro] -- -{xpack} exposes REST APIs that are used by the UI components and can be called -directly to configure and access {xpack} features. +{es} exposes REST APIs that are used by the UI components and can be called +directly to configure and access {es} features. + +[NOTE] +We are working on including more {es} APIs in this section. Some content might +not be included yet. 
* <> * <> * <> * <> * <>, <> +* <> * <> * <> * <> @@ -35,4 +39,5 @@ include::{es-repo-dir}/rollup/rollup-api.asciidoc[] include::{xes-repo-dir}/rest-api/security.asciidoc[] include::{es-repo-dir}/indices/apis/unfreeze.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] +include::{es-repo-dir}/indices/apis/reload-analyzers.asciidoc[] include::defs.asciidoc[] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index ad442fc0b99de..adbf5f01979a0 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -71,6 +71,10 @@ Example response: "available" : true, "enabled" : true }, + "flattened" : { + "available" : true, + "enabled" : true + }, "graph" : { "available" : true, "enabled" : true diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index c12ec3a679a72..ccbc3da6e063b 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -252,7 +252,7 @@ field support has a `nested` sort option with the following properties: A filter that the inner objects inside the nested path should match with in order for its field values to be taken into account by sorting. Common case is to repeat the query / filter inside the - nested filter or query. By default no `nested_filter` is active. + nested filter or query. By default no `filter` is active. `max_children`:: The maximum number of children to consider per root document when picking the sort value. Defaults to unlimited. @@ -260,14 +260,8 @@ field support has a `nested` sort option with the following properties: Same as top-level `nested` but applies to another nested path within the current nested object. -[WARNING] -.Nested sort options before Elasticsearch 6.1 -============================================ - -The `nested_path` and `nested_filter` options have been deprecated in -favor of the options documented above. - -============================================ +NOTE: Elasticsearch will throw an error if a nested field is defined in a sort without +a `nested` context. ===== Nested sorting examples @@ -300,7 +294,7 @@ POST /_search // CONSOLE In the below example `parent` and `child` fields are of type `nested`. -The `nested_path` needs to be specified at each level; otherwise, Elasticsearch doesn't know on what nested level sort values need to be captured. +The `nested.path` needs to be specified at each level; otherwise, Elasticsearch doesn't know on what nested level sort values need to be captured. [source,js] -------------------------------------------------- @@ -374,7 +368,7 @@ GET /_search // CONSOLE NOTE: If a nested inner object doesn't match with -the `nested_filter` then a missing value is used. +the `nested.filter` then a missing value is used. 
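To tie the `nested` sort options above together, a hedged example of sorting on a field inside a nested object, with a `filter` applied to the inner documents, might look like the sketch below; the `offer` path and the `offer.price`/`offer.color` field names are assumptions for illustration only.

[source,js]
----
POST /_search
{
  "sort" : [
    {
      "offer.price" : {
        "mode" : "avg",
        "order" : "asc",
        "nested" : {
          "path" : "offer",
          "filter" : {
            "term" : { "offer.color" : "blue" }
          }
        }
      }
    }
  ]
}
----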
==== Ignoring Unmapped Fields diff --git a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc index 1d23430e37eec..2190f17e4588f 100644 --- a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc +++ b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc @@ -43,11 +43,13 @@ instances: `.env`: [source,yaml] ---- -CERTS_DIR=/usr/share/elasticsearch/config/certificates <1> -ELASTIC_PASSWORD=PleaseChangeMe <2> +COMPOSE_PROJECT_NAME=es <1> +CERTS_DIR=/usr/share/elasticsearch/config/certificates <2> +ELASTIC_PASSWORD=PleaseChangeMe <3> ---- -<1> The path, inside the Docker image, where certificates are expected to be found. -<2> Initial password for the `elastic` user. +<1> Use an `es_` prefix for all volumes and networks created by docker-compose. +<2> The path, inside the Docker image, where certificates are expected to be found. +<3> Initial password for the `elastic` user. [[getting-starter-tls-create-certs-composefile]] `create-certs.yml`: @@ -69,21 +71,21 @@ services: image: {docker-image} command: > bash -c ' - if [[ ! -d config/certificates/certs ]]; then - mkdir config/certificates/certs; + yum install -y -q -e 0 unzip; + if [[ ! -f /certs/bundle.zip ]]; then + bin/elasticsearch-certutil cert --silent --pem --in config/certificates/instances.yml -out /certs/bundle.zip; + unzip /certs/bundle.zip -d /certs; <1> fi; - if [[ ! -f /local/certs/bundle.zip ]]; then - bin/elasticsearch-certgen --silent --in config/certificates/instances.yml --out config/certificates/certs/bundle.zip; - unzip config/certificates/certs/bundle.zip -d config/certificates/certs; <1> - fi; - chgrp -R 0 config/certificates/certs + chown -R 1000:0 /certs ' - user: $\{UID:-1000\} + user: "0" working_dir: /usr/share/elasticsearch - volumes: ['.:/usr/share/elasticsearch/config/certificates'] + volumes: ['certs:/certs', '.:/usr/share/elasticsearch/config/certificates'] + +volumes: {"certs"} ---- -<1> The new node certificates and CA certificate+key are placed under the local directory `certs`. +<1> The new node certificates and CA certificate+key are placed in a docker volume `es_certs`. 
endif::[] [[getting-starter-tls-create-docker-compose]] @@ -106,7 +108,7 @@ services: image: {docker-image} environment: - node.name=es01 - - discovery.seed_hosts=es02 + - discovery.seed_hosts=es01,es02 - cluster.initial_master_nodes=es01,es02 - ELASTIC_PASSWORD=$ELASTIC_PASSWORD <1> - "ES_JAVA_OPTS=-Xms512m -Xmx512m" @@ -121,7 +123,7 @@ services: - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt - xpack.security.transport.ssl.certificate=$CERTS_DIR/es01/es01.crt - xpack.security.transport.ssl.key=$CERTS_DIR/es01/es01.key - volumes: ['esdata_01:/usr/share/elasticsearch/data', './certs:$CERTS_DIR'] + volumes: ['data01:/usr/share/elasticsearch/data', 'certs:$CERTS_DIR'] ports: - 9200:9200 healthcheck: @@ -135,7 +137,7 @@ services: image: {docker-image} environment: - node.name=es02 - - discovery.seed_hosts=es01 + - discovery.seed_hosts=es01,es02 - cluster.initial_master_nodes=es01,es02 - ELASTIC_PASSWORD=$ELASTIC_PASSWORD - "ES_JAVA_OPTS=-Xms512m -Xmx512m" @@ -150,14 +152,14 @@ services: - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt - xpack.security.transport.ssl.certificate=$CERTS_DIR/es02/es02.crt - xpack.security.transport.ssl.key=$CERTS_DIR/es02/es02.key - volumes: ['esdata_02:/usr/share/elasticsearch/data', './certs:$CERTS_DIR'] + volumes: ['data02:/usr/share/elasticsearch/data', 'certs:$CERTS_DIR'] wait_until_ready: image: {docker-image} command: /usr/bin/true depends_on: {"es01": {"condition": "service_healthy"}} -volumes: {"esdata_01": {"driver": "local"}, "esdata_02": {"driver": "local"}} +volumes: {"data01", "data02", "certs"} ---- <1> Bootstrap `elastic` with the password defined in `.env`. See @@ -175,7 +177,7 @@ endif::[] -- ["source","sh"] ---- -docker-compose -f create-certs.yml up +docker-compose -f create-certs.yml run --rm create_certs ---- -- . Start two {es} nodes configured for SSL/TLS: @@ -189,9 +191,9 @@ docker-compose up -d . Access the {es} API over SSL/TLS using the bootstrapped password: + -- -["source","sh"] +["source","sh",subs="attributes"] ---- -curl --cacert certs/ca/ca.crt -u elastic:PleaseChangeMe https://localhost:9200 +docker run --rm -v es_certs:/certs --network=es_default {docker-image} curl --cacert /certs/ca/ca.crt -u elastic:PleaseChangeMe https://es01:9200 ---- // NOTCONSOLE -- @@ -210,3 +212,13 @@ auto --batch \ --url https://localhost:9200" ---- -- + +[float] +==== Tear everything down +To remove all the Docker resources created by the example, issue: +-- +["source","sh"] +---- +docker-compose down -v +---- +-- diff --git a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc b/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc deleted file mode 100644 index 61ebd3e682594..0000000000000 --- a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc +++ /dev/null @@ -1,68 +0,0 @@ -[role="xpack"] -[[separating-node-client-traffic]] -=== Separating node-to-node and client traffic - -Elasticsearch has the feature of so called -{ref}/modules-transport.html[TCP transport profiles] -that allows it to bind to several ports and addresses. The {es} -{security-features} extend on this functionality to enhance the security of the -cluster by enabling the separation of node-to-node transport traffic from client -transport traffic. This is important if the client transport traffic is not -trusted and could potentially be malicious. 
To separate the node-to-node traffic -from the client traffic, add the following to `elasticsearch.yml`: - -[source, yaml] --------------------------------------------------- -transport.profiles.client: <1> - port: 9500-9600 <2> - xpack.security: - type: client <3> --------------------------------------------------- -<1> `client` is the name of this example profile -<2> The port range that will be used by transport clients to communicate with - this cluster -<3> Categorizes the profile as a `client`. This accounts for additional security - filters by denying request attempts on for internal cluster operations - (e.g shard level actions and ping requests) from this profile. - -If supported by your environment, an internal network can be used for node-to-node -traffic and public network can be used for client traffic by adding the following -to `elasticsearch.yml`: - -[source, yaml] --------------------------------------------------- -transport.profiles.default.bind_host: 10.0.0.1 <1> -transport.profiles.client.bind_host: 1.1.1.1 <2> --------------------------------------------------- -<1> The bind address for the network that will be used for node-to-node communication -<2> The bind address for the network used for client communication - -If separate networks are not available, then -{stack-ov}/ip-filtering.html[IP Filtering] can -be enabled to limit access to the profiles. - -When using SSL for transport, a different set of certificates can also be used -for the client traffic by adding the following to `elasticsearch.yml`: - -[source, yaml] --------------------------------------------------- -transport.profiles.client.xpack.security.ssl.truststore: - path: /path/to/another/truststore - password: x-pack-test-password - -transport.profiles.client.xpack.security.ssl.keystore: - path: /path/to/another/keystore - password: x-pack-test-password --------------------------------------------------- - -To change the default behavior that requires certificates for transport clients, -set the following value in the `elasticsearch.yml` file: - -[source, yaml] --------------------------------------------------- -transport.profiles.client.xpack.security.ssl.client_authentication: none --------------------------------------------------- - -This setting keeps certificate authentication active for node-to-node traffic, -but removes the requirement to distribute a signed certificate to transport -clients. \ No newline at end of file diff --git a/docs/reference/setup/important-settings/discovery-settings.asciidoc b/docs/reference/setup/important-settings/discovery-settings.asciidoc index 245852b209609..942c076a33a0d 100644 --- a/docs/reference/setup/important-settings/discovery-settings.asciidoc +++ b/docs/reference/setup/important-settings/discovery-settings.asciidoc @@ -39,9 +39,9 @@ first election. In <>, with no discovery settings configured, this step is automatically performed by the nodes themselves. As this auto-bootstrapping is <>, when you start a brand new cluster in <>, you must explicitly list the names or IP addresses of the -master-eligible nodes whose votes should be counted in the very first election. -This list is set using the `cluster.initial_master_nodes` setting. +mode>>, you must explicitly list the master-eligible nodes whose votes should be +counted in the very first election. This list is set using the +`cluster.initial_master_nodes` setting. 
[source,yaml] -------------------------------------------------- diff --git a/docs/reference/sql/functions/geo.asciidoc b/docs/reference/sql/functions/geo.asciidoc index 72f69af85529f..fc9a85ce97e4d 100644 --- a/docs/reference/sql/functions/geo.asciidoc +++ b/docs/reference/sql/functions/geo.asciidoc @@ -147,7 +147,7 @@ ST_Y( .Description: -Returns the the latitude of the first point in the geometry. +Returns the latitude of the first point in the geometry. ["source","sql",subs="attributes,macros"] -------------------------------------------------- @@ -206,4 +206,4 @@ Returns the distance between geometries in meters. Both geometries have to be po ["source","sql",subs="attributes,macros"] -------------------------------------------------- include-tagged::{sql-specs}/docs/geo.csv-spec[distance] --------------------------------------------------- \ No newline at end of file +-------------------------------------------------- diff --git a/docs/reference/upgrade.asciidoc b/docs/reference/upgrade.asciidoc index e5e447aff75f6..3bd35197dcd5f 100644 --- a/docs/reference/upgrade.asciidoc +++ b/docs/reference/upgrade.asciidoc @@ -7,8 +7,8 @@ process so upgrading does not interrupt service. Rolling upgrades are supported: * Between minor versions -* From 5.6 to 6.7 -* From 6.7 to {version} +* From 5.6 to 6.8 +* From 6.8 to {version} {es} can read indices created in the previous major version. If you have indices created in 5.x or before, you must reindex or delete them diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 1865f005dccc8..52b14d642b620 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -5,7 +5,7 @@ To upgrade directly to {es} {version} from versions 6.0-6.6, you must shut down all nodes in the cluster, upgrade each node to {version}, and restart the cluster. NOTE: If you are running a version prior to 6.0, -https://www.elastic.co/guide/en/elastic-stack/6.7/upgrading-elastic-stack.html[upgrade to 6.7] +{stack-ref-68}/upgrading-elastic-stack.html[upgrade to 6.8] and reindex your old indices or bring up a new {version} cluster and <>. diff --git a/docs/reference/upgrade/reindex_upgrade.asciidoc b/docs/reference/upgrade/reindex_upgrade.asciidoc index 7fc6c320263c3..faa8fbc1639b4 100644 --- a/docs/reference/upgrade/reindex_upgrade.asciidoc +++ b/docs/reference/upgrade/reindex_upgrade.asciidoc @@ -36,7 +36,7 @@ been deleted. [[reindex-upgrade-inplace]] === Reindex in place -You can use the Upgrade Assistant in {kib} 6.7 to automatically reindex 5.x +You can use the Upgrade Assistant in {kib} 6.8 to automatically reindex 5.x indices you need to carry forward to {version}. To manually reindex your old indices in place: @@ -103,7 +103,7 @@ endif::include-xpack[] You can use <> to migrate indices from your old cluster to a new {version} cluster. This enables you move to {version} -from a pre-6.7 cluster without interrupting service. +from a pre-6.8 cluster without interrupting service. [WARNING] ============================================= @@ -196,4 +196,4 @@ monitor progress of the reindex job with the <>: `30s` and `1`). .. Once reindexing is complete and the status of the new index is `green`, - you can delete the old index. \ No newline at end of file + you can delete the old index. 
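The reindex-from-remote migration path mentioned in the upgrade documentation above is easiest to follow with a concrete request. The snippet below is a minimal sketch, not taken from the documentation in this change set: the host `http://oldhost:9200`, the credentials, and the index names are placeholders, and the old cluster must first be allowed via the `reindex.remote.whitelist` setting on the new cluster.

["source","sh"]
----
# Hypothetical example: pull an index from the old cluster into the new cluster.
# Replace the host, credentials, and index names with your own values.
curl -X POST "localhost:9200/_reindex?pretty" -H 'Content-Type: application/json' -d'
{
  "source": {
    "remote": {
      "host": "http://oldhost:9200",
      "username": "elastic",
      "password": "changeme"
    },
    "index": "old-index"
  },
  "dest": {
    "index": "new-index"
  }
}'
----

Progress of such a job can then be followed with the task management API (for example `GET _tasks?detailed=true&actions=*reindex`), which is what the "monitor progress of the reindex job" step above refers to.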
diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index 2cf1061e67ba7..7ad4a0bb904c1 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -10,8 +10,8 @@ running the older version. Rolling upgrades are supported: * Between minor versions -* {stack-ref-67}/upgrading-elastic-stack.html[From 5.6 to 6.7] -* {stack-ref-70}/upgrading-elastic-stack.html[From 6.7 to 7.0] +* {stack-ref-68}/upgrading-elastic-stack.html[From 5.6 to 6.8] +* {stack-ref-70}/upgrading-elastic-stack.html[From 6.8 to 7.0] * From {prev-major-version} to {version} Upgrading directly to {version} from 6.6 or earlier requires a diff --git a/gradle.properties b/gradle.properties index 491770edd7c52..63b1dc3cd7288 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,3 +1,3 @@ org.gradle.daemon=true -org.gradle.jvmargs=-Xmx2g -XX:+HeapDumpOnOutOfMemoryError -Xss2m +org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m options.forkOptions.memoryMaximumSize=2g diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 47216b872e431..9fabbb9bd78d9 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-5.4.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-5.5-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=14cd15fc8cc8705bd69dcfa3c8fefb27eb7027f5de4b47a8b279218f76895a91 +distributionSha256Sum=302b7df46730ce75c582542c056c9bf5cac2b94fbf2cc656d0e37e41e8a5d371 diff --git a/gradlew b/gradlew index b0d6d0ab5deb5..8e25e6c19d574 100755 --- a/gradlew +++ b/gradlew @@ -7,7 +7,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/gradlew.bat b/gradlew.bat index 15e1ee37a70d7..24467a141f791 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -5,7 +5,7 @@ @rem you may not use this file except in compliance with the License. 
@rem You may obtain a copy of the License at @rem -@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem https://www.apache.org/licenses/LICENSE-2.0 @rem @rem Unless required by applicable law or agreed to in writing, software @rem distributed under the License is distributed on an "AS IS" BASIS, diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Circle.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Circle.java index cb8e2c4cb33e1..ad9881ab72fba 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Circle.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Circle.java @@ -49,8 +49,6 @@ public Circle(final double lat, final double lon, final double alt, final double if (radiusMeters < 0 ) { throw new IllegalArgumentException("Circle radius [" + radiusMeters + "] cannot be negative"); } - GeometryUtils.checkLatitude(lat); - GeometryUtils.checkLongitude(lon); } @Override diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/GeometryUtils.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/GeometryUtils.java deleted file mode 100644 index c7bfa16b16a8d..0000000000000 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/GeometryUtils.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.geo.geometry; - -/** - * Geometry-related utility methods - */ -public final class GeometryUtils { - /** - * Minimum longitude value. - */ - static final double MIN_LON_INCL = -180.0D; - - /** - * Maximum longitude value. - */ - static final double MAX_LON_INCL = 180.0D; - - /** - * Minimum latitude value. - */ - static final double MIN_LAT_INCL = -90.0D; - - /** - * Maximum latitude value. 
- */ - static final double MAX_LAT_INCL = 90.0D; - - // No instance: - private GeometryUtils() { - } - - /** - * validates latitude value is within standard +/-90 coordinate bounds - */ - static void checkLatitude(double latitude) { - if (Double.isNaN(latitude) || latitude < MIN_LAT_INCL || latitude > MAX_LAT_INCL) { - throw new IllegalArgumentException( - "invalid latitude " + latitude + "; must be between " + MIN_LAT_INCL + " and " + MAX_LAT_INCL); - } - } - - /** - * validates longitude value is within standard +/-180 coordinate bounds - */ - static void checkLongitude(double longitude) { - if (Double.isNaN(longitude) || longitude < MIN_LON_INCL || longitude > MAX_LON_INCL) { - throw new IllegalArgumentException( - "invalid longitude " + longitude + "; must be between " + MIN_LON_INCL + " and " + MAX_LON_INCL); - } - } - - public static double checkAltitude(final boolean ignoreZValue, double zValue) { - if (ignoreZValue == false) { - throw new IllegalArgumentException("found Z value [" + zValue + "] but [ignore_z_value] " - + "parameter is [" + ignoreZValue + "]"); - } - return zValue; - } - -} diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java index c2c9cb4b83a18..20f4314246950 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Line.java @@ -59,10 +59,6 @@ public Line(double[] lats, double[] lons, double[] alts) { if (alts != null && alts.length != lats.length) { throw new IllegalArgumentException("alts and lats must be equal length"); } - for (int i = 0; i < lats.length; i++) { - GeometryUtils.checkLatitude(lats[i]); - GeometryUtils.checkLongitude(lons[i]); - } } public int length() { diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java index 248f433b96a13..88fd5eb06fe79 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Point.java @@ -42,8 +42,6 @@ public Point(double lat, double lon) { } public Point(double lat, double lon, double alt) { - GeometryUtils.checkLatitude(lat); - GeometryUtils.checkLongitude(lon); this.lat = lat; this.lon = lon; this.alt = alt; diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Rectangle.java b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Rectangle.java index ca7ec2e57c98d..75ba25721e755 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Rectangle.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/geometry/Rectangle.java @@ -71,10 +71,6 @@ public Rectangle(double minLat, double maxLat, double minLon, double maxLon) { * Constructs a bounding box by first validating the provided latitude and longitude coordinates */ public Rectangle(double minLat, double maxLat, double minLon, double maxLon, double minAlt, double maxAlt) { - GeometryUtils.checkLatitude(minLat); - GeometryUtils.checkLatitude(maxLat); - GeometryUtils.checkLongitude(minLon); - GeometryUtils.checkLongitude(maxLon); this.minLon = minLon; this.maxLon = maxLon; this.minLat = minLat; @@ -90,17 +86,6 @@ public Rectangle(double minLat, double maxLat, double minLon, double maxLon, dou } } - public double getWidth() { - if (crossesDateline()) { - return GeometryUtils.MAX_LON_INCL - minLon + maxLon - GeometryUtils.MIN_LON_INCL; - } - return maxLon - minLon; - } - - public double getHeight() { - return 
maxLat - minLat; - } - public double getMinLat() { return minLat; } @@ -156,21 +141,6 @@ public String toString() { return b.toString(); } - /** - * Returns true if this bounding box crosses the dateline - */ - public boolean crossesDateline() { - return maxLon < minLon; - } - - /** returns true if rectangle (defined by minLat, maxLat, minLon, maxLon) contains the lat lon point */ - public boolean containsPoint(final double lat, final double lon) { - if (lat >= minLat && lat <= maxLat) { - return crossesDateline() ? lon >= minLon || lon <= maxLon : lon >= minLon && lon <= maxLon; - } - return false; - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeographyValidator.java b/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeographyValidator.java new file mode 100644 index 0000000000000..756792358abd4 --- /dev/null +++ b/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeographyValidator.java @@ -0,0 +1,178 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.geo.utils; + +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.GeometryVisitor; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Polygon; +import org.elasticsearch.geo.geometry.Rectangle; + +/** + * Validator that checks that lats are between -90 and +90 and lons are between -180 and +180 and altitude is present only if + * ignoreZValue is set to true + */ +public class GeographyValidator implements GeometryValidator { + + /** + * Minimum longitude value. + */ + private static final double MIN_LON_INCL = -180.0D; + + /** + * Maximum longitude value. + */ + private static final double MAX_LON_INCL = 180.0D; + + /** + * Minimum latitude value. + */ + private static final double MIN_LAT_INCL = -90.0D; + + /** + * Maximum latitude value. 
+ */ + private static final double MAX_LAT_INCL = 90.0D; + + private final boolean ignoreZValue; + + public GeographyValidator(boolean ignoreZValue) { + this.ignoreZValue = ignoreZValue; + } + + /** + * validates latitude value is within standard +/-90 coordinate bounds + */ + protected void checkLatitude(double latitude) { + if (Double.isNaN(latitude) || latitude < MIN_LAT_INCL || latitude > MAX_LAT_INCL) { + throw new IllegalArgumentException( + "invalid latitude " + latitude + "; must be between " + MIN_LAT_INCL + " and " + MAX_LAT_INCL); + } + } + + /** + * validates longitude value is within standard +/-180 coordinate bounds + */ + protected void checkLongitude(double longitude) { + if (Double.isNaN(longitude) || longitude < MIN_LON_INCL || longitude > MAX_LON_INCL) { + throw new IllegalArgumentException( + "invalid longitude " + longitude + "; must be between " + MIN_LON_INCL + " and " + MAX_LON_INCL); + } + } + + protected void checkAltitude(double zValue) { + if (ignoreZValue == false && Double.isNaN(zValue) == false) { + throw new IllegalArgumentException("found Z value [" + zValue + "] but [ignore_z_value] " + + "parameter is [" + ignoreZValue + "]"); + } + } + + @Override + public void validate(Geometry geometry) { + geometry.visit(new GeometryVisitor() { + + @Override + public Void visit(Circle circle) throws RuntimeException { + checkLatitude(circle.getLat()); + checkLongitude(circle.getLon()); + checkAltitude(circle.getAlt()); + return null; + } + + @Override + public Void visit(GeometryCollection collection) throws RuntimeException { + for (Geometry g : collection) { + g.visit(this); + } + return null; + } + + @Override + public Void visit(Line line) throws RuntimeException { + for (int i = 0; i < line.length(); i++) { + checkLatitude(line.getLat(i)); + checkLongitude(line.getLon(i)); + checkAltitude(line.getAlt(i)); + } + return null; + } + + @Override + public Void visit(LinearRing ring) throws RuntimeException { + for (int i = 0; i < ring.length(); i++) { + checkLatitude(ring.getLat(i)); + checkLongitude(ring.getLon(i)); + checkAltitude(ring.getAlt(i)); + } + return null; + } + + @Override + public Void visit(MultiLine multiLine) throws RuntimeException { + return visit((GeometryCollection) multiLine); + } + + @Override + public Void visit(MultiPoint multiPoint) throws RuntimeException { + return visit((GeometryCollection) multiPoint); + } + + @Override + public Void visit(MultiPolygon multiPolygon) throws RuntimeException { + return visit((GeometryCollection) multiPolygon); + } + + @Override + public Void visit(Point point) throws RuntimeException { + checkLatitude(point.getLat()); + checkLongitude(point.getLon()); + checkAltitude(point.getAlt()); + return null; + } + + @Override + public Void visit(Polygon polygon) throws RuntimeException { + polygon.getPolygon().visit(this); + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + polygon.getHole(i).visit(this); + } + return null; + } + + @Override + public Void visit(Rectangle rectangle) throws RuntimeException { + checkLatitude(rectangle.getMinLat()); + checkLatitude(rectangle.getMaxLat()); + checkLongitude(rectangle.getMinLon()); + checkLongitude(rectangle.getMaxLon()); + checkAltitude(rectangle.getMinAlt()); + checkAltitude(rectangle.getMaxAlt()); + return null; + } + }); + } +} diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeometryValidator.java b/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeometryValidator.java new file mode 100644 index 0000000000000..2caf6738ed469 --- /dev/null +++ 
b/libs/geo/src/main/java/org/elasticsearch/geo/utils/GeometryValidator.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.geo.utils; + +import org.elasticsearch.geo.geometry.Geometry; + +/** + * Generic geometry validator that can be used by the parser to verify the validity of the parsed geometry + */ +public interface GeometryValidator { + + /** + * Validates the geometry and throws IllegalArgumentException if the geometry is not valid + */ + void validate(Geometry geometry); + +} diff --git a/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java b/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java index 007bb036cec85..4fd4bdb6fd150 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java +++ b/libs/geo/src/main/java/org/elasticsearch/geo/utils/WellKnownText.java @@ -22,7 +22,6 @@ import org.elasticsearch.geo.geometry.Circle; import org.elasticsearch.geo.geometry.Geometry; import org.elasticsearch.geo.geometry.GeometryCollection; -import org.elasticsearch.geo.geometry.GeometryUtils; import org.elasticsearch.geo.geometry.GeometryVisitor; import org.elasticsearch.geo.geometry.Line; import org.elasticsearch.geo.geometry.LinearRing; @@ -58,11 +57,11 @@ public class WellKnownText { private final String EOL = "END-OF-LINE"; private final boolean coerce; - private final boolean ignoreZValue; + private final GeometryValidator validator; - public WellKnownText(boolean coerce, boolean ignoreZValue) { + public WellKnownText(boolean coerce, GeometryValidator validator) { this.coerce = coerce; - this.ignoreZValue = ignoreZValue; + this.validator = validator; } public String toWKT(Geometry geometry) { @@ -243,7 +242,9 @@ public Geometry fromWKT(String wkt) throws IOException, ParseException { tokenizer.whitespaceChars('\r', '\r'); tokenizer.whitespaceChars('\n', '\n'); tokenizer.commentChar('#'); - return parseGeometry(tokenizer); + Geometry geometry = parseGeometry(tokenizer); + validator.validate(geometry); + return geometry; } finally { reader.close(); } @@ -297,7 +298,7 @@ private Point parsePoint(StreamTokenizer stream) throws IOException, ParseExcept double lat = nextNumber(stream); Point pt; if (isNumberNext(stream)) { - pt = new Point(lat, lon, GeometryUtils.checkAltitude(ignoreZValue, nextNumber(stream))); + pt = new Point(lat, lon, nextNumber(stream)); } else { pt = new Point(lat, lon); } @@ -318,7 +319,7 @@ private void parseCoordinate(StreamTokenizer stream, ArrayList lats, Arr lons.add(nextNumber(stream)); lats.add(nextNumber(stream)); if (isNumberNext(stream)) { - alts.add(GeometryUtils.checkAltitude(ignoreZValue, nextNumber(stream))); + alts.add(nextNumber(stream)); } if (alts.isEmpty() == false && alts.size() != lons.size()) { throw new 
ParseException("coordinate dimensions do not match: " + tokenString(stream), stream.lineno()); diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java index 47d0f4285ad01..073bff4cb7575 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/BaseGeometryTestCase.java @@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.test.AbstractWireTestCase; @@ -53,7 +54,7 @@ protected Writeable.Reader instanceReader() { @SuppressWarnings("unchecked") @Override protected T copyInstance(T instance, Version version) throws IOException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); String text = wkt.toWKT(instance); try { return (T) wkt.fromWKT(text); diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java index 8bad65db616ca..e8912a39fb435 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/CircleTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -36,7 +38,7 @@ protected Circle createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("circle (20.0 10.0 15.0)", wkt.toWKT(new Circle(10, 20, 15))); assertEquals(new Circle(10, 20, 15), wkt.fromWKT("circle (20.0 10.0 15.0)")); @@ -48,13 +50,14 @@ public void testBasicSerialization() throws IOException, ParseException { } public void testInitValidation() { - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Circle(10, 20, -1)); + GeometryValidator validator = new GeographyValidator(true); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Circle(10, 20, -1))); assertEquals("Circle radius [-1.0] cannot be negative", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Circle(100, 20, 1)); + ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Circle(100, 20, 1))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Circle(10, 200, 1)); + ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Circle(10, 200, 1))); assertEquals("invalid longitude 200.0; must be between -180.0 and 180.0", ex.getMessage()); } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java index 905d0f3c1257d..c78c47dfbcd96 100644 --- 
a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryCollectionTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -35,7 +36,7 @@ protected GeometryCollection createTestInstance(boolean hasAlt) { public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("geometrycollection (point (20.0 10.0),point EMPTY)", wkt.toWKT(new GeometryCollection(Arrays.asList(new Point(10, 20), Point.EMPTY)))); diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryValidatorTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryValidatorTests.java new file mode 100644 index 0000000000000..c747fc2df50a7 --- /dev/null +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/GeometryValidatorTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.geo.geometry; + +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; +import org.elasticsearch.geo.utils.WellKnownText; +import org.elasticsearch.test.ESTestCase; + +public class GeometryValidatorTests extends ESTestCase { + + public static class NoopValidator implements GeometryValidator { + + @Override + public void validate(Geometry geometry) { + + } + } + + public static class OneValidator extends GeographyValidator { + /** + * Minimum longitude value. + */ + private static final double MIN_LON_INCL = -1D; + + /** + * Maximum longitude value. + */ + private static final double MAX_LON_INCL = 1D; + + /** + * Minimum latitude value. + */ + private static final double MIN_LAT_INCL = -1D; + + /** + * Maximum latitude value. + */ + private static final double MAX_LAT_INCL = 1D; + + /** + * Minimum altitude value. + */ + private static final double MIN_ALT_INCL = -1D; + + /** + * Maximum altitude value. 
+ */ + private static final double MAX_ALT_INCL = 1D; + + public OneValidator() { + super(true); + } + + @Override + protected void checkLatitude(double latitude) { + if (Double.isNaN(latitude) || latitude < MIN_LAT_INCL || latitude > MAX_LAT_INCL) { + throw new IllegalArgumentException( + "invalid latitude " + latitude + "; must be between " + MIN_LAT_INCL + " and " + MAX_LAT_INCL); + } + } + + @Override + protected void checkLongitude(double longitude) { + if (Double.isNaN(longitude) || longitude < MIN_LON_INCL || longitude > MAX_LON_INCL) { + throw new IllegalArgumentException( + "invalid longitude " + longitude + "; must be between " + MIN_LON_INCL + " and " + MAX_LON_INCL); + } + } + + @Override + protected void checkAltitude(double zValue) { + if (Double.isNaN(zValue) == false && (zValue < MIN_ALT_INCL || zValue > MAX_ALT_INCL)) { + throw new IllegalArgumentException( + "invalid altitude " + zValue + "; must be between " + MIN_ALT_INCL + " and " + MAX_ALT_INCL); + } + } + } + + public void testNoopValidator() throws Exception { + WellKnownText parser = new WellKnownText(true, new NoopValidator()); + parser.fromWKT("CIRCLE (10000 20000 30000)"); + parser.fromWKT("POINT (10000 20000)"); + parser.fromWKT("LINESTRING (10000 20000, 0 0)"); + parser.fromWKT("POLYGON ((300 100, 400 200, 500 300, 300 100), (50 150, 250 150, 200 100))"); + parser.fromWKT("MULTIPOINT (10000 20000, 20000 30000)"); + } + + public void testOneValidator() throws Exception { + WellKnownText parser = new WellKnownText(true, new OneValidator()); + parser.fromWKT("POINT (0 1)"); + parser.fromWKT("POINT (0 1 0.5)"); + IllegalArgumentException ex; + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("CIRCLE (1 2 3)")); + assertEquals("invalid latitude 2.0; must be between -1.0 and 1.0", ex.getMessage()); + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("POINT (2 1)")); + assertEquals("invalid longitude 2.0; must be between -1.0 and 1.0", ex.getMessage()); + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("LINESTRING (1 -1 0, 0 0 2)")); + assertEquals("invalid altitude 2.0; must be between -1.0 and 1.0", ex.getMessage()); + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("POLYGON ((0.3 0.1, 0.4 0.2, 5 0.3, 0.3 0.1))")); + assertEquals("invalid longitude 5.0; must be between -1.0 and 1.0", ex.getMessage()); + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT( + "POLYGON ((0.3 0.1, 0.4 0.2, 0.5 0.3, 0.3 0.1), (0.5 1.5, 2.5 1.5, 2.0 1.0))")); + assertEquals("invalid latitude 1.5; must be between -1.0 and 1.0", ex.getMessage()); + ex = expectThrows(IllegalArgumentException.class, () -> parser.fromWKT("MULTIPOINT (0 1, -2 1)")); + assertEquals("invalid longitude -2.0; must be between -1.0 and 1.0", ex.getMessage()); + } + + +} diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java index 0f59940f973f0..b9f8cb37f5422 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LineTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -31,7 +33,7 @@ protected Line createTestInstance(boolean hasAlt) { } public void testBasicSerialization() 
throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("linestring (3.0 1.0, 4.0 2.0)", wkt.toWKT(new Line(new double[]{1, 2}, new double[]{3, 4}))); assertEquals(new Line(new double[]{1, 2}, new double[]{3, 4}), wkt.fromWKT("linestring (3 1, 4 2)")); @@ -45,19 +47,23 @@ public void testBasicSerialization() throws IOException, ParseException { } public void testInitValidation() { - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Line(new double[]{1}, new double[]{3})); + GeometryValidator validator = new GeographyValidator(true); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Line(new double[]{1}, new double[]{3}))); assertEquals("at least two points in the line is required", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Line(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3})); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Line(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3}))); assertEquals("invalid longitude 500.0; must be between -180.0 and 180.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Line(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3})); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Line(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3}))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); } public void testWKTValidation() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> new WellKnownText(randomBoolean(), false).fromWKT("linestring (3 1 6, 4 2 5)")); + () -> new WellKnownText(randomBoolean(), new GeographyValidator(false)).fromWKT("linestring (3 1 6, 4 2 5)")); assertEquals("found Z value [6.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java index 9327e2046d5fe..07e9e866233e7 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/LinearRingTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import org.elasticsearch.test.ESTestCase; @@ -26,30 +28,35 @@ public class LinearRingTests extends ESTestCase { public void testBasicSerialization() { UnsupportedOperationException ex = expectThrows(UnsupportedOperationException.class, - () -> new WellKnownText(true, true).toWKT(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}))); + () -> new WellKnownText(true, new GeographyValidator(true)) + .toWKT(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3}))); assertEquals("line ring cannot be serialized using WKT", ex.getMessage()); } public void testInitValidation() { + GeometryValidator validator = new GeographyValidator(true); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> new LinearRing(new double[]{1, 2, 3}, new double[]{3, 4, 5})); + () -> validator.validate(new LinearRing(new double[]{1, 2, 3}, new double[]{3, 4, 5}))); 
assertEquals("first and last points of the linear ring must be the same (it must close itself): lats[0]=1.0 lats[2]=3.0 " + "lons[0]=3.0 lons[2]=5.0", ex.getMessage()); ex = expectThrows(IllegalArgumentException.class, - () -> new LinearRing(new double[]{1, 2, 1}, new double[]{3, 4, 3}, new double[]{1, 2, 3})); + () -> validator.validate(new LinearRing(new double[]{1, 2, 1}, new double[]{3, 4, 3}, new double[]{1, 2, 3}))); assertEquals("first and last points of the linear ring must be the same (it must close itself): lats[0]=1.0 lats[2]=1.0 " + "lons[0]=3.0 lons[2]=3.0 alts[0]=1.0 alts[2]=3.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new LinearRing(new double[]{1}, new double[]{3})); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new LinearRing(new double[]{1}, new double[]{3}))); assertEquals("at least two points in the line is required", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3})); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 500, 3}))); assertEquals("invalid longitude 500.0; must be between -180.0 and 180.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new LinearRing(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3})); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new LinearRing(new double[]{1, 100, 3, 1}, new double[]{3, 4, 5, 3}))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java index 22e0c4459a3f7..9ed782e65cc06 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiLineTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -40,7 +41,7 @@ protected MultiLine createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("multilinestring ((3.0 1.0, 4.0 2.0))", wkt.toWKT( new MultiLine(Collections.singletonList(new Line(new double[]{1, 2}, new double[]{3, 4}))))); assertEquals(new MultiLine(Collections.singletonList(new Line(new double[]{1, 2}, new double[]{3, 4}))), diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java index d3f8b5738cb9a..c170adf9c9411 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPointTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -41,7 +42,7 @@ protected MultiPoint createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + 
WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("multipoint (2.0 1.0)", wkt.toWKT( new MultiPoint(Collections.singletonList(new Point(1, 2))))); assertEquals(new MultiPoint(Collections.singletonList(new Point(1 ,2))), diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java index fb4d8821ac3f4..9918dfa546c82 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/MultiPolygonTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -40,7 +41,7 @@ protected MultiPolygon createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("multipolygon (((3.0 1.0, 4.0 2.0, 5.0 3.0, 3.0 1.0)))", wkt.toWKT(new MultiPolygon(Collections.singletonList( new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})))))); diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java index 4b590a3beb563..82e8fc40e75e9 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PointTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -31,7 +33,7 @@ protected Point createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("point (20.0 10.0)", wkt.toWKT(new Point(10, 20))); assertEquals(new Point(10, 20), wkt.fromWKT("point (20.0 10.0)")); @@ -43,16 +45,17 @@ public void testBasicSerialization() throws IOException, ParseException { } public void testInitValidation() { - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Point(100, 10)); + GeometryValidator validator = new GeographyValidator(true); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Point(100, 10))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Point(10, 500)); + ex = expectThrows(IllegalArgumentException.class, () -> validator.validate(new Point(10, 500))); assertEquals("invalid longitude 500.0; must be between -180.0 and 180.0", ex.getMessage()); } public void testWKTValidation() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> new WellKnownText(randomBoolean(), false).fromWKT("point (20.0 10.0 100.0)")); + () -> new WellKnownText(randomBoolean(), new GeographyValidator(false)).fromWKT("point (20.0 10.0 100.0)")); assertEquals("found Z value [100.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); } } diff --git 
a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java index 33a5325c87b8e..adbe1f38cdcc0 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/PolygonTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -32,7 +33,7 @@ protected Polygon createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("polygon ((3.0 1.0, 4.0 2.0, 5.0 3.0, 3.0 1.0))", wkt.toWKT(new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})))); assertEquals(new Polygon(new LinearRing(new double[]{1, 2, 3, 1}, new double[]{3, 4, 5, 3})), @@ -73,16 +74,17 @@ public void testInitValidation() { public void testWKTValidation() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> new WellKnownText(false, true).fromWKT("polygon ((3 1 5, 4 2 4, 5 3 3))")); + () -> new WellKnownText(false, new GeographyValidator(true)).fromWKT("polygon ((3 1 5, 4 2 4, 5 3 3))")); assertEquals("first and last points of the linear ring must be the same (it must close itself): " + "lats[0]=1.0 lats[2]=3.0 lons[0]=3.0 lons[2]=5.0 alts[0]=5.0 alts[2]=3.0", ex.getMessage()); ex = expectThrows(IllegalArgumentException.class, - () -> new WellKnownText(randomBoolean(), false).fromWKT("polygon ((3 1 5, 4 2 4, 5 3 3, 3 1 5))")); + () -> new WellKnownText(randomBoolean(), new GeographyValidator(false)).fromWKT("polygon ((3 1 5, 4 2 4, 5 3 3, 3 1 5))")); assertEquals("found Z value [5.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); ex = expectThrows(IllegalArgumentException.class, - () -> new WellKnownText(false, randomBoolean()).fromWKT("polygon ((3 1, 4 2, 5 3, 3 1), (0.5 1.5, 2.5 1.5, 2.0 1.0))")); + () -> new WellKnownText(false, new GeographyValidator(randomBoolean())).fromWKT( + "polygon ((3 1, 4 2, 5 3, 3 1), (0.5 1.5, 2.5 1.5, 2.0 1.0))")); assertEquals("first and last points of the linear ring must be the same (it must close itself): " + "lats[0]=1.5 lats[2]=1.0 lons[0]=0.5 lons[2]=2.0", ex.getMessage()); } diff --git a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java index afbf9f1ae8af6..8bd1494eb34a9 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geo/geometry/RectangleTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.geo.geometry; +import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.geo.utils.GeometryValidator; import org.elasticsearch.geo.utils.WellKnownText; import java.io.IOException; @@ -32,7 +34,7 @@ protected Rectangle createTestInstance(boolean hasAlt) { } public void testBasicSerialization() throws IOException, ParseException { - WellKnownText wkt = new WellKnownText(true, true); + WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true)); assertEquals("bbox (10.0, 20.0, 40.0, 30.0)", wkt.toWKT(new Rectangle(30, 40, 10, 20))); assertEquals(new Rectangle(30, 40, 10, 20), wkt.fromWKT("bbox (10.0, 20.0, 40.0, 
30.0)")); @@ -41,16 +43,21 @@ public void testBasicSerialization() throws IOException, ParseException { } public void testInitValidation() { - IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new Rectangle(100, 1, 2, 3)); + GeometryValidator validator = new GeographyValidator(true); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Rectangle(1, 100, 2, 3))); assertEquals("invalid latitude 100.0; must be between -90.0 and 90.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Rectangle(1, 2, 200, 3)); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Rectangle(1, 2, 200, 3))); assertEquals("invalid longitude 200.0; must be between -180.0 and 180.0", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Rectangle(2, 1, 2, 3)); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Rectangle(2, 1, 2, 3))); assertEquals("max lat cannot be less than min lat", ex.getMessage()); - ex = expectThrows(IllegalArgumentException.class, () -> new Rectangle(1, 2, 2, 3, 5, Double.NaN)); + ex = expectThrows(IllegalArgumentException.class, + () -> validator.validate(new Rectangle(1, 2, 2, 3, 5, Double.NaN))); assertEquals("only one altitude value is specified", ex.getMessage()); } } diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index ca3634805195d..b7ae54e54c3af 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -20,7 +20,7 @@ dependencies { compile 'org.jruby.joni:joni:2.1.6' // joni dependencies: - compile 'org.jruby.jcodings:jcodings:1.0.12' + compile 'org.jruby.jcodings:jcodings:1.0.44' if (isEclipse == false || project.path == ":libs:elasticsearch-grok-tests") { testCompile(project(":test:framework")) { diff --git a/libs/grok/licenses/jcodings-1.0.12.jar.sha1 b/libs/grok/licenses/jcodings-1.0.12.jar.sha1 deleted file mode 100644 index b097e32ece493..0000000000000 --- a/libs/grok/licenses/jcodings-1.0.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bc17079fcaa8823ea8cd0d4c66516335b558db8 \ No newline at end of file diff --git a/libs/grok/licenses/jcodings-1.0.44.jar.sha1 b/libs/grok/licenses/jcodings-1.0.44.jar.sha1 new file mode 100644 index 0000000000000..4449009d3395e --- /dev/null +++ b/libs/grok/licenses/jcodings-1.0.44.jar.sha1 @@ -0,0 +1 @@ +a6884b2fd8fd9a56874db05afaa22435043a2e3e \ No newline at end of file diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java index 1d0af24ae2cba..fff07f625ff13 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java @@ -61,6 +61,7 @@ public class NioSelector implements Closeable { private final AtomicBoolean isClosed = new AtomicBoolean(false); private final CompletableFuture isRunningFuture = new CompletableFuture<>(); private final AtomicReference thread = new AtomicReference<>(null); + private final AtomicBoolean wokenUp = new AtomicBoolean(false); public NioSelector(EventHandler eventHandler) throws IOException { this(eventHandler, Selector.open()); @@ -153,7 +154,7 @@ void singleLoop() { preSelect(); long nanosUntilNextTask = taskScheduler.nanosUntilNextTask(System.nanoTime()); int ready; - if (nanosUntilNextTask == 0) { + if (wokenUp.getAndSet(false) || nanosUntilNextTask == 0) { ready = selector.selectNow(); } else { long millisUntilNextTask = 
TimeUnit.NANOSECONDS.toMillis(nanosUntilNextTask); @@ -221,13 +222,10 @@ void processKey(SelectionKey selectionKey) { if (selectionKey.isAcceptable()) { assert context instanceof ServerChannelContext : "Only server channels can receive accept events"; ServerChannelContext serverChannelContext = (ServerChannelContext) context; - int ops = selectionKey.readyOps(); - if ((ops & SelectionKey.OP_ACCEPT) != 0) { - try { - eventHandler.acceptChannel(serverChannelContext); - } catch (IOException e) { - eventHandler.acceptException(serverChannelContext, e); - } + try { + eventHandler.acceptChannel(serverChannelContext); + } catch (IOException e) { + eventHandler.acceptException(serverChannelContext, e); } } else { assert context instanceof SocketChannelContext : "Only sockets channels can receive non-accept events"; @@ -279,29 +277,36 @@ private void handleTask(Runnable task) { /** * Queues a write operation to be handled by the event loop. This can be called by any thread and is the - * api available for non-selector threads to schedule writes. + * api available for non-selector threads to schedule writes. When invoked from the selector thread the write will be executed + * right away. * * @param writeOperation to be queued */ public void queueWrite(WriteOperation writeOperation) { - queuedWrites.offer(writeOperation); - if (isOpen() == false) { - boolean wasRemoved = queuedWrites.remove(writeOperation); - if (wasRemoved) { - writeOperation.getListener().accept(null, new ClosedSelectorException()); - } + if (isOnCurrentThread()) { + writeToChannel(writeOperation); } else { - wakeup(); + queuedWrites.offer(writeOperation); + if (isOpen() == false) { + boolean wasRemoved = queuedWrites.remove(writeOperation); + if (wasRemoved) { + writeOperation.getListener().accept(null, new ClosedSelectorException()); + } + } else { + wakeup(); + } } } public void queueChannelClose(NioChannel channel) { ChannelContext context = channel.getContext(); assert context.getSelector() == this : "Must schedule a channel for closure with its selector"; - channelsToClose.offer(context); if (isOnCurrentThread() == false) { + channelsToClose.offer(context); ensureSelectorOpenForEnqueuing(channelsToClose, context); wakeup(); + } else { + closeChannel(context); } } @@ -313,9 +318,13 @@ public void queueChannelClose(NioChannel channel) { */ public void scheduleForRegistration(NioChannel channel) { ChannelContext context = channel.getContext(); - channelsToRegister.add(context); - ensureSelectorOpenForEnqueuing(channelsToRegister, context); - wakeup(); + if (isOnCurrentThread() == false) { + channelsToRegister.add(context); + ensureSelectorOpenForEnqueuing(channelsToRegister, context); + wakeup(); + } else { + registerChannel(context); + } } /** @@ -326,7 +335,7 @@ public void scheduleForRegistration(NioChannel channel) { * * @param writeOperation to be queued in a channel's buffer */ - public void writeToChannel(WriteOperation writeOperation) { + private void writeToChannel(WriteOperation writeOperation) { assertOnSelectorThread(); SocketChannelContext context = writeOperation.getChannel(); // If the channel does not currently have anything that is ready to flush, we should flush after @@ -380,8 +389,10 @@ private void cleanupPendingWrites() { } private void wakeup() { - // TODO: Do we need the wakeup optimizations that some other libraries use? 
- selector.wakeup(); + assert isOnCurrentThread() == false; + if (wokenUp.compareAndSet(false, true)) { + selector.wakeup(); + } } private void handleWrite(SocketChannelContext context) { @@ -414,30 +425,38 @@ private void attemptConnect(SocketChannelContext context, boolean connectEvent) private void setUpNewChannels() { ChannelContext newChannel; while ((newChannel = this.channelsToRegister.poll()) != null) { - assert newChannel.getSelector() == this : "The channel must be registered with the selector with which it was created"; - try { - if (newChannel.isOpen()) { - eventHandler.handleRegistration(newChannel); - if (newChannel instanceof SocketChannelContext) { - attemptConnect((SocketChannelContext) newChannel, false); - } - } else { - eventHandler.registrationException(newChannel, new ClosedChannelException()); + registerChannel(newChannel); + } + } + + private void registerChannel(ChannelContext newChannel) { + assert newChannel.getSelector() == this : "The channel must be registered with the selector with which it was created"; + try { + if (newChannel.isOpen()) { + eventHandler.handleRegistration(newChannel); + if (newChannel instanceof SocketChannelContext) { + attemptConnect((SocketChannelContext) newChannel, false); } - } catch (Exception e) { - eventHandler.registrationException(newChannel, e); + } else { + eventHandler.registrationException(newChannel, new ClosedChannelException()); } + } catch (Exception e) { + eventHandler.registrationException(newChannel, e); } } private void closePendingChannels() { ChannelContext channelContext; while ((channelContext = channelsToClose.poll()) != null) { - try { - eventHandler.handleClose(channelContext); - } catch (Exception e) { - eventHandler.closeException(channelContext, e); - } + closeChannel(channelContext); + } + } + + private void closeChannel(final ChannelContext channelContext) { + try { + eventHandler.handleClose(channelContext); + } catch (Exception e) { + eventHandler.closeException(channelContext, e); } } @@ -470,7 +489,7 @@ private void handleQueuedWrites() { * @param the object type */ private void ensureSelectorOpenForEnqueuing(ConcurrentLinkedQueue queue, O objectAdded) { - if (isOpen() == false && isOnCurrentThread() == false) { + if (isOpen() == false) { if (queue.remove(objectAdded)) { throw new IllegalStateException("selector is already closed"); } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java index 21de98e096c04..f77ccb17aef39 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java @@ -129,13 +129,7 @@ public void sendMessage(Object message, BiConsumer listener) { WriteOperation writeOperation = readWriteHandler.createWriteOperation(this, message, listener); - NioSelector selector = getSelector(); - if (selector.isOnCurrentThread() == false) { - selector.queueWrite(writeOperation); - return; - } - - selector.writeToChannel(writeOperation); + getSelector().queueWrite(writeOperation); } public void queueWriteOperation(WriteOperation writeOperation) { @@ -269,7 +263,7 @@ protected int readFromChannel(InboundChannelBuffer channelBuffer) throws IOExcep // Currently we limit to 64KB. This is a trade-off which means more syscalls, in exchange for less // copying. 
- private final int WRITE_LIMIT = 1 << 16; + private static final int WRITE_LIMIT = 1 << 16; protected int flushToChannel(FlushOperation flushOperation) throws IOException { ByteBuffer ioBuffer = getSelector().getIoBuffer(); diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java index 7a641315fe285..55d2e645cadee 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java @@ -19,7 +19,9 @@ package org.elasticsearch.nio; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.ESTestCase; import org.junit.Before; import org.mockito.ArgumentCaptor; @@ -108,14 +110,14 @@ public void testQueueChannelForClosed() throws IOException { } @SuppressWarnings({"unchecked", "rawtypes"}) - public void testCloseException() throws IOException { + public void testCloseException() throws IOException, InterruptedException { IOException ioException = new IOException(); NioChannel channel = mock(NioChannel.class); ChannelContext context = mock(ChannelContext.class); when(channel.getContext()).thenReturn(context); when(context.getSelector()).thenReturn(selector); - selector.queueChannelClose(channel); + executeOnNewThread(() -> selector.queueChannelClose(channel)); doThrow(ioException).when(eventHandler).handleClose(context); @@ -198,9 +200,10 @@ public void testIOExceptionWhileSelect() throws IOException { verify(eventHandler).selectorException(ioException); } - public void testSelectorClosedIfOpenAndEventLoopNotRunning() throws IOException { + public void testSelectorClosedIfOpenAndEventLoopNotRunning() throws Exception { when(rawSelector.isOpen()).thenReturn(true); - selector.close(); + executeOnNewThread(() -> selector.close()); + verify(rawSelector).close(); } @@ -222,8 +225,7 @@ public void testClosedServerChannelWillNotBeRegistered() { } public void testRegisterServerChannelFailsDueToException() throws Exception { - selector.scheduleForRegistration(serverChannel); - + executeOnNewThread(() -> selector.scheduleForRegistration(serverChannel)); ClosedChannelException closedChannelException = new ClosedChannelException(); doThrow(closedChannelException).when(eventHandler).handleRegistration(serverChannelContext); @@ -242,16 +244,18 @@ public void testClosedSocketChannelWillNotBeRegistered() throws Exception { verify(eventHandler, times(0)).handleConnect(channelContext); } - public void testRegisterSocketChannelFailsDueToException() throws Exception { - selector.scheduleForRegistration(channel); + public void testRegisterSocketChannelFailsDueToException() throws InterruptedException { + executeOnNewThread(() -> { + selector.scheduleForRegistration(channel); - ClosedChannelException closedChannelException = new ClosedChannelException(); - doThrow(closedChannelException).when(eventHandler).handleRegistration(channelContext); + ClosedChannelException closedChannelException = new ClosedChannelException(); + doThrow(closedChannelException).when(eventHandler).handleRegistration(channelContext); - selector.preSelect(); + selector.preSelect(); - verify(eventHandler).registrationException(channelContext, closedChannelException); - verify(eventHandler, times(0)).handleConnect(channelContext); + verify(eventHandler).registrationException(channelContext, closedChannelException); + verify(eventHandler, 
times(0)).handleConnect(channelContext); + }); } public void testAcceptEvent() throws IOException { @@ -292,17 +296,17 @@ public void testSuccessfullyRegisterChannelWillAttemptConnect() throws Exception } public void testQueueWriteWhenNotRunning() throws Exception { - selector.close(); - - selector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); - + executeOnNewThread(() -> { + selector.close(); + selector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); + }); verify(listener).accept(isNull(Void.class), any(ClosedSelectorException.class)); } public void testQueueWriteChannelIsClosed() throws Exception { WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); - selector.queueWrite(writeOperation); + executeOnNewThread(() -> selector.queueWrite(writeOperation)); when(channelContext.isOpen()).thenReturn(false); selector.preSelect(); @@ -315,7 +319,7 @@ public void testQueueWriteSelectionKeyThrowsException() throws Exception { WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); - selector.queueWrite(writeOperation); + executeOnNewThread(() -> selector.queueWrite(writeOperation)); when(channelContext.getSelectionKey()).thenReturn(selectionKey); when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); @@ -327,7 +331,7 @@ public void testQueueWriteSelectionKeyThrowsException() throws Exception { public void testQueueWriteSuccessful() throws Exception { WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); - selector.queueWrite(writeOperation); + executeOnNewThread(() -> selector.queueWrite(writeOperation)); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); @@ -343,7 +347,7 @@ public void testQueueDirectlyInChannelBufferSuccessful() throws Exception { assertEquals(0, (selectionKey.interestOps() & SelectionKey.OP_WRITE)); when(channelContext.readyForFlush()).thenReturn(true); - selector.writeToChannel(writeOperation); + selector.queueWrite(writeOperation); verify(channelContext).queueWriteOperation(writeOperation); verify(eventHandler, times(0)).handleWrite(channelContext); @@ -357,7 +361,7 @@ public void testShouldFlushIfNoPendingFlushes() throws Exception { assertEquals(0, (selectionKey.interestOps() & SelectionKey.OP_WRITE)); when(channelContext.readyForFlush()).thenReturn(false); - selector.writeToChannel(writeOperation); + selector.queueWrite(writeOperation); verify(channelContext).queueWriteOperation(writeOperation); verify(eventHandler).handleWrite(channelContext); @@ -374,7 +378,7 @@ public void testQueueDirectlyInChannelBufferSelectionKeyThrowsException() throws when(channelContext.getSelectionKey()).thenReturn(selectionKey); when(channelContext.readyForFlush()).thenReturn(false); when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); - selector.writeToChannel(writeOperation); + selector.queueWrite(writeOperation); verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(eventHandler, times(0)).handleWrite(channelContext); @@ -477,14 +481,17 @@ public void testWillCallPostHandleAfterChannelHandling() throws Exception { public void testCleanup() throws Exception { NioSocketChannel unregisteredChannel = mock(NioSocketChannel.class); SocketChannelContext unregisteredContext = mock(SocketChannelContext.class); + when(unregisteredContext.getSelector()).thenReturn(selector); 
when(unregisteredChannel.getContext()).thenReturn(unregisteredContext); - selector.scheduleForRegistration(channel); + executeOnNewThread(() -> selector.scheduleForRegistration(channel)); selector.preSelect(); - selector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); - selector.scheduleForRegistration(unregisteredChannel); + executeOnNewThread(() -> { + selector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); + selector.scheduleForRegistration(unregisteredChannel); + }); TestSelectionKey testSelectionKey = new TestSelectionKey(0); testSelectionKey.attach(channelContext); @@ -496,4 +503,20 @@ public void testCleanup() throws Exception { verify(eventHandler).handleClose(channelContext); verify(eventHandler).handleClose(unregisteredContext); } + + private static void executeOnNewThread(CheckedRunnable runnable) throws InterruptedException { + final Thread thread = new Thread(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + runnable.run(); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }); + thread.start(); + thread.join(); + } } diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java index c0c203f728fda..5563ccc43063b 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java @@ -175,7 +175,7 @@ public void testSendMessageFromSameThreadIsQueuedInChannel() { when(readWriteHandler.createWriteOperation(context, buffers, listener)).thenReturn(writeOperation); context.sendMessage(buffers, listener); - verify(selector).writeToChannel(writeOpCaptor.capture()); + verify(selector).queueWrite(writeOpCaptor.capture()); WriteOperation writeOp = writeOpCaptor.getValue(); assertSame(writeOperation, writeOp); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index f095b766ee1d5..a655f42a36c26 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -83,6 +83,7 @@ import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter; import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter; import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter; +import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; import org.apache.lucene.analysis.ngram.NGramTokenFilter; @@ -110,6 +111,7 @@ import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.apache.lucene.analysis.util.ElisionFilter; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -237,7 +239,7 @@ public Map> getTokenFilters() { filters.put("dutch_stem", DutchStemTokenFilterFactory::new); filters.put("edge_ngram", EdgeNGramTokenFilterFactory::new); filters.put("edgeNGram", EdgeNGramTokenFilterFactory::new); - filters.put("elision", 
ElisionTokenFilterFactory::new); + filters.put("elision", requiresAnalysisSettings(ElisionTokenFilterFactory::new)); filters.put("fingerprint", FingerprintTokenFilterFactory::new); filters.put("flatten_graph", FlattenGraphTokenFilterFactory::new); filters.put("french_stem", FrenchStemTokenFilterFactory::new); @@ -320,9 +322,6 @@ public Map> getTokenizers() { @Override public List getPreBuiltAnalyzerProviderFactories() { List analyzers = new ArrayList<>(); - // TODO remove in 8.0 - analyzers.add(new PreBuiltAnalyzerProviderFactory("standard_html_strip", CachingStrategy.ELASTICSEARCH, - () -> new StandardHtmlStripAnalyzer(CharArraySet.EMPTY_SET))); analyzers.add(new PreBuiltAnalyzerProviderFactory("pattern", CachingStrategy.ELASTICSEARCH, () -> new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET))); @@ -393,10 +392,6 @@ public List getPreConfiguredTokenFilters() { input -> new CommonGramsFilter(input, CharArraySet.EMPTY_SET))); filters.add(PreConfiguredTokenFilter.singleton("czech_stem", false, CzechStemFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("decimal_digit", true, DecimalDigitFilter::new)); - filters.add(PreConfiguredTokenFilter.singleton("delimited_payload_filter", false, input -> - new DelimitedPayloadTokenFilter(input, - DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, - DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER))); filters.add(PreConfiguredTokenFilter.singleton("delimited_payload", false, input -> new DelimitedPayloadTokenFilter(input, DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, @@ -455,13 +450,15 @@ public List getPreConfiguredTokenFilters() { | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterFilter.SPLIT_ON_NUMERICS | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null))); - filters.add(PreConfiguredTokenFilter.singleton("word_delimiter_graph", false, false, input -> - new WordDelimiterGraphFilter(input, + filters.add(PreConfiguredTokenFilter.singletonWithVersion("word_delimiter_graph", false, false, (input, version) -> { + boolean adjustOffsets = version.onOrAfter(Version.V_7_3_0); + return new WordDelimiterGraphFilter(input, adjustOffsets, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS - | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, null))); + | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, null); + })); return filters; } @@ -475,8 +472,12 @@ public List getPreConfiguredTokenizers() { tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new)); - tokenizers.add(PreConfiguredTokenizer.singleton("edge_ngram", - () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE))); + tokenizers.add(PreConfiguredTokenizer.elasticsearchVersion("edge_ngram", (version) -> { + if (version.onOrAfter(Version.V_7_3_0)) { + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); + } + return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + })); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new 
PatternTokenizer(Regex.compile("\\W+", null), -1))); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new)); // TODO deprecate and remove in API @@ -485,8 +486,12 @@ public List getPreConfiguredTokenizers() { // Temporary shim for aliases. TODO deprecate after they are moved tokenizers.add(PreConfiguredTokenizer.singleton("nGram", NGramTokenizer::new)); - tokenizers.add(PreConfiguredTokenizer.singleton("edgeNGram", - () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE))); + tokenizers.add(PreConfiguredTokenizer.elasticsearchVersion("edgeNGram", (version) -> { + if (version.onOrAfter(Version.V_7_3_0)) { + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); + } + return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + })); tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); return tokenizers; diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java index 52cb69952b836..39d042caa8c25 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java @@ -36,6 +36,9 @@ public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory implem ElisionTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); this.articles = Analysis.parseArticles(env, settings); + if (this.articles == null) { + throw new IllegalArgumentException("elision filter requires [articles] or [articles_path] setting"); + } } @Override diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java deleted file mode 100644 index a35a0ea2a4a0b..0000000000000 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.analysis.common; - -import org.apache.lucene.analysis.CharArraySet; -import org.apache.lucene.analysis.LowerCaseFilter; -import org.apache.lucene.analysis.StopFilter; -import org.apache.lucene.analysis.StopwordAnalyzerBase; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.en.EnglishAnalyzer; -import org.apache.lucene.analysis.standard.StandardTokenizer; - -public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase { - - /** - * @deprecated use {@link StandardHtmlStripAnalyzer#StandardHtmlStripAnalyzer(CharArraySet)} instead - */ - @Deprecated - public StandardHtmlStripAnalyzer() { - super(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); - } - /** - * @deprecated in 6.5, can not create in 7.0, and we remove this in 8.0 - */ - @Deprecated - StandardHtmlStripAnalyzer(CharArraySet stopwords) { - super(stopwords); - } - - @Override - protected TokenStreamComponents createComponents(final String fieldName) { - final Tokenizer src = new StandardTokenizer(); - TokenStream tok = new LowerCaseFilter(src); - if (!stopwords.isEmpty()) { - tok = new StopFilter(tok, stopwords); - } - return new TokenStreamComponents(src, tok); - } - -} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java index 75d4eca4254f8..5d6135549b882 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.elasticsearch.index.analysis.Analysis; +import org.elasticsearch.index.analysis.AnalysisMode; import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.CustomAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; @@ -50,6 +51,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { private final boolean lenient; protected final Settings settings; protected final Environment environment; + private final boolean updateable; SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { @@ -65,9 +67,15 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { this.expand = settings.getAsBoolean("expand", true); this.lenient = settings.getAsBoolean("lenient", false); this.format = settings.get("format", ""); + this.updateable = settings.getAsBoolean("updateable", false); this.environment = env; } + @Override + public AnalysisMode getAnalysisMode() { + return this.updateable ? AnalysisMode.SEARCH_TIME : AnalysisMode.ALL; + } + @Override public TokenStream create(TokenStream tokenStream) { throw new IllegalStateException("Call createPerAnalyzerSynonymFactory to specialize this factory for an analysis chain first"); @@ -98,6 +106,11 @@ public TokenFilterFactory getSynonymFilter() { // which doesn't support stacked input tokens return IDENTITY_FILTER; } + + @Override + public AnalysisMode getAnalysisMode() { + return updateable ? 
AnalysisMode.SEARCH_TIME : AnalysisMode.ALL; + } }; } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java new file mode 100644 index 0000000000000..0172f7cbc2657 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.test.ESTokenStreamTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.util.Collections; + +public class EdgeNGramTokenizerTests extends ESTokenStreamTestCase { + + private IndexAnalyzers buildAnalyzers(Version version, String tokenizer) throws IOException { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, version) + .put("index.analysis.analyzer.my_analyzer.tokenizer", tokenizer) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + return new AnalysisModule(TestEnvironment.newEnvironment(settings), + Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings); + } + + public void testPreConfiguredTokenizer() throws IOException { + + // Before 7.3 we return ngrams of length 1 only + { + Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, + VersionUtils.getPreviousVersion(Version.V_7_3_0)); + try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edge_ngram")) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "test", new String[]{"t"}); + } + } + + // Check deprecated name as well + { + Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, + VersionUtils.getPreviousVersion(Version.V_7_3_0)); + try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edgeNGram")) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + 
assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "test", new String[]{"t"}); + } + } + + // Afterwards, we return ngrams of length 1 and 2, to match the default factory settings + { + try (IndexAnalyzers indexAnalyzers = buildAnalyzers(Version.CURRENT, "edge_ngram")) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "test", new String[]{"t", "te"}); + } + } + + // Check deprecated name as well + { + try (IndexAnalyzers indexAnalyzers = buildAnalyzers(Version.CURRENT, "edgeNGram")) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "test", new String[]{"t", "te"}); + + } + } + + } + +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ElisionFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ElisionFilterFactoryTests.java new file mode 100644 index 0000000000000..dbfd49d5649d5 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ElisionFilterFactoryTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.analysis.AnalysisTestsHelper; +import org.elasticsearch.test.ESTokenStreamTestCase; + +import java.io.IOException; + +public class ElisionFilterFactoryTests extends ESTokenStreamTestCase { + + public void testElisionFilterWithNoArticles() throws IOException { + Settings settings = Settings.builder() + .put("index.analysis.filter.elision.type", "elision") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin())); + + assertEquals("elision filter requires [articles] or [articles_path] setting", e.getMessage()); + } + +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java index d799674f231a1..c8e3699ea840d 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java @@ -20,14 +20,24 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisTestsHelper; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.io.StringReader; +import java.util.Collections; public class WordDelimiterGraphTokenFilterFactoryTests extends BaseWordDelimiterTokenFilterFactoryTestCase { @@ -107,4 +117,51 @@ public void testAdjustingOffsets() throws IOException { assertTokenStreamContents(tokenFilter.create(tokenizer), expected, expectedStartOffsets, expectedEndOffsets, null, expectedIncr, expectedPosLen, null); } + + public void testPreconfiguredFilter() throws IOException { + // Before 7.3 we don't adjust offsets + { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, VersionUtils.getPreviousVersion(Version.V_7_3_0))) + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph") + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + + try (IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings), + Collections.singletonList(new 
CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings)) { + + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "h100", new String[]{"h", "100"}, new int[]{ 0, 0 }, new int[]{ 4, 4 }); + + } + } + + // After 7.3 we do adjust offsets + { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph") + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + + try (IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings), + Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings)) { + + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "h100", new String[]{"h", "100"}, new int[]{ 0, 1 }, new int[]{ 1, 4 }); + + } + } + } } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml index 15de6fe664786..2904cc3e95b58 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml @@ -67,15 +67,6 @@ - length: { tokens: 1 } - match: { tokens.0.token: a1 b2 c3 d4 } ---- -"standard_html_strip": - - do: - catch: /\[standard_html_strip\] analyzer is not supported for new indices, use a custom analyzer using \[standard\] tokenizer and \[html_strip\] char_filter, plus \[lowercase\] filter/ - indices.analyze: - body: - text: - analyzer: standard_html_strip - --- "pattern": - do: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index 3486b9defd9d2..92d0dce7b6201 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -587,6 +587,20 @@ - length: { tokens: 1 } - match: { tokens.0.token: avion } + - do: + catch: bad_request + indices.create: + index: test2 + body: + settings: + analysis: + filter: + my_elision: + type: elision + - match: { status: 400 } + - match: { error.type: illegal_argument_exception } + - match: { error.reason: "elision filter requires [articles] or [articles_path] setting" } + --- "stemmer": - do: diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index 1141a4cf7e891..f6eadab8014d2 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -18,11 +18,11 @@ */ package org.elasticsearch.ingest.common; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.StreamableResponseActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.node.NodeClient; @@ -45,7 +45,7 @@ import static org.elasticsearch.ingest.common.IngestCommonPlugin.GROK_PATTERNS; import static org.elasticsearch.rest.RestRequest.Method.GET; -public class GrokProcessorGetAction extends Action { +public class GrokProcessorGetAction extends StreamableResponseActionType { static final GrokProcessorGetAction INSTANCE = new GrokProcessorGetAction(); static final String NAME = "cluster:admin/ingest/processor/grok/get"; diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java index a9a44d0471586..a096a89951e7c 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.Writeable; -public class MultiSearchTemplateAction extends Action { +public class MultiSearchTemplateAction extends ActionType { public static final MultiSearchTemplateAction INSTANCE = new MultiSearchTemplateAction(); public static final String NAME = "indices:data/read/msearch/template"; @@ -31,11 +31,6 @@ private MultiSearchTemplateAction() { super(NAME); } - @Override - public MultiSearchTemplateResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return MultiSearchTemplateResponse::new; diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java index 5d905ec39e1ab..2e8417c993990 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.Writeable; -public class SearchTemplateAction extends Action { +public class SearchTemplateAction extends ActionType { public static final SearchTemplateAction INSTANCE = new SearchTemplateAction(); public static final String NAME = "indices:data/read/search/template"; @@ -31,11 +31,6 @@ private SearchTemplateAction() { super(NAME); } - @Override - public SearchTemplateResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return SearchTemplateResponse::new; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 
96a03fe5914bd..92cd4f575feec 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -23,14 +23,19 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.inject.Module; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.painless.action.PainlessContextAction; import org.elasticsearch.painless.action.PainlessExecuteAction; import org.elasticsearch.painless.spi.PainlessExtension; @@ -45,7 +50,10 @@ import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.pipeline.MovingFunctionScript; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; import java.util.ArrayList; import java.util.Arrays; @@ -103,8 +111,13 @@ public ScriptEngine getScriptEngine(Settings settings, Collection createGuiceModules() { - return Collections.singleton(b -> b.bind(PainlessScriptEngine.class).toInstance(painlessScriptEngine.get())); + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + // this is a hack to bind the painless script engine in guice (all components are added to guice), so that + // the painless context api. 
this is a temporary measure until transport actions do not require guice + return Collections.singletonList(painlessScriptEngine.get()); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java index c947de1fd82d3..5e941a2d0e2e0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.painless.action; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -64,7 +64,7 @@ * retrieves all available information about the API for this specific context * */ -public class PainlessContextAction extends Action { +public class PainlessContextAction extends ActionType { public static final PainlessContextAction INSTANCE = new PainlessContextAction(); private static final String NAME = "cluster:admin/scripts/painless/context"; @@ -75,11 +75,6 @@ private PainlessContextAction() { super(NAME); } - @Override - public Response newResponse() { - throw new UnsupportedOperationException(); - } - @Override public Writeable.Reader getResponseReader() { return Response::new; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index 32b61f9c38ba4..d0a5d5cc9611b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -30,7 +30,7 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; @@ -89,7 +89,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -public class PainlessExecuteAction extends Action { +public class PainlessExecuteAction extends ActionType { public static final PainlessExecuteAction INSTANCE = new PainlessExecuteAction(); private static final String NAME = "cluster:admin/scripts/painless/execute"; @@ -99,8 +99,8 @@ private PainlessExecuteAction() { } @Override - public Response newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + public Writeable.Reader getResponseReader() { + return Response::new; } public static class Request extends SingleShardRequest implements ToXContentObject { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTimeTests.java similarity index 89% rename from modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTests.java rename to modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTimeTests.java index 
58357cce3ac96..38ceae74e053a 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTimeTests.java @@ -22,7 +22,7 @@ import java.time.ZoneId; import java.time.ZonedDateTime; -public class DateTests extends ScriptTestCase { +public class DateTimeTests extends ScriptTestCase { public void testLongToZonedDateTime() { assertEquals(ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of("Z")), exec( @@ -177,4 +177,18 @@ public void compareZonedDateTimes() { "return zdt1.isAfter(zdt2);" )); } + + public void testTimeZone() { + assertEquals(ZonedDateTime.of(1983, 10, 13, 15, 15, 30, 0, ZoneId.of("America/Los_Angeles")), exec( + "ZonedDateTime utc = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));" + + "return utc.withZoneSameInstant(ZoneId.of('America/Los_Angeles'));")); + + assertEquals("Thu, 13 Oct 1983 15:15:30 -0700", exec( + "String gmtString = 'Thu, 13 Oct 1983 22:15:30 GMT';" + + "ZonedDateTime gmtZdt = ZonedDateTime.parse(gmtString," + + "DateTimeFormatter.RFC_1123_DATE_TIME);" + + "ZonedDateTime pstZdt =" + + "gmtZdt.withZoneSameInstant(ZoneId.of('America/Los_Angeles'));" + + "return pstZdt.format(DateTimeFormatter.RFC_1123_DATE_TIME);")); + } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index 5b4dc61090042..a86f93ce40549 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -296,9 +296,17 @@ private static BytesReference randomSource(Set usedFields) { } } + /** + * Test that this query is never cacheable + */ @Override - protected boolean isCacheable(PercolateQueryBuilder queryBuilder) { - return false; + public void testCacheability() throws IOException { + PercolateQueryBuilder queryBuilder = createTestQueryBuilder(); + QueryShardContext context = createShardContext(); + assert context.isCacheable(); + QueryBuilder rewritten = rewriteQuery(queryBuilder, new QueryShardContext(context)); + assertNotNull(rewritten.toQuery(context)); + assertFalse("query should not be cacheable: " + queryBuilder.toString(), context.isCacheable()); } @Override diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java index 54e89fe0e98b8..07de8c8a22cad 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseActionType; /** - * Action for explaining evaluating search ranking results. + * ActionType for explaining evaluating search ranking results. 
*/ -public class RankEvalAction extends Action { +public class RankEvalAction extends StreamableResponseActionType { public static final RankEvalAction INSTANCE = new RankEvalAction(); public static final String NAME = "indices:data/read/rank_eval"; diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java index 4108a817f046e..1bfb576c37911 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequestBuilder.java @@ -19,13 +19,13 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; public class RankEvalRequestBuilder extends ActionRequestBuilder { - public RankEvalRequestBuilder(ElasticsearchClient client, Action action, + public RankEvalRequestBuilder(ElasticsearchClient client, ActionType action, RankEvalRequest request) { super(client, action, request); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java index 8111aac39451b..63451abb7ccd7 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; @@ -38,7 +38,7 @@ public abstract class AbstractBaseReindexRestHandler< Request extends AbstractBulkByScrollRequest, - A extends Action + A extends ActionType > extends BaseRestHandler { private final A action; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java index 240ccde350532..3124d9cb0be0c 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByQueryRestHandler.java @@ -19,11 +19,10 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -39,7 +38,7 @@ */ public abstract class AbstractBulkByQueryRestHandler< Request extends AbstractBulkByScrollRequest, - A extends Action> extends AbstractBaseReindexRestHandler { + A extends ActionType> extends AbstractBaseReindexRestHandler { protected AbstractBulkByQueryRestHandler(Settings settings, A action) { 
super(settings, action); @@ -53,7 +52,7 @@ protected void parseInternalRequest(Request internal, RestRequest restRequest, SearchRequest searchRequest = internal.getSearchRequest(); try (XContentParser parser = extractRequestSpecificFields(restRequest, bodyConsumers)) { - RestSearchAction.parseSearchRequest(searchRequest, restRequest, parser, size -> setMaxDocsFromSearchSize(internal, size)); + RestSearchAction.parseSearchRequest(searchRequest, restRequest, parser, size -> failOnSizeSpecified()); } searchRequest.source().size(restRequest.paramAsInt("scroll_size", searchRequest.source().size())); @@ -96,8 +95,7 @@ private XContentParser extractRequestSpecificFields(RestRequest restRequest, } } - private void setMaxDocsFromSearchSize(Request request, int size) { - LoggingDeprecationHandler.INSTANCE.usedDeprecatedName("size", "max_docs"); - setMaxDocsValidateIdentical(request, size); + private static void failOnSizeSpecified() { + throw new IllegalArgumentException("invalid parameter [size], use [max_docs] instead"); } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java index dfb8deac58c3b..6df73414e81f3 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; @@ -61,7 +61,7 @@ private BulkByScrollParallelizationHelper() {} static > void startSlicedAction( Request request, BulkByScrollTask task, - Action action, + ActionType action, ActionListener listener, Client client, DiscoveryNode node, @@ -85,7 +85,7 @@ static > void startSlicedAc private static > void sliceConditionally( Request request, BulkByScrollTask task, - Action action, + ActionType action, ActionListener listener, Client client, DiscoveryNode node, @@ -118,7 +118,7 @@ private static int countSlicesBasedOnShards(ClusterSearchShardsResponse response private static > void sendSubRequests( Client client, - Action action, + ActionType action, String localNodeId, BulkByScrollTask task, Request request, diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java index 3cb2c60c62373..cf04d6d856ddb 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleAction.java @@ -19,11 +19,11 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.common.io.stream.Writeable; -public class RethrottleAction extends Action { +public class RethrottleAction extends ActionType { public static final RethrottleAction INSTANCE = new RethrottleAction(); public static final String NAME = "cluster:admin/reindex/rethrottle"; @@ -31,11 +31,6 @@ private RethrottleAction() { 
super(NAME); } - @Override - public ListTasksResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return ListTasksResponse::new; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java index 25407e6dc93d5..648eb6e441b1a 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RethrottleRequestBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.tasks.TasksRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -29,7 +29,7 @@ */ public class RethrottleRequestBuilder extends TasksRequestBuilder { public RethrottleRequestBuilder(ElasticsearchClient client, - Action action) { + ActionType action) { super(client, action, new RethrottleRequest()); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index bdedc65b7a6d3..3d28ce3bcbc96 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -761,7 +761,7 @@ private class MyMockClient extends FilterClient { @Override @SuppressWarnings("unchecked") protected - void doExecute(Action action, Request request, ActionListener listener) { + void doExecute(ActionType action, Request request, ActionListener listener) { if (false == expectedHeaders.equals(threadPool().getThreadContext().getHeaders())) { listener.onFailure( new RuntimeException("Expected " + expectedHeaders + " but got " + threadPool().getThreadContext().getHeaders())); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java index 7b8e46471e61e..d401efaae4bf0 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java @@ -107,11 +107,7 @@ private void randomRequest(AbstractBulkByScrollRequest request) { request.getSearchRequest().indices("test"); request.getSearchRequest().source().size(between(1, 1000)); if (randomBoolean()) { - if (randomBoolean()) { - request.setMaxDocs(between(1, Integer.MAX_VALUE)); - } else { - request.setSize(between(1, Integer.MAX_VALUE)); - } + request.setMaxDocs(between(1, Integer.MAX_VALUE)); } request.setAbortOnVersionConflict(random().nextBoolean()); request.setRefresh(rarely()); diff --git 
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 844c6b8351993..0ab100a856fc1 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BackoffPolicy; -import org.elasticsearch.index.reindex.ScrollableHitSource.Response; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.HeapBufferedAsyncResponseConsumer; import org.elasticsearch.client.RestClient; @@ -53,6 +52,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.reindex.ScrollableHitSource.Response; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESTestCase; @@ -123,41 +123,20 @@ public void tearDown() throws Exception { } public void testLookupRemoteVersion() throws Exception { + assertLookupRemoteVersion(Version.fromString("0.20.5"), "main/0_20_5.json"); + assertLookupRemoteVersion(Version.fromString("0.90.13"), "main/0_90_13.json"); + assertLookupRemoteVersion(Version.fromString("1.7.5"), "main/1_7_5.json"); + assertLookupRemoteVersion(Version.fromId(2030399), "main/2_3_3.json"); + // assert for V_5_0_0 (no qualifier) since we no longer consider qualifier in Version since 7 + assertLookupRemoteVersion(Version.fromId(5000099), "main/5_0_0_alpha_3.json"); + // V_5_0_0 since we no longer consider qualifier in Version + assertLookupRemoteVersion(Version.fromId(5000099), "main/with_unknown_fields.json"); + } + + private void assertLookupRemoteVersion(Version expected, String s) throws Exception { AtomicBoolean called = new AtomicBoolean(); - sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/0_20_5.json").lookupRemoteVersion(v -> { - assertEquals(Version.fromString("0.20.5"), v); - called.set(true); - }); - assertTrue(called.get()); - called.set(false); - sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/0_90_13.json").lookupRemoteVersion(v -> { - assertEquals(Version.fromString("0.90.13"), v); - called.set(true); - }); - assertTrue(called.get()); - called.set(false); - sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/1_7_5.json").lookupRemoteVersion(v -> { - assertEquals(Version.fromString("1.7.5"), v); - called.set(true); - }); - assertTrue(called.get()); - called.set(false); - sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/2_3_3.json").lookupRemoteVersion(v -> { - assertEquals(Version.fromId(2030399), v); - called.set(true); - }); - assertTrue(called.get()); - called.set(false); - sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/5_0_0_alpha_3.json").lookupRemoteVersion(v -> { - // assert for V_5_0_0 (no qualifier) since we no longer consider qualifier in Version since 7 - assertEquals(Version.fromId(5000099), v); - called.set(true); - }); - assertTrue(called.get()); - called.set(false); - sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, 
"main/with_unknown_fields.json").lookupRemoteVersion(v -> { - // V_5_0_0 since we no longer consider qualifier in Version - assertEquals(Version.fromId(5000099), v); + sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, s).lookupRemoteVersion(v -> { + assertEquals(expected, v); called.set(true); }); assertTrue(called.get()); diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml index c8be7eedd3a2b..1763baebe0277 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml @@ -279,52 +279,6 @@ - match: {count: 1} ---- -"Limit by size": - - skip: - version: " - 7.2.99" - reason: "deprecation warnings only emitted on 7.3+" - features: warnings - - - do: - index: - index: twitter - id: 1 - body: { "user": "kimchy" } - - do: - index: - index: twitter - id: 2 - body: { "user": "kimchy" } - - do: - indices.refresh: {} - - - do: - warnings: - - Deprecated field [size] used, expected [max_docs] instead - delete_by_query: - index: twitter - size: 1 - body: - query: - match_all: {} - - - match: {deleted: 1} - - match: {version_conflicts: 0} - - match: {batches: 1} - - match: {failures: []} - - match: {throttled_millis: 0} - - gte: { took: 0 } - - - do: - indices.refresh: {} - - - do: - count: - index: twitter - - - match: {count: 1} - --- "Limit by size pre 7.3": - skip: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yml index 2b1a9514aef19..45de10b370669 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yml @@ -30,22 +30,6 @@ query: match_all: {} ---- -"invalid size fails": - - do: - index: - index: test - id: 1 - body: { "text": "test" } - - do: - catch: /\[max_docs\] parameter cannot be negative, found \[-4\]/ - delete_by_query: - index: test - size: -4 - body: - query: - match_all: {} - --- "invalid max_docs fails": - skip: @@ -66,27 +50,6 @@ query: match_all: {} ---- -"both max_docs and size fails": - - skip: - version: " - 7.2.99" - reason: "max_docs introduced in 7.3.0" - - - do: - index: - index: test - id: 1 - body: { "text": "test" } - - do: - catch: /\[max_docs\] set to two different values \[4\] and \[5\]/ - delete_by_query: - index: test - size: 4 - max_docs: 5 - body: - query: - match_all: {} - --- "invalid scroll_size fails": - do: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yml index 2c58f122bfeca..44971c49c358a 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yml @@ -95,21 +95,26 @@ conflicts: cat --- -"invalid size fails": +"specifying size fails": + - skip: + version: " - 7.99.99" + reason: "size supported until 8" + - do: index: - index: test - id: 1 - body: { "text": "test" } + index: test + id: 1 + body: { "text": "test" } + - do: - catch: /\[max_docs\] parameter cannot be negative, found \[-4\]/ + catch: /invalid parameter \[size\], use \[max_docs\] instead/ reindex: body: source: index: test 
dest: index: dest - size: -4 + size: 1 --- "invalid max_docs in body fails": @@ -153,28 +158,6 @@ dest: index: dest ---- -"inconsistent max_docs and size fails": - - skip: - version: " - 7.2.99" - reason: "max_docs introduced in 7.3.0" - - - do: - index: - index: test - id: 1 - body: { "text": "test" } - - do: - catch: /\[max_docs\] set to two different values \[4\] and \[5\]/ - reindex: - body: - source: - index: test - dest: - index: dest - size: 4 - max_docs: 5 - --- "inconsistent max_docs in body and max_docs in URL fails": - skip: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/30_search.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/30_search.yml index 908ab55673c56..709b9c0d17340 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/30_search.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/30_search.yml @@ -31,52 +31,6 @@ index: target - match: { hits.total: 1 } ---- -"Sorting and size combined": - - skip: - version: " - 7.2.99" - reason: "deprecation warnings only emitted on 7.3+" - features: warnings - - - do: - index: - index: test - id: 1 - body: { "order": 1 } - - do: - index: - index: test - id: 2 - body: { "order": 2 } - - do: - indices.refresh: {} - - - do: - warnings: - - Deprecated field [size] used, expected [max_docs] instead - reindex: - refresh: true - body: - size: 1 - source: - index: test - sort: order - dest: - index: target - - - do: - search: - rest_total_hits_as_int: true - index: target - - match: { hits.total: 1 } - - - do: - search: - rest_total_hits_as_int: true - index: target - q: order:1 - - match: { hits.total: 1 } - --- "Sorting and size combined pre 7.3": - skip: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yml index dccf58a51b1ae..8354fc0aaf322 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yml @@ -217,72 +217,6 @@ metric: search - match: {indices.source.total.search.open_contexts: 0} ---- -"Reindex from remote with size": - - skip: - version: "7.3.0 - " - reason: "7.3 should use max_docs or get deprecation warning" - - - do: - index: - index: source - id: 1 - body: { "text": "test" } - refresh: true - - do: - index: - index: source - id: 2 - body: { "text": "test" } - refresh: true - - # Fetch the http host. We use the host of the master because we know there will always be a master. 
- - do: - cluster.state: {} - - set: { master_node: master } - - do: - nodes.info: - metric: [ http ] - - is_true: nodes.$master.http.publish_address - - set: {nodes.$master.http.publish_address: host} - - do: - reindex: - refresh: true - body: - size: 1 - source: - remote: - host: http://${host} - index: source - dest: - index: dest - - match: {created: 1} - - match: {updated: 0} - - match: {version_conflicts: 0} - - match: {batches: 1} - - match: {failures: []} - - match: {throttled_millis: 0} - - gte: { took: 0 } - - is_false: task - - is_false: deleted - - - do: - search: - rest_total_hits_as_int: true - index: dest - body: - query: - match: - text: test - - match: {hits.total: 1} - - # Make sure reindex closed all the scroll contexts - - do: - indices.stats: - index: source - metric: search - - match: {indices.source.total.search.open_contexts: 0} - --- "Reindex from remote with max_docs": - skip: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml index 67ee48d414c1b..2a3696a4005c7 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml @@ -217,39 +217,6 @@ - match: {failures: []} - gte: { took: 0 } ---- -"Limit by size": - - skip: - version: " - 7.2.99" - reason: "deprecation warnings only emitted on 7.3+" - features: warnings - - - do: - index: - index: twitter - id: 1 - body: { "user": "kimchy" } - - do: - index: - index: twitter - id: 2 - body: { "user": "kimchy" } - - do: - indices.refresh: {} - - - do: - warnings: - - Deprecated field [size] used, expected [max_docs] instead - update_by_query: - index: twitter - size: 1 - - match: {updated: 1} - - match: {version_conflicts: 0} - - match: {batches: 1} - - match: {failures: []} - - match: {throttled_millis: 0} - - gte: { took: 0 } - --- "Limit by size pre 7.3": - skip: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yml index ddc09fe9d9a61..21644b3932984 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yml @@ -11,19 +11,6 @@ index: test conflicts: cat ---- -"invalid size fails": - - do: - index: - index: test - id: 1 - body: { "text": "test" } - - do: - catch: /\[max_docs\] parameter cannot be negative, found \[-4\]/ - update_by_query: - index: test - size: -4 - --- "invalid max_docs in URL fails": - skip: @@ -59,27 +46,6 @@ body: max_docs: -4 ---- -"inconsistent max_docs and size fails": - - skip: - version: " - 7.2.99" - reason: "max_docs introduced in 7.3.0" - - - do: - index: - index: test - id: 1 - body: { "text": "test" } - - do: - catch: /\[max_docs\] set to two different values \[4\] and \[5\]/ - delete_by_query: - index: test - size: 4 - max_docs: 5 - body: - query: - match_all: {} - --- "inconsistent max_docs in body and max_docs in URL fails": - skip: diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index 8397549f384fe..218c9aca2920a 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -22,6 +22,8 @@ import org.elasticsearch.gradle.MavenFilteringHack import 
org.elasticsearch.gradle.test.AntFixture import org.elasticsearch.gradle.test.RestIntegTestTask +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -74,9 +76,9 @@ integTest.enabled = false setting 'discovery.seed_providers', 'ec2' setting 'network.host', '_ec2_' - setting 'discovery.ec2.endpoint', { "http://${-> fixture.addressAndPort}" } + setting 'discovery.ec2.endpoint', { "http://${-> fixture.addressAndPort}" }, IGNORE_VALUE - systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${-> fixture.addressAndPort}" } + systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${-> fixture.addressAndPort}" }, IGNORE_VALUE } } @@ -103,7 +105,7 @@ ec2FixtureContainerCredentials.env 'ACTIVATE_CONTAINER_CREDENTIALS', true testClusters.integTestContainerCredentials { environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', - { "http://${-> tasks.findByName("ec2FixtureContainerCredentials").addressAndPort}/ecs_credentials_endpoint" } + { "http://${-> tasks.findByName("ec2FixtureContainerCredentials").addressAndPort}/ecs_credentials_endpoint" }, IGNORE_VALUE } // Extra config for InstanceProfile diff --git a/plugins/discovery-gce/qa/gce/build.gradle b/plugins/discovery-gce/qa/gce/build.gradle index 16a65c762f6f7..b0231ff7f3c90 100644 --- a/plugins/discovery-gce/qa/gce/build.gradle +++ b/plugins/discovery-gce/qa/gce/build.gradle @@ -17,9 +17,12 @@ * under the License. */ + import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -54,13 +57,13 @@ testClusters.integTest { numberOfNodes = gceNumberOfNodes plugin file(project(':plugins:discovery-gce').bundlePlugin.archiveFile) // use gce fixture for Auth calls instead of http://metadata.google.internal - environment 'GCE_METADATA_HOST', { "http://${gceFixture.addressAndPort}" } + environment 'GCE_METADATA_HOST', { "http://${gceFixture.addressAndPort}" }, IGNORE_VALUE // allows to configure hidden settings (`cloud.gce.host` and `cloud.gce.root_url`) systemProperty 'es.allow_reroute_gce_settings', 'true' setting 'discovery.seed_providers', 'gce' // use gce fixture for metadata server calls instead of http://metadata.google.internal - setting 'cloud.gce.host', { "http://${gceFixture.addressAndPort}" } + setting 'cloud.gce.host', { "http://${gceFixture.addressAndPort}" }, IGNORE_VALUE // use gce fixture for API calls instead of https://www.googleapis.com - setting 'cloud.gce.root_url', { "http://${gceFixture.addressAndPort}" } + setting 'cloud.gce.root_url', { "http://${gceFixture.addressAndPort}" }, IGNORE_VALUE } diff --git a/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle b/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle index 0c2f68d34836b..5f96fd9dc305b 100644 --- a/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle +++ b/plugins/repository-azure/qa/microsoft-azure-storage/build.gradle @@ -17,9 +17,13 @@ * under the License. 
*/ + import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture +import static org.elasticsearch.gradle.PropertyNormalization.DEFAULT +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -79,9 +83,9 @@ testClusters.integTest { // Use a closure on the string to delay evaluation until tests are executed. The endpoint_suffix is used // in a hacky way to change the protocol and endpoint. We must fix that. setting 'azure.client.integration_test.endpoint_suffix', - { "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${azureStorageFixture.addressAndPort }" } + { "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${azureStorageFixture.addressAndPort }" }, IGNORE_VALUE String firstPartOfSeed = project.rootProject.testSeed.tokenize(':').get(0) - setting 'thread_pool.repository_azure.max', (Math.abs(Long.parseUnsignedLong(firstPartOfSeed, 16) % 10) + 1).toString() + setting 'thread_pool.repository_azure.max', (Math.abs(Long.parseUnsignedLong(firstPartOfSeed, 16) % 10) + 1).toString(), System.getProperty('ignore.tests.seed') == null ? DEFAULT : IGNORE_VALUE } else { println "Using an external service to test the repository-azure plugin" } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index ef4cd45bfcc6f..12113542dee44 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -39,8 +39,6 @@ import java.net.HttpURLConnection; import java.net.URISyntaxException; import java.nio.file.NoSuchFileException; -import java.util.ArrayList; -import java.util.Collection; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; @@ -130,56 +128,26 @@ public void deleteBlob(String blobName) throws IOException { @Override public void delete() throws IOException { - PlainActionFuture result = PlainActionFuture.newFuture(); - asyncDelete(result); try { - result.actionGet(); - } catch (Exception e) { - throw new IOException("Exception during container delete", e); - } - } - - private void asyncDelete(ActionListener listener) throws IOException { - final Collection childContainers = children().values(); - if (childContainers.isEmpty() == false) { - final ActionListener childListener = new GroupedActionListener<>( - ActionListener.wrap(v -> asyncDeleteBlobsIgnoringIfNotExists( - new ArrayList<>(listBlobs().keySet()), listener), listener::onFailure), childContainers.size()); - for (BlobContainer container : childContainers) { - threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME).submit(new ActionRunnable<>(childListener) { - @Override - protected void doRun() throws Exception { - ((AzureBlobContainer) container).asyncDelete(childListener); - } - }); - } - } else { - asyncDeleteBlobsIgnoringIfNotExists(new ArrayList<>(listBlobs().keySet()), listener); + blobStore.deleteBlobDirectory(keyPath, threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME)); + } catch (URISyntaxException | StorageException e) { + throw new IOException(e); } } @Override public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { final PlainActionFuture result = 
PlainActionFuture.newFuture(); - asyncDeleteBlobsIgnoringIfNotExists(blobNames, result); - try { - result.actionGet(); - } catch (Exception e) { - throw new IOException("Exception during bulk delete", e); - } - } - - private void asyncDeleteBlobsIgnoringIfNotExists(List blobNames, ActionListener callback) { if (blobNames.isEmpty()) { - callback.onResponse(null); + result.onResponse(null); } else { final GroupedActionListener listener = - new GroupedActionListener<>(ActionListener.map(callback, v -> null), blobNames.size()); + new GroupedActionListener<>(ActionListener.map(result, v -> null), blobNames.size()); final ExecutorService executor = threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME); // Executing deletes in parallel since Azure SDK 8 is using blocking IO while Azure does not provide a bulk delete API endpoint // TODO: Upgrade to newer non-blocking Azure SDK 11 and execute delete requests in parallel that way. for (String blobName : blobNames) { - executor.submit(new ActionRunnable<>(listener) { + executor.execute(new ActionRunnable<>(listener) { @Override protected void doRun() throws IOException { deleteBlobIgnoringIfNotExists(blobName); @@ -188,6 +156,11 @@ protected void doRun() throws IOException { }); } } + try { + result.actionGet(); + } catch (Exception e) { + throw new IOException("Exception during bulk delete", e); + } } @Override diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 5d3f6c8570374..a7d9bb93a5125 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -36,6 +36,7 @@ import java.nio.file.FileAlreadyExistsException; import java.util.Collections; import java.util.Map; +import java.util.concurrent.Executor; import java.util.function.Function; import java.util.stream.Collectors; @@ -91,6 +92,10 @@ public void deleteBlob(String blob) throws URISyntaxException, StorageException service.deleteBlob(clientName, container, blob); } + public void deleteBlobDirectory(String path, Executor executor) throws URISyntaxException, StorageException, IOException { + service.deleteBlobDirectory(clientName, container, path, executor); + } + public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException { return service.getInputStream(clientName, container, blob); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index dabb0fd17cad1..f4ee7b9dbcad9 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -39,6 +39,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; @@ -47,6 +48,7 @@ import org.elasticsearch.common.settings.SettingsException; import 
org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import java.io.IOException; import java.io.InputStream; @@ -55,11 +57,16 @@ import java.net.URISyntaxException; import java.nio.file.FileAlreadyExistsException; import java.security.InvalidKeyException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; import static java.util.Collections.emptyMap; @@ -185,6 +192,50 @@ public void deleteBlob(String account, String container, String blob) throws URI }); } + void deleteBlobDirectory(String account, String container, String path, Executor executor) + throws URISyntaxException, StorageException, IOException { + final Tuple> client = client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + final Collection exceptions = Collections.synchronizedList(new ArrayList<>()); + final AtomicLong outstanding = new AtomicLong(1L); + final PlainActionFuture result = PlainActionFuture.newFuture(); + SocketAccess.doPrivilegedVoidException(() -> { + for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true)) { + // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ + // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / + final String blobPath = blobItem.getUri().getPath().substring(1 + container.length() + 1); + outstanding.incrementAndGet(); + executor.execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + deleteBlob(account, container, blobPath); + } + + @Override + public void onFailure(Exception e) { + exceptions.add(e); + } + + @Override + public void onAfter() { + if (outstanding.decrementAndGet() == 0) { + result.onResponse(null); + } + } + }); + } + }); + if (outstanding.decrementAndGet() == 0) { + result.onResponse(null); + } + result.actionGet(); + if (exceptions.isEmpty() == false) { + final IOException ex = new IOException("Deleting directory [" + path + "] failed"); + exceptions.forEach(ex::addSuppressed); + throw ex; + } + } + public InputStream getInputStream(String account, String container, String blob) throws URISyntaxException, StorageException, IOException { final Tuple> client = client(account); diff --git a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle index 4f201a812f2b2..d2411c7577692 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle +++ b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle @@ -17,12 +17,15 @@ * under the License. 
*/ + import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture import java.security.KeyPair import java.security.KeyPairGenerator +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -90,13 +93,13 @@ integTest { testClusters.integTest { plugin file(project(':plugins:repository-gcs').bundlePlugin.archiveFile) - keystore 'gcs.client.integration_test.credentials_file', serviceAccountFile + keystore 'gcs.client.integration_test.credentials_file', serviceAccountFile, IGNORE_VALUE if (useFixture) { tasks.integTest.dependsOn createServiceAccountFile, googleCloudStorageFixture /* Use a closure on the string to delay evaluation until tests are executed */ - setting 'gcs.client.integration_test.endpoint', { "http://${googleCloudStorageFixture.addressAndPort}" } - setting 'gcs.client.integration_test.token_uri', { "http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token" } + setting 'gcs.client.integration_test.endpoint', { "http://${googleCloudStorageFixture.addressAndPort}" }, IGNORE_VALUE + setting 'gcs.client.integration_test.token_uri', { "http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token" }, IGNORE_VALUE } else { println "Using an external service to test the repository-gcs plugin" } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 713850c10de98..3060867b01b08 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -24,6 +24,8 @@ import java.nio.file.Files import java.nio.file.Path import java.nio.file.Paths +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + apply plugin: 'elasticsearch.test.fixtures' esplugin { @@ -66,7 +68,14 @@ dependencies { hdfsFixture project(':test:fixtures:hdfs-fixture') // Set the keytab files in the classpath so that we can access them from test code without the security manager // freaking out. - testRuntime fileTree(dir: project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab").parent, include: ['*.keytab']) + testRuntime files(project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab").parent) +} + +normalization { + runtimeClasspath { + // ignore generated keytab files for the purposes of build avoidance + ignore '*.keytab' + } } dependencyLicenses { @@ -149,7 +158,7 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSec systemProperty "java.security.krb5.conf", krb5conf extraConfigFile( "repository-hdfs/krb5.keytab", - file("${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}") + file("${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}"), IGNORE_VALUE ) } } diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 083acf288a83b..b6bf60c1c92e5 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -1,8 +1,9 @@ -import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture import org.elasticsearch.gradle.test.RestIntegTestTask +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -33,7 +34,6 @@ versions << [ dependencies { compile "com.amazonaws:aws-java-sdk-s3:${versions.aws}" - compile "com.amazonaws:aws-java-sdk-kms:${versions.aws}" compile "com.amazonaws:aws-java-sdk-core:${versions.aws}" compile "com.amazonaws:jmespath-java:${versions.aws}" compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" @@ -169,6 +169,13 @@ if (useFixture) { File minioAddressFile = new File(project.buildDir, 'generated-resources/s3Fixture.address') + normalization { + runtimeClasspath { + // ignore generated address file for the purposes of build avoidance + ignore 's3Fixture.address' + } + } + thirdPartyTest { dependsOn tasks.bundlePlugin, tasks.postProcessFixture outputs.file(minioAddressFile) @@ -196,7 +203,7 @@ if (useFixture) { testClusters.integTestMinio { keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - setting 's3.client.integration_test_permanent.endpoint', minioAddress + setting 's3.client.integration_test_permanent.endpoint', minioAddress, IGNORE_VALUE plugin file(tasks.bundlePlugin.archiveFile) } @@ -276,12 +283,12 @@ testClusters.integTest { keystore 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken if (useFixture) { - setting 's3.client.integration_test_permanent.endpoint', { "http://${s3Fixture.addressAndPort}" } - setting 's3.client.integration_test_temporary.endpoint', { "http://${s3Fixture.addressAndPort}" } - setting 's3.client.integration_test_ec2.endpoint', { "http://${s3Fixture.addressAndPort}" } + setting 's3.client.integration_test_permanent.endpoint', { "http://${s3Fixture.addressAndPort}" }, IGNORE_VALUE + setting 's3.client.integration_test_temporary.endpoint', { "http://${s3Fixture.addressAndPort}" }, IGNORE_VALUE + setting 's3.client.integration_test_ec2.endpoint', { "http://${s3Fixture.addressAndPort}" }, IGNORE_VALUE // to redirect InstanceProfileCredentialsProvider to custom auth point - systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${s3Fixture.addressAndPort}" } + systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${s3Fixture.addressAndPort}" }, IGNORE_VALUE } else { println "Using an external service to test the repository-s3 plugin" } @@ -303,9 +310,9 @@ if (useFixture) { check.dependsOn(integTestECS) testClusters.integTestECS { - setting 's3.client.integration_test_ecs.endpoint', { "http://${s3Fixture.addressAndPort}" } + setting 's3.client.integration_test_ecs.endpoint', { "http://${s3Fixture.addressAndPort}" }, IGNORE_VALUE plugin file(tasks.bundlePlugin.archiveFile) - environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', { "http://${s3Fixture.addressAndPort}/ecs_credentials_endpoint" } + environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', { "http://${s3Fixture.addressAndPort}/ecs_credentials_endpoint" }, IGNORE_VALUE } } diff --git a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 deleted file mode 100644 index 65c85dc87b184..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.562.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fdf4daf1960fe760e7a950dd28a05c5abc12788 \ No newline at end of file diff --git a/qa/ccs-unavailable-clusters/build.gradle b/qa/ccs-unavailable-clusters/build.gradle index ea80ee983b876..749623b26c329 100644 --- a/qa/ccs-unavailable-clusters/build.gradle +++ 
b/qa/ccs-unavailable-clusters/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' apply plugin: 'elasticsearch.test-with-dependencies' diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle index 140df6e283ab8..a40f6366e6515 100644 --- a/qa/die-with-dignity/build.gradle +++ b/qa/die-with-dignity/build.gradle @@ -17,6 +17,7 @@ * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -24,14 +25,17 @@ esplugin { classname 'org.elasticsearch.DieWithDignityPlugin' } -integTestRunner { +integTest.runner { systemProperty 'tests.security.manager', 'false' systemProperty 'tests.system_call_filter', 'false' - nonInputProperties.systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}" - nonInputProperties.systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.json" + nonInputProperties.systemProperty 'log', "${-> testClusters.integTest.singleNode().getServerLog()}" systemProperty 'runtime.java.home', "${project.runtimeJavaHome}" } +testClusters.integTest { + systemProperty "die.with.dignity.test", "whatever" +} + test.enabled = false check.dependsOn integTest diff --git a/qa/die-with-dignity/src/main/java/org/elasticsearch/DieWithDignityPlugin.java b/qa/die-with-dignity/src/main/java/org/elasticsearch/DieWithDignityPlugin.java index ed1e3d3879a5d..8027eeb8948d0 100644 --- a/qa/die-with-dignity/src/main/java/org/elasticsearch/DieWithDignityPlugin.java +++ b/qa/die-with-dignity/src/main/java/org/elasticsearch/DieWithDignityPlugin.java @@ -36,6 +36,10 @@ public class DieWithDignityPlugin extends Plugin implements ActionPlugin { + public DieWithDignityPlugin() { + assert System.getProperty("die.with.dignity.test") != null : "test should pass the `die.with.dignity.test` property"; + } + @Override public List getRestHandlers( final Settings settings, diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index d83edfb2c997a..c6350f92ae78f 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -19,12 +19,10 @@ package org.elasticsearch.qa.die_with_dignity; -import org.apache.http.ConnectionClosedException; -import org.apache.lucene.util.Constants; import org.elasticsearch.client.Request; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.rest.ESRestTestCase; -import org.hamcrest.Matcher; import java.io.BufferedReader; import java.io.IOException; @@ -36,51 +34,26 @@ import java.util.List; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.either; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.hasToString; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; public class DieWithDignityIT extends ESRestTestCase { public void testDieWithDignity() throws Exception { - // deleting the PID file prevents stopping the cluster 
from failing since it occurs if and only if the PID file exists - final Path pidFile = PathUtils.get(System.getProperty("pidfile")); - final List pidFileLines = Files.readAllLines(pidFile); - assertThat(pidFileLines, hasSize(1)); - final int pid = Integer.parseInt(pidFileLines.get(0)); - Files.delete(pidFile); - IOException e = expectThrows(IOException.class, - () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); - Matcher failureMatcher = instanceOf(ConnectionClosedException.class); - if (Constants.WINDOWS) { - /* - * If the other side closes the connection while we're waiting to fill our buffer - * we can get IOException with the message below. It seems to only come up on - * Windows and it *feels* like it could be a ConnectionClosedException but - * upstream does not consider this a bug: - * https://issues.apache.org/jira/browse/HTTPASYNC-134 - * - * So we catch it here and consider it "ok". - */ - failureMatcher = either(failureMatcher) - .or(hasToString(containsString("An existing connection was forcibly closed by the remote host"))); - } - assertThat(e, failureMatcher); + expectThrows( + IOException.class, + () -> client().performRequest(new Request("GET", "/_die_with_dignity")) + ); // the Elasticsearch process should die and disappear from the output of jps assertBusy(() -> { final String jpsPath = PathUtils.get(System.getProperty("runtime.java.home"), "bin/jps").toString(); - final Process process = new ProcessBuilder().command(jpsPath).start(); - assertThat(process.waitFor(), equalTo(0)); + final Process process = new ProcessBuilder().command(jpsPath, "-v").start(); + try (InputStream is = process.getInputStream(); BufferedReader in = new BufferedReader(new InputStreamReader(is, "UTF-8"))) { String line; while ((line = in.readLine()) != null) { - final int currentPid = Integer.parseInt(line.split("\\s+")[0]); - assertThat(line, pid, not(equalTo(currentPid))); + assertThat(line, line, not(containsString("-Ddie.with.dignity.test"))); } } }); @@ -95,9 +68,9 @@ public void testDieWithDignity() throws Exception { try { while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { final String line = it.next(); - if (line.matches(".*ERROR.*o\\.e\\.ExceptionsHelper.*node-0.*fatal error.*")) { + if (line.matches(".*ERROR.*o\\.e\\.ExceptionsHelper.*integTest-0.*fatal error.*")) { fatalError = true; - } else if (line.matches(".*ERROR.*o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler.*node-0.*" + } else if (line.matches(".*ERROR.*o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler.*integTest-0.*" + "fatal error in thread \\[Thread-\\d+\\], exiting.*")) { fatalErrorInThreadExiting = true; assertTrue(it.hasNext()); @@ -127,4 +100,14 @@ protected boolean preserveClusterUponCompletion() { return true; } + @Override + protected final Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()) + // increase the timeout here to 90 seconds to handle long waits for a green + // cluster health. the waits for green need to be longer than a minute to + // account for delayed shards + .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "1s") + .build(); + } + } diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 2f9239e5c2f22..38c1b3e1a9aa9 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -23,6 +23,7 @@ * threads, etc. 
*/ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-test' dependencies { diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 7f923d03f7166..1913c86fc9c92 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -19,42 +19,43 @@ import org.elasticsearch.gradle.test.RestIntegTestTask +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(":client:rest-high-level") } -task remoteClusterTest(type: RestIntegTestTask) { +task 'remote-cluster'(type: RestIntegTestTask) { mustRunAfter(precommit) + runner { + systemProperty 'tests.rest.suite', 'remote_cluster' + } } -remoteClusterTestCluster { - numNodes = 2 - clusterName = 'remote-cluster' - setting 'cluster.remote.connect', false +testClusters.'remote-cluster' { + numberOfNodes = 2 + setting 'cluster.remote.connect', 'false' } -remoteClusterTestRunner { - systemProperty 'tests.rest.suite', 'remote_cluster' +task mixedClusterTest(type: RestIntegTestTask) { + useCluster testClusters.'remote-cluster' + runner { + dependsOn 'remote-cluster' + systemProperty 'tests.rest.suite', 'multi_cluster' + } } -task mixedClusterTest(type: RestIntegTestTask) {} - -mixedClusterTestCluster { - dependsOn remoteClusterTestRunner - setting 'cluster.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\"" - setting 'cluster.remote.connections_per_cluster', 1 - setting 'cluster.remote.connect', true +testClusters.mixedClusterTest { + setting 'cluster.remote.my_remote_cluster.seeds', + { "\"${testClusters.'remote-cluster'.getAllTransportPortURI().get(0)}\"" } + setting 'cluster.remote.connections_per_cluster', '1' + setting 'cluster.remote.connect', 'true' } -mixedClusterTestRunner { - systemProperty 'tests.rest.suite', 'multi_cluster' - finalizedBy 'remoteClusterTestCluster#node0.stop','remoteClusterTestCluster#node1.stop' -} task integTest { - dependsOn = [mixedClusterTest] + dependsOn mixedClusterTest } test.enabled = false // no unit tests for multi-cluster-search, only integration tests diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index ef1a97fc7abaf..90fc9e3950147 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -17,6 +17,7 @@ * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' apply plugin: 'elasticsearch.test-with-dependencies' @@ -26,7 +27,7 @@ dependencies { testCompile project(path: ':plugins:transport-nio', configuration: 'runtime') // for http } -integTestRunner { +integTest.runner { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. diff --git a/qa/smoke-test-ingest-with-all-dependencies/build.gradle b/qa/smoke-test-ingest-with-all-dependencies/build.gradle index 9267f90cd7e0a..9f5c40ac93798 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/build.gradle +++ b/qa/smoke-test-ingest-with-all-dependencies/build.gradle @@ -17,6 +17,7 @@ * under the License. 
*/ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index 38a8a13fba9fc..d4aadea8b3d80 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -86,10 +86,6 @@ "type" : "time", "description" : "Explicit timeout for each search request. Defaults to no timeout." }, - "size": { - "type" : "number", - "description" : "Deprecated, please use `max_docs` instead" - }, "max_docs": { "type" : "number", "description" : "Maximum number of documents to process (default: all documents)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 69d36f44140a3..4048b4a55d962 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -90,10 +90,6 @@ "type" : "time", "description" : "Explicit timeout for each search request. Defaults to no timeout." }, - "size": { - "type" : "number", - "description" : "Deprecated, please use `max_docs` instead" - }, "max_docs": { "type" : "number", "description" : "Maximum number of documents to process (default: all documents)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml index 83b9c429bbdbf..016bd5912d3a3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml @@ -77,3 +77,47 @@ setup: indices.stats: metric: [ translog ] - gte: { indices.test.primaries.translog.earliest_last_modified_age: 0 } + +--- +"Translog stats on closed indices": + - skip: + version: " - 7.2.99" + reason: "closed indices have translog stats starting version 7.3.0" + + - do: + index: + index: test + id: 1 + body: { "foo": "bar" } + + - do: + index: + index: test + id: 2 + body: { "foo": "bar" } + + - do: + index: + index: test + id: 3 + body: { "foo": "bar" } + + - do: + indices.stats: + metric: [ translog ] + - match: { indices.test.primaries.translog.operations: 3 } + - match: { indices.test.primaries.translog.uncommitted_operations: 3 } + + - do: + indices.close: + index: test + wait_for_active_shards: 1 + - is_true: acknowledged + + - do: + indices.stats: + metric: [ translog ] + expand_wildcards: all + forbid_closed_indices: false + - match: { indices.test.primaries.translog.operations: 3 } + - match: { indices.test.primaries.translog.uncommitted_operations: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml index cde56fa41e3d9..76274e9034d62 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml @@ -92,16 +92,19 @@ setup: aggs: users: top_hits: - sort: "users.last.keyword" + sort: + users.last.keyword: + nested: + path: users seq_no_primary_term: true - match: { hits.total: 
2 } - length: { aggregations.groups.buckets.0.users.hits.hits: 2 } - - match: { aggregations.groups.buckets.0.users.hits.hits.0._id: "1" } + - match: { aggregations.groups.buckets.0.users.hits.hits.0._id: "2" } - match: { aggregations.groups.buckets.0.users.hits.hits.0._index: my-index } - gte: { aggregations.groups.buckets.0.users.hits.hits.0._seq_no: 0 } - gte: { aggregations.groups.buckets.0.users.hits.hits.0._primary_term: 1 } - - match: { aggregations.groups.buckets.0.users.hits.hits.1._id: "2" } + - match: { aggregations.groups.buckets.0.users.hits.hits.1._id: "1" } - match: { aggregations.groups.buckets.0.users.hits.hits.1._index: my-index } - gte: { aggregations.groups.buckets.0.users.hits.hits.1._seq_no: 0 } - gte: { aggregations.groups.buckets.0.users.hits.hits.1._primary_term: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/280_rare_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/280_rare_terms.yml new file mode 100644 index 0000000000000..a82caddd9cfd4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/280_rare_terms.yml @@ -0,0 +1,316 @@ +setup: + - skip: + version: " - 7.2.99" + reason: RareTerms added in 7.3.0 + - do: + indices.create: + index: test_1 + body: + settings: + number_of_replicas: 0 + mappings: + properties: + str: + type: keyword + ip: + type: ip + boolean: + type: boolean + integer: + type: long + number: + type: long + date: + type: date + + + - do: + cluster.health: + wait_for_status: green + +--- +"Basic test": + - do: + index: + index: test_1 + id: 1 + body: { "str" : "abc" } + + - do: + index: + index: test_1 + id: 2 + body: { "str": "abc" } + + - do: + index: + index: test_1 + id: 3 + body: { "str": "bcd" } + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "str_terms" : { "rare_terms" : { "field" : "str", "max_doc_count" : 1 } } } } + + - match: { hits.total.value: 3 } + - length: { aggregations.str_terms.buckets: 1 } + - match: { aggregations.str_terms.buckets.0.key: "bcd" } + - is_false: aggregations.str_terms.buckets.0.key_as_string + - match: { aggregations.str_terms.buckets.0.doc_count: 1 } + +--- +"IP test": + - do: + index: + index: test_1 + id: 1 + body: { "ip": "::1" } + + - do: + index: + index: test_1 + id: 2 + body: { "ip": "127.0.0.1" } + + - do: + index: + index: test_1 + id: 3 + body: { "ip": "::1" } + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "ip_terms" : { "rare_terms" : { "field" : "ip" } } } } + + - match: { hits.total.value: 3 } + - length: { aggregations.ip_terms.buckets: 1 } + - match: { aggregations.ip_terms.buckets.0.key: "127.0.0.1" } + - is_false: aggregations.ip_terms.buckets.0.key_as_string + - match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + + - do: + search: + body: { "size" : 0, "aggs" : { "ip_terms" : { "rare_terms" : { "field" : "ip", "include" : [ "127.0.0.1" ] } } } } + + - match: { hits.total.value: 3 } + - length: { aggregations.ip_terms.buckets: 1 } + - match: { aggregations.ip_terms.buckets.0.key: "127.0.0.1" } + - is_false: aggregations.ip_terms.buckets.0.key_as_string + - match: { aggregations.ip_terms.buckets.0.doc_count: 1 } + + - do: + search: + body: { "size" : 0, "aggs" : { "ip_terms" : { "rare_terms" : { "field" : "ip", "exclude" : [ "127.0.0.1" ] } } } } + + - match: { hits.total.value: 3 } + - length: { aggregations.ip_terms.buckets: 0 } + + - do: + catch: request + search: + index: test_1 + body: { "size" : 0, 
"aggs" : { "ip_terms" : { "rare_terms" : { "field" : "ip", "exclude" : "127.*" } } } } + + + +--- +"Boolean test": + - do: + index: + index: test_1 + id: 1 + body: { "boolean": true } + + - do: + index: + index: test_1 + id: 2 + body: { "boolean": false } + + - do: + index: + index: test_1 + id: 3 + body: { "boolean": true } + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "boolean_terms" : { "rare_terms" : { "field" : "boolean" } } } } + + - match: { hits.total.value: 3 } + - length: { aggregations.boolean_terms.buckets: 1 } + - match: { aggregations.boolean_terms.buckets.0.key: 0 } + - match: { aggregations.boolean_terms.buckets.0.key_as_string: "false" } + - match: { aggregations.boolean_terms.buckets.0.doc_count: 1 } + +--- +"Integer test": + - do: + index: + index: test_1 + id: 1 + body: { "integer": 1234 } + + - do: + index: + index: test_1 + id: 2 + body: { "integer": 5678 } + + - do: + index: + index: test_1 + id: 3 + body: { "integer": 1234 } + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "integer_terms" : { "rare_terms" : { "field" : "integer" } } } } + + - match: { hits.total.value: 3 } + + - length: { aggregations.integer_terms.buckets: 1 } + + - match: { aggregations.integer_terms.buckets.0.key: 5678 } + - is_false: aggregations.integer_terms.buckets.0.key_as_string + - match: { aggregations.integer_terms.buckets.0.doc_count: 1 } + +--- +"Date test": + - do: + index: + index: test_1 + id: 1 + body: { "date": "2016-05-03" } + + - do: + index: + index: test_1 + id: 2 + body: { "date": "2014-09-01" } + + - do: + index: + index: test_1 + id: 3 + body: { "date": "2016-05-03" } + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "date_terms" : { "rare_terms" : { "field" : "date" } } } } + + - match: { hits.total.value: 3 } + + - length: { aggregations.date_terms.buckets: 1 } + - match: { aggregations.date_terms.buckets.0.key: 1409529600000 } + - match: { aggregations.date_terms.buckets.0.key_as_string: "2014-09-01T00:00:00.000Z" } + - match: { aggregations.date_terms.buckets.0.doc_count: 1 } + + - do: + search: + body: { "size" : 0, "aggs" : { "date_terms" : { "rare_terms" : { "field" : "date", "include" : [ "2014-09-01" ] } } } } + + - match: { hits.total.value: 3 } + - length: { aggregations.date_terms.buckets: 1 } + - match: { aggregations.date_terms.buckets.0.key_as_string: "2014-09-01T00:00:00.000Z" } + - match: { aggregations.date_terms.buckets.0.doc_count: 1 } + + - do: + search: + body: { "size" : 0, "aggs" : { "date_terms" : { "rare_terms" : { "field" : "date", "exclude" : [ "2014-09-01" ] } } } } + + - match: { hits.total.value: 3 } + - length: { aggregations.date_terms.buckets: 0 } + +--- +"Unmapped strings": + + - do: + index: + index: test_1 + id: 1 + body: {} + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "string_terms" : { "rare_terms" : { "field" : "unmapped_string"} } } } + + - match: { hits.total.value: 1 } + - length: { aggregations.string_terms.buckets: 0 } + +--- +"Unmapped booleans": + + - do: + index: + index: test_1 + id: 1 + body: {} + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "boolean_terms" : { "rare_terms" : { "field" : "unmapped_boolean" } } } } + + - match: { hits.total.value: 1 } + - length: { aggregations.boolean_terms.buckets: 0 } + +--- +"Unmapped dates": + + - do: + index: + index: test_1 + id: 1 + body: {} + + - do: + indices.refresh: {} + + - do: + search: + body: 
{ "size" : 0, "aggs" : { "date_terms" : { "rare_terms" : { "field" : "unmapped_date"} } } } + + - match: { hits.total.value: 1 } + - length: { aggregations.date_terms.buckets: 0 } + +--- +"Unmapped longs": + + - do: + index: + index: test_1 + id: 1 + body: {} + + - do: + indices.refresh: {} + + - do: + search: + body: { "size" : 0, "aggs" : { "long_terms" : { "rare_terms" : { "field" : "unmapped_long", "value_type" : "long" } } } } + + - match: { hits.total.value: 1 } + - length: { aggregations.long_terms.buckets: 0 } + + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml index 46bf2cada8e4d..82aa0883008a8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/230_interval_query.yml @@ -384,3 +384,43 @@ setup: - match: { hits.total.value: 1 } - match: { hits.hits.0._id: "4" } +--- +"Test prefix": + - skip: + version: " - 8.0.0" + reason: "TODO: change to 7.3 in backport" + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - match: + query: cold + - prefix: + prefix: out + - match: { hits.total.value: 3 } + +--- +"Test wildcard": + - skip: + version: " - 8.0.0" + reason: "TODO: change to 7.3 in backport" + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - match: + query: cold + - wildcard: + pattern: out?ide + - match: { hits.total.value: 3 } + diff --git a/server/src/main/java/org/apache/lucene/util/CombinedBitSet.java b/server/src/main/java/org/apache/lucene/util/CombinedBitSet.java new file mode 100644 index 0000000000000..cb1bd819ab24a --- /dev/null +++ b/server/src/main/java/org/apache/lucene/util/CombinedBitSet.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.lucene.util; + +import org.apache.lucene.search.DocIdSetIterator; + +/** + * A {@link BitSet} implementation that combines two instances of {@link BitSet} and {@link Bits} + * to provide a single merged view. + */ +public final class CombinedBitSet extends BitSet implements Bits { + private final BitSet first; + private final Bits second; + private final int length; + + public CombinedBitSet(BitSet first, Bits second) { + this.first = first; + this.second = second; + this.length = first.length(); + } + + public BitSet getFirst() { + return first; + } + + /** + * This implementation is slow and requires to iterate over all bits to compute + * the intersection. Use {@link #approximateCardinality()} for + * a fast approximation. 
+ */ + @Override + public int cardinality() { + int card = 0; + for (int i = 0; i < length; i++) { + card += get(i) ? 1 : 0; + } + return card; + } + + @Override + public int approximateCardinality() { + return first.cardinality(); + } + + @Override + public int prevSetBit(int index) { + assert index >= 0 && index < length : "index=" + index + ", numBits=" + length(); + int prev = first.prevSetBit(index); + while (prev != -1 && second.get(prev) == false) { + if (prev == 0) { + return -1; + } + prev = first.prevSetBit(prev-1); + } + return prev; + } + + @Override + public int nextSetBit(int index) { + assert index >= 0 && index < length : "index=" + index + " numBits=" + length(); + int next = first.nextSetBit(index); + while (next != DocIdSetIterator.NO_MORE_DOCS && second.get(next) == false) { + if (next == length() - 1) { + return DocIdSetIterator.NO_MORE_DOCS; + } + next = first.nextSetBit(next+1); + } + return next; + } + + @Override + public long ramBytesUsed() { + return first.ramBytesUsed(); + } + + @Override + public boolean get(int index) { + return first.get(index) && second.get(index); + } + + @Override + public int length() { + return length; + } + + @Override + public void set(int i) { + throw new UnsupportedOperationException("not implemented"); + } + + @Override + public void clear(int i) { + throw new UnsupportedOperationException("not implemented"); + } + + @Override + public void clear(int startIndex, int endIndex) { + throw new UnsupportedOperationException("not implemented"); + } +} diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index ec29ddfdd2fa2..daa224c8636ea 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -51,7 +51,6 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_0_1 = new Version(7000199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_1_0 = new Version(7010099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_1_1 = new Version(7010199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_1_2 = new Version(7010299, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_2_0 = new Version(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_2_1 = new Version(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_3_0 = new Version(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 746ffd29213cc..9ccd4c9fdfdb0 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -206,7 +206,10 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; import org.elasticsearch.index.seqno.RetentionLeaseActions; +import org.elasticsearch.index.seqno.RetentionLeaseBackgroundSyncAction; +import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.persistent.CompletionPersistentTaskAction; import 
org.elasticsearch.persistent.RemovePersistentTaskAction; @@ -412,9 +415,8 @@ public void register(ActionHandler handler) { } public void register( - Action action, Class> transportAction, - Class... supportTransportActions) { - register(new ActionHandler<>(action, transportAction, supportTransportActions)); + ActionType action, Class> transportAction) { + register(new ActionHandler<>(action, transportAction)); } } ActionRegistry actions = new ActionRegistry(); @@ -460,8 +462,8 @@ public void reg actions.register(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class); actions.register(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class); actions.register(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class); - actions.register(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class, - TransportGetFieldMappingsIndexAction.class); + actions.register(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class); + actions.register(TransportGetFieldMappingsIndexAction.TYPE, TransportGetFieldMappingsIndexAction.class); actions.register(PutMappingAction.INSTANCE, TransportPutMappingAction.class); actions.register(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class); actions.register(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class); @@ -484,14 +486,14 @@ public void reg actions.register(IndexAction.INSTANCE, TransportIndexAction.class); actions.register(GetAction.INSTANCE, TransportGetAction.class); actions.register(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class); - actions.register(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class, - TransportShardMultiTermsVectorAction.class); + actions.register(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class); + actions.register(TransportShardMultiTermsVectorAction.TYPE, TransportShardMultiTermsVectorAction.class); actions.register(DeleteAction.INSTANCE, TransportDeleteAction.class); actions.register(UpdateAction.INSTANCE, TransportUpdateAction.class); - actions.register(MultiGetAction.INSTANCE, TransportMultiGetAction.class, - TransportShardMultiGetAction.class); - actions.register(BulkAction.INSTANCE, TransportBulkAction.class, - TransportShardBulkAction.class); + actions.register(MultiGetAction.INSTANCE, TransportMultiGetAction.class); + actions.register(TransportShardMultiGetAction.TYPE, TransportShardMultiGetAction.class); + actions.register(BulkAction.INSTANCE, TransportBulkAction.class); + actions.register(TransportShardBulkAction.TYPE, TransportShardBulkAction.class); actions.register(SearchAction.INSTANCE, TransportSearchAction.class); actions.register(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class); actions.register(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class); @@ -505,8 +507,8 @@ public void reg actions.register(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class); actions.register(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class); - actions.register(FieldCapabilitiesAction.INSTANCE, TransportFieldCapabilitiesAction.class, - TransportFieldCapabilitiesIndexAction.class); + actions.register(FieldCapabilitiesAction.INSTANCE, TransportFieldCapabilitiesAction.class); + actions.register(TransportFieldCapabilitiesIndexAction.TYPE, TransportFieldCapabilitiesIndexAction.class); actions.register(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class); actions.register(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class); @@ -526,6 +528,11 
@@ public void reg actions.register(RetentionLeaseActions.Renew.INSTANCE, RetentionLeaseActions.Renew.TransportAction.class); actions.register(RetentionLeaseActions.Remove.INSTANCE, RetentionLeaseActions.Remove.TransportAction.class); + // internal actions + actions.register(GlobalCheckpointSyncAction.TYPE, GlobalCheckpointSyncAction.class); + actions.register(RetentionLeaseBackgroundSyncAction.TYPE, RetentionLeaseBackgroundSyncAction.class); + actions.register(RetentionLeaseSyncAction.TYPE, RetentionLeaseSyncAction.class); + return unmodifiableMap(actions.getRegistry()); } @@ -685,17 +692,14 @@ protected void configure() { bind(AutoCreateIndex.class).toInstance(autoCreateIndex); bind(TransportLivenessAction.class).asEagerSingleton(); - // register Action -> transportAction Map used by NodeClient + // register ActionType -> transportAction Map used by NodeClient @SuppressWarnings("rawtypes") - MapBinder transportActionsBinder - = MapBinder.newMapBinder(binder(), Action.class, TransportAction.class); + MapBinder transportActionsBinder + = MapBinder.newMapBinder(binder(), ActionType.class, TransportAction.class); for (ActionHandler action : actions.values()) { // bind the action as eager singleton, so the map binder one will reuse it bind(action.getTransportAction()).asEagerSingleton(); transportActionsBinder.addBinding(action.getAction()).to(action.getTransportAction()).asEagerSingleton(); - for (Class supportAction : action.getSupportTransportActions()) { - bind(supportAction).asEagerSingleton(); - } } } diff --git a/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java index a3aa8ac2a5222..166bec9e065b0 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java @@ -26,11 +26,11 @@ public abstract class ActionRequestBuilder { - protected final Action action; + protected final ActionType action; protected final Request request; protected final ElasticsearchClient client; - protected ActionRequestBuilder(ElasticsearchClient client, Action action, Request request) { + protected ActionRequestBuilder(ElasticsearchClient client, ActionType action, Request request) { Objects.requireNonNull(action, "action must not be null"); this.action = action; this.request = request; diff --git a/server/src/main/java/org/elasticsearch/action/Action.java b/server/src/main/java/org/elasticsearch/action/ActionType.java similarity index 75% rename from server/src/main/java/org/elasticsearch/action/Action.java rename to server/src/main/java/org/elasticsearch/action/ActionType.java index f0df6202072a4..02f8f3c6fc22f 100644 --- a/server/src/main/java/org/elasticsearch/action/Action.java +++ b/server/src/main/java/org/elasticsearch/action/ActionType.java @@ -26,15 +26,27 @@ /** * A generic action. Should strive to make it a singleton. */ -public abstract class Action { +public class ActionType { private final String name; + private final Writeable.Reader responseReader; /** * @param name The name of the action, must be unique across actions. + * @deprecated Pass a {@link Writeable.Reader} with {@link } */ - protected Action(String name) { + @Deprecated + protected ActionType(String name) { + this(name, null); + } + + /** + * @param name The name of the action, must be unique across actions. 
+ * @param responseReader A reader for the response type + */ + public ActionType(String name, Writeable.Reader responseReader) { this.name = name; + this.responseReader = responseReader; } /** @@ -44,23 +56,11 @@ public String name() { return this.name; } - /** - * Creates a new response instance. - * @deprecated Implement {@link #getResponseReader()} instead and make this method throw an - * {@link UnsupportedOperationException} - */ - @Deprecated - public abstract Response newResponse(); - /** * Get a reader that can create a new instance of the class from a {@link org.elasticsearch.common.io.stream.StreamInput} */ public Writeable.Reader getResponseReader() { - return in -> { - Response response = newResponse(); - response.readFrom(in); - return response; - }; + return responseReader; } /** @@ -72,7 +72,7 @@ public TransportRequestOptions transportOptions(Settings settings) { @Override public boolean equals(Object o) { - return o instanceof Action && name.equals(((Action) o).name()); + return o instanceof ActionType && name.equals(((ActionType) o).name()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/StreamableResponseActionType.java b/server/src/main/java/org/elasticsearch/action/StreamableResponseActionType.java new file mode 100644 index 0000000000000..b8206bb03f89d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/StreamableResponseActionType.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action; + +import org.elasticsearch.common.io.stream.Writeable; + +/** + * An action whose response type implements {@link org.elasticsearch.common.io.stream.Streamable}. + * @deprecated Use {@link ActionType} directly and provide a {@link Writeable.Reader} + */ +@Deprecated +public abstract class StreamableResponseActionType extends ActionType { + + protected StreamableResponseActionType(String name) { + super(name); + } + + /** + * Creates a new response instance.
+ * @deprecated Implement {@link #getResponseReader()} instead and make this method throw an + * {@link UnsupportedOperationException} + */ + @Deprecated + public abstract Response newResponse(); + + @Override + public final Writeable.Reader getResponseReader() { + return in -> { + Response response = newResponse(); + response.readFrom(in); + return response; + }; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java b/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java index a4c3e17e80208..4319a745ba816 100644 --- a/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java +++ b/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java @@ -30,10 +30,10 @@ public class TransportActionNodeProxy { private final TransportService transportService; - private final Action action; + private final ActionType action; private final TransportRequestOptions transportOptions; - public TransportActionNodeProxy(Settings settings, Action action, TransportService transportService) { + public TransportActionNodeProxy(Settings settings, ActionType action, TransportService transportService) { this.action = action; this.transportService = transportService; this.transportOptions = action.transportOptions(settings); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java index 19d5378b305ed..acaaed9eaa985 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseActionType; /** - * Action for explaining shard allocation for a shard in the cluster + * ActionType for explaining shard allocation for a shard in the cluster */ -public class ClusterAllocationExplainAction extends Action { +public class ClusterAllocationExplainAction extends StreamableResponseActionType { public static final ClusterAllocationExplainAction INSTANCE = new ClusterAllocationExplainAction(); public static final String NAME = "cluster:monitor/allocation/explain"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index e3ee0dd7b1524..2fe4982acc63e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -41,6 +41,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -88,7 +89,7 @@ protected ClusterAllocationExplainResponse newResponse() { } @Override - protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state, + protected void masterOperation(Task task, final 
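The ActionType and StreamableResponseActionType hunks above define the two migration paths for action singletons. A short sketch of both shapes, using hypothetical ExampleAction/ExampleResponse names; the real classes touched further down in this change (for example AddVotingConfigExclusionsAction and ClusterHealthAction) follow the same two patterns:

// Writeable path: the response can be read from a StreamInput, so the action extends
// ActionType and supplies a Reader (here by overriding getResponseReader(), as the
// voting-config-exclusions actions below do).
public class ExampleAction extends ActionType<ExampleResponse> {
    public static final ExampleAction INSTANCE = new ExampleAction();
    public static final String NAME = "cluster:monitor/example";

    private ExampleAction() {
        super(NAME);
    }

    @Override
    public Writeable.Reader<ExampleResponse> getResponseReader() {
        return ExampleResponse::new;
    }
}

// Transitional path: a response that still implements Streamable keeps newResponse(),
// and the deprecated StreamableResponseActionType bridge adapts it into a Reader.
public class LegacyExampleAction extends StreamableResponseActionType<LegacyExampleResponse> {
    public static final LegacyExampleAction INSTANCE = new LegacyExampleAction();
    public static final String NAME = "cluster:monitor/legacy_example";

    private LegacyExampleAction() {
        super(NAME);
    }

    @Override
    public LegacyExampleResponse newResponse() {
        return new LegacyExampleResponse();
    }
}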
ClusterAllocationExplainRequest request, final ClusterState state, final ActionListener listener) { final RoutingNodes routingNodes = state.getRoutingNodes(); final ClusterInfo clusterInfo = clusterInfoService.getClusterInfo(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java index 6fdd324fa47c6..a2f0c721b5da4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsAction.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.action.admin.cluster.configuration; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.Writeable.Reader; -public class AddVotingConfigExclusionsAction extends Action { +public class AddVotingConfigExclusionsAction extends ActionType { public static final AddVotingConfigExclusionsAction INSTANCE = new AddVotingConfigExclusionsAction(); public static final String NAME = "cluster:admin/voting_config/add_exclusions"; @@ -29,11 +29,6 @@ private AddVotingConfigExclusionsAction() { super(NAME); } - @Override - public AddVotingConfigExclusionsResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Reader getResponseReader() { return AddVotingConfigExclusionsResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java index 49b578f48adf8..6091800693f49 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsAction.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.action.admin.cluster.configuration; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.Writeable.Reader; -public class ClearVotingConfigExclusionsAction extends Action { +public class ClearVotingConfigExclusionsAction extends ActionType { public static final ClearVotingConfigExclusionsAction INSTANCE = new ClearVotingConfigExclusionsAction(); public static final String NAME = "cluster:admin/voting_config/clear_exclusions"; @@ -29,11 +29,6 @@ private ClearVotingConfigExclusionsAction() { super(NAME); } - @Override - public ClearVotingConfigExclusionsResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Reader getResponseReader() { return ClearVotingConfigExclusionsResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java index 512321c7e4432..a6474510e88f3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportService; @@ -78,7 +79,7 @@ protected AddVotingConfigExclusionsResponse read(StreamInput in) throws IOExcept } @Override - protected void masterOperation(AddVotingConfigExclusionsRequest request, ClusterState state, + protected void masterOperation(Task task, AddVotingConfigExclusionsRequest request, ClusterState state, ActionListener listener) throws Exception { resolveVotingConfigExclusionsAndCheckMaximum(request, state); // throws IAE if no nodes matched or maximum exceeded diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java index 7cd19a824be05..3b441e7ea9db7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportService; @@ -72,7 +73,7 @@ protected ClearVotingConfigExclusionsResponse read(StreamInput in) throws IOExce } @Override - protected void masterOperation(ClearVotingConfigExclusionsRequest request, ClusterState initialState, + protected void masterOperation(Task task, ClearVotingConfigExclusionsRequest request, ClusterState initialState, ActionListener listener) throws Exception { final long startTimeMillis = threadPool.relativeTimeInMillis(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java index 0cd148ee231e0..e8f5ecfaf5b66 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.cluster.health; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseActionType; -public class ClusterHealthAction extends Action { +public class ClusterHealthAction extends StreamableResponseActionType { public static final ClusterHealthAction INSTANCE = new ClusterHealthAction(); public static final String NAME = "cluster:monitor/health"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 0caae77d7dead..08b8730b02419 100644 --- 
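The TransportAddVotingConfigExclusionsAction and TransportClearVotingConfigExclusionsAction hunks here, like the other master-node actions in this change, pick up the same signature change: masterOperation now receives the executing Task, so the separate task-less overload can be removed. A minimal sketch of an implementation against the new hook, with hypothetical ExampleRequest/ExampleResponse types:

@Override
protected void masterOperation(Task task, ExampleRequest request, ClusterState state,
                               ActionListener<ExampleResponse> listener) throws Exception {
    // The task is always available now, e.g. for wiring into child requests or cancellation
    // checks; implementations that overrode the task-less variant can simply ignore it.
    listener.onResponse(new ExampleResponse());
}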
a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -77,13 +77,6 @@ protected ClusterHealthResponse newResponse() { return new ClusterHealthResponse(); } - @Override - protected final void masterOperation(ClusterHealthRequest request, ClusterState state, - ActionListener listener) throws Exception { - logger.warn("attempt to execute a cluster health operation without a task"); - throw new UnsupportedOperationException("task parameter is required for this operation"); - } - @Override protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java index 4ea7ee5bc3bbe..4833625d29522 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseActionType; -public class NodesHotThreadsAction extends Action { +public class NodesHotThreadsAction extends StreamableResponseActionType { public static final NodesHotThreadsAction INSTANCE = new NodesHotThreadsAction(); public static final String NAME = "cluster:monitor/nodes/hot_threads"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index 6321813f189fb..4f85177b6e671 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.monitor.jvm.HotThreads; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -54,8 +55,8 @@ protected NodesHotThreadsResponse newResponse(NodesHotThreadsRequest request, } @Override - protected NodeRequest newNodeRequest(String nodeId, NodesHotThreadsRequest request) { - return new NodeRequest(nodeId, request); + protected NodeRequest newNodeRequest(NodesHotThreadsRequest request) { + return new NodeRequest(request); } @Override @@ -64,7 +65,7 @@ protected NodeHotThreads newNodeResponse() { } @Override - protected NodeHotThreads nodeOperation(NodeRequest request) { + protected NodeHotThreads nodeOperation(NodeRequest request, Task task) { HotThreads hotThreads = new HotThreads() .busiestThreads(request.request.threads) .type(request.request.type) @@ -85,8 +86,7 @@ public static class NodeRequest extends BaseNodeRequest { public NodeRequest() { } - NodeRequest(String nodeId, NodesHotThreadsRequest request) { - super(nodeId); + NodeRequest(NodesHotThreadsRequest request) { this.request = request; } diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java index edc5ed7e83f0f..e94390d8f92d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.cluster.node.info; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseActionType; -public class NodesInfoAction extends Action { +public class NodesInfoAction extends StreamableResponseActionType { public static final NodesInfoAction INSTANCE = new NodesInfoAction(); public static final String NAME = "cluster:monitor/nodes/info"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index a1f9790af9351..903f6adb7b931 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.node.NodeService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -56,8 +57,8 @@ protected NodesInfoResponse newResponse(NodesInfoRequest nodesInfoRequest, } @Override - protected NodeInfoRequest newNodeRequest(String nodeId, NodesInfoRequest request) { - return new NodeInfoRequest(nodeId, request); + protected NodeInfoRequest newNodeRequest(NodesInfoRequest request) { + return new NodeInfoRequest(request); } @Override @@ -66,7 +67,7 @@ protected NodeInfo newNodeResponse() { } @Override - protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) { + protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest, Task task) { NodesInfoRequest request = nodeRequest.request; return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(), request.transport(), request.http(), request.plugins(), request.ingest(), request.indices()); @@ -79,8 +80,7 @@ public static class NodeInfoRequest extends BaseNodeRequest { public NodeInfoRequest() { } - public NodeInfoRequest(String nodeId, NodesInfoRequest request) { - super(nodeId); + public NodeInfoRequest(NodesInfoRequest request) { this.request = request; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java index 19e8fc1929c5d..e22595c187092 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.action.admin.cluster.node.reload; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseActionType; public class NodesReloadSecureSettingsAction - extends Action { + extends StreamableResponseActionType { public static final 
NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction(); public static final String NAME = "cluster:admin/nodes/reload_secure_settings"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index f2fef743a0d37..27860b52557e7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -35,6 +35,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ReloadablePlugin; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -68,8 +69,8 @@ protected NodesReloadSecureSettingsResponse newResponse(NodesReloadSecureSetting } @Override - protected NodeRequest newNodeRequest(String nodeId, NodesReloadSecureSettingsRequest request) { - return new NodeRequest(nodeId, request); + protected NodeRequest newNodeRequest(NodesReloadSecureSettingsRequest request) { + return new NodeRequest(request); } @Override @@ -78,7 +79,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse newNodeResponse() { } @Override - protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest) { + protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest, Task task) { try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) { // reread keystore from config file if (keystore == null) { @@ -116,8 +117,7 @@ public static class NodeRequest extends BaseNodeRequest { public NodeRequest() { } - NodeRequest(String nodeId, NodesReloadSecureSettingsRequest request) { - super(nodeId); + NodeRequest(NodesReloadSecureSettingsRequest request) { this.request = request; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java index bc8c81ef1e0f5..dbe7deed74a73 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.cluster.node.stats; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseActionType; -public class NodesStatsAction extends Action { +public class NodesStatsAction extends StreamableResponseActionType { public static final NodesStatsAction INSTANCE = new NodesStatsAction(); public static final String NAME = "cluster:monitor/nodes/stats"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 1028da916a2c1..f399304a8a10e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -28,6 +28,7 @@ import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.node.NodeService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -55,8 +56,8 @@ protected NodesStatsResponse newResponse(NodesStatsRequest request, List { +public class CancelTasksAction extends ActionType { public static final CancelTasksAction INSTANCE = new CancelTasksAction(); public static final String NAME = "cluster:admin/tasks/cancel"; @@ -34,11 +34,6 @@ private CancelTasksAction() { super(NAME); } - @Override - public CancelTasksResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - @Override public Writeable.Reader getResponseReader() { return CancelTasksResponse::new; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java index 4e88963de4c98..978e07555b517 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.node.tasks.get; -import org.elasticsearch.action.Action; +import org.elasticsearch.action.StreamableResponseActionType; /** - * Action for retrieving a list of currently running tasks + * ActionType for retrieving a list of currently running tasks */ -public class GetTaskAction extends Action { +public class GetTaskAction extends StreamableResponseActionType { public static final String TASKS_ORIGIN = "tasks"; public static final GetTaskAction INSTANCE = new GetTaskAction(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index d1d72da544560..2b0ac0233be29 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -56,7 +56,7 @@ import static org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction.waitForCompletionTimeout; /** - * Action to get a single task. If the task isn't running then it'll try to request the status from request index. + * ActionType to get a single task. If the task isn't running then it'll try to request the status from request index. * * The general flow is: *
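The nodes-level transport actions in this section (hot threads, node info, reload secure settings, node stats) all adopt the same two signature changes: newNodeRequest no longer takes the target nodeId, and nodeOperation receives the Task. A minimal sketch of a TransportNodesAction subclass against the new hooks, using hypothetical Example* names; the injected clusterService field and the response construction are assumptions for illustration only:

@Override
protected ExampleNodeRequest newNodeRequest(ExampleNodesRequest request) {
    // The per-node request just wraps the top-level request; BaseNodeRequest no longer
    // carries the node id, so its subclass constructor has no super(nodeId) call.
    return new ExampleNodeRequest(request);
}

@Override
protected ExampleNodeResponse nodeOperation(ExampleNodeRequest nodeRequest, Task task) {
    // The node-level task is now passed in, mirroring the Task handed to masterOperation.
    return new ExampleNodeResponse(clusterService.localNode());
}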