Skip to content

Commit

Permalink
Merge remote-tracking branch 'elastic/master' into retry-sending-snapshot-shard-status-updates
Browse files Browse the repository at this point in the history
  • Loading branch information
original-brownbear committed Jan 24, 2020
2 parents 3378d3d + 44d5ad9 commit d3b05a1
Show file tree
Hide file tree
Showing 84 changed files with 1,276 additions and 687 deletions.
1 change: 1 addition & 0 deletions .ci/bwcVersions
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,5 @@ BWC_VERSION:
- "7.5.2"
- "7.5.3"
- "7.6.0"
- "7.7.0"
- "8.0.0"
Original file line number Diff line number Diff line change
Expand Up @@ -426,7 +426,7 @@ class BuildPlugin implements Plugin<Project> {
dependencyNode.appendNode('groupId', dependency.group)
dependencyNode.appendNode('artifactId', dependency.getDependencyProject().convention.getPlugin(BasePluginConvention).archivesBaseName)
dependencyNode.appendNode('version', dependency.version)
dependencyNode.appendNode('scope', 'runtime')
dependencyNode.appendNode('scope', 'compile')
}
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,12 +1,22 @@
package org.elasticsearch.gradle.testclusters;

import org.elasticsearch.gradle.tool.Boilerplate;
import org.gradle.api.provider.Provider;
import org.gradle.api.services.internal.BuildServiceRegistryInternal;
import org.gradle.api.tasks.CacheableTask;
import org.gradle.api.tasks.Internal;
import org.gradle.api.tasks.Nested;
import org.gradle.api.tasks.testing.Test;
import org.gradle.internal.resources.ResourceLock;
import org.gradle.internal.resources.SharedResource;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;

import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.THROTTLE_SERVICE_NAME;

/**
* Customized version of Gradle {@link Test} task which tracks a collection of {@link ElasticsearchCluster} as a task input. We must do this
Expand Down Expand Up @@ -47,4 +57,19 @@ public Collection<ElasticsearchCluster> getClusters() {
return clusters;
}

@Override
@Internal
public List<ResourceLock> getSharedResources() {
    // Start from whatever shared resources the base Test task already declares.
    final List<ResourceLock> resourceLocks = new ArrayList<>(super.getSharedResources());

    final BuildServiceRegistryInternal registry = getServices().get(BuildServiceRegistryInternal.class);
    final Provider<TestClustersThrottle> throttle = Boilerplate.getBuildService(registry, THROTTLE_SERVICE_NAME);
    final SharedResource throttleResource = registry.forService(throttle);

    // One lease per cluster node, capped at the throttle's maximum concurrent usages.
    int totalNodes = 0;
    for (ElasticsearchCluster cluster : clusters) {
        totalNodes += cluster.getNodes().size();
    }
    if (totalNodes > 0) {
        int leases = Math.min(totalNodes, throttleResource.getMaxUsages());
        resourceLocks.add(throttleResource.getResourceLock(leases));
    }

    return Collections.unmodifiableList(resourceLocks);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import org.elasticsearch.gradle.DistributionDownloadPlugin;
import org.elasticsearch.gradle.ReaperPlugin;
import org.elasticsearch.gradle.ReaperService;
import org.elasticsearch.gradle.tool.Boilerplate;
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
Expand All @@ -30,53 +31,50 @@
import org.gradle.api.invocation.Gradle;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.provider.Provider;
import org.gradle.api.tasks.TaskState;

import java.io.File;

public class TestClustersPlugin implements Plugin<Project> {

private static final String LIST_TASK_NAME = "listTestClusters";
public static final String EXTENSION_NAME = "testClusters";
private static final String REGISTRY_EXTENSION_NAME = "testClustersRegistry";
public static final String THROTTLE_SERVICE_NAME = "testClustersThrottle";

private static final String LIST_TASK_NAME = "listTestClusters";
private static final String REGISTRY_SERVICE_NAME = "testClustersRegistry";
private static final Logger logger = Logging.getLogger(TestClustersPlugin.class);

private ReaperService reaper;

@Override
public void apply(Project project) {
project.getPlugins().apply(DistributionDownloadPlugin.class);

project.getRootProject().getPluginManager().apply(ReaperPlugin.class);
reaper = project.getRootProject().getExtensions().getByType(ReaperService.class);

ReaperService reaper = project.getRootProject().getExtensions().getByType(ReaperService.class);

// enable the DSL to describe clusters
NamedDomainObjectContainer<ElasticsearchCluster> container = createTestClustersContainerExtension(project);
NamedDomainObjectContainer<ElasticsearchCluster> container = createTestClustersContainerExtension(project, reaper);

// provide a task to be able to list defined clusters.
createListClustersTask(project, container);

if (project.getRootProject().getExtensions().findByName(REGISTRY_EXTENSION_NAME) == null) {
TestClustersRegistry registry = project.getRootProject()
.getExtensions()
.create(REGISTRY_EXTENSION_NAME, TestClustersRegistry.class);

// When we know what tasks will run, we claim the clusters of those task to differentiate between clusters
// that are defined in the build script and the ones that will actually be used in this invocation of gradle
// we use this information to determine when the last task that required the cluster executed so that we can
// terminate the cluster right away and free up resources.
configureClaimClustersHook(project.getGradle(), registry);
// register cluster registry as a global build service
project.getGradle().getSharedServices().registerIfAbsent(REGISTRY_SERVICE_NAME, TestClustersRegistry.class, spec -> {});

// Before each task, we determine if a cluster needs to be started for that task.
configureStartClustersHook(project.getGradle(), registry);
// register throttle so we only run at most max-workers/2 nodes concurrently
project.getGradle()
.getSharedServices()
.registerIfAbsent(
THROTTLE_SERVICE_NAME,
TestClustersThrottle.class,
spec -> spec.getMaxParallelUsages().set(project.getGradle().getStartParameter().getMaxWorkerCount() / 2)
);

// After each task we determine if there are clusters that are no longer needed.
configureStopClustersHook(project.getGradle(), registry);
}
// register cluster hooks
project.getRootProject().getPluginManager().apply(TestClustersHookPlugin.class);
}

private NamedDomainObjectContainer<ElasticsearchCluster> createTestClustersContainerExtension(Project project) {
private NamedDomainObjectContainer<ElasticsearchCluster> createTestClustersContainerExtension(Project project, ReaperService reaper) {
// Create an extensions that allows describing clusters
NamedDomainObjectContainer<ElasticsearchCluster> container = project.container(
ElasticsearchCluster.class,
Expand All @@ -95,52 +93,78 @@ private void createListClustersTask(Project project, NamedDomainObjectContainer<
);
}

private static void configureClaimClustersHook(Gradle gradle, TestClustersRegistry registry) {
// Once we know all the tasks that need to execute, we claim all the clusters that belong to those and count the
// claims so we'll know when it's safe to stop them.
gradle.getTaskGraph().whenReady(taskExecutionGraph -> {
taskExecutionGraph.getAllTasks()
.stream()
.filter(task -> task instanceof TestClustersAware)
.map(task -> (TestClustersAware) task)
.flatMap(task -> task.getClusters().stream())
.forEach(registry::claimCluster);
});
}
static class TestClustersHookPlugin implements Plugin<Project> {
@Override
public void apply(Project project) {
if (project != project.getRootProject()) {
throw new IllegalStateException(this.getClass().getName() + " can only be applied to the root project.");
}

Provider<TestClustersRegistry> registryProvider = Boilerplate.getBuildService(
project.getGradle().getSharedServices(),
REGISTRY_SERVICE_NAME
);
TestClustersRegistry registry = registryProvider.get();

// When we know what tasks will run, we claim the clusters of those task to differentiate between clusters
// that are defined in the build script and the ones that will actually be used in this invocation of gradle
// we use this information to determine when the last task that required the cluster executed so that we can
// terminate the cluster right away and free up resources.
configureClaimClustersHook(project.getGradle(), registry);

// Before each task, we determine if a cluster needs to be started for that task.
configureStartClustersHook(project.getGradle(), registry);

// After each task we determine if there are clusters that are no longer needed.
configureStopClustersHook(project.getGradle(), registry);
}

private static void configureStartClustersHook(Gradle gradle, TestClustersRegistry registry) {
gradle.addListener(new TaskActionListener() {
@Override
public void beforeActions(Task task) {
if (task instanceof TestClustersAware == false) {
return;
private static void configureClaimClustersHook(Gradle gradle, TestClustersRegistry registry) {
// Once we know all the tasks that need to execute, we claim all the clusters that belong to those and count the
// claims so we'll know when it's safe to stop them.
gradle.getTaskGraph().whenReady(taskExecutionGraph -> {
taskExecutionGraph.getAllTasks()
.stream()
.filter(task -> task instanceof TestClustersAware)
.map(task -> (TestClustersAware) task)
.flatMap(task -> task.getClusters().stream())
.forEach(registry::claimCluster);
});
}

private static void configureStartClustersHook(Gradle gradle, TestClustersRegistry registry) {
gradle.addListener(new TaskActionListener() {
@Override
public void beforeActions(Task task) {
if (task instanceof TestClustersAware == false) {
return;
}
// we only start the cluster before the actions, so we'll not start it if the task is up-to-date
TestClustersAware awareTask = (TestClustersAware) task;
awareTask.beforeStart();
awareTask.getClusters().forEach(registry::maybeStartCluster);
}
// we only start the cluster before the actions, so we'll not start it if the task is up-to-date
TestClustersAware awareTask = (TestClustersAware) task;
awareTask.beforeStart();
awareTask.getClusters().forEach(registry::maybeStartCluster);
}

@Override
public void afterActions(Task task) {}
});
}
@Override
public void afterActions(Task task) {}
});
}

private static void configureStopClustersHook(Gradle gradle, TestClustersRegistry registry) {
gradle.addListener(new TaskExecutionListener() {
@Override
public void afterExecute(Task task, TaskState state) {
if (task instanceof TestClustersAware == false) {
return;
private static void configureStopClustersHook(Gradle gradle, TestClustersRegistry registry) {
gradle.addListener(new TaskExecutionListener() {
@Override
public void afterExecute(Task task, TaskState state) {
if (task instanceof TestClustersAware == false) {
return;
}
// always unclaim the cluster, even if _this_ task is up-to-date, as others might not have been
// and caused the cluster to start.
((TestClustersAware) task).getClusters().forEach(cluster -> registry.stopCluster(cluster, state.getFailure() != null));
}
// always unclaim the cluster, even if _this_ task is up-to-date, as others might not have been
// and caused the cluster to start.
((TestClustersAware) task).getClusters().forEach(cluster -> registry.stopCluster(cluster, state.getFailure() != null));
}

@Override
public void beforeExecute(Task task) {}
});
@Override
public void beforeExecute(Task task) {}
});
}
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,15 @@

import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.services.BuildService;
import org.gradle.api.services.BuildServiceParameters;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class TestClustersRegistry {
public abstract class TestClustersRegistry implements BuildService<BuildServiceParameters.None> {
private static final Logger logger = Logging.getLogger(TestClustersRegistry.class);
private static final String TESTCLUSTERS_INSPECT_FAILURE = "testclusters.inspect.failure";
private final Boolean allowClusterToSurvive = Boolean.valueOf(System.getProperty(TESTCLUSTERS_INSPECT_FAILURE, "false"));
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
package org.elasticsearch.gradle.testclusters;

import org.gradle.api.services.BuildService;
import org.gradle.api.services.BuildServiceParameters;

/**
 * Marker {@link BuildService} used purely as a shared-resource throttle: tasks that run
 * test clusters acquire leases against this service (see the registration under
 * {@code THROTTLE_SERVICE_NAME}) to limit how many cluster nodes run concurrently.
 * It carries no state or parameters of its own.
 */
public abstract class TestClustersThrottle implements BuildService<BuildServiceParameters.None> {}
Original file line number Diff line number Diff line change
Expand Up @@ -19,12 +19,17 @@
package org.elasticsearch.gradle.tool;

import org.gradle.api.Action;
import org.gradle.api.GradleException;
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.PolymorphicDomainObjectContainer;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.UnknownTaskException;
import org.gradle.api.plugins.JavaPluginConvention;
import org.gradle.api.provider.Provider;
import org.gradle.api.services.BuildService;
import org.gradle.api.services.BuildServiceRegistration;
import org.gradle.api.services.BuildServiceRegistry;
import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.TaskContainer;
import org.gradle.api.tasks.TaskProvider;
Expand Down Expand Up @@ -102,4 +107,14 @@ public static TaskProvider<?> findByName(TaskContainer tasks, String name) {

return task;
}

/**
 * Looks up a previously registered shared build service by name.
 *
 * @param registry the build service registry to search
 * @param name     the name the service was registered under
 * @return a provider for the registered service, cast to the requested type
 * @throws GradleException if no service is registered under {@code name}
 */
@SuppressWarnings("unchecked")
public static <T extends BuildService<?>> Provider<T> getBuildService(BuildServiceRegistry registry, String name) {
    final BuildServiceRegistration<?, ?> found = registry.getRegistrations().findByName(name);
    if (found != null) {
        // Cast is safe by convention: callers request the type they registered under this name.
        return (Provider<T>) found.getService();
    }
    throw new GradleException("Unable to find build service with name '" + name + "'.");
}
}
1 change: 1 addition & 0 deletions distribution/docker/build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -208,6 +208,7 @@ subprojects { Project subProject ->
def tarFile = "${parent.projectDir}/build/elasticsearch${oss ? '-oss' : ''}_test.${VersionProperties.elasticsearch}.docker.tar"

final Task exportDockerImageTask = task(exportTaskName, type: LoggedExec) {
inputs.file("${parent.projectDir}/build/markers/${buildTaskName}.marker")
executable 'docker'
outputs.file(tarFile)
args "save",
Expand Down
27 changes: 21 additions & 6 deletions distribution/docker/src/docker/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,17 @@ RUN chmod 0775 config data logs
COPY config/elasticsearch.yml config/log4j2.properties config/
RUN chmod 0660 config/elasticsearch.yml config/log4j2.properties

# `tini` is a tiny but valid init for containers. This is used to cleanly
# control how ES and any child processes are shut down.
#
# The tini GitHub page gives instructions for verifying the binary using
# gpg, but the keyservers are slow to return the key and this can fail the
# build. Instead, we check the binary against a checksum that we have
# computed.
ADD https://github.com/krallin/tini/releases/download/v0.18.0/tini /tini
COPY config/tini.sha512 /tini.sha512
RUN sha512sum -c /tini.sha512 && chmod +x /tini

################################################################################
# Build stage 1 (the actual elasticsearch image):
# Copy elasticsearch from stage 0
Expand All @@ -45,6 +56,8 @@ FROM centos:7

ENV ELASTIC_CONTAINER true

COPY --from=builder /tini /tini

RUN for iter in {1..10}; do yum update --setopt=tsflags=nodocs -y && \
yum install --setopt=tsflags=nodocs -y nc shadow-utils zip unzip && \
yum clean all && exit_code=0 && break || exit_code=\$? && echo "yum error: retry \$iter in 10s" && sleep 10; done; \
Expand All @@ -65,14 +78,14 @@ RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts /usr/share/elasticsearch/jdk

ENV PATH /usr/share/elasticsearch/bin:\$PATH

COPY --chown=1000:0 bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
COPY bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh

# Openshift overrides USER and uses ones with randomly uid>1024 and gid=0
# Allow ENTRYPOINT (and ES) to run even with a different user
RUN chgrp 0 /usr/local/bin/docker-entrypoint.sh && \
chmod g=u /etc/passwd && \
RUN chmod g=u /etc/passwd && \
chmod 0775 /usr/local/bin/docker-entrypoint.sh

# Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks.
RUN find / -xdev -perm -4000 -exec chmod ug-s {} +

EXPOSE 9200 9300

LABEL org.label-schema.build-date="${build_date}" \
Expand All @@ -95,7 +108,9 @@ LABEL org.label-schema.build-date="${build_date}" \
org.opencontainers.image.vendor="Elastic" \
org.opencontainers.image.version="${version}"

ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
USER elasticsearch:root

ENTRYPOINT ["/tini", "--", "/usr/local/bin/docker-entrypoint.sh"]
# Dummy overridable parameter parsed by entrypoint
CMD ["eswrapper"]

Expand Down
Loading

0 comments on commit d3b05a1

Please sign in to comment.