From eb410908e6a2dc496aa0e3772b2401749dd82b26 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Wed, 19 Jan 2022 17:04:17 +0100
Subject: [PATCH 01/44] fix(kafka-runner): missing server-type condition for the scheduler in order to monitor its streams

---
 .../main/java/io/kestra/runner/kafka/KafkaStreamEndpoint.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaStreamEndpoint.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaStreamEndpoint.java
index fcf06ba648a..0f3bfd0dcc0 100644
--- a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaStreamEndpoint.java
+++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaStreamEndpoint.java
@@ -19,7 +19,7 @@ import java.util.stream.Collectors;
 
 @Endpoint(id = "kafkastreams", defaultSensitive = false)
-@Requires(property = "kestra.server-type", pattern = "(EXECUTOR|STANDALONE)")
+@Requires(property = "kestra.server-type", pattern = "(EXECUTOR|STANDALONE|SCHEDULER)")
 @KafkaQueueEnabled
 public class KafkaStreamEndpoint implements ApplicationEventListener {
     private Map streams;

From 4ee56643f9f1e957a1e9d5e37d7c84f086932523 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Thu, 20 Jan 2022 12:38:52 +0100
Subject: [PATCH 02/44] fix(ui): remove table window scrollbar

---
 ui/src/styles/layout/bootstrap.scss | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/ui/src/styles/layout/bootstrap.scss b/ui/src/styles/layout/bootstrap.scss
index f60f7233d99..6e719a29ecb 100644
--- a/ui/src/styles/layout/bootstrap.scss
+++ b/ui/src/styles/layout/bootstrap.scss
@@ -224,6 +224,10 @@ code[class*="language-"], pre[class*="language-"] {
 
 table {
     font-size: $font-size-sm;
+
+    thead th .sr-only {
+        display: none;
+    }
 }
 
 .table-responsive {

From 79bcc338af9665d1668e7f40df6cae04a8c2a7d1 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Fri, 21 Jan 2022 09:58:36 +0100
Subject: [PATCH 03/44] chore(refactor): add some helper tools

---
 .../kestra/core/repositories/ArrayListTotal.java  | 14 ++++++++++++++
 .../kestra/webserver/utils/AutocompleteUtils.java | 12 +++++++++---
 2 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/core/src/main/java/io/kestra/core/repositories/ArrayListTotal.java b/core/src/main/java/io/kestra/core/repositories/ArrayListTotal.java
index 1342b45fd18..de1f3963ba1 100644
--- a/core/src/main/java/io/kestra/core/repositories/ArrayListTotal.java
+++ b/core/src/main/java/io/kestra/core/repositories/ArrayListTotal.java
@@ -6,6 +6,9 @@ import java.util.ArrayList;
 import java.util.List;
+import java.util.function.Function;
+
+import static java.util.stream.Collectors.toCollection;
 
 @Getter
 @NoArgsConstructor
@@ -25,8 +28,19 @@ public static ArrayListTotal of(Pageable pageable, List list) {
         return new ArrayListTotal(list.subList(from, to), size);
     }
 
+    public ArrayListTotal(long total) {
+        this.total = total;
+    }
+
     public ArrayListTotal(List list, long total) {
         super(list);
         this.total = total;
     }
+
+    public ArrayListTotal map(Function map) {
+        return this
+            .stream()
+            .map(map)
+            .collect(toCollection(() -> new ArrayListTotal(this.total)));
+    }
 }

diff --git a/webserver/src/main/java/io/kestra/webserver/utils/AutocompleteUtils.java b/webserver/src/main/java/io/kestra/webserver/utils/AutocompleteUtils.java
index 80f8c4fe34d..99a765457af 100644
--- a/webserver/src/main/java/io/kestra/webserver/utils/AutocompleteUtils.java
+++ b/webserver/src/main/java/io/kestra/webserver/utils/AutocompleteUtils.java
@@ -1,16 +1,15 @@
 package io.kestra.webserver.utils;
 
 import 
io.micronaut.http.exceptions.HttpStatusException; -import io.kestra.core.repositories.ArrayListTotal; -import java.util.ArrayList; import java.util.List; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; public class AutocompleteUtils { @SafeVarargs - public static List from(List... lists) throws HttpStatusException { + public static List map(Function map, List... lists) throws HttpStatusException { Stream stream = Stream.empty(); for (List list : lists) { @@ -22,6 +21,13 @@ public static List from(List... lists) throws HttpStatusException { return stream .distinct() + .map(map) .collect(Collectors.toList()); } + + @SafeVarargs + public static List from(List... lists) throws HttpStatusException { + return AutocompleteUtils + .map(o -> o, lists); + } } From 9bd50b964b894878ca97c235f475411a1661133b Mon Sep 17 00:00:00 2001 From: Ludovic DEHON Date: Fri, 21 Jan 2022 18:48:19 +0100 Subject: [PATCH 04/44] feat(core): add a default lru cache on pebble template --- cli/src/main/resources/application.yml | 3 ++ .../kestra/core/runners/VariableRenderer.java | 33 ++++++++++------ .../core/runners/pebble/PebbleLruCache.java | 38 +++++++++++++++++++ 3 files changed, 63 insertions(+), 11 deletions(-) create mode 100644 core/src/main/java/io/kestra/core/runners/pebble/PebbleLruCache.java diff --git a/cli/src/main/resources/application.yml b/cli/src/main/resources/application.yml index dafcbb056d3..e792618eac3 100644 --- a/cli/src/main/resources/application.yml +++ b/cli/src/main/resources/application.yml @@ -232,6 +232,9 @@ kestra: variables: env-vars-prefix: KESTRA_ globals: {} + disable-handlebars: true + cacheEnabled: true + cacheSize: 1000 metrics: prefix: kestra diff --git a/core/src/main/java/io/kestra/core/runners/VariableRenderer.java b/core/src/main/java/io/kestra/core/runners/VariableRenderer.java index 292bab2eace..a74184f2b3c 100644 --- a/core/src/main/java/io/kestra/core/runners/VariableRenderer.java +++ b/core/src/main/java/io/kestra/core/runners/VariableRenderer.java @@ -8,7 +8,6 @@ import com.mitchellbosecke.pebble.PebbleEngine; import com.mitchellbosecke.pebble.error.AttributeNotFoundException; import com.mitchellbosecke.pebble.error.PebbleException; -import com.mitchellbosecke.pebble.error.RootAttributeNotFoundException; import com.mitchellbosecke.pebble.extension.AbstractExtension; import com.mitchellbosecke.pebble.template.PebbleTemplate; import io.kestra.core.exceptions.IllegalVariableEvaluationException; @@ -16,8 +15,9 @@ import io.kestra.core.runners.handlebars.helpers.*; import io.kestra.core.runners.pebble.ExtensionCustomizer; import io.kestra.core.runners.pebble.JsonWriter; +import io.kestra.core.runners.pebble.PebbleLruCache; import io.micronaut.context.ApplicationContext; -import io.micronaut.context.annotation.Value; +import io.micronaut.context.annotation.ConfigurationProperties; import java.io.IOException; import java.io.StringWriter; @@ -25,22 +25,20 @@ import java.util.*; import jakarta.inject.Inject; import jakarta.inject.Singleton; +import lombok.Getter; @Singleton public class VariableRenderer { private Handlebars handlebars; private final PebbleEngine pebbleEngine; - private final boolean disableHandleBars; + private final VariableConfiguration variableConfiguration; @SuppressWarnings("unchecked") @Inject - public VariableRenderer( - ApplicationContext applicationContext, - @Value("${kestra.variables.disable-handlebars:true}") boolean disableHandleBars - ) { - this.disableHandleBars = disableHandleBars; + public 
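/* Editor's note, a hedged sketch and not part of the patch: the "kestra.variables"
   keys added to application.yml above are expected to bind onto the
   VariableConfiguration bean introduced at the bottom of this file through
   Micronaut's @ConfigurationProperties relaxed binding, roughly:

     kestra:
       variables:
         disable-handlebars: true   # -> Boolean disableHandlebars
         cache-enabled: true        # -> Boolean cacheEnabled
         cache-size: 1000           # -> Integer cacheSize

   Patch 04 writes the last two keys camelCase; patch 06 below normalizes them
   to kebab-case, which is the canonical Micronaut form. */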
VariableRenderer(ApplicationContext applicationContext, VariableConfiguration variableConfiguration) {
+        this.variableConfiguration = variableConfiguration;
 
-        if (!disableHandleBars) {
+        if (!variableConfiguration.getDisableHandlebars()) {
             this.handlebars = new Handlebars()
                 .with(EscapingStrategy.NOOP)
                 .registerHelpers(ConditionalHelpers.class)
@@ -73,13 +71,18 @@ public VariableRenderer(
         PebbleEngine.Builder pebbleBuilder = new PebbleEngine.Builder()
             .registerExtensionCustomizer(ExtensionCustomizer::new)
             .strictVariables(true)
-            .cacheActive(false)
+            .cacheActive(variableConfiguration.getCacheEnabled())
+            .newLineTrimming(false)
             .autoEscaping(false);
 
         applicationContext.getBeansOfType(AbstractExtension.class)
             .forEach(pebbleBuilder::extension);
 
+        if (variableConfiguration.getCacheEnabled()) {
+            pebbleBuilder.templateCache(new PebbleLruCache(variableConfiguration.getCacheSize()));
+        }
+
         pebbleEngine = pebbleBuilder.build();
     }
 
@@ -100,7 +103,7 @@ public String recursiveRender(String inline, Map variables) thro
             compiledTemplate.evaluate(writer, variables);
             current = writer.toString();
         } catch (IOException | PebbleException e) {
-            if (disableHandleBars) {
+            if (variableConfiguration.disableHandlebars) {
                 if (e instanceof PebbleException) {
                     throw properPebbleException((PebbleException) e);
                 }
@@ -179,4 +182,12 @@ public List render(List list, Map variables) thr
 
         return result;
     }
+
+    @Getter
+    @ConfigurationProperties("kestra.variables")
+    public static class VariableConfiguration {
+        Boolean disableHandlebars;
+        Boolean cacheEnabled;
+        Integer cacheSize;
+    }
 }

diff --git a/core/src/main/java/io/kestra/core/runners/pebble/PebbleLruCache.java b/core/src/main/java/io/kestra/core/runners/pebble/PebbleLruCache.java
new file mode 100644
index 00000000000..ea6104d9593
--- /dev/null
+++ b/core/src/main/java/io/kestra/core/runners/pebble/PebbleLruCache.java
@@ -0,0 +1,38 @@
+package io.kestra.core.runners.pebble;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.mitchellbosecke.pebble.cache.PebbleCache;
+import com.mitchellbosecke.pebble.template.PebbleTemplate;
+import lombok.extern.slf4j.Slf4j;
+
+import java.util.concurrent.ExecutionException;
+import java.util.function.Function;
+
+@Slf4j
+public class PebbleLruCache implements PebbleCache {
+    Cache cache;
+
+    public PebbleLruCache(int maximumSize) {
+        cache = CacheBuilder.newBuilder()
+            .initialCapacity(250)
+            .maximumSize(maximumSize)
+            .build();
+    }
+
+    @Override
+    public PebbleTemplate computeIfAbsent(Object key, Function mappingFunction) {
+        try {
+            return cache.get(key, () -> mappingFunction.apply(key));
+        } catch (ExecutionException e) {
+            log.warn("Error during cache compute", e);
+
+            return mappingFunction.apply(key);
+        }
+    }
+
+    @Override
+    public void invalidateAll() {
+        cache.invalidateAll();
+    }
+}

From 00ce27d0219c4efabceedf96398cba67bae03d26 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Fri, 21 Jan 2022 18:54:32 +0100
Subject: [PATCH 05/44] feat(runner-kafka): performance optimization of KafkaFlowListeners

The store is fetched by the scheduler every second, reading from RocksDB
and deserializing all flows, so keep a cache in memory.
---
 .../runner/kafka/KafkaFlowListeners.java | 25 ++++++++++++++-----
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java
index b628d5fab00..b9edc5cfb41 100644
--- 
a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java @@ -1,6 +1,5 @@ package io.kestra.runner.kafka; -import com.google.common.collect.Streams; import io.kestra.runner.kafka.services.*; import lombok.Getter; import lombok.NoArgsConstructor; @@ -13,7 +12,6 @@ import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.errors.InvalidStateStoreException; import org.apache.kafka.streams.kstream.*; -import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.QueryableStoreTypes; import org.apache.kafka.streams.state.ReadOnlyKeyValueStore; @@ -40,6 +38,7 @@ public class KafkaFlowListeners implements FlowListenersInterface { private SafeKeyValueStore store; private final List>> consumers = new ArrayList<>(); private final KafkaStreamService.Stream stream; + private List flows; @Inject public KafkaFlowListeners( @@ -72,6 +71,9 @@ public KafkaFlowListeners( log.warn(e.getMessage(), e); } } else { + synchronized (this) { + flows = null; + } this.send(new ArrayList<>()); } }); @@ -180,6 +182,10 @@ public Topology topology() { .filter((key, value) -> value != null) .toStream() .peek((key, value) -> { + synchronized (this) { + flows = null; + } + send(flows()); }); @@ -199,10 +205,17 @@ public List flows() { return Collections.emptyList(); } - return this.store - .toStream() - .filter(flow -> flow != null && !flow.isDeleted()) - .collect(Collectors.toList()); + synchronized (this) { + if (this.flows == null) { + this.flows = this.store + .toStream() + .filter(flow -> flow != null && !flow.isDeleted()) + .collect(Collectors.toList()); + } + } + + //noinspection ConstantConditions + return this.flows == null ? 
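/* Editor's note, a minimal sketch of the caching pattern used in this class,
   with hypothetical names, not the patch's own code: the cached list is
   cleared (set to null) under synchronization whenever the topology sees a
   flow change, then lazily rebuilt from the RocksDB-backed store on the next
   read, so the scheduler's once-per-second poll no longer deserializes every
   flow:

     private List<Flow> flows;                          // cached view

     synchronized void invalidate() { this.flows = null; }

     List<Flow> flows(Supplier<List<Flow>> loader) {
         synchronized (this) {
             if (this.flows == null) {
                 this.flows = loader.get();             // rebuild from the store
             }
         }
         // may have been invalidated again concurrently, hence the guard
         return this.flows == null ? new ArrayList<>() : this.flows;
     }
 */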
new ArrayList<>() : this.flows; } private void send(List flows) { From 5675a3fcfc486ebdb64ee82b589f0c133e31fb1c Mon Sep 17 00:00:00 2001 From: Ludovic DEHON Date: Fri, 21 Jan 2022 19:08:35 +0100 Subject: [PATCH 06/44] fix(core): provide default value for variable configuration mostly for unit test --- cli/src/main/resources/application.yml | 4 ++-- .../kestra/core/runners/VariableRenderer.java | 22 +++++++++++++------ .../core/runners/pebble/PebbleLruCache.java | 5 ++--- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/cli/src/main/resources/application.yml b/cli/src/main/resources/application.yml index e792618eac3..f11d3e73e73 100644 --- a/cli/src/main/resources/application.yml +++ b/cli/src/main/resources/application.yml @@ -233,8 +233,8 @@ kestra: env-vars-prefix: KESTRA_ globals: {} disable-handlebars: true - cacheEnabled: true - cacheSize: 1000 + cache-enabled: true + cache-size: 1000 metrics: prefix: kestra diff --git a/core/src/main/java/io/kestra/core/runners/VariableRenderer.java b/core/src/main/java/io/kestra/core/runners/VariableRenderer.java index a74184f2b3c..873a23abf4b 100644 --- a/core/src/main/java/io/kestra/core/runners/VariableRenderer.java +++ b/core/src/main/java/io/kestra/core/runners/VariableRenderer.java @@ -23,6 +23,8 @@ import java.io.StringWriter; import java.io.Writer; import java.util.*; + +import io.micronaut.core.annotation.Nullable; import jakarta.inject.Inject; import jakarta.inject.Singleton; import lombok.Getter; @@ -35,10 +37,10 @@ public class VariableRenderer { @SuppressWarnings("unchecked") @Inject - public VariableRenderer(ApplicationContext applicationContext, VariableConfiguration variableConfiguration) { - this.variableConfiguration = variableConfiguration; + public VariableRenderer(ApplicationContext applicationContext, @Nullable VariableConfiguration variableConfiguration) { + this.variableConfiguration = variableConfiguration != null ? 
variableConfiguration : new VariableConfiguration();
 
-        if (!variableConfiguration.getDisableHandlebars()) {
+        if (!this.variableConfiguration.getDisableHandlebars()) {
             this.handlebars = new Handlebars()
                 .with(EscapingStrategy.NOOP)
                 .registerHelpers(ConditionalHelpers.class)
@@ -71,7 +73,7 @@ public VariableRenderer(ApplicationContext applicationContext, VariableConfigura
         PebbleEngine.Builder pebbleBuilder = new PebbleEngine.Builder()
             .registerExtensionCustomizer(ExtensionCustomizer::new)
             .strictVariables(true)
-            .cacheActive(variableConfiguration.getCacheEnabled())
+            .cacheActive(this.variableConfiguration.getCacheEnabled())
             .newLineTrimming(false)
             .autoEscaping(false);
@@ -79,8 +81,8 @@ public VariableRenderer(ApplicationContext applicationContext, VariableConfigura
         applicationContext.getBeansOfType(AbstractExtension.class)
             .forEach(pebbleBuilder::extension);
 
-        if (variableConfiguration.getCacheEnabled()) {
-            pebbleBuilder.templateCache(new PebbleLruCache(variableConfiguration.getCacheSize()));
+        if (this.variableConfiguration.getCacheEnabled()) {
+            pebbleBuilder.templateCache(new PebbleLruCache(this.variableConfiguration.getCacheSize()));
         }
 
         pebbleEngine = pebbleBuilder.build();
@@ -103,7 +105,7 @@ public String recursiveRender(String inline, Map variables) thro
             compiledTemplate.evaluate(writer, variables);
             current = writer.toString();
         } catch (IOException | PebbleException e) {
-            if (variableConfiguration.disableHandlebars) {
+            if (this.variableConfiguration.disableHandlebars) {
                 if (e instanceof PebbleException) {
                     throw properPebbleException((PebbleException) e);
                 }
@@ -186,6 +188,12 @@ public List render(List list, Map variables) thr
     @Getter
     @ConfigurationProperties("kestra.variables")
     public static class VariableConfiguration {
+        public VariableConfiguration() {
+            this.disableHandlebars = true;
+            this.cacheEnabled = true;
+            this.cacheSize = 1000;
+        }
+
         Boolean disableHandlebars;
         Boolean cacheEnabled;
         Integer cacheSize;
     }

diff --git a/core/src/main/java/io/kestra/core/runners/pebble/PebbleLruCache.java b/core/src/main/java/io/kestra/core/runners/pebble/PebbleLruCache.java
index ea6104d9593..ce629fb7852 100644
--- a/core/src/main/java/io/kestra/core/runners/pebble/PebbleLruCache.java
+++ b/core/src/main/java/io/kestra/core/runners/pebble/PebbleLruCache.java
@@ -24,9 +24,8 @@ public PebbleLruCache(int maximumSize) {
     public PebbleTemplate computeIfAbsent(Object key, Function mappingFunction) {
         try {
             return cache.get(key, () -> mappingFunction.apply(key));
-        } catch (ExecutionException e) {
-            log.warn("Error during cache compute", e);
-
+        } catch (Exception e) {
+            // we retry the mapping function in order to let the exception be thrown instead of being captured by the cache
             return mappingFunction.apply(key);
         }
     }

From 0b0e8def0f2c7a104723d3386ba8a319b609ba1a Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Sun, 23 Jan 2022 22:38:13 +0100
Subject: [PATCH 07/44] fix(core): state files must be named so they can easily be used by different tasks from the same flow

---
 .../io/kestra/core/runners/RunContext.java    | 25 +++++-----------
 .../core/storages/StorageInterface.java       | 29 ++++++++++++++++++-
 .../controllers/ExecutionController.java      | 26 ++++++++++-------
 3 files changed, 52 insertions(+), 28 deletions(-)

diff --git a/core/src/main/java/io/kestra/core/runners/RunContext.java b/core/src/main/java/io/kestra/core/runners/RunContext.java
index f2a744e6452..33eb96826d1 100644
--- a/core/src/main/java/io/kestra/core/runners/RunContext.java
+++ b/core/src/main/java/io/kestra/core/runners/RunContext.java
@@ -390,37 
+390,28 @@ private URI putTempFile(File file, String prefix, String name) throws IOExceptio } @SuppressWarnings("unchecked") - private String taskStateFilePathPrefix(Task task) { + private String taskStateFilePathPrefix(String name) { Map taskrun = (Map) this.getVariables().get("taskrun"); - List paths = new ArrayList<>(Arrays.asList( - "tasks", + return "/" + this.storageInterface.statePrefix( ((Map) this.getVariables().get("flow")).get("namespace"), ((Map) this.getVariables().get("flow")).get("id"), - task.getId() - )); - - if (taskrun.containsKey("value")) { - paths.add(taskrun.get("value")); - } - - return "/" + String.join( - "/", - paths + name, + taskrun.getOrDefault("value", null) ); } - public InputStream getTaskStateFile(Task task, String name) throws IOException { - URI uri = URI.create(this.taskStateFilePathPrefix(task)); + public InputStream getTaskStateFile(String state, String name) throws IOException { + URI uri = URI.create(this.taskStateFilePathPrefix(state)); URI resolve = uri.resolve(uri.getPath() + "/" + name); return this.storageInterface.get(resolve); } - public URI putTaskStateFile(File file, Task task, String name) throws IOException { + public URI putTaskStateFile(File file, String state, String name) throws IOException { return this.putTempFile( file, - this.taskStateFilePathPrefix(task), + this.taskStateFilePathPrefix(state), name ); } diff --git a/core/src/main/java/io/kestra/core/storages/StorageInterface.java b/core/src/main/java/io/kestra/core/storages/StorageInterface.java index 5ef5998b86c..f06fa98ca6e 100644 --- a/core/src/main/java/io/kestra/core/storages/StorageInterface.java +++ b/core/src/main/java/io/kestra/core/storages/StorageInterface.java @@ -1,16 +1,20 @@ package io.kestra.core.storages; -import io.micronaut.core.annotation.Introspected; +import com.google.common.base.Charsets; +import com.google.common.hash.Hashing; import io.kestra.core.models.executions.Execution; import io.kestra.core.models.executions.TaskRun; import io.kestra.core.models.flows.Flow; import io.kestra.core.models.flows.Input; import io.kestra.core.models.tasks.Task; import io.kestra.core.utils.Slugify; +import io.micronaut.core.annotation.Introspected; +import io.micronaut.core.annotation.Nullable; import java.io.*; import java.net.URI; import java.net.URISyntaxException; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Optional; @@ -53,6 +57,29 @@ default String executionPrefix(TaskRun taskRun) { ); } + @SuppressWarnings("UnstableApiUsage") + default String statePrefix(String namespace, String flowId, @Nullable String name, @Nullable String value) { + ArrayList paths = new ArrayList<>(List.of( + namespace.replace(".", "/"), + Slugify.of(flowId), + "states" + )); + + if (name != null) { + paths.add(name); + } + + if (value != null) { + paths.add(Hashing + .goodFastHash(64) + .hashString(value, Charsets.UTF_8) + .toString() + ); + } + + return String.join("/", paths); + } + default Optional extractExecutionId(URI path) { Pattern pattern = Pattern.compile("^/(.+)/executions/([^/]+)/", Pattern.CASE_INSENSITIVE); Matcher matcher = pattern.matcher(path.getPath()); diff --git a/webserver/src/main/java/io/kestra/webserver/controllers/ExecutionController.java b/webserver/src/main/java/io/kestra/webserver/controllers/ExecutionController.java index 087954c014c..644528ccef1 100644 --- a/webserver/src/main/java/io/kestra/webserver/controllers/ExecutionController.java +++ 
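[Editor's note, a worked example for the statePrefix() helper above using hypothetical values, not taken from the patch: namespace "io.kestra.tests", flowId "state-flow", name "mystate" and value "counter" should yield a path shaped like

    io/kestra/tests/state-flow/states/mystate/<hash>

since dots in the namespace become path separators, the flow id is slugified, and the value segment is Hashing.goodFastHash(64).hashString("counter", UTF_8). One caveat worth knowing: Guava documents goodFastHash() as randomly seeded each time the Hashing class loads, so the hashed segment is only stable within a single JVM run.]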
b/webserver/src/main/java/io/kestra/webserver/controllers/ExecutionController.java @@ -322,21 +322,27 @@ protected HttpResponse validateFile(String executionId, URI path, String throw new NoSuchElementException("Unable to find flow id '" + executionId + "'"); } - // maybe redirect to correct execution String prefix = storageInterface.executionPrefix(flow.get(), execution.get()); - if (!path.getPath().substring(1).startsWith(prefix)) { - Optional redirectedExecution = storageInterface.extractExecutionId(path); + if (path.getPath().substring(1).startsWith(prefix)) { + return null; + } - if (redirectedExecution.isPresent()) { - return HttpResponse.redirect(URI.create((basePath != null? basePath : "") + - redirect.replace("{executionId}", redirectedExecution.get())) - ); - } + // maybe state + prefix = storageInterface.statePrefix(flow.get().getNamespace(), flow.get().getId(), null, null); + if (path.getPath().substring(1).startsWith(prefix)) { + return null; + } - throw new IllegalArgumentException("Invalid prefix path"); + // maybe redirect to correct execution + Optional redirectedExecution = storageInterface.extractExecutionId(path); + + if (redirectedExecution.isPresent()) { + return HttpResponse.redirect(URI.create((basePath != null? basePath : "") + + redirect.replace("{executionId}", redirectedExecution.get())) + ); } - return null; + throw new IllegalArgumentException("Invalid prefix path"); } /** * Download file binary from uri parameter From 8673accad6adeef3904939d1dd3003a163f58269 Mon Sep 17 00:00:00 2001 From: Ludovic DEHON Date: Wed, 26 Jan 2022 21:43:38 +0100 Subject: [PATCH 08/44] feat(core): add a retry on storage interface --- cli/src/main/resources/application.yml | 6 + .../io/kestra/core/annotations/Retryable.java | 68 ++++++++++ .../io/kestra/core/listeners/RetryEvents.java | 4 +- .../io/kestra/core/runners/RunContext.java | 2 +- .../core/services/ConditionService.java | 3 +- .../core/storages/StorageInterface.java | 14 +- .../intercept/OverrideRetryInterceptor.java | 123 ++++++++++++++++++ .../OverrideRetryInterceptorTest.java | 50 +++++++ .../io/kestra/storage/local/LocalStorage.java | 6 +- ui/src/styles/layout/bootstrap.scss | 4 + .../controllers/ExecutionController.java | 2 +- 11 files changed, 271 insertions(+), 11 deletions(-) create mode 100644 core/src/main/java/io/kestra/core/annotations/Retryable.java create mode 100644 core/src/main/java/io/micronaut/retry/intercept/OverrideRetryInterceptor.java create mode 100644 core/src/test/java/io/micronaut/retry/intercept/OverrideRetryInterceptorTest.java diff --git a/cli/src/main/resources/application.yml b/cli/src/main/resources/application.yml index f11d3e73e73..9628acc4efd 100644 --- a/cli/src/main/resources/application.yml +++ b/cli/src/main/resources/application.yml @@ -39,6 +39,12 @@ endpoints: write-sensitive: false kestra: + retries: + attempts: 5 + multiplier: 2.0 + delay: 1s + maxDelay: "" + kafka: defaults: topic-prefix: "kestra_" diff --git a/core/src/main/java/io/kestra/core/annotations/Retryable.java b/core/src/main/java/io/kestra/core/annotations/Retryable.java new file mode 100644 index 00000000000..be52890d786 --- /dev/null +++ b/core/src/main/java/io/kestra/core/annotations/Retryable.java @@ -0,0 +1,68 @@ +package io.kestra.core.annotations; + +import io.micronaut.aop.Around; +import io.micronaut.context.annotation.AliasFor; +import io.micronaut.context.annotation.Type; +import io.micronaut.retry.annotation.DefaultRetryPredicate; +import io.micronaut.retry.annotation.RetryPredicate; +import 
io.micronaut.retry.intercept.OverrideRetryInterceptor; + +import java.lang.annotation.*; + +import javax.validation.constraints.Digits; + +import static java.lang.annotation.RetentionPolicy.RUNTIME; + +@Inherited +@Documented +@Retention(RUNTIME) +@Target({ElementType.METHOD, ElementType.TYPE, ElementType.ANNOTATION_TYPE}) +@Around +@Type(OverrideRetryInterceptor.class) +public @interface Retryable { + int MAX_INTEGRAL_DIGITS = 4; + + /** + * @return The exception types to include (defaults to all) + */ + Class[] value() default {}; + + /** + * @return The exception types to include (defaults to all) + */ + @AliasFor(member = "value") + Class[] includes() default {}; + + /** + * @return The exception types to exclude (defaults to none) + */ + Class[] excludes() default {}; + + /** + * @return The maximum number of retry attempts + */ + @Digits(integer = MAX_INTEGRAL_DIGITS, fraction = 0) + String attempts() default "${kestra.retries.attempts:5}"; + + /** + * @return The delay between retry attempts + */ + String delay() default "${kestra.retries.delay:1s}"; + + /** + * @return The maximum overall delay + */ + String maxDelay() default "${kestra.retries.max-delay:}"; + + /** + * @return The multiplier to use to calculate the delay + */ + @Digits(integer = 2, fraction = 2) + String multiplier() default "${kestra.retries.multiplier:2.0}"; + + /** + * @return The retry predicate class to use instead of {@link io.micronaut.retry.annotation.Retryable#includes} and {@link io.micronaut.retry.annotation.Retryable#excludes} + * (defaults to none) + */ + Class predicate() default DefaultRetryPredicate.class; +} \ No newline at end of file diff --git a/core/src/main/java/io/kestra/core/listeners/RetryEvents.java b/core/src/main/java/io/kestra/core/listeners/RetryEvents.java index 4ab22224e9c..bb957f2b2b1 100644 --- a/core/src/main/java/io/kestra/core/listeners/RetryEvents.java +++ b/core/src/main/java/io/kestra/core/listeners/RetryEvents.java @@ -13,8 +13,8 @@ public class RetryEvents { void onRetry(final RetryEvent event) { log.info( "Retry from '{}.{}()', attempt {}, overallDelay {}", - event.getSource().getExecutableMethod().getClass().getName(), - event.getSource().getName(), + event.getSource().getTarget().getClass().getName(), + event.getSource().getExecutableMethod().getName(), event.getRetryState().currentAttempt(), event.getRetryState().getOverallDelay() ); diff --git a/core/src/main/java/io/kestra/core/runners/RunContext.java b/core/src/main/java/io/kestra/core/runners/RunContext.java index 33eb96826d1..2f813c96eee 100644 --- a/core/src/main/java/io/kestra/core/runners/RunContext.java +++ b/core/src/main/java/io/kestra/core/runners/RunContext.java @@ -340,7 +340,7 @@ public org.slf4j.Logger logger() { return runContextLogger.logger(); } - public InputStream uriToInputStream(URI uri) throws FileNotFoundException { + public InputStream uriToInputStream(URI uri) throws IOException { if (uri.getScheme().equals("kestra")) { return this.storageInterface.get(uri); } diff --git a/core/src/main/java/io/kestra/core/services/ConditionService.java b/core/src/main/java/io/kestra/core/services/ConditionService.java index 37d7aea0921..7a5116b5e3d 100644 --- a/core/src/main/java/io/kestra/core/services/ConditionService.java +++ b/core/src/main/java/io/kestra/core/services/ConditionService.java @@ -18,10 +18,12 @@ import java.util.List; import java.util.stream.Collectors; import jakarta.inject.Inject; +import jakarta.inject.Singleton; /** * Provides business logic to manipulate {@link Condition} */ 
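/* Editor's note, a hedged usage sketch for the io.kestra.core.annotations.Retryable
   annotation introduced by this patch (hypothetical bean, not part of the diff).
   The defaults resolve from the kestra.retries.* properties added to
   application.yml, so a bare annotation gives 5 attempts with exponential
   backoff starting at 1s:

     @Singleton
     public class MyStorageClient {
         @Retryable(includes = {IOException.class}, excludes = {FileNotFoundException.class})
         public InputStream fetch(URI uri) throws IOException {
             // any IOException other than FileNotFoundException is retried
             return doFetch(uri);
         }
     }
 */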
+@Singleton public class ConditionService { @Inject private RunContextFactory runContextFactory; @@ -97,7 +99,6 @@ public ConditionContext conditionContext(RunContext runContext, Flow flow, @Null .build(); } - public ConditionContext conditionContext(RunContext runContext, Flow flow, @Nullable Execution execution) { return this.conditionContext(runContext, flow, execution, null); } diff --git a/core/src/main/java/io/kestra/core/storages/StorageInterface.java b/core/src/main/java/io/kestra/core/storages/StorageInterface.java index f06fa98ca6e..1e4dfc977a1 100644 --- a/core/src/main/java/io/kestra/core/storages/StorageInterface.java +++ b/core/src/main/java/io/kestra/core/storages/StorageInterface.java @@ -2,6 +2,7 @@ import com.google.common.base.Charsets; import com.google.common.hash.Hashing; +import io.kestra.core.annotations.Retryable; import io.kestra.core.models.executions.Execution; import io.kestra.core.models.executions.TaskRun; import io.kestra.core.models.flows.Flow; @@ -23,14 +24,19 @@ @Introspected public interface StorageInterface { - InputStream get(URI uri) throws FileNotFoundException; + @Retryable(includes = {IOException.class}, excludes = {FileNotFoundException.class}) + InputStream get(URI uri) throws IOException; - Long size(URI uri) throws FileNotFoundException; + @Retryable(includes = {IOException.class}, excludes = {FileNotFoundException.class}) + Long size(URI uri) throws IOException; + @Retryable(includes = {IOException.class}) URI put(URI uri, InputStream data) throws IOException; + @Retryable(includes = {IOException.class}) boolean delete(URI uri) throws IOException; + @Retryable(includes = {IOException.class}) List deleteByPrefix(URI storagePrefix) throws IOException; default String executionPrefix(Flow flow, Execution execution) { @@ -91,7 +97,7 @@ default Optional extractExecutionId(URI path) { return Optional.of(matcher.group(2)); } - default URI uri(Flow flow, Execution execution, String inputName, String file) throws URISyntaxException { + default URI uri(Flow flow, Execution execution, String inputName, String file) throws URISyntaxException { return new URI("/" + String.join( "/", Arrays.asList( @@ -103,6 +109,7 @@ default URI uri(Flow flow, Execution execution, String inputName, String file) t )); } + @Retryable(includes = {IOException.class}) default URI from(Flow flow, Execution execution, String input, File file) throws IOException { try { return this.put( @@ -114,6 +121,7 @@ default URI from(Flow flow, Execution execution, String input, File file) throws } } + @Retryable(includes = {IOException.class}) default URI from(Flow flow, Execution execution, Input input, File file) throws IOException { return this.from(flow, execution, input.getName(), file); } diff --git a/core/src/main/java/io/micronaut/retry/intercept/OverrideRetryInterceptor.java b/core/src/main/java/io/micronaut/retry/intercept/OverrideRetryInterceptor.java new file mode 100644 index 00000000000..931396167e6 --- /dev/null +++ b/core/src/main/java/io/micronaut/retry/intercept/OverrideRetryInterceptor.java @@ -0,0 +1,123 @@ +package io.micronaut.retry.intercept; + +import io.kestra.core.annotations.Retryable; +import io.micronaut.aop.InterceptPhase; +import io.micronaut.aop.InterceptedMethod; +import io.micronaut.aop.MethodInterceptor; +import io.micronaut.aop.MethodInvocationContext; +import io.micronaut.context.event.ApplicationEventPublisher; +import io.micronaut.core.annotation.AnnotationValue; +import io.micronaut.core.annotation.Nullable; +import 
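/* Editor's note, the arithmetic implied by the defaults above (attempts = 5,
   delay = 1s, multiplier = 2.0), assuming Micronaut's SimpleRetry grows the
   delay geometrically between attempts:

     attempt 1 fails -> sleep ~1s
     attempt 2 fails -> sleep ~2s
     attempt 3 fails -> sleep ~4s
     attempt 4 fails -> sleep ~8s
     attempt 5 fails -> canRetry() is false, the original Throwable is rethrown

   That is roughly 15s of cumulative backoff before the caller sees the
   failure; maxDelay (empty by default) would cap the overall wait. */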
io.micronaut.core.convert.value.MutableConvertibleValues; +import io.micronaut.retry.RetryState; +import io.micronaut.retry.annotation.DefaultRetryPredicate; +import io.micronaut.retry.event.RetryEvent; +import jakarta.inject.Singleton; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +/** + * Replace {@link DefaultRetryInterceptor} only to catch Throwable + */ +@Singleton +public class OverrideRetryInterceptor implements MethodInterceptor { + private static final Logger LOG = LoggerFactory.getLogger(OverrideRetryInterceptor.class); + private final ApplicationEventPublisher eventPublisher; + + public OverrideRetryInterceptor(ApplicationEventPublisher eventPublisher) { + this.eventPublisher = eventPublisher; + } + + @Override + public int getOrder() { + return InterceptPhase.RETRY.getPosition(); + } + + @Nullable + @Override + public Object intercept(MethodInvocationContext context) { + Optional> opt = context.findAnnotation(Retryable.class); + if (opt.isEmpty()) { + return context.proceed(); + } + + AnnotationValue retry = opt.get(); + + MutableRetryState retryState = new SimpleRetry( + retry.get("attempts", Integer.class).orElse(5), + retry.get("multiplier", Double.class).orElse(2D), + retry.get("delay", Duration.class).orElse(Duration.ofSeconds(1)), + retry.get("maxDelay", Duration.class).orElse(null), + new DefaultRetryPredicate(resolveIncludes(retry, "includes"), resolveIncludes(retry, "excludes")) + ); + + MutableConvertibleValues attrs = context.getAttributes(); + attrs.put(RetryState.class.getName(), retry); + + InterceptedMethod interceptedMethod = InterceptedMethod.of(context); + try { + retryState.open(); + + Object result = retrySync(context, retryState, interceptedMethod); + switch (interceptedMethod.resultType()) { + case SYNCHRONOUS: + retryState.close(null); + return result; + default: + return interceptedMethod.unsupported(); + } + } catch (Exception e) { + return interceptedMethod.handleException(e); + } + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private static List> resolveIncludes(AnnotationValue retry, String includes) { + Class[] values = retry.classValues(includes); + return (List) Collections.unmodifiableList(Arrays.asList(values)); + } + + private Object retrySync(MethodInvocationContext context, MutableRetryState retryState, InterceptedMethod interceptedMethod) { + boolean firstCall = true; + while (true) { + try { + if (firstCall) { + firstCall = false; + return interceptedMethod.interceptResult(); + } + return interceptedMethod.interceptResult(this); + } catch (Throwable e) { + if (!retryState.canRetry(e)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Cannot retry anymore. 
Rethrowing original exception for method: {}", context); + } + retryState.close(e); + throw e; + } else { + long delayMillis = retryState.nextDelay(); + try { + if (eventPublisher != null) { + try { + eventPublisher.publishEvent(new RetryEvent(context, retryState, e)); + } catch (Exception e1) { + LOG.error("Error occurred publishing RetryEvent: " + e1.getMessage(), e1); + } + } + if (LOG.isDebugEnabled()) { + LOG.debug("Retrying execution for method [{}] after delay of {}ms for exception: {}", context, delayMillis, e.getMessage()); + } + Thread.sleep(delayMillis); + } catch (InterruptedException e1) { + throw e; + } + } + } + } + } +} diff --git a/core/src/test/java/io/micronaut/retry/intercept/OverrideRetryInterceptorTest.java b/core/src/test/java/io/micronaut/retry/intercept/OverrideRetryInterceptorTest.java new file mode 100644 index 00000000000..e67f9816ee5 --- /dev/null +++ b/core/src/test/java/io/micronaut/retry/intercept/OverrideRetryInterceptorTest.java @@ -0,0 +1,50 @@ +package io.micronaut.retry.intercept; + +import io.kestra.core.annotations.Retryable; +import io.micronaut.retry.event.RetryEvent; +import io.micronaut.runtime.event.annotation.EventListener; +import io.micronaut.test.extensions.junit5.annotation.MicronautTest; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; +import lombok.extern.slf4j.Slf4j; +import org.junit.jupiter.api.Test; + +import java.nio.channels.AlreadyBoundException; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.junit.jupiter.api.Assertions.assertThrows; + +@MicronautTest +class OverrideRetryInterceptorTest { + @Inject + RetryEvents retryEvents; + + @Inject + TestRetry retry; + + @Test + void test() { + assertThrows(AlreadyBoundException.class, retry::failedMethod); + + assertThat(retryEvents.count, is(5)); + } + + @Singleton + public static class TestRetry { + @Retryable(delay = "1s", multiplier = "2.0") + public String failedMethod() { + throw new AlreadyBoundException(); + } + } + + @Singleton + @Slf4j + public static class RetryEvents { + public int count = 0; + @EventListener + void onRetry(final RetryEvent event) { + this.count++; + } + } +} \ No newline at end of file diff --git a/storage-local/src/main/java/io/kestra/storage/local/LocalStorage.java b/storage-local/src/main/java/io/kestra/storage/local/LocalStorage.java index fd5e1093369..c69a4887a3e 100644 --- a/storage-local/src/main/java/io/kestra/storage/local/LocalStorage.java +++ b/storage-local/src/main/java/io/kestra/storage/local/LocalStorage.java @@ -49,7 +49,7 @@ private void createDirectory(URI append) { } @Override - public InputStream get(URI uri) throws FileNotFoundException { + public InputStream get(URI uri) throws IOException { return new BufferedInputStream(new FileInputStream(getPath(URI.create(uri.getPath())) .toAbsolutePath() .toString()) @@ -57,13 +57,13 @@ public InputStream get(URI uri) throws FileNotFoundException { } @Override - public Long size(URI uri) throws FileNotFoundException { + public Long size(URI uri) throws IOException { try { return Files.size(getPath(URI.create(uri.getPath()))); } catch (NoSuchFileException e) { throw new FileNotFoundException("Unable to find file at '" + uri + "'"); } catch (IOException e) { - throw new FileNotFoundException("Unable to find file at '" + uri + "' with message '" + e.getMessage() + "'"); + throw new IOException("Unable to find file at '" + uri + "' with message '" + e.getMessage() + "'"); } } diff --git 
a/ui/src/styles/layout/bootstrap.scss b/ui/src/styles/layout/bootstrap.scss index 6e719a29ecb..a757d4fff9d 100644 --- a/ui/src/styles/layout/bootstrap.scss +++ b/ui/src/styles/layout/bootstrap.scss @@ -242,6 +242,10 @@ table { border-radius: 0; } } + + td { + word-break: break-all; + } } .table { diff --git a/webserver/src/main/java/io/kestra/webserver/controllers/ExecutionController.java b/webserver/src/main/java/io/kestra/webserver/controllers/ExecutionController.java index 644528ccef1..d78c4c74626 100644 --- a/webserver/src/main/java/io/kestra/webserver/controllers/ExecutionController.java +++ b/webserver/src/main/java/io/kestra/webserver/controllers/ExecutionController.java @@ -378,7 +378,7 @@ public HttpResponse file( public HttpResponse filesize( String executionId, @QueryValue(value = "path") URI path - ) throws FileNotFoundException { + ) throws IOException { HttpResponse httpResponse =this.validateFile(executionId, path, "/api/v1/executions/{executionId}/file/metas?path=" + path); if (httpResponse != null) { return httpResponse; From 95d2b2d6ad259a149c0154385a67209309d2a1c0 Mon Sep 17 00:00:00 2001 From: Ludovic DEHON Date: Fri, 28 Jan 2022 10:27:49 +0100 Subject: [PATCH 09/44] feat(kafka-runner): create multiple stream thread to separate load (#452) --- .../cli/commands/servers/ExecutorCommand.java | 8 +- .../commands/servers/StandAloneCommand.java | 9 +- cli/src/main/resources/application.yml | 2 +- .../core/models/executions/Execution.java | 4 +- .../core/runners/ExecutorInterface.java | 7 + ...ractExecutor.java => ExecutorService.java} | 45 +- .../core/runners/MemoryFlowExecutor.java | 2 +- .../kestra/core/runners/StandAloneRunner.java | 6 +- .../scripts/runners/DockerScriptRunner.java | 4 +- .../core/runners/FlowTriggerCaseTest.java | 11 +- .../io/kestra/runner/kafka/KafkaExecutor.java | 887 +----------------- .../kafka/executors/ExecutorFlowTrigger.java | 98 ++ .../runner/kafka/executors/ExecutorMain.java | 700 ++++++++++++++ .../executors/ExecutorWorkerRunning.java | 140 +++ .../executors/KafkaExecutorInterface.java | 15 + .../kafka/services/KafkaStreamService.java | 4 + .../streams/ExecutorNextTransformer.java | 15 +- .../streams/WorkerInstanceTransformer.java | 7 +- .../runner/kafka/KafkaExecutorTest.java | 137 ++- .../kestra/runner/kafka/KafkaRunnerTest.java | 2 +- .../src/test/resources/application.yml | 2 +- .../kestra/runner/memory/MemoryExecutor.java | 95 +- 22 files changed, 1227 insertions(+), 973 deletions(-) create mode 100644 core/src/main/java/io/kestra/core/runners/ExecutorInterface.java rename core/src/main/java/io/kestra/core/runners/{AbstractExecutor.java => ExecutorService.java} (95%) create mode 100644 runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorFlowTrigger.java create mode 100644 runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorMain.java create mode 100644 runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java create mode 100644 runner-kafka/src/main/java/io/kestra/runner/kafka/executors/KafkaExecutorInterface.java diff --git a/cli/src/main/java/io/kestra/cli/commands/servers/ExecutorCommand.java b/cli/src/main/java/io/kestra/cli/commands/servers/ExecutorCommand.java index ff0e9e98b56..2ef6f62b019 100644 --- a/cli/src/main/java/io/kestra/cli/commands/servers/ExecutorCommand.java +++ b/cli/src/main/java/io/kestra/cli/commands/servers/ExecutorCommand.java @@ -1,11 +1,11 @@ package io.kestra.cli.commands.servers; import com.google.common.collect.ImmutableMap; +import 
io.kestra.core.runners.ExecutorInterface; import io.micronaut.context.ApplicationContext; import lombok.extern.slf4j.Slf4j; import io.kestra.cli.AbstractCommand; import io.kestra.core.models.ServerType; -import io.kestra.core.runners.AbstractExecutor; import io.kestra.core.utils.Await; import picocli.CommandLine; @@ -36,12 +36,12 @@ public static Map propertiesOverrides() { public Integer call() throws Exception { super.call(); - AbstractExecutor abstractExecutor = applicationContext.getBean(AbstractExecutor.class); - abstractExecutor.run(); + ExecutorInterface executorService = applicationContext.getBean(ExecutorInterface.class); + executorService.run(); log.info("Executor started"); - this.shutdownHook(abstractExecutor::close); + this.shutdownHook(executorService::close); Await.until(() -> !this.applicationContext.isRunning()); diff --git a/cli/src/main/java/io/kestra/cli/commands/servers/StandAloneCommand.java b/cli/src/main/java/io/kestra/cli/commands/servers/StandAloneCommand.java index dbe267bd4cb..ba199309cad 100644 --- a/cli/src/main/java/io/kestra/cli/commands/servers/StandAloneCommand.java +++ b/cli/src/main/java/io/kestra/cli/commands/servers/StandAloneCommand.java @@ -1,22 +1,19 @@ package io.kestra.cli.commands.servers; import com.google.common.collect.ImmutableMap; -import io.micronaut.context.ApplicationContext; -import lombok.extern.slf4j.Slf4j; import io.kestra.cli.AbstractCommand; import io.kestra.core.models.ServerType; import io.kestra.core.repositories.LocalFlowRepositoryLoader; -import io.kestra.core.runners.AbstractExecutor; import io.kestra.core.runners.StandAloneRunner; import io.kestra.core.utils.Await; -import io.kestra.runner.kafka.KafkaExecutor; +import io.micronaut.context.ApplicationContext; +import jakarta.inject.Inject; +import lombok.extern.slf4j.Slf4j; import picocli.CommandLine; import java.io.File; import java.io.IOException; import java.util.Map; -import java.util.Optional; -import jakarta.inject.Inject; @CommandLine.Command( name = "standalone", diff --git a/cli/src/main/resources/application.yml b/cli/src/main/resources/application.yml index 9628acc4efd..5cb48a5964c 100644 --- a/cli/src/main/resources/application.yml +++ b/cli/src/main/resources/application.yml @@ -111,7 +111,7 @@ kestra: segment.bytes: "10485760" executor: - name: "${kestra.kafka.defaults.topic-prefix}executor-executor-changelog" + name: "${kestra.kafka.defaults.topic-prefix}executor_main-executor-changelog" cls: io.kestra.core.runners.Executor properties: cleanup.policy: "delete,compact" diff --git a/core/src/main/java/io/kestra/core/models/executions/Execution.java b/core/src/main/java/io/kestra/core/models/executions/Execution.java index bc82853a245..a2d76f35df7 100644 --- a/core/src/main/java/io/kestra/core/models/executions/Execution.java +++ b/core/src/main/java/io/kestra/core/models/executions/Execution.java @@ -434,13 +434,13 @@ public boolean hasTaskRunJoinable(TaskRun taskRun) { } /** - * Convert an exception on {@link io.kestra.core.runners.AbstractExecutor} and add log to the current + * Convert an exception on Executor and add log to the current * {@code RUNNING} taskRun, on the lastAttempts. * If no Attempt is found, we create one (must be nominal case). * The executor will catch the {@code FAILED} taskRun emitted and will failed the execution. * In the worst case, we FAILED the execution (only from {@link io.kestra.core.models.triggers.types.Flow}). 
* - * @param e the exception throw from {@link io.kestra.core.runners.AbstractExecutor} + * @param e the exception throw from Executor * @return a new execution with taskrun failed if possible or execution failed is other case */ public FailedExecutionWithLog failedExecutionFromExecutor(Exception e) { diff --git a/core/src/main/java/io/kestra/core/runners/ExecutorInterface.java b/core/src/main/java/io/kestra/core/runners/ExecutorInterface.java new file mode 100644 index 00000000000..97090f69315 --- /dev/null +++ b/core/src/main/java/io/kestra/core/runners/ExecutorInterface.java @@ -0,0 +1,7 @@ +package io.kestra.core.runners; + +import java.io.Closeable; + +public interface ExecutorInterface extends Closeable, Runnable { + +} diff --git a/core/src/main/java/io/kestra/core/runners/AbstractExecutor.java b/core/src/main/java/io/kestra/core/runners/ExecutorService.java similarity index 95% rename from core/src/main/java/io/kestra/core/runners/AbstractExecutor.java rename to core/src/main/java/io/kestra/core/runners/ExecutorService.java index c7356b1d2b8..eddce20809e 100644 --- a/core/src/main/java/io/kestra/core/runners/AbstractExecutor.java +++ b/core/src/main/java/io/kestra/core/runners/ExecutorService.java @@ -12,34 +12,43 @@ import io.kestra.core.models.tasks.ResolvedTask; import io.kestra.core.models.tasks.Task; import io.kestra.core.services.ConditionService; +import io.micronaut.context.ApplicationContext; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; import lombok.extern.slf4j.Slf4j; import org.slf4j.Logger; -import java.io.Closeable; import java.util.ArrayList; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; import static io.kestra.core.utils.Rethrow.throwFunction; -import static io.kestra.core.utils.Rethrow.throwPredicate; -public abstract class AbstractExecutor implements Runnable, Closeable { - protected static final Logger log = org.slf4j.LoggerFactory.getLogger(AbstractExecutor.class); +@Singleton +@Slf4j +public class ExecutorService { + @Inject + protected ApplicationContext applicationContext; + @Inject protected RunContextFactory runContextFactory; + + @Inject protected MetricRegistry metricRegistry; + + @Inject protected ConditionService conditionService; + protected FlowExecutorInterface flowExecutorInterface; - public AbstractExecutor( - RunContextFactory runContextFactory, - MetricRegistry metricRegistry, - ConditionService conditionService - ) { - this.runContextFactory = runContextFactory; - this.metricRegistry = metricRegistry; - this.conditionService = conditionService; + protected FlowExecutorInterface flowExecutorInterface() { + // bean is injected late, so we need to wait + if (this.flowExecutorInterface == null) { + this.flowExecutorInterface = applicationContext.getBean(FlowExecutorInterface.class); + } + + return this.flowExecutorInterface; } public Executor process(Executor executor) { @@ -250,7 +259,7 @@ private List saveFlowableOutput( ImmutableMap.of() ); } catch (Exception e) { - log.warn("Unable to save output on taskRun '{}'", taskRun, e); + executor.getFlow().logger().warn("Unable to save output on taskRun '{}'", taskRun, e); } return taskRun; @@ -510,7 +519,7 @@ private Executor handleFlowTask(final Executor executor) { ); try { - Execution execution = flowTask.createExecution(runContext, flowExecutorInterface); + Execution execution = flowTask.createExecution(runContext, flowExecutorInterface()); WorkerTaskExecution workerTaskExecution = WorkerTaskExecution.builder() .task(flowTask) @@ -552,7 
+561,7 @@ private Executor handleFlowTask(final Executor executor) { return resultExecutor; } - protected static void log(Logger log, Boolean in, WorkerTask value) { + public void log(Logger log, Boolean in, WorkerTask value) { log.debug( "{} {} : {}", in ? "<< IN " : ">> OUT", @@ -561,7 +570,7 @@ protected static void log(Logger log, Boolean in, WorkerTask value) { ); } - protected static void log(Logger log, Boolean in, WorkerTaskResult value) { + public void log(Logger log, Boolean in, WorkerTaskResult value) { log.debug( "{} {} : {}", in ? "<< IN " : ">> OUT", @@ -570,7 +579,7 @@ protected static void log(Logger log, Boolean in, WorkerTaskResult value) { ); } - protected static void log(Logger log, Boolean in, Execution value) { + public void log(Logger log, Boolean in, Execution value) { log.debug( "{} {} [key='{}']\n{}", in ? "<< IN " : ">> OUT", @@ -580,7 +589,7 @@ protected static void log(Logger log, Boolean in, Execution value) { ); } - protected static void log(Logger log, Boolean in, Executor value) { + public void log(Logger log, Boolean in, Executor value) { log.debug( "{} {} [key='{}', from='{}', offset='{}']\n{}", in ? "<< IN " : ">> OUT", diff --git a/core/src/main/java/io/kestra/core/runners/MemoryFlowExecutor.java b/core/src/main/java/io/kestra/core/runners/MemoryFlowExecutor.java index 38b5d2066b3..595c88c8076 100644 --- a/core/src/main/java/io/kestra/core/runners/MemoryFlowExecutor.java +++ b/core/src/main/java/io/kestra/core/runners/MemoryFlowExecutor.java @@ -6,7 +6,7 @@ import java.util.Optional; public class MemoryFlowExecutor implements FlowExecutorInterface { - private FlowRepositoryInterface flowRepositoryInterface; + private final FlowRepositoryInterface flowRepositoryInterface; public MemoryFlowExecutor(FlowRepositoryInterface flowRepositoryInterface) { this.flowRepositoryInterface = flowRepositoryInterface; diff --git a/core/src/main/java/io/kestra/core/runners/StandAloneRunner.java b/core/src/main/java/io/kestra/core/runners/StandAloneRunner.java index 575aee53594..88484bd7240 100644 --- a/core/src/main/java/io/kestra/core/runners/StandAloneRunner.java +++ b/core/src/main/java/io/kestra/core/runners/StandAloneRunner.java @@ -11,13 +11,13 @@ import java.io.Closeable; import java.io.IOException; -import java.util.concurrent.ExecutorService; + import jakarta.inject.Inject; import jakarta.inject.Named; @Slf4j public class StandAloneRunner implements RunnerInterface, Closeable { - @Setter private ExecutorService poolExecutor; + @Setter private java.util.concurrent.ExecutorService poolExecutor; @Setter protected int workerThread = Math.max(3, Runtime.getRuntime().availableProcessors()); @Setter protected boolean schedulerEnabled = true; @@ -47,7 +47,7 @@ public void run() { poolExecutor = executorsUtils.cachedThreadPool("standalone-runner"); - poolExecutor.execute(applicationContext.getBean(AbstractExecutor.class)); + poolExecutor.execute(applicationContext.getBean(ExecutorInterface.class)); Worker worker = new Worker(applicationContext, workerThread); applicationContext.registerSingleton(worker); diff --git a/core/src/main/java/io/kestra/core/tasks/scripts/runners/DockerScriptRunner.java b/core/src/main/java/io/kestra/core/tasks/scripts/runners/DockerScriptRunner.java index 5a384265a02..865ede5318b 100644 --- a/core/src/main/java/io/kestra/core/tasks/scripts/runners/DockerScriptRunner.java +++ b/core/src/main/java/io/kestra/core/tasks/scripts/runners/DockerScriptRunner.java @@ -23,6 +23,7 @@ import io.micronaut.context.ApplicationContext; import 
lombok.SneakyThrows; import org.apache.commons.lang3.StringUtils; +import org.apache.hc.core5.http.ConnectionClosedException; import org.slf4j.Logger; import java.io.IOException; @@ -186,7 +187,8 @@ public RunResult run( .maxAttempt(5) .build() ).run( - InternalServerErrorException.class, + (bool, throwable) -> throwable instanceof InternalServerErrorException || + throwable.getCause() instanceof ConnectionClosedException, () -> { pull .withTag(!imageParse.tag.equals("") ? imageParse.tag : "latest") diff --git a/core/src/test/java/io/kestra/core/runners/FlowTriggerCaseTest.java b/core/src/test/java/io/kestra/core/runners/FlowTriggerCaseTest.java index 87fdd7bd6a1..bb2c3a71077 100644 --- a/core/src/test/java/io/kestra/core/runners/FlowTriggerCaseTest.java +++ b/core/src/test/java/io/kestra/core/runners/FlowTriggerCaseTest.java @@ -5,17 +5,17 @@ import io.kestra.core.models.flows.State; import io.kestra.core.queues.QueueFactoryInterface; import io.kestra.core.queues.QueueInterface; +import jakarta.inject.Inject; +import jakarta.inject.Named; +import jakarta.inject.Singleton; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; -import jakarta.inject.Inject; -import jakarta.inject.Named; -import jakarta.inject.Singleton; import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.is; @Singleton public class FlowTriggerCaseTest { @@ -49,7 +49,8 @@ public void trigger() throws InterruptedException, TimeoutException { logEntryQueue.receive(logEntry -> { if (logEntry.getMessage().contains("Failed to trigger flow") && - logEntry.getTriggerId().equals("listen-flow-invalid") + logEntry.getTriggerId() != null && + logEntry.getTriggerId().equals("listen-flow-invalid") ) { countDownLatch.countDown(); } diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java index 22f2c0781b5..64902a86f5a 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java @@ -1,855 +1,51 @@ package io.kestra.runner.kafka; -import io.kestra.core.metrics.MetricRegistry; import io.kestra.core.models.executions.Execution; import io.kestra.core.models.executions.ExecutionKilled; import io.kestra.core.models.executions.LogEntry; -import io.kestra.core.models.executions.TaskRun; import io.kestra.core.models.flows.Flow; -import io.kestra.core.models.flows.State; import io.kestra.core.models.templates.Template; import io.kestra.core.models.triggers.Trigger; -import io.kestra.core.models.triggers.multipleflows.MultipleConditionWindow; -import io.kestra.core.queues.QueueService; import io.kestra.core.runners.*; -import io.kestra.core.services.ConditionService; -import io.kestra.core.services.FlowService; -import io.kestra.runner.kafka.serializers.JsonSerde; +import io.kestra.runner.kafka.executors.KafkaExecutorInterface; import io.kestra.runner.kafka.services.KafkaAdminService; import io.kestra.runner.kafka.services.KafkaStreamService; import io.kestra.runner.kafka.services.KafkaStreamSourceService; -import io.kestra.runner.kafka.services.KafkaStreamsBuilder; -import io.kestra.runner.kafka.streams.*; +import io.kestra.runner.kafka.streams.ExecutorFlowTrigger; import io.micronaut.context.ApplicationContext; -import lombok.AllArgsConstructor; -import 
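/* Editor's note, the heart of this refactor: the single monolithic topology
   below is carved into several KafkaExecutorInterface implementations
   (ExecutorMain, ExecutorFlowTrigger, ExecutorWorkerRunning per the file list
   above), each built and started as its own KafkaStreams instance, so the
   main execution path no longer shares stream threads with trigger handling
   and worker liveness tracking. A hedged sketch of the shape, with a
   hypothetical interface body since the new file's contents are not shown in
   this hunk:

     public interface KafkaExecutorInterface {
         StreamsBuilder topology();   // each concern contributes one topology
     }

   KafkaExecutor.run() would then build one stream per bean, naming each
   independently (e.g. "executor_main", matching the renamed
   "...executor_main-executor-changelog" topic in application.yml). */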
lombok.Getter; -import lombok.NoArgsConstructor; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.kafka.common.serialization.Serdes; -import org.apache.kafka.common.utils.Bytes; -import org.apache.kafka.streams.*; -import org.apache.kafka.streams.kstream.*; -import org.apache.kafka.streams.state.KeyValueStore; -import org.apache.kafka.streams.state.QueryableStoreTypes; -import org.apache.kafka.streams.state.Stores; -import org.slf4j.event.Level; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.streams.Topology; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import java.io.Closeable; import java.io.IOException; import java.time.Duration; -import java.util.HashMap; -import java.util.Map; +import java.util.List; import java.util.Properties; import java.util.stream.Collectors; -import jakarta.inject.Inject; -import jakarta.inject.Singleton; @KafkaQueueEnabled @Singleton -public class KafkaExecutor extends AbstractExecutor implements Closeable { - private static final String EXECUTOR_STATE_STORE_NAME = "executor"; - private static final String WORKERTASK_DEDUPLICATION_STATE_STORE_NAME = "workertask_deduplication"; - private static final String TRIGGER_DEDUPLICATION_STATE_STORE_NAME = "trigger_deduplication"; - public static final String TRIGGER_MULTIPLE_STATE_STORE_NAME = "trigger_multiplecondition"; - private static final String NEXTS_DEDUPLICATION_STATE_STORE_NAME = "next_deduplication"; - public static final String WORKER_RUNNING_STATE_STORE_NAME = "worker_running"; - public static final String WORKERINSTANCE_STATE_STORE_NAME = "worker_instance"; - public static final String TOPIC_EXECUTOR_WORKERINSTANCE = "executorworkerinstance"; +@Slf4j +public class KafkaExecutor implements ExecutorInterface { + private List streams; + @Inject protected ApplicationContext applicationContext; - protected KafkaStreamService kafkaStreamService; - protected KafkaAdminService kafkaAdminService; - protected FlowService flowService; - protected KafkaStreamSourceService kafkaStreamSourceService; - protected QueueService queueService; - - protected KafkaStreamService.Stream stream; @Inject - public KafkaExecutor( - ApplicationContext applicationContext, - RunContextFactory runContextFactory, - KafkaStreamService kafkaStreamService, - KafkaAdminService kafkaAdminService, - MetricRegistry metricRegistry, - FlowService flowService, - ConditionService conditionService, - KafkaStreamSourceService kafkaStreamSourceService, - QueueService queueService - ) { - super(runContextFactory, metricRegistry, conditionService); - - this.applicationContext = applicationContext; - this.kafkaStreamService = kafkaStreamService; - this.kafkaAdminService = kafkaAdminService; - this.flowService = flowService; - this.kafkaStreamSourceService = kafkaStreamSourceService; - this.queueService = queueService; - } - - public StreamsBuilder topology() { - StreamsBuilder builder = new KafkaStreamsBuilder(); - - // executor - builder.addStateStore(Stores.keyValueStoreBuilder( - Stores.persistentKeyValueStore(EXECUTOR_STATE_STORE_NAME), - Serdes.String(), - JsonSerde.of(Executor.class) - )); - - // WorkerTask deduplication - builder.addStateStore(Stores.keyValueStoreBuilder( - Stores.persistentKeyValueStore(WORKERTASK_DEDUPLICATION_STATE_STORE_NAME), - Serdes.String(), - Serdes.String() - )); - - // next deduplication - builder.addStateStore(Stores.keyValueStoreBuilder( - Stores.persistentKeyValueStore(NEXTS_DEDUPLICATION_STATE_STORE_NAME), - 
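/* Editor's note, a minimal sketch (hypothetical class, not the patch's own
   transformer) of the deduplication pattern these persistent stores back: a
   transformer remembers each key in the RocksDB-backed store and drops any
   later occurrence, so the dedup state survives rebalances and restarts:

     public class DedupTransformer implements ValueTransformerWithKey<String, WorkerTask, WorkerTask> {
         private KeyValueStore<String, String> store;

         @Override
         @SuppressWarnings("unchecked")
         public void init(ProcessorContext context) {
             this.store = (KeyValueStore<String, String>) context.getStateStore("workertask_deduplication");
         }

         @Override
         public WorkerTask transform(String key, WorkerTask value) {
             if (store.get(key) != null) {
                 return null;              // duplicate: already forwarded once
             }
             store.put(key, "");           // remember the key, forward the value
             return value;
         }

         @Override
         public void close() { }
     }
 */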
Serdes.String(), - JsonSerde.of(ExecutorNextTransformer.Store.class) - )); - - // trigger deduplication - builder.addStateStore(Stores.keyValueStoreBuilder( - Stores.persistentKeyValueStore(TRIGGER_DEDUPLICATION_STATE_STORE_NAME), - Serdes.String(), - Serdes.String() - )); - - // declare common stream - KStream workerTaskResultKStream = this.workerTaskResultKStream(builder); - KStream executorKStream = this.executorKStream(builder); - - // join with killed - KStream executionKilledKStream = this.executionKilledKStream(builder); - KStream executionWithKilled = this.joinExecutionKilled(executionKilledKStream, executorKStream); - - // join with WorkerResult - KStream executionKStream = this.joinWorkerResult(workerTaskResultKStream, executionWithKilled); - - // handle state on execution - GlobalKTable flowKTable = kafkaStreamSourceService.flowGlobalKTable(builder); - GlobalKTable templateKTable = kafkaStreamSourceService.templateGlobalKTable(builder); - KStream stream = kafkaStreamSourceService.executorWithFlow(executionKStream, true); - - stream = this.handleExecutor(stream); - - // save execution - this.toExecution(stream); - this.toWorkerTask(stream); - this.toWorkerTaskResult(stream); - - // trigger - builder.addStateStore( - Stores.keyValueStoreBuilder( - Stores.persistentKeyValueStore(TRIGGER_MULTIPLE_STATE_STORE_NAME), - Serdes.String(), - JsonSerde.of(MultipleConditionWindow.class) - ) - ); - - KStream flowWithTriggerStream = this.flowWithTriggerStream(builder); - - this.toExecutorFlowTriggerTopic(stream); - this.handleFlowTrigger(flowWithTriggerStream); - - // task Flow - KTable workerTaskExecutionKTable = this.workerTaskExecutionStream(builder); - - KStream workerTaskExecutionKStream = this.deduplicateWorkerTaskExecution(stream); - this.toWorkerTaskExecution(workerTaskExecutionKStream); - this.workerTaskExecutionToExecution(workerTaskExecutionKStream); - this.handleWorkerTaskExecution(workerTaskExecutionKTable, stream); - - // purge at end - this.purgeExecutor(stream); - - // handle worker running - builder.addGlobalStore( - Stores.keyValueStoreBuilder( - Stores.persistentKeyValueStore(WORKERINSTANCE_STATE_STORE_NAME), - Serdes.String(), - JsonSerde.of(WorkerInstance.class) - ), - kafkaAdminService.getTopicName(TOPIC_EXECUTOR_WORKERINSTANCE), - Consumed.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("GlobalStore.ExecutorWorkerInstace"), - () -> new GlobalStateProcessor<>(WORKERINSTANCE_STATE_STORE_NAME) - ); - - GlobalKTable workerTaskRunningKTable = this.workerTaskRunningKStream(builder); - KStream workerInstanceKStream = this.workerInstanceKStream(builder); - - this.purgeWorkerRunning(workerTaskResultKStream); - this.detectNewWorker(workerInstanceKStream, workerTaskRunningKTable); - - return builder; - } - - public KStream executorKStream(StreamsBuilder builder) { - KStream result = builder - .stream( - kafkaAdminService.getTopicName(Execution.class), - Consumed.with(Serdes.String(), JsonSerde.of(Execution.class)).withName("Executor.fromExecution") - ) - .filter((key, value) -> value != null, Named.as("Executor.filterNotNull")) - .transformValues( - () -> new ExecutorFromExecutionTransformer(EXECUTOR_STATE_STORE_NAME), - Named.as("Executor.toExecutor"), - EXECUTOR_STATE_STORE_NAME - ); - - // logs - KafkaStreamSourceService.logIfEnabled( - log, - result, - (key, value) -> log(log, true, value), - "ExecutionIn" - ); - - return result; - } - - private KStream executionKilledKStream(StreamsBuilder builder) { - return builder - .stream( - 
kafkaAdminService.getTopicName(ExecutionKilled.class), - Consumed.with(Serdes.String(), JsonSerde.of(ExecutionKilled.class)).withName("KTable.ExecutionKilled") - ); - } - - private GlobalKTable workerTaskRunningKStream(StreamsBuilder builder) { - return builder - .globalTable( - kafkaAdminService.getTopicName(WorkerTaskRunning.class), - Consumed.with(Serdes.String(), JsonSerde.of(WorkerTaskRunning.class)).withName("GlobalKTable.WorkerTaskRunning"), - Materialized.>as(WORKER_RUNNING_STATE_STORE_NAME) - .withKeySerde(Serdes.String()) - .withValueSerde(JsonSerde.of(WorkerTaskRunning.class)) - ); - } - - private KStream workerInstanceKStream(StreamsBuilder builder) { - return builder - .stream( - kafkaAdminService.getTopicName(WorkerInstance.class), - Consumed.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("KStream.WorkerInstance") - ); - } - - private KStream flowWithTriggerStream(StreamsBuilder builder) { - return builder - .stream( - kafkaAdminService.getTopicName(ExecutorFlowTrigger.class), - Consumed.with(Serdes.String(), JsonSerde.of(ExecutorFlowTrigger.class)).withName("KStream.ExecutorFlowTrigger") - ) - .filter((key, value) -> value != null, Named.as("Flowwithtriggerstream.filterNotNull")); - } - - private KStream joinExecutionKilled(KStream executionKilledKStream, KStream executorKStream) { - return executorKStream - .merge( - executionKilledKStream - .transformValues( - () -> new ExecutorKilledJoinerTransformer( - EXECUTOR_STATE_STORE_NAME - ), - Named.as("JoinExecutionKilled.transformValues"), - EXECUTOR_STATE_STORE_NAME - ) - .filter((key, value) -> value != null, Named.as("JoinExecutionKilled.filterNotNull")), - Named.as("JoinExecutionKilled.merge") - ); - } - - private KStream workerTaskResultKStream(StreamsBuilder builder) { - return builder - .stream( - kafkaAdminService.getTopicName(WorkerTaskResult.class), - Consumed.with(Serdes.String(), JsonSerde.of(WorkerTaskResult.class)).withName("KStream.WorkerTaskResult") - ) - .filter((key, value) -> value != null, Named.as("WorkerTaskResultKStream.filterNotNull")); - } - - private KStream joinWorkerResult(KStream workerTaskResultKstream, KStream executorKStream) { - return executorKStream - .merge( - workerTaskResultKstream - .selectKey((key, value) -> value.getTaskRun().getExecutionId(), Named.as("JoinWorkerResult.selectKey")) - .mapValues( - (key, value) -> new Executor(value), - Named.as("JoinWorkerResult.WorkerTaskResultMap") - ) - .repartition( - Repartitioned.as("workertaskjoined") - .withKeySerde(Serdes.String()) - .withValueSerde(JsonSerde.of(Executor.class)) - ), - Named.as("JoinWorkerResult.merge") - ) - .transformValues( - () -> new ExecutorJoinerTransformer( - EXECUTOR_STATE_STORE_NAME, - this.metricRegistry - ), - Named.as("JoinWorkerResult.transformValues"), - EXECUTOR_STATE_STORE_NAME - ) - .filter( - (key, value) -> value != null, - Named.as("JoinWorkerResult.notNullFilter") - ); - } - - private KStream handleExecutor(KStream stream) { - return stream - .transformValues( - () -> new ExecutorNextTransformer( - NEXTS_DEDUPLICATION_STATE_STORE_NAME, - this - ), - Named.as("HandleExecutor.transformValues"), - NEXTS_DEDUPLICATION_STATE_STORE_NAME - ); - } - - private void purgeExecutor(KStream stream) { - KStream terminatedWithKilled = stream - .filter( - (key, value) -> conditionService.isTerminatedWithListeners(value.getFlow(), value.getExecution()), - Named.as("PurgeExecutor.filterTerminated") - ); - - // we don't purge killed execution in order to have feedback about child running tasks - // this 
can be killed lately (after the executor kill the execution), but we want to keep - // feedback about the actual state (killed or not) - // @TODO: this can lead to infinite state store for most executor topic - KStream terminated = terminatedWithKilled.filter( - (key, value) -> value.getExecution().getState().getCurrent() != State.Type.KILLED, - Named.as("PurgeExecutor.filterKilledExecution") - ); - - // clean up executor - terminated - .mapValues( - (readOnlyKey, value) -> (Execution) null, - Named.as("PurgeExecutor.executionToNull") - ) - .to( - kafkaAdminService.getTopicName(Executor.class), - Produced.with(Serdes.String(), JsonSerde.of(Execution.class)).withName("PurgeExecutor.toExecutor") - ); - - // flatMap taskRun - KStream taskRunKStream = terminated - .filter( - (key, value) -> value.getExecution().getTaskRunList() != null, - Named.as("PurgeExecutor.notNullTaskRunList") - ) - .flatMapValues( - (readOnlyKey, value) -> value.getExecution().getTaskRunList(), - Named.as("PurgeExecutor.flatMapTaskRunList") - ); - - // clean up workerTaskResult - taskRunKStream - .map( - (readOnlyKey, value) -> new KeyValue<>( - value.getId(), - (WorkerTaskResult) null - ), - Named.as("PurgeExecutor.workerTaskResultToNull") - ) - .to( - kafkaAdminService.getTopicName(WorkerTaskResult.class), - Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskResult.class)).withName("PurgeExecutor.toWorkerTaskResult") - ); - - // clean up WorkerTask deduplication state - taskRunKStream - .transformValues( - () -> new DeduplicationPurgeTransformer<>( - WORKERTASK_DEDUPLICATION_STATE_STORE_NAME, - (key, value) -> value.getExecutionId() + "-" + value.getId() - ), - Named.as("PurgeExecutor.purgeWorkerTaskDeduplication"), - WORKERTASK_DEDUPLICATION_STATE_STORE_NAME - ); - - taskRunKStream - .transformValues( - () -> new DeduplicationPurgeTransformer<>( - WORKERTASK_DEDUPLICATION_STATE_STORE_NAME, - (key, value) -> "WorkerTaskExecution-" + value.getExecutionId() + "-" + value.getId() - ), - Named.as("PurgeExecutor.purgeWorkerTaskExecutionDeduplication"), - WORKERTASK_DEDUPLICATION_STATE_STORE_NAME - ); - - // clean up Execution Nexts deduplication state - terminated - .transformValues( - () -> new DeduplicationPurgeTransformer<>( - NEXTS_DEDUPLICATION_STATE_STORE_NAME, - (key, value) -> value.getExecution().getId() - ), - Named.as("PurgeExecutor.purgeNextsDeduplication"), - NEXTS_DEDUPLICATION_STATE_STORE_NAME - ); - - // clean up killed - terminatedWithKilled - .mapValues( - (readOnlyKey, value) -> (ExecutionKilled) null, - Named.as("PurgeExecutor.executionKilledToNull") - ) - .to( - kafkaAdminService.getTopicName(ExecutionKilled.class), - Produced.with(Serdes.String(), JsonSerde.of(ExecutionKilled.class)).withName("PurgeExecutor.toExecutionKilled") - ); - } - - private void toExecutorFlowTriggerTopic(KStream stream) { - stream - .filter( - (key, value) -> conditionService.isTerminatedWithListeners(value.getFlow(), value.getExecution()), - Named.as("HandleExecutorFlowTriggerTopic.filterTerminated") - ) - .transformValues( - () -> new DeduplicationTransformer<>( - "FlowTrigger", - TRIGGER_DEDUPLICATION_STATE_STORE_NAME, - (key, value) -> value.getExecution().getId(), - (key, value) -> value.getExecution().getId() - ), - Named.as("HandleExecutorFlowTriggerTopic.deduplication"), - TRIGGER_DEDUPLICATION_STATE_STORE_NAME - ) - .filter((key, value) -> value != null, Named.as("HandleExecutorFlowTriggerTopic.deduplicationNotNull")) - .flatTransform( - () -> new FlowWithTriggerTransformer( - flowService - ), - 
Named.as("HandleExecutorFlowTriggerTopic.flatMapToExecutorFlowTrigger") - ) - .to( - kafkaAdminService.getTopicName(ExecutorFlowTrigger.class), - Produced.with(Serdes.String(), JsonSerde.of(ExecutorFlowTrigger.class)).withName("PurgeExecutor.toExecutorFlowTrigger") - ); - } - - private void handleFlowTrigger(KStream stream) { - stream - .transformValues( - () -> new FlowTriggerWithExecutionTransformer( - TRIGGER_MULTIPLE_STATE_STORE_NAME, - flowService - ), - Named.as("HandleFlowTrigger.transformToExecutionList"), - TRIGGER_MULTIPLE_STATE_STORE_NAME - ) - .flatMap( - (key, value) -> value - .stream() - .map(execution -> new KeyValue<>(execution.getId(), execution)) - .collect(Collectors.toList()), - Named.as("HandleFlowTrigger.flapMapToExecution") - ) - .to( - kafkaAdminService.getTopicName(Execution.class), - Produced.with(Serdes.String(), JsonSerde.of(Execution.class)).withName("HandleFlowTrigger.toExecution") - ); - - stream - .mapValues( - (readOnlyKey, value) -> (ExecutorFlowTrigger)null, - Named.as("HandleFlowTrigger.executorFlowTriggerToNull") - ) - .to( - kafkaAdminService.getTopicName(ExecutorFlowTrigger.class), - Produced.with(Serdes.String(), JsonSerde.of(ExecutorFlowTrigger.class)).withName("HandleFlowTrigger.toExecutorFlowTrigger") - ); - } - - private void toWorkerTask(KStream stream) { - // deduplication worker task - KStream dedupWorkerTask = stream - .flatMapValues( - (readOnlyKey, value) -> value.getWorkerTasks(), - Named.as("HandleWorkerTask.flatMapToWorkerTask") - ) - .transformValues( - () -> new DeduplicationTransformer<>( - "WorkerTask", - WORKERTASK_DEDUPLICATION_STATE_STORE_NAME, - (key, value) -> value.getTaskRun().getExecutionId() + "-" + value.getTaskRun().getId(), - (key, value) -> value.getTaskRun().getState().getCurrent().name() - ), - Named.as("HandleWorkerTask.deduplication"), - WORKERTASK_DEDUPLICATION_STATE_STORE_NAME - ) - .filter((key, value) -> value != null, Named.as("HandleWorkerTask.notNullFilter")); - - // flowable > running to WorkerTaskResult - KStream resultFlowable = dedupWorkerTask - .filter((key, value) -> value.getTask().isFlowable(), Named.as("HandleWorkerTaskFlowable.filterIsFlowable")) - .mapValues( - (key, value) -> new WorkerTaskResult(value.withTaskRun(value.getTaskRun().withState(State.Type.RUNNING))), - Named.as("HandleWorkerTaskFlowable.toRunning") - ) - .map( - (key, value) -> new KeyValue<>(queueService.key(value), value), - Named.as("HandleWorkerTaskFlowable.mapWithKey") - ) - .selectKey( - (key, value) -> queueService.key(value), - Named.as("HandleWorkerTaskFlowable.selectKey") - ); - - KStream workerTaskResultKStream = KafkaStreamSourceService.logIfEnabled( - log, - resultFlowable, - (key, value) -> log(log, false, value), - "HandleWorkerTaskFlowable" - ); - - workerTaskResultKStream - .to( - kafkaAdminService.getTopicName(WorkerTaskResult.class), - Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskResult.class)).withName("HandleWorkerTaskFlowable.toWorkerTaskResult") - ); - - // not flowable > to WorkerTask - KStream resultNotFlowable = dedupWorkerTask - .filter((key, value) -> !value.getTask().isFlowable(), Named.as("HandleWorkerTaskNotFlowable.filterIsNotFlowable")) - .map((key, value) -> new KeyValue<>(queueService.key(value), value), Named.as("HandleWorkerTaskNotFlowable.mapWithKey")) - .selectKey( - (key, value) -> queueService.key(value), - Named.as("HandleWorkerTaskNotFlowable.selectKey") - ); - - KStream workerTaskKStream = KafkaStreamSourceService.logIfEnabled( - log, - resultNotFlowable, - (key, value) -> 
log(log, false, value), - "HandleWorkerTaskNotFlowable" - ); - - workerTaskKStream - .to( - kafkaAdminService.getTopicName(WorkerTask.class), - Produced.with(Serdes.String(), JsonSerde.of(WorkerTask.class)).withName("HandleWorkerTaskNotFlowable.toWorkerTask") - ); - } - - private KTable workerTaskExecutionStream(StreamsBuilder builder) { - return builder - .table( - kafkaAdminService.getTopicName(WorkerTaskExecution.class), - Consumed.with(Serdes.String(), JsonSerde.of(WorkerTaskExecution.class)).withName("WorkerTaskExecution.from"), - Materialized.>as("workertaskexecution") - .withKeySerde(Serdes.String()) - .withValueSerde(JsonSerde.of(WorkerTaskExecution.class)) - ); - } - - private KStream deduplicateWorkerTaskExecution(KStream stream) { - return stream - .flatMapValues( - (readOnlyKey, value) -> value.getWorkerTaskExecutions(), - Named.as("DeduplicateWorkerTaskExecution.flatMap") - ) - .transformValues( - () -> new DeduplicationTransformer<>( - "DeduplicateWorkerTaskExecution", - WORKERTASK_DEDUPLICATION_STATE_STORE_NAME, - (key, value) -> "WorkerTaskExecution-" + value.getTaskRun().getExecutionId() + "-" + value.getTaskRun().getId(), - (key, value) -> value.getTaskRun().getState().getCurrent().name() - ), - Named.as("DeduplicateWorkerTaskExecution.deduplication"), - WORKERTASK_DEDUPLICATION_STATE_STORE_NAME - ) - .filter((key, value) -> value != null, Named.as("DeduplicateWorkerTaskExecution.notNullFilter")); - } - - private void toWorkerTaskExecution(KStream stream) { - stream - .selectKey( - (key, value) -> value.getExecution().getId(), - Named.as("ToWorkerTaskExecution.selectKey") - ) - .to( - kafkaAdminService.getTopicName(WorkerTaskExecution.class), - Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskExecution.class)).withName("ToWorkerTaskExecution.toWorkerTaskExecution") - ); - } - - private void workerTaskExecutionToExecution(KStream stream) { - stream - .mapValues( - value -> { - String message = "Create new execution for flow '" + - value.getExecution().getNamespace() + "'." + value.getExecution().getFlowId() + - "' with id '" + value.getExecution().getId() + "' from task '" + value.getTask().getId() + - "' and taskrun '" + value.getTaskRun().getId() + - (value.getTaskRun().getValue() != null ? 
" (" + value.getTaskRun().getValue() + ")" : "") + "'"; - - log.info(message); - LogEntry.LogEntryBuilder logEntryBuilder = LogEntry.of(value.getTaskRun()).toBuilder() - .level(Level.INFO) - .message(message) - .timestamp(value.getTaskRun().getState().getStartDate()) - .thread(Thread.currentThread().getName()); - - return logEntryBuilder.build(); - }, - Named.as("WorkerTaskExecutionToExecution.mapToLog") - ) - .selectKey((key, value) -> (String)null, Named.as("WorkerTaskExecutionToExecution.logRemoveKey")) - .to( - kafkaAdminService.getTopicName(LogEntry.class), - Produced.with(Serdes.String(), JsonSerde.of(LogEntry.class)).withName("WorkerTaskExecutionToExecution.toLogEntry") - ); - - KStream executionKStream = stream - .mapValues( - (key, value) -> value.getExecution(), - Named.as("WorkerTaskExecutionToExecution.map") - ) - .selectKey( - (key, value) -> value.getId(), - Named.as("WorkerTaskExecutionToExecution.selectKey") - ); - - executionKStream = KafkaStreamSourceService.logIfEnabled( - log, - executionKStream, - (key, value) -> log(log, false, value), - "WorkerTaskExecutionToExecution" - ); - - executionKStream - .to( - kafkaAdminService.getTopicName(Execution.class), - Produced.with(Serdes.String(), JsonSerde.of(Execution.class)).withName("WorkerTaskExecutionToExecution.toExecution") - ); - } - - private void handleWorkerTaskExecution(KTable workerTaskExecutionKTable, KStream stream) { - KStream joinKStream = stream - .filter( - (key, value) -> conditionService.isTerminatedWithListeners(value.getFlow(), value.getExecution()), - Named.as("HandleWorkerTaskExecution.isTerminated") - ) - .transformValues( - () -> new WorkerTaskExecutionTransformer(runContextFactory, workerTaskExecutionKTable.queryableStoreName()), - Named.as("HandleWorkerTaskExecution.transform"), - workerTaskExecutionKTable.queryableStoreName() - ) - .filter((key, value) -> value != null, Named.as("HandleWorkerTaskExecution.joinNotNullFilter")); - - toWorkerTaskResultSend(joinKStream, "HandleWorkerTaskExecution"); - } - - private void toWorkerTaskResult(KStream stream) { - KStream workerTaskResultKStream = stream - .flatMapValues( - (readOnlyKey, value) -> value.getWorkerTaskResults(), - Named.as("HandleWorkerTaskResult.flapMap") - ); - - toWorkerTaskResultSend(workerTaskResultKStream, "HandleWorkerTaskResult"); - } - - private void toWorkerTaskResultSend(KStream stream, String name) { - KStream workerTaskResultKStream = stream - .transformValues( - () -> new DeduplicationTransformer<>( - name, - WORKERTASK_DEDUPLICATION_STATE_STORE_NAME, - (key, value) -> value.getTaskRun().getExecutionId() + "-" + value.getTaskRun().getId(), - (key, value) -> value.getTaskRun().getState().getCurrent().name() - ), - Named.as(name + ".deduplication"), - WORKERTASK_DEDUPLICATION_STATE_STORE_NAME - ) - .filter((key, value) -> value != null, Named.as(name + ".notNullFilter")) - .selectKey( - (key, value) -> value.getTaskRun().getId(), - Named.as(name + ".selectKey") - ); - - KafkaStreamSourceService.logIfEnabled( - log, - workerTaskResultKStream, - (key, value) -> log(log, false, value), - name - ) - .to( - kafkaAdminService.getTopicName(WorkerTaskResult.class), - Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskResult.class)).withName(name + ".toWorkerTaskResult") - ); - } - - private void purgeWorkerRunning(KStream workerTaskResultKStream) { - workerTaskResultKStream - .filter((key, value) -> value.getTaskRun().getState().isTerninated(), Named.as("PurgeWorkerRunning.filterTerminated")) - .mapValues((readOnlyKey, value) -> 
(WorkerTaskRunning)null, Named.as("PurgeWorkerRunning.toNull")) - .to( - kafkaAdminService.getTopicName(WorkerTaskRunning.class), - Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskRunning.class)).withName("PurgeWorkerRunning.toWorkerTaskRunning") - ); - } - - private void detectNewWorker(KStream workerInstanceKStream, GlobalKTable workerTaskRunningGlobalKTable) { - workerInstanceKStream - .to( - kafkaAdminService.getTopicName(TOPIC_EXECUTOR_WORKERINSTANCE), - Produced.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("DetectNewWorker.toExecutorWorkerInstance") - ); - - KStream stream = workerInstanceKStream - .transformValues( - WorkerInstanceTransformer::new, - Named.as("DetectNewWorker.workerInstanceTransformer") - ) - .flatMapValues((readOnlyKey, value) -> value, Named.as("DetectNewWorker.flapMapList")); - - // we resend the worker task from evicted worker - KStream resultWorkerTask = stream - .flatMapValues( - (readOnlyKey, value) -> value.getWorkerTasksToSend(), - Named.as("DetectNewWorkerTask.flapMapWorkerTaskToSend") - ); - - // and remove from running since already sent - resultWorkerTask - .map((key, value) -> KeyValue.pair(value.getTaskRun().getId(), (WorkerTaskRunning)null), Named.as("DetectNewWorkerTask.workerTaskRunningToNull")) - .to( - kafkaAdminService.getTopicName(WorkerTaskRunning.class), - Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskRunning.class)).withName("DetectNewWorker.toWorkerTaskRunning") - ); - - KafkaStreamSourceService.logIfEnabled( - log, - resultWorkerTask, - (key, value) -> log.debug( - ">> OUT WorkerTask resend : {}", - value.getTaskRun().toStringState() - ), - "DetectNewWorkerTask" - ) - .to( - kafkaAdminService.getTopicName(WorkerTask.class), - Produced.with(Serdes.String(), JsonSerde.of(WorkerTask.class)).withName("DetectNewWorkerTask.toWorkerTask") - ); - - // we resend the WorkerInstance update - KStream updatedStream = KafkaStreamSourceService.logIfEnabled( - log, - stream, - (key, value) -> log.debug( - "Instance updated: {}", - value - ), - "DetectNewWorkerInstance" - ) - .map( - (key, value) -> value.getWorkerInstanceUpdated(), - Named.as("DetectNewWorkerInstance.mapInstance") - ); - - // cleanup executor workerinstance state store - updatedStream - .filter((key, value) -> value != null, Named.as("DetectNewWorkerInstance.filterNotNull")) - .to( - kafkaAdminService.getTopicName(TOPIC_EXECUTOR_WORKERINSTANCE), - Produced.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("DetectNewWorkerInstance.toExecutorWorkerInstance") - ); - - updatedStream - .to( - kafkaAdminService.getTopicName(WorkerInstance.class), - Produced.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("DetectNewWorkerInstance.toWorkerInstance") - ); - } - - private void toExecution(KStream stream) { - KStream streamFrom = stream - .filter((key, value) -> value.isExecutionUpdated(), Named.as("ToExecution.haveFrom")) - .transformValues( - ExecutorAddHeaderTransformer::new, - Named.as("ToExecution.addHeaders") - ); - - // send execution - KStream executionKStream = streamFrom - .filter((key, value) -> value.getException() == null, Named.as("ToExecutionExecution.notException")); - - toExecutionSend(executionKStream, "ToExecutionExecution"); - - // send exception - KStream> failedStream = streamFrom - .filter((key, value) -> value.getException() != null, Named.as("ToExecutionException.isException")) - .mapValues( - e -> Pair.of(e, e.getExecution().failedExecutionFromExecutor(e.getException())), - 
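purgeWorkerRunning and the other purge steps above all delete state the same way: map the value to null and produce the record back to a compacted topic, where the null acts as a tombstone and log compaction eventually drops the key. The pattern in isolation, with hypothetical topic names and a String stand-in for the terminal-state check:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Consumed;
    import org.apache.kafka.streams.kstream.Produced;

    class TombstoneSketch {
        static void wire(StreamsBuilder builder) {
            builder
                .stream("worker-task-results", Consumed.with(Serdes.String(), Serdes.String()))
                .filter((key, value) -> value.contains("TERMINATED")) // stand-in for the state check
                .mapValues(value -> (String) null)                    // null value = tombstone for this key
                .to("worker-task-running", Produced.with(Serdes.String(), Serdes.String()));
        }
    }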
Named.as("ToExecutionException.mapToFailedExecutionWithLog") - ); - - failedStream - .flatMapValues(e -> e.getRight().getLogs(), Named.as("ToExecutionException.flatmapLogs")) - .selectKey((key, value) -> (String)null, Named.as("ToExecutionException.removeKey")) - .to( - kafkaAdminService.getTopicName(LogEntry.class), - Produced.with(Serdes.String(), JsonSerde.of(LogEntry.class)).withName("ToExecutionException.toLogEntry") - ); - - KStream executorFailedKStream = failedStream - .mapValues(e -> e.getLeft().withExecution(e.getRight().getExecution(), "failedExecutionFromExecutor"), Named.as("ToExecutionException.mapToExecutor")); - - toExecutionSend(executorFailedKStream, "ToExecutionException"); - } - - private void toExecutionSend(KStream stream, String from) { - stream = KafkaStreamSourceService.logIfEnabled( - log, - stream, - (key, value) -> log(log, false, value), - from - ); - - stream - .transformValues( - () -> new StateStoreTransformer<>(EXECUTOR_STATE_STORE_NAME, Executor::serialize), - Named.as(from + ".store"), - EXECUTOR_STATE_STORE_NAME - ) - .mapValues((readOnlyKey, value) -> value.getExecution(), Named.as(from + ".mapToExecution")) - .to( - kafkaAdminService.getTopicName(Execution.class), - Produced.with(Serdes.String(), JsonSerde.of(Execution.class)).withName(from + ".toExecution") - ); - } + protected KafkaStreamService kafkaStreamService; - @NoArgsConstructor - @Getter - public static class WorkerTaskResultState { - Map results = new HashMap<>(); - } + @Inject + protected KafkaAdminService kafkaAdminService; - @AllArgsConstructor - @Getter - public static class WorkerTaskRunningWithWorkerTaskRunning { - WorkerInstance workerInstance; - WorkerTaskRunning workerTaskRunning; - } + @Inject + protected List kafkaExecutors; - @NoArgsConstructor - @Getter - public static class WorkerTaskRunningState { - Map workerTaskRunnings = new HashMap<>(); - } + @Inject + protected ExecutorService executorService; @Override public void run() { @@ -868,38 +64,33 @@ public void run() { kafkaAdminService.createIfNotExist(Trigger.class); kafkaAdminService.createIfNotExist(ExecutorFlowTrigger.class); - Properties properties = new Properties(); - // build - Topology topology = this.topology().build(); - - if (log.isTraceEnabled()) { - log.trace(topology.describe().toString()); - } + this.streams = this.kafkaExecutors + .stream() + .parallel() + .map(executor -> { + Properties properties = new Properties(); + // build + Topology topology = executor.topology().build(); - stream = kafkaStreamService.of(this.getClass(), this.getClass(), topology, properties, log); - stream.start(); + Logger logger = LoggerFactory.getLogger(executor.getClass()); + KafkaStreamService.Stream stream = kafkaStreamService.of(executor.getClass(), executor.getClass(), topology, properties, logger); + stream.start(); - applicationContext.inject(stream); + executor.onCreated(applicationContext, stream); - applicationContext.registerSingleton(new KafkaTemplateExecutor( - stream.store(StoreQueryParameters.fromNameAndType("template", QueryableStoreTypes.keyValueStore())), - "template" - )); + applicationContext.inject(stream); - this.flowExecutorInterface = new KafkaFlowExecutor( - stream.store(StoreQueryParameters.fromNameAndType("flow", QueryableStoreTypes.keyValueStore())), - "flow", - applicationContext - ); - applicationContext.registerSingleton(this.flowExecutorInterface); + return stream; + }) + .collect(Collectors.toList()); } @Override public void close() throws IOException { - if (this.stream != null) { - 
this.stream.close(Duration.ofSeconds(10)); - this.stream = null; + if (streams != null) { + streams + .parallelStream() + .forEach(stream -> stream.close(Duration.ofSeconds(10))); } } - } diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorFlowTrigger.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorFlowTrigger.java new file mode 100644 index 00000000000..b9b6e38e534 --- /dev/null +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorFlowTrigger.java @@ -0,0 +1,98 @@ +package io.kestra.runner.kafka.executors; + +import io.kestra.core.models.executions.Execution; +import io.kestra.core.models.triggers.multipleflows.MultipleConditionWindow; +import io.kestra.core.services.FlowService; +import io.kestra.runner.kafka.KafkaQueueEnabled; +import io.kestra.runner.kafka.serializers.JsonSerde; +import io.kestra.runner.kafka.services.KafkaAdminService; +import io.kestra.runner.kafka.services.KafkaStreamSourceService; +import io.kestra.runner.kafka.services.KafkaStreamsBuilder; +import io.kestra.runner.kafka.streams.FlowTriggerWithExecutionTransformer; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.kstream.Consumed; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.Named; +import org.apache.kafka.streams.kstream.Produced; +import org.apache.kafka.streams.state.Stores; + +import java.util.stream.Collectors; + +@KafkaQueueEnabled +@Singleton +public class ExecutorFlowTrigger implements KafkaExecutorInterface { + public static final String TRIGGER_MULTIPLE_STATE_STORE_NAME = "trigger_multiplecondition"; + + @Inject + private KafkaAdminService kafkaAdminService; + + @Inject + private FlowService flowService; + + @Inject + private KafkaStreamSourceService kafkaStreamSourceService; + + public StreamsBuilder topology() { + StreamsBuilder builder = new KafkaStreamsBuilder(); + + kafkaStreamSourceService.flowGlobalKTable(builder); + + // trigger + builder.addStateStore( + Stores.keyValueStoreBuilder( + Stores.persistentKeyValueStore(TRIGGER_MULTIPLE_STATE_STORE_NAME), + Serdes.String(), + JsonSerde.of(MultipleConditionWindow.class) + ) + ); + + KStream stream = builder + .stream( + kafkaAdminService.getTopicName(io.kestra.runner.kafka.streams.ExecutorFlowTrigger.class), + Consumed.with(Serdes.String(), JsonSerde.of(io.kestra.runner.kafka.streams.ExecutorFlowTrigger.class)) + .withName("KStream.ExecutorFlowTrigger") + ) + .filter((key, value) -> value != null, Named.as("ExecutorFlowTrigger.filterNotNull")); + + stream + .transformValues( + () -> new FlowTriggerWithExecutionTransformer( + TRIGGER_MULTIPLE_STATE_STORE_NAME, + flowService + ), + Named.as("ExecutorFlowTrigger.transformToExecutionList"), + TRIGGER_MULTIPLE_STATE_STORE_NAME + ) + .flatMap( + (key, value) -> value + .stream() + .map(execution -> new KeyValue<>(execution.getId(), execution)) + .collect(Collectors.toList()), + Named.as("ExecutorFlowTrigger.flapMapToExecution") + ) + .to( + kafkaAdminService.getTopicName(Execution.class), + Produced + .with(Serdes.String(), JsonSerde.of(Execution.class)) + .withName("ExecutorFlowTrigger.toExecution") + ); + + stream + .mapValues( + (readOnlyKey, value) -> (io.kestra.runner.kafka.streams.ExecutorFlowTrigger)null, + Named.as("ExecutorFlowTrigger.executorFlowTriggerToNull") + ) + .to( + 
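With this refactoring the runner no longer owns one monolithic topology: every KafkaExecutorInterface implementation contributes its own StreamsBuilder and is started as its own KafkaStreams instance, as the new run() above shows. A stripped-down sketch of that composition; the provider interface mirrors the patch, while the Properties handling stands in for what KafkaStreamService actually does:

    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.StreamsConfig;

    import java.time.Duration;
    import java.util.List;
    import java.util.Properties;
    import java.util.stream.Collectors;

    interface TopologyProvider {
        StreamsBuilder topology();
    }

    class MultiStreamRunner {
        private List<KafkaStreams> streams;

        void run(List<TopologyProvider> providers) {
            this.streams = providers.stream()
                .map(provider -> {
                    Properties props = new Properties();
                    // each instance needs its own application.id, otherwise the
                    // topologies would share consumer groups and state directories
                    props.put(StreamsConfig.APPLICATION_ID_CONFIG,
                        "executor-" + provider.getClass().getSimpleName().toLowerCase());
                    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

                    KafkaStreams stream = new KafkaStreams(provider.topology().build(), props);
                    stream.start();
                    return stream;
                })
                .collect(Collectors.toList());
        }

        void close() {
            if (streams != null) {
                streams.forEach(stream -> stream.close(Duration.ofSeconds(10)));
            }
        }
    }

The cost of the split is one consumer group and one state directory per sub-topology; the gain is that each piece can be deployed, monitored and restarted independently.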
kafkaAdminService.getTopicName(io.kestra.runner.kafka.streams.ExecutorFlowTrigger.class), + Produced + .with(Serdes.String(), JsonSerde.of(io.kestra.runner.kafka.streams.ExecutorFlowTrigger.class)) + .withName("ExecutorFlowTrigger.toExecutorFlowTrigger") + ); + + return builder; + } +} diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorMain.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorMain.java new file mode 100644 index 00000000000..a1e21a0bf62 --- /dev/null +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorMain.java @@ -0,0 +1,700 @@ +package io.kestra.runner.kafka.executors; + +import io.kestra.core.metrics.MetricRegistry; +import io.kestra.core.models.executions.Execution; +import io.kestra.core.models.executions.ExecutionKilled; +import io.kestra.core.models.executions.LogEntry; +import io.kestra.core.models.executions.TaskRun; +import io.kestra.core.models.flows.Flow; +import io.kestra.core.models.flows.State; +import io.kestra.core.models.templates.Template; +import io.kestra.core.queues.QueueService; +import io.kestra.core.runners.*; +import io.kestra.core.services.ConditionService; +import io.kestra.core.services.FlowService; +import io.kestra.runner.kafka.KafkaFlowExecutor; +import io.kestra.runner.kafka.KafkaQueueEnabled; +import io.kestra.runner.kafka.KafkaTemplateExecutor; +import io.kestra.runner.kafka.serializers.JsonSerde; +import io.kestra.runner.kafka.services.KafkaAdminService; +import io.kestra.runner.kafka.services.KafkaStreamService; +import io.kestra.runner.kafka.services.KafkaStreamSourceService; +import io.kestra.runner.kafka.services.KafkaStreamsBuilder; +import io.kestra.runner.kafka.streams.*; +import io.micronaut.context.ApplicationContext; +import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.utils.Bytes; +import org.apache.kafka.streams.*; +import org.apache.kafka.streams.kstream.*; +import org.apache.kafka.streams.state.KeyValueStore; +import org.apache.kafka.streams.state.QueryableStoreTypes; +import org.apache.kafka.streams.state.Stores; +import org.slf4j.event.Level; + +import java.util.HashMap; +import java.util.Map; + +import jakarta.inject.Inject; +import jakarta.inject.Singleton; + +@KafkaQueueEnabled +@Singleton +@Slf4j +public class ExecutorMain implements KafkaExecutorInterface { + private static final String EXECUTOR_STATE_STORE_NAME = "executor"; + private static final String WORKERTASK_DEDUPLICATION_STATE_STORE_NAME = "workertask_deduplication"; + private static final String TRIGGER_DEDUPLICATION_STATE_STORE_NAME = "trigger_deduplication"; + private static final String NEXTS_DEDUPLICATION_STATE_STORE_NAME = "next_deduplication"; + + protected FlowExecutorInterface flowExecutorInterface; + + @Inject + private KafkaAdminService kafkaAdminService; + + @Inject + private FlowService flowService; + + @Inject + private KafkaStreamSourceService kafkaStreamSourceService; + + @Inject + private QueueService queueService; + + @Inject + private MetricRegistry metricRegistry; + + @Inject + private ConditionService conditionService; + + @Inject + private ExecutorService executorService; + + @Inject + private RunContextFactory runContextFactory; + + @Override + public void onCreated(ApplicationContext applicationContext, KafkaStreamService.Stream stream) { + 
applicationContext.registerSingleton(new KafkaTemplateExecutor( + stream.store(StoreQueryParameters.fromNameAndType("template", QueryableStoreTypes.keyValueStore())), + "template" + )); + + if (flowExecutorInterface == null) { + this.flowExecutorInterface = new KafkaFlowExecutor( + stream.store(StoreQueryParameters.fromNameAndType("flow", QueryableStoreTypes.keyValueStore())), + "flow", + applicationContext + ); + } + + applicationContext.registerSingleton(this.flowExecutorInterface); + } + + public StreamsBuilder topology() { + StreamsBuilder builder = new KafkaStreamsBuilder(); + + // executor + builder.addStateStore(Stores.keyValueStoreBuilder( + Stores.persistentKeyValueStore(EXECUTOR_STATE_STORE_NAME), + Serdes.String(), + JsonSerde.of(Executor.class) + )); + + // WorkerTask deduplication + builder.addStateStore(Stores.keyValueStoreBuilder( + Stores.persistentKeyValueStore(WORKERTASK_DEDUPLICATION_STATE_STORE_NAME), + Serdes.String(), + Serdes.String() + )); + + // next deduplication + builder.addStateStore(Stores.keyValueStoreBuilder( + Stores.persistentKeyValueStore(NEXTS_DEDUPLICATION_STATE_STORE_NAME), + Serdes.String(), + JsonSerde.of(ExecutorNextTransformer.Store.class) + )); + + // trigger deduplication + builder.addStateStore(Stores.keyValueStoreBuilder( + Stores.persistentKeyValueStore(TRIGGER_DEDUPLICATION_STATE_STORE_NAME), + Serdes.String(), + Serdes.String() + )); + + // declare common stream + KStream workerTaskResultKStream = this.workerTaskResultKStream(builder); + KStream executorKStream = this.executorKStream(builder); + + // join with killed + KStream executionKilledKStream = this.executionKilledKStream(builder); + KStream executionWithKilled = this.joinExecutionKilled(executionKilledKStream, executorKStream); + + // join with WorkerResult + KStream executionKStream = this.joinWorkerResult(workerTaskResultKStream, executionWithKilled); + + // handle state on execution + GlobalKTable flowKTable = kafkaStreamSourceService.flowGlobalKTable(builder); + GlobalKTable templateKTable = kafkaStreamSourceService.templateGlobalKTable(builder); + KStream stream = kafkaStreamSourceService.executorWithFlow(executionKStream, true); + + stream = this.handleExecutor(stream); + + // save execution + this.toExecution(stream); + this.toWorkerTask(stream); + this.toWorkerTaskResult(stream); + + this.toExecutorFlowTriggerTopic(stream); + + // task Flow + KTable workerTaskExecutionKTable = this.workerTaskExecutionStream(builder); + + KStream workerTaskExecutionKStream = this.deduplicateWorkerTaskExecution(stream); + this.toWorkerTaskExecution(workerTaskExecutionKStream); + this.workerTaskExecutionToExecution(workerTaskExecutionKStream); + this.handleWorkerTaskExecution(workerTaskExecutionKTable, stream); + + // purge at end + this.purgeExecutor(stream); + + this.purgeWorkerRunning(workerTaskResultKStream); + + return builder; + } + + public KStream executorKStream(StreamsBuilder builder) { + KStream result = builder + .stream( + kafkaAdminService.getTopicName(Execution.class), + Consumed.with(Serdes.String(), JsonSerde.of(Execution.class)).withName("Executor.fromExecution") + ) + .filter((key, value) -> value != null, Named.as("Executor.filterNotNull")) + .transformValues( + () -> new ExecutorFromExecutionTransformer(EXECUTOR_STATE_STORE_NAME), + Named.as("Executor.toExecutor"), + EXECUTOR_STATE_STORE_NAME + ); + + // logs + KafkaStreamSourceService.logIfEnabled( + log, + result, + (key, value) -> executorService.log(log, true, value), + "ExecutionIn" + ); + + return result; + } + + 
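The DeduplicationTransformer used throughout this topology is a store-backed ValueTransformerWithKey: it remembers the last state seen for a deduplication key and returns null, which the following filter drops, whenever that state has not changed. A self-contained sketch of the mechanic, with both key and state simplified to String (the real transformer derives them from the record via extractor lambdas):

    import org.apache.kafka.streams.kstream.ValueTransformerWithKey;
    import org.apache.kafka.streams.processor.ProcessorContext;
    import org.apache.kafka.streams.state.KeyValueStore;

    import java.util.Objects;

    public class DedupSketch implements ValueTransformerWithKey<String, String, String> {
        private final String storeName;
        private KeyValueStore<String, String> store;

        public DedupSketch(String storeName) {
            this.storeName = storeName;
        }

        @Override
        public void init(ProcessorContext context) {
            this.store = context.getStateStore(storeName);
        }

        @Override
        public String transform(String key, String value) {
            String previous = store.get(key);
            if (Objects.equals(previous, value)) {
                return null; // unchanged: a downstream filter((k, v) -> v != null) drops it
            }
            store.put(key, value);
            return value; // first sighting, or the state moved on: let it through
        }

        @Override
        public void close() {
        }
    }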
private KStream executionKilledKStream(StreamsBuilder builder) { + return builder + .stream( + kafkaAdminService.getTopicName(ExecutionKilled.class), + Consumed.with(Serdes.String(), JsonSerde.of(ExecutionKilled.class)).withName("KTable.ExecutionKilled") + ); + } + + private KStream joinExecutionKilled(KStream executionKilledKStream, KStream executorKStream) { + return executorKStream + .merge( + executionKilledKStream + .transformValues( + () -> new ExecutorKilledJoinerTransformer( + EXECUTOR_STATE_STORE_NAME + ), + Named.as("JoinExecutionKilled.transformValues"), + EXECUTOR_STATE_STORE_NAME + ) + .filter((key, value) -> value != null, Named.as("JoinExecutionKilled.filterNotNull")), + Named.as("JoinExecutionKilled.merge") + ); + } + + private KStream workerTaskResultKStream(StreamsBuilder builder) { + return builder + .stream( + kafkaAdminService.getTopicName(WorkerTaskResult.class), + Consumed.with(Serdes.String(), JsonSerde.of(WorkerTaskResult.class)).withName("KStream.WorkerTaskResult") + ) + .filter((key, value) -> value != null, Named.as("WorkerTaskResultKStream.filterNotNull")); + } + + private KStream joinWorkerResult(KStream workerTaskResultKstream, KStream executorKStream) { + return executorKStream + .merge( + workerTaskResultKstream + .selectKey((key, value) -> value.getTaskRun().getExecutionId(), Named.as("JoinWorkerResult.selectKey")) + .mapValues( + (key, value) -> new Executor(value), + Named.as("JoinWorkerResult.WorkerTaskResultMap") + ) + .repartition( + Repartitioned.as("workertaskjoined") + .withKeySerde(Serdes.String()) + .withValueSerde(JsonSerde.of(Executor.class)) + ), + Named.as("JoinWorkerResult.merge") + ) + .transformValues( + () -> new ExecutorJoinerTransformer( + EXECUTOR_STATE_STORE_NAME, + this.metricRegistry + ), + Named.as("JoinWorkerResult.transformValues"), + EXECUTOR_STATE_STORE_NAME + ) + .filter( + (key, value) -> value != null, + Named.as("JoinWorkerResult.notNullFilter") + ); + } + + private KStream handleExecutor(KStream stream) { + return stream + .transformValues( + () -> new ExecutorNextTransformer( + NEXTS_DEDUPLICATION_STATE_STORE_NAME, + this.executorService + ), + Named.as("HandleExecutor.transformValues"), + NEXTS_DEDUPLICATION_STATE_STORE_NAME + ); + } + + private void purgeExecutor(KStream stream) { + KStream terminatedWithKilled = stream + .filter( + (key, value) -> conditionService.isTerminatedWithListeners(value.getFlow(), value.getExecution()), + Named.as("PurgeExecutor.filterTerminated") + ); + + // we don't purge killed execution in order to have feedback about child running tasks + // this can be killed lately (after the executor kill the execution), but we want to keep + // feedback about the actual state (killed or not) + // @TODO: this can lead to infinite state store for most executor topic + KStream terminated = terminatedWithKilled.filter( + (key, value) -> value.getExecution().getState().getCurrent() != State.Type.KILLED, + Named.as("PurgeExecutor.filterKilledExecution") + ); + + // clean up executor + terminated + .mapValues( + (readOnlyKey, value) -> (Execution) null, + Named.as("PurgeExecutor.executionToNull") + ) + .to( + kafkaAdminService.getTopicName(Executor.class), + Produced.with(Serdes.String(), JsonSerde.of(Execution.class)).withName("PurgeExecutor.toExecutor") + ); + + // flatMap taskRun + KStream taskRunKStream = terminated + .filter( + (key, value) -> value.getExecution().getTaskRunList() != null, + Named.as("PurgeExecutor.notNullTaskRunList") + ) + .flatMapValues( + (readOnlyKey, value) -> 
value.getExecution().getTaskRunList(), + Named.as("PurgeExecutor.flatMapTaskRunList") + ); + + // clean up workerTaskResult + taskRunKStream + .map( + (readOnlyKey, value) -> new KeyValue<>( + value.getId(), + (WorkerTaskResult) null + ), + Named.as("PurgeExecutor.workerTaskResultToNull") + ) + .to( + kafkaAdminService.getTopicName(WorkerTaskResult.class), + Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskResult.class)).withName("PurgeExecutor.toWorkerTaskResult") + ); + + // clean up WorkerTask deduplication state + taskRunKStream + .transformValues( + () -> new DeduplicationPurgeTransformer<>( + WORKERTASK_DEDUPLICATION_STATE_STORE_NAME, + (key, value) -> value.getExecutionId() + "-" + value.getId() + ), + Named.as("PurgeExecutor.purgeWorkerTaskDeduplication"), + WORKERTASK_DEDUPLICATION_STATE_STORE_NAME + ); + + taskRunKStream + .transformValues( + () -> new DeduplicationPurgeTransformer<>( + WORKERTASK_DEDUPLICATION_STATE_STORE_NAME, + (key, value) -> "WorkerTaskExecution-" + value.getExecutionId() + "-" + value.getId() + ), + Named.as("PurgeExecutor.purgeWorkerTaskExecutionDeduplication"), + WORKERTASK_DEDUPLICATION_STATE_STORE_NAME + ); + + // clean up Execution Nexts deduplication state + terminated + .transformValues( + () -> new DeduplicationPurgeTransformer<>( + NEXTS_DEDUPLICATION_STATE_STORE_NAME, + (key, value) -> value.getExecution().getId() + ), + Named.as("PurgeExecutor.purgeNextsDeduplication"), + NEXTS_DEDUPLICATION_STATE_STORE_NAME + ); + + // clean up killed + terminatedWithKilled + .mapValues( + (readOnlyKey, value) -> (ExecutionKilled) null, + Named.as("PurgeExecutor.executionKilledToNull") + ) + .to( + kafkaAdminService.getTopicName(ExecutionKilled.class), + Produced.with(Serdes.String(), JsonSerde.of(ExecutionKilled.class)).withName("PurgeExecutor.toExecutionKilled") + ); + } + + private void toExecutorFlowTriggerTopic(KStream stream) { + stream + .filter( + (key, value) -> conditionService.isTerminatedWithListeners(value.getFlow(), value.getExecution()), + Named.as("HandleExecutorFlowTriggerTopic.filterTerminated") + ) + .transformValues( + () -> new DeduplicationTransformer<>( + "FlowTrigger", + TRIGGER_DEDUPLICATION_STATE_STORE_NAME, + (key, value) -> value.getExecution().getId(), + (key, value) -> value.getExecution().getId() + ), + Named.as("HandleExecutorFlowTriggerTopic.deduplication"), + TRIGGER_DEDUPLICATION_STATE_STORE_NAME + ) + .filter((key, value) -> value != null, Named.as("HandleExecutorFlowTriggerTopic.deduplicationNotNull")) + .flatTransform( + () -> new FlowWithTriggerTransformer(flowService), + Named.as("HandleExecutorFlowTriggerTopic.flatMapToExecutorFlowTrigger") + ) + .to( + kafkaAdminService.getTopicName(io.kestra.runner.kafka.streams.ExecutorFlowTrigger.class), + Produced + .with(Serdes.String(), JsonSerde.of(io.kestra.runner.kafka.streams.ExecutorFlowTrigger.class)) + .withName("PurgeExecutor.toExecutorFlowTrigger") + ); + } + + private void toWorkerTask(KStream stream) { + // deduplication worker task + KStream dedupWorkerTask = stream + .flatMapValues( + (readOnlyKey, value) -> value.getWorkerTasks(), + Named.as("HandleWorkerTask.flatMapToWorkerTask") + ) + .transformValues( + () -> new DeduplicationTransformer<>( + "WorkerTask", + WORKERTASK_DEDUPLICATION_STATE_STORE_NAME, + (key, value) -> value.getTaskRun().getExecutionId() + "-" + value.getTaskRun().getId(), + (key, value) -> value.getTaskRun().getState().getCurrent().name() + ), + Named.as("HandleWorkerTask.deduplication"), + WORKERTASK_DEDUPLICATION_STATE_STORE_NAME + ) + 
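DeduplicationPurgeTransformer, applied several times above, is the mirror image of the deduplication sketch earlier: once an execution terminates, it deletes the matching store entries so the deduplication stores cannot grow without bound. Its transform step reduces to roughly this, again with simplified String types and an assumed key-derivation rule:

    import org.apache.kafka.streams.kstream.ValueTransformerWithKey;
    import org.apache.kafka.streams.processor.ProcessorContext;
    import org.apache.kafka.streams.state.KeyValueStore;

    public class DedupPurgeSketch implements ValueTransformerWithKey<String, String, String> {
        private final String storeName;
        private KeyValueStore<String, String> store;

        public DedupPurgeSketch(String storeName) {
            this.storeName = storeName;
        }

        @Override
        public void init(ProcessorContext context) {
            this.store = context.getStateStore(storeName);
        }

        @Override
        public String transform(String key, String value) {
            // the dedup key is rebuilt from the record, e.g. executionId + "-" + taskRunId
            store.delete(value);
            return value; // pass the record through untouched
        }

        @Override
        public void close() {
        }
    }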
.filter((key, value) -> value != null, Named.as("HandleWorkerTask.notNullFilter")); + + // flowable > running to WorkerTaskResult + KStream resultFlowable = dedupWorkerTask + .filter((key, value) -> value.getTask().isFlowable(), Named.as("HandleWorkerTaskFlowable.filterIsFlowable")) + .mapValues( + (key, value) -> new WorkerTaskResult(value.withTaskRun(value.getTaskRun().withState(State.Type.RUNNING))), + Named.as("HandleWorkerTaskFlowable.toRunning") + ) + .map( + (key, value) -> new KeyValue<>(queueService.key(value), value), + Named.as("HandleWorkerTaskFlowable.mapWithKey") + ) + .selectKey( + (key, value) -> queueService.key(value), + Named.as("HandleWorkerTaskFlowable.selectKey") + ); + + KStream workerTaskResultKStream = KafkaStreamSourceService.logIfEnabled( + log, + resultFlowable, + (key, value) -> executorService.log(log, false, value), + "HandleWorkerTaskFlowable" + ); + + workerTaskResultKStream + .to( + kafkaAdminService.getTopicName(WorkerTaskResult.class), + Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskResult.class)).withName("HandleWorkerTaskFlowable.toWorkerTaskResult") + ); + + // not flowable > to WorkerTask + KStream resultNotFlowable = dedupWorkerTask + .filter((key, value) -> !value.getTask().isFlowable(), Named.as("HandleWorkerTaskNotFlowable.filterIsNotFlowable")) + .map((key, value) -> new KeyValue<>(queueService.key(value), value), Named.as("HandleWorkerTaskNotFlowable.mapWithKey")) + .selectKey( + (key, value) -> queueService.key(value), + Named.as("HandleWorkerTaskNotFlowable.selectKey") + ); + + KStream workerTaskKStream = KafkaStreamSourceService.logIfEnabled( + log, + resultNotFlowable, + (key, value) -> executorService.log(log, false, value), + "HandleWorkerTaskNotFlowable" + ); + + workerTaskKStream + .to( + kafkaAdminService.getTopicName(WorkerTask.class), + Produced.with(Serdes.String(), JsonSerde.of(WorkerTask.class)).withName("HandleWorkerTaskNotFlowable.toWorkerTask") + ); + } + + private KTable workerTaskExecutionStream(StreamsBuilder builder) { + return builder + .table( + kafkaAdminService.getTopicName(WorkerTaskExecution.class), + Consumed.with(Serdes.String(), JsonSerde.of(WorkerTaskExecution.class)).withName("WorkerTaskExecution.from"), + Materialized.>as("workertaskexecution") + .withKeySerde(Serdes.String()) + .withValueSerde(JsonSerde.of(WorkerTaskExecution.class)) + ); + } + + private KStream deduplicateWorkerTaskExecution(KStream stream) { + return stream + .flatMapValues( + (readOnlyKey, value) -> value.getWorkerTaskExecutions(), + Named.as("DeduplicateWorkerTaskExecution.flatMap") + ) + .transformValues( + () -> new DeduplicationTransformer<>( + "DeduplicateWorkerTaskExecution", + WORKERTASK_DEDUPLICATION_STATE_STORE_NAME, + (key, value) -> "WorkerTaskExecution-" + value.getTaskRun().getExecutionId() + "-" + value.getTaskRun().getId(), + (key, value) -> value.getTaskRun().getState().getCurrent().name() + ), + Named.as("DeduplicateWorkerTaskExecution.deduplication"), + WORKERTASK_DEDUPLICATION_STATE_STORE_NAME + ) + .filter((key, value) -> value != null, Named.as("DeduplicateWorkerTaskExecution.notNullFilter")); + } + + private void toWorkerTaskExecution(KStream stream) { + stream + .selectKey( + (key, value) -> value.getExecution().getId(), + Named.as("ToWorkerTaskExecution.selectKey") + ) + .to( + kafkaAdminService.getTopicName(WorkerTaskExecution.class), + Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskExecution.class)).withName("ToWorkerTaskExecution.toWorkerTaskExecution") + ); + } + + private void 
workerTaskExecutionToExecution(KStream stream) { + stream + .mapValues( + value -> { + String message = "Create new execution for flow '" + + value.getExecution().getNamespace() + "'." + value.getExecution().getFlowId() + + "' with id '" + value.getExecution().getId() + "' from task '" + value.getTask().getId() + + "' and taskrun '" + value.getTaskRun().getId() + + (value.getTaskRun().getValue() != null ? " (" + value.getTaskRun().getValue() + ")" : "") + "'"; + + log.info(message); + + LogEntry.LogEntryBuilder logEntryBuilder = LogEntry.of(value.getTaskRun()).toBuilder() + .level(Level.INFO) + .message(message) + .timestamp(value.getTaskRun().getState().getStartDate()) + .thread(Thread.currentThread().getName()); + + return logEntryBuilder.build(); + }, + Named.as("WorkerTaskExecutionToExecution.mapToLog") + ) + .selectKey((key, value) -> (String)null, Named.as("WorkerTaskExecutionToExecution.logRemoveKey")) + .to( + kafkaAdminService.getTopicName(LogEntry.class), + Produced.with(Serdes.String(), JsonSerde.of(LogEntry.class)).withName("WorkerTaskExecutionToExecution.toLogEntry") + ); + + KStream executionKStream = stream + .mapValues( + (key, value) -> value.getExecution(), + Named.as("WorkerTaskExecutionToExecution.map") + ) + .selectKey( + (key, value) -> value.getId(), + Named.as("WorkerTaskExecutionToExecution.selectKey") + ); + + executionKStream = KafkaStreamSourceService.logIfEnabled( + log, + executionKStream, + (key, value) -> executorService.log(log, false, value), + "WorkerTaskExecutionToExecution" + ); + + executionKStream + .to( + kafkaAdminService.getTopicName(Execution.class), + Produced.with(Serdes.String(), JsonSerde.of(Execution.class)).withName("WorkerTaskExecutionToExecution.toExecution") + ); + } + + private void handleWorkerTaskExecution(KTable workerTaskExecutionKTable, KStream stream) { + KStream joinKStream = stream + .filter( + (key, value) -> conditionService.isTerminatedWithListeners(value.getFlow(), value.getExecution()), + Named.as("HandleWorkerTaskExecution.isTerminated") + ) + .transformValues( + () -> new WorkerTaskExecutionTransformer(runContextFactory, workerTaskExecutionKTable.queryableStoreName()), + Named.as("HandleWorkerTaskExecution.transform"), + workerTaskExecutionKTable.queryableStoreName() + ) + .filter((key, value) -> value != null, Named.as("HandleWorkerTaskExecution.joinNotNullFilter")); + + toWorkerTaskResultSend(joinKStream, "HandleWorkerTaskExecution"); + } + + private void toWorkerTaskResult(KStream stream) { + KStream workerTaskResultKStream = stream + .flatMapValues( + (readOnlyKey, value) -> value.getWorkerTaskResults(), + Named.as("HandleWorkerTaskResult.flapMap") + ); + + toWorkerTaskResultSend(workerTaskResultKStream, "HandleWorkerTaskResult"); + } + + private void toWorkerTaskResultSend(KStream stream, String name) { + KStream workerTaskResultKStream = stream + .transformValues( + () -> new DeduplicationTransformer<>( + name, + WORKERTASK_DEDUPLICATION_STATE_STORE_NAME, + (key, value) -> value.getTaskRun().getExecutionId() + "-" + value.getTaskRun().getId(), + (key, value) -> value.getTaskRun().getState().getCurrent().name() + ), + Named.as(name + ".deduplication"), + WORKERTASK_DEDUPLICATION_STATE_STORE_NAME + ) + .filter((key, value) -> value != null, Named.as(name + ".notNullFilter")) + .selectKey( + (key, value) -> value.getTaskRun().getId(), + Named.as(name + ".selectKey") + ); + + KafkaStreamSourceService.logIfEnabled( + log, + workerTaskResultKStream, + (key, value) -> executorService.log(log, false, value), + name + ) + 
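The send path here re-keys every WorkerTaskResult to its task run id before producing, so all results for one task run land on the same partition and are consumed in order. The re-keying shape in isolation; the "taskRunId:payload" value layout is invented for the example:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Consumed;
    import org.apache.kafka.streams.kstream.Named;
    import org.apache.kafka.streams.kstream.Produced;

    class ReKeySketch {
        static void wire(StreamsBuilder builder) {
            builder
                .stream("results-by-execution", Consumed.with(Serdes.String(), Serdes.String()))
                // selectKey marks the stream for repartitioning, so the sink
                // topic sees the records distributed by the new key
                .selectKey((key, value) -> value.split(":", 2)[0], Named.as("Sketch.selectKey"))
                .to("results-by-taskrun", Produced.with(Serdes.String(), Serdes.String()));
        }
    }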
.to( + kafkaAdminService.getTopicName(WorkerTaskResult.class), + Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskResult.class)).withName(name + ".toWorkerTaskResult") + ); + } + + private void purgeWorkerRunning(KStream workerTaskResultKStream) { + workerTaskResultKStream + .filter((key, value) -> value.getTaskRun().getState().isTerninated(), Named.as("PurgeWorkerRunning.filterTerminated")) + .mapValues((readOnlyKey, value) -> (WorkerTaskRunning)null, Named.as("PurgeWorkerRunning.toNull")) + .to( + kafkaAdminService.getTopicName(WorkerTaskRunning.class), + Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskRunning.class)).withName("PurgeWorkerRunning.toWorkerTaskRunning") + ); + } + + private void toExecution(KStream stream) { + KStream streamFrom = stream + .filter((key, value) -> value.isExecutionUpdated(), Named.as("ToExecution.haveFrom")) + .transformValues( + ExecutorAddHeaderTransformer::new, + Named.as("ToExecution.addHeaders") + ); + + // send execution + KStream executionKStream = streamFrom + .filter((key, value) -> value.getException() == null, Named.as("ToExecutionExecution.notException")); + + toExecutionSend(executionKStream, "ToExecutionExecution"); + + // send exception + KStream> failedStream = streamFrom + .filter((key, value) -> value.getException() != null, Named.as("ToExecutionException.isException")) + .mapValues( + e -> Pair.of(e, e.getExecution().failedExecutionFromExecutor(e.getException())), + Named.as("ToExecutionException.mapToFailedExecutionWithLog") + ); + + failedStream + .flatMapValues(e -> e.getRight().getLogs(), Named.as("ToExecutionException.flatmapLogs")) + .selectKey((key, value) -> (String)null, Named.as("ToExecutionException.removeKey")) + .to( + kafkaAdminService.getTopicName(LogEntry.class), + Produced.with(Serdes.String(), JsonSerde.of(LogEntry.class)).withName("ToExecutionException.toLogEntry") + ); + + KStream executorFailedKStream = failedStream + .mapValues(e -> e.getLeft().withExecution(e.getRight().getExecution(), "failedExecutionFromExecutor"), Named.as("ToExecutionException.mapToExecutor")); + + toExecutionSend(executorFailedKStream, "ToExecutionException"); + } + + private void toExecutionSend(KStream stream, String from) { + stream = KafkaStreamSourceService.logIfEnabled( + log, + stream, + (key, value) -> executorService.log(log, false, value), + from + ); + + stream + .transformValues( + () -> new StateStoreTransformer<>(EXECUTOR_STATE_STORE_NAME, Executor::serialize), + Named.as(from + ".store"), + EXECUTOR_STATE_STORE_NAME + ) + .mapValues((readOnlyKey, value) -> value.getExecution(), Named.as(from + ".mapToExecution")) + .to( + kafkaAdminService.getTopicName(Execution.class), + Produced.with(Serdes.String(), JsonSerde.of(Execution.class)).withName(from + ".toExecution") + ); + } + + @NoArgsConstructor + @Getter + public static class WorkerTaskResultState { + Map results = new HashMap<>(); + } + + @AllArgsConstructor + @Getter + public static class WorkerTaskRunningWithWorkerTaskRunning { + WorkerInstance workerInstance; + WorkerTaskRunning workerTaskRunning; + } + + @NoArgsConstructor + @Getter + public static class WorkerTaskRunningState { + Map workerTaskRunnings = new HashMap<>(); + } +} diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java new file mode 100644 index 00000000000..cad48486180 --- /dev/null +++ 
b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java @@ -0,0 +1,140 @@ +package io.kestra.runner.kafka.executors; + +import io.kestra.core.runners.ExecutorService; +import io.kestra.core.runners.WorkerInstance; +import io.kestra.core.runners.WorkerTask; +import io.kestra.core.runners.WorkerTaskRunning; +import io.kestra.runner.kafka.KafkaQueueEnabled; +import io.kestra.runner.kafka.serializers.JsonSerde; +import io.kestra.runner.kafka.services.KafkaAdminService; +import io.kestra.runner.kafka.services.KafkaStreamSourceService; +import io.kestra.runner.kafka.services.KafkaStreamsBuilder; +import io.kestra.runner.kafka.streams.GlobalStateProcessor; +import io.kestra.runner.kafka.streams.WorkerInstanceTransformer; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; +import lombok.extern.slf4j.Slf4j; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.utils.Bytes; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.kstream.*; +import org.apache.kafka.streams.state.KeyValueStore; +import org.apache.kafka.streams.state.Stores; + +@KafkaQueueEnabled +@Singleton +@Slf4j +public class ExecutorWorkerRunning implements KafkaExecutorInterface { + public static final String WORKERINSTANCE_STATE_STORE_NAME = "worker_instance"; + public static final String TOPIC_EXECUTOR_WORKERINSTANCE = "executorworkerinstance"; + public static final String WORKER_RUNNING_STATE_STORE_NAME = "worker_running"; + + @Inject + private KafkaAdminService kafkaAdminService; + + @Inject + private ExecutorService executorService; + + public StreamsBuilder topology() { + StreamsBuilder builder = new KafkaStreamsBuilder(); + + builder.addGlobalStore( + Stores.keyValueStoreBuilder( + Stores.persistentKeyValueStore(WORKERINSTANCE_STATE_STORE_NAME), + Serdes.String(), + JsonSerde.of(WorkerInstance.class) + ), + kafkaAdminService.getTopicName(KafkaStreamSourceService.TOPIC_EXECUTOR_WORKERINSTANCE), + Consumed.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("GlobalStore.ExecutorWorkerInstace"), + () -> new GlobalStateProcessor<>(WORKERINSTANCE_STATE_STORE_NAME) + ); + + // only used as state store + builder + .globalTable( + kafkaAdminService.getTopicName(WorkerTaskRunning.class), + Consumed.with(Serdes.String(), JsonSerde.of(WorkerTaskRunning.class)).withName("GlobalKTable.WorkerTaskRunning"), + Materialized.>as(WORKER_RUNNING_STATE_STORE_NAME) + .withKeySerde(Serdes.String()) + .withValueSerde(JsonSerde.of(WorkerTaskRunning.class)) + ); + + KStream workerInstanceKStream = builder + .stream( + kafkaAdminService.getTopicName(WorkerInstance.class), + Consumed.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("KStream.WorkerInstance") + ); + + workerInstanceKStream + .to( + kafkaAdminService.getTopicName(TOPIC_EXECUTOR_WORKERINSTANCE), + Produced.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("DetectNewWorker.toExecutorWorkerInstance") + ); + + KStream stream = workerInstanceKStream + .transformValues( + WorkerInstanceTransformer::new, + Named.as("DetectNewWorker.workerInstanceTransformer") + ) + .flatMapValues((readOnlyKey, value) -> value, Named.as("DetectNewWorker.flapMapList")); + + // we resend the worker task from evicted worker + KStream resultWorkerTask = stream + .flatMapValues( + (readOnlyKey, value) -> value.getWorkerTasksToSend(), + Named.as("DetectNewWorkerTask.flapMapWorkerTaskToSend") + ); + + // and remove from 
running since already sent + resultWorkerTask + .map((key, value) -> KeyValue.pair(value.getTaskRun().getId(), (WorkerTaskRunning)null), Named.as("DetectNewWorkerTask.workerTaskRunningToNull")) + .to( + kafkaAdminService.getTopicName(WorkerTaskRunning.class), + Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskRunning.class)).withName("DetectNewWorker.toWorkerTaskRunning") + ); + + KafkaStreamSourceService.logIfEnabled( + log, + resultWorkerTask, + (key, value) -> executorService.log(log, false, value), + "DetectNewWorkerTask" + ) + .to( + kafkaAdminService.getTopicName(WorkerTask.class), + Produced.with(Serdes.String(), JsonSerde.of(WorkerTask.class)).withName("DetectNewWorkerTask.toWorkerTask") + ); + + // we resend the WorkerInstance update + KStream updatedStream = KafkaStreamSourceService.logIfEnabled( + log, + stream, + (key, value) -> log.debug( + "Instance updated: {}", + value + ), + "DetectNewWorkerInstance" + ) + .map( + (key, value) -> value.getWorkerInstanceUpdated(), + Named.as("DetectNewWorkerInstance.mapInstance") + ); + + // cleanup executor workerinstance state store + updatedStream + .filter((key, value) -> value != null, Named.as("DetectNewWorkerInstance.filterNotNull")) + .to( + kafkaAdminService.getTopicName(TOPIC_EXECUTOR_WORKERINSTANCE), + Produced.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("DetectNewWorkerInstance.toExecutorWorkerInstance") + ); + + updatedStream + .to( + kafkaAdminService.getTopicName(WorkerInstance.class), + Produced.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("DetectNewWorkerInstance.toWorkerInstance") + ); + + + return builder; + } +} diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/KafkaExecutorInterface.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/KafkaExecutorInterface.java new file mode 100644 index 00000000000..bc58e88244f --- /dev/null +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/KafkaExecutorInterface.java @@ -0,0 +1,15 @@ +package io.kestra.runner.kafka.executors; + +import io.kestra.runner.kafka.KafkaQueueEnabled; +import io.kestra.runner.kafka.services.KafkaStreamService; +import io.micronaut.context.ApplicationContext; +import org.apache.kafka.streams.StreamsBuilder; + +@KafkaQueueEnabled +public interface KafkaExecutorInterface { + StreamsBuilder topology(); + + default void onCreated(ApplicationContext applicationContext, KafkaStreamService.Stream stream) { + // no op + } +} diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java index 562517f94ec..28b54894212 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java @@ -123,6 +123,10 @@ private Stream(Topology topology, Properties props, MetricRegistry meterRegistry } this.logger = logger != null ? 
logger : log; + + if (this.logger.isTraceEnabled()) { + this.logger.trace(topology.describe().toString()); + } } public synchronized void start(final KafkaStreams.StateListener listener) throws IllegalStateException, StreamsException { diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/ExecutorNextTransformer.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/ExecutorNextTransformer.java index 81e9bf8f02d..09956b6c7ec 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/ExecutorNextTransformer.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/ExecutorNextTransformer.java @@ -1,7 +1,7 @@ package io.kestra.runner.kafka.streams; import io.kestra.core.models.executions.Execution; -import io.kestra.core.runners.AbstractExecutor; +import io.kestra.core.runners.ExecutorService; import io.kestra.core.runners.Executor; import lombok.Getter; import lombok.NoArgsConstructor; @@ -18,23 +18,22 @@ @Slf4j public class ExecutorNextTransformer implements ValueTransformerWithKey { private final String storeName; - private final AbstractExecutor abstractExecutor; + private final ExecutorService executorService; private KeyValueStore store; - public ExecutorNextTransformer(String storeName, AbstractExecutor abstractExecutor) { + public ExecutorNextTransformer(String storeName, ExecutorService executorService) { this.storeName = storeName; - this.abstractExecutor = abstractExecutor; + this.executorService = executorService; } @Override - @SuppressWarnings("unchecked") public void init(final ProcessorContext context) { - this.store = (KeyValueStore) context.getStateStore(this.storeName); + this.store = context.getStateStore(this.storeName); } @Override public Executor transform(final String key, final Executor value) { - Executor executor = abstractExecutor.process(value); + Executor executor = executorService.process(value); if (executor.getNexts().size() == 0) { return value; @@ -57,7 +56,7 @@ public Executor transform(final String key, final Executor value) { store.addAll(groups.get(false)); this.store.put(key, store); - Execution newExecution = abstractExecutor.onNexts( + Execution newExecution = executorService.onNexts( value.getFlow(), value.getExecution(), executor.getNexts() diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/WorkerInstanceTransformer.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/WorkerInstanceTransformer.java index 4597bf5abc0..c9a4baa0a01 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/WorkerInstanceTransformer.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/WorkerInstanceTransformer.java @@ -2,6 +2,7 @@ import com.google.common.collect.Streams; import io.kestra.core.runners.*; +import io.kestra.runner.kafka.executors.ExecutorWorkerRunning; import lombok.AllArgsConstructor; import lombok.Getter; import lombok.ToString; @@ -13,7 +14,6 @@ import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.state.ValueAndTimestamp; import io.kestra.core.services.WorkerInstanceService; -import io.kestra.runner.kafka.KafkaExecutor; import java.util.Collections; import java.util.List; @@ -29,10 +29,9 @@ public WorkerInstanceTransformer() { } @Override - @SuppressWarnings("unchecked") public void init(final ProcessorContext context) { - this.instanceStore = (KeyValueStore) context.getStateStore(KafkaExecutor.WORKERINSTANCE_STATE_STORE_NAME); - this.runningStore = (KeyValueStore>) 
context.getStateStore(KafkaExecutor.WORKER_RUNNING_STATE_STORE_NAME); + this.instanceStore = context.getStateStore(ExecutorWorkerRunning.WORKERINSTANCE_STATE_STORE_NAME); + this.runningStore = context.getStateStore(ExecutorWorkerRunning.WORKER_RUNNING_STATE_STORE_NAME); } @Override diff --git a/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaExecutorTest.java b/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaExecutorTest.java index 2fc319e74d5..b3e542732c7 100644 --- a/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaExecutorTest.java +++ b/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaExecutorTest.java @@ -17,16 +17,18 @@ import io.kestra.core.utils.TestsUtils; import io.kestra.runner.kafka.configs.ClientConfig; import io.kestra.runner.kafka.configs.StreamDefaultsConfig; +import io.kestra.runner.kafka.executors.ExecutorMain; +import io.kestra.runner.kafka.executors.ExecutorFlowTrigger; +import io.kestra.runner.kafka.executors.ExecutorWorkerRunning; +import io.kestra.runner.kafka.executors.KafkaExecutorInterface; import io.kestra.runner.kafka.serializers.JsonSerde; import io.kestra.runner.kafka.services.KafkaAdminService; import io.micronaut.context.ApplicationContext; import io.micronaut.test.extensions.junit5.annotation.MicronautTest; import lombok.extern.slf4j.Slf4j; -import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.streams.*; -import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.state.KeyValueStore; import org.apache.kafka.streams.test.TestRecord; import org.junit.jupiter.api.AfterEach; @@ -51,7 +53,13 @@ class KafkaExecutorTest { ApplicationContext applicationContext; @Inject - KafkaExecutor stream; + ExecutorMain executorMain; + + @Inject + ExecutorFlowTrigger executorFlowTrigger; + + @Inject + ExecutorWorkerRunning executorWorkerRunning; @Inject ClientConfig clientConfig; @@ -70,12 +78,12 @@ class KafkaExecutorTest { TopologyTestDriver testTopology; - static WorkerInstance workerInstance = workerInstance(); - @BeforeEach void init() throws IOException, URISyntaxException { TestsUtils.loads(repositoryLoader); + } + void startStream(KafkaExecutorInterface kafkaExecutorInterface) { Properties properties = new Properties(); properties.putAll(clientConfig.getProperties()); properties.putAll(streamConfig.getProperties()); @@ -85,18 +93,24 @@ void init() throws IOException, URISyntaxException { // @TODO properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2); - Topology topology = stream.topology().build(); + Topology topology = kafkaExecutorInterface.topology().build(); if (log.isTraceEnabled()) { log.trace(topology.describe().toString()); } + if (testTopology != null) { + testTopology.close(); + } + testTopology = new TopologyTestDriver(topology, properties); - applicationContext.registerSingleton(new KafkaTemplateExecutor( - testTopology.getKeyValueStore("template"), - "template" - )); + if (kafkaExecutorInterface.getClass() == ExecutorMain.class) { + applicationContext.registerSingleton(new KafkaTemplateExecutor( + testTopology.getKeyValueStore("template"), + "template" + )); + } } @AfterEach @@ -108,6 +122,8 @@ void tear() { @Test void standard() { + startStream(this.executorMain); + Flow flow = flowRepository.findById("io.kestra.tests", "logs").orElseThrow(); this.flowInput().pipeInput(flow.uid(), flow); @@ -136,6 +152,8 @@ void standard() { 
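// The tests below all follow the same TopologyTestDriver pattern: pipe records into
// an input topic, let the topology under test process them synchronously, then read
// the results back from an output topic. A minimal sketch, assuming
// kafka-streams-test-utils on the test classpath (the wiring mirrors the
// flowInput()/executionOutput() helpers defined further down):
//
//     TestInputTopic<String, Flow> in = testTopology.createInputTopic(
//         kafkaAdminService.getTopicName(Flow.class),
//         Serdes.String().serializer(),
//         JsonSerde.of(Flow.class).serializer()
//     );
//     TestOutputTopic<String, Execution> out = testTopology.createOutputTopic(
//         kafkaAdminService.getTopicName(Execution.class),
//         Serdes.String().deserializer(),
//         JsonSerde.of(Execution.class).deserializer()
//     );
//     in.pipeInput(flow.uid(), flow);                  // feed a flow definition
//     Execution created = out.readRecord().getValue(); // read what the executor emitted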
@Test void concurrent() { + startStream(this.executorMain); + Flow flow = flowRepository.findById("io.kestra.tests", "logs").orElseThrow(); this.flowInput().pipeInput(flow.uid(), flow); @@ -165,6 +183,8 @@ void concurrent() { @Test void killed() { + startStream(this.executorMain); + Flow flow = flowRepository.findById("io.kestra.tests", "logs").orElseThrow(); this.flowInput().pipeInput(flow.uid(), flow); @@ -204,6 +224,8 @@ void killed() { @Test void killedAlreadyFinished() { + startStream(this.executorMain); + Flow flow = flowRepository.findById("io.kestra.tests", "logs").orElseThrow(); this.flowInput().pipeInput(flow.uid(), flow); @@ -225,6 +247,8 @@ void killedAlreadyFinished() { @ParameterizedTest @ValueSource(booleans = {true, false}) void killedParallel(boolean killed) { + startStream(this.executorMain); + Flow flow = flowRepository.findById("io.kestra.tests", "parallel").orElseThrow(); this.flowInput().pipeInput(flow.uid(), flow); @@ -290,6 +314,8 @@ void killedParallel(boolean killed) { @Test void eachNull() { + startStream(this.executorMain); + Flow flow = flowRepository.findById("io.kestra.tests", "each-null").orElseThrow(); this.flowInput().pipeInput(flow.uid(), flow); @@ -305,6 +331,8 @@ void eachNull() { @Test void parallel() { + startStream(this.executorMain); + Flow flow = flowRepository.findById("io.kestra.tests", "parallel").orElseThrow(); this.flowInput().pipeInput(flow.uid(), flow); @@ -368,6 +396,8 @@ void parallel() { @Test void eachParallelNested() throws InternalException { + startStream(this.executorMain); + Flow flow = flowRepository.findById("io.kestra.tests", "each-parallel-nested").orElseThrow(); this.flowInput().pipeInput(flow.uid(), flow); @@ -424,6 +454,8 @@ void eachParallelNested() throws InternalException { @Test void flowTrigger() { + startStream(this.executorMain); + Flow triggerFlow = flowRepository.findById("io.kestra.tests", "trigger-flow-listener-no-inputs").orElseThrow(); this.flowInput().pipeInput(triggerFlow.uid(), triggerFlow); @@ -440,16 +472,22 @@ void flowTrigger() { firstExecution = runningAndSuccessSequential(firstFlow, firstExecution, 0); assertThat(firstExecution.getState().getCurrent(), is(State.Type.SUCCESS)); - Execution triggerExecution = executionOutput().readRecord().getValue(); - assertThat(triggerExecution.getState().getCurrent(), is(State.Type.CREATED)); + io.kestra.runner.kafka.streams.ExecutorFlowTrigger executorFlowTrigger = executorFlowTriggerOutput().readRecord().value(); + assertThat(executorFlowTrigger.getFlowHavingTrigger().getId(), is("trigger-flow-listener-no-inputs")); - triggerExecution = runningAndSuccessSequential(triggerFlow, triggerExecution, 0); + startStream(this.executorFlowTrigger); - assertThat(triggerExecution.getState().getCurrent(), is(State.Type.SUCCESS)); + executorFlowTriggerInput().pipeInput(executorFlowTrigger.getFlowHavingTrigger().uid(), executorFlowTrigger); + + Execution triggerExecution = executionOutput().readRecord().getValue(); + assertThat(triggerExecution.getState().getCurrent(), is(State.Type.CREATED)); + assertThat(triggerExecution.getFlowId(), is("trigger-flow-listener-no-inputs")); } @Test void multipleTrigger() { + startStream(this.executorMain); + Flow flowA = flowRepository.findById("io.kestra.tests", "trigger-multiplecondition-flow-a").orElseThrow(); this.flowInput().pipeInput(flowA.uid(), flowA); @@ -469,19 +507,33 @@ void multipleTrigger() { executionB = runningAndSuccessSequential(flowB, executionB, 0); assertThat(executionB.getState().getCurrent(), is(State.Type.SUCCESS)); + // 
get trigger + io.kestra.runner.kafka.streams.ExecutorFlowTrigger executorFlowTriggerA = executorFlowTriggerOutput().readRecord().value(); + assertThat(executorFlowTriggerA.getExecution().getFlowId(), is("trigger-multiplecondition-flow-a")); + io.kestra.runner.kafka.streams.ExecutorFlowTrigger executorFlowTriggerB = executorFlowTriggerOutput().readRecord().value(); + assertThat(executorFlowTriggerB.getExecution().getFlowId(), is("trigger-multiplecondition-flow-b")); + + startStream(this.executorFlowTrigger); + this.flowInput().pipeInput(flowA.uid(), flowA); + this.flowInput().pipeInput(flowB.uid(), flowB); + this.flowInput().pipeInput(triggerFlow.uid(), triggerFlow); + + executorFlowTriggerInput().pipeInput(executorFlowTriggerA.getFlowHavingTrigger().uid(), executorFlowTriggerA); + executorFlowTriggerInput().pipeInput(executorFlowTriggerB.getFlowHavingTrigger().uid(), executorFlowTriggerB); + // trigger start Execution triggerExecution = executionOutput().readRecord().getValue(); assertThat(triggerExecution.getState().getCurrent(), is(State.Type.CREATED)); - - triggerExecution = runningAndSuccessSequential(triggerFlow, triggerExecution, 0); - assertThat(triggerExecution.getState().getCurrent(), is(State.Type.SUCCESS)); + assertThat(triggerExecution.getFlowId(), is("trigger-multiplecondition-listener")); } @Test void workerRebalanced() { + startStream(this.executorMain); + + // start an execution to generate some id Flow flow = flowRepository.findById("io.kestra.tests", "logs").orElseThrow(); this.flowInput().pipeInput(flow.uid(), flow); - this.workerInstanceInput().pipeInput(workerInstance.getWorkerUuid().toString(), workerInstance); Execution execution = createExecution(flow); @@ -489,7 +541,19 @@ void workerRebalanced() { String taskRunId = execution.getTaskRunList().get(0).getId(); assertThat(execution.getTaskRunList().get(0).getState().getCurrent(), is(State.Type.RUNNING)); - assertThat(this.workerTaskOutput().readRecord().value().getTaskRun().getState().getCurrent(), is(State.Type.CREATED)); + + WorkerTask workerTask = this.workerTaskOutput().readRecord().value(); + assertThat(workerTask.getTaskRun().getState().getCurrent(), is(State.Type.CREATED)); + + // start worker running stream + startStream(this.executorWorkerRunning); + + WorkerInstance startInstance = workerInstance(); + this.workerInstanceInput().pipeInput(startInstance.getWorkerUuid().toString(), startInstance); + + // add a running + WorkerTaskRunning running = WorkerTaskRunning.of(workerTask, startInstance, 0); + this.workerTaskRunningInput().pipeInput(running.getTaskRun().getId(), running); // declare a new worker instance WorkerInstance newInstance = workerInstance(); @@ -508,9 +572,10 @@ void workerRebalanced() { assertThat(workerTaskRunningRecord.key(), is(taskRunId)); } - @Test void invalidStore() throws JsonProcessingException { + startStream(this.executorMain); + KeyValueStore flow = this.testTopology.getKeyValueStore("flow"); flow.put("io.kestra.unittest_invalid_1", JacksonMapper.ofJson().writeValueAsString(Map.of( "id", "invalid", @@ -525,7 +590,6 @@ void invalidStore() throws JsonProcessingException { ) ))); - Flow triggerFlow = flowRepository.findById("io.kestra.tests", "trigger-flow-listener-no-inputs").orElseThrow(); this.flowInput().pipeInput(triggerFlow.uid(), triggerFlow); @@ -538,15 +602,10 @@ void invalidStore() throws JsonProcessingException { firstExecution = runningAndSuccessSequential(firstFlow, firstExecution, 0); assertThat(firstExecution.getState().getCurrent(), is(State.Type.SUCCESS)); - Execution 
triggerExecution = executionOutput().readRecord().getValue(); - assertThat(triggerExecution.getState().getCurrent(), is(State.Type.CREATED)); - - triggerExecution = runningAndSuccessSequential(triggerFlow, triggerExecution, 0); - - assertThat(triggerExecution.getState().getCurrent(), is(State.Type.SUCCESS)); + io.kestra.runner.kafka.streams.ExecutorFlowTrigger executorFlowTrigger = executorFlowTriggerOutput().readRecord().value(); + assertThat(executorFlowTrigger.getFlowHavingTrigger().getId(), is("trigger-flow-listener-no-inputs")); } - private Execution createExecution(Flow flow) { Execution execution = Execution.builder() .id(IdUtils.create()) @@ -596,6 +655,7 @@ private Execution runningAndSuccessSequential(Flow flow, Execution execution, in // add to running queue + /* TaskRun taskRun = execution.getTaskRunList().get(index); WorkerTaskRunning workerTaskRunning = WorkerTaskRunning.of( WorkerTask.builder() @@ -608,6 +668,8 @@ private Execution runningAndSuccessSequential(Flow flow, Execution execution, in ); this.workerTaskRunningInput().pipeInput(taskRun.getId(), workerTaskRunning); + */ + if (lastState == State.Type.CREATED) { return execution; } @@ -668,6 +730,25 @@ private TestInputTopic executionInput() { ); } + private TestInputTopic executorFlowTriggerInput() { + return this.testTopology + .createInputTopic( + kafkaAdminService.getTopicName(io.kestra.runner.kafka.streams.ExecutorFlowTrigger.class), + Serdes.String().serializer(), + JsonSerde.of(io.kestra.runner.kafka.streams.ExecutorFlowTrigger.class).serializer() + ); + } + + + private TestOutputTopic executorFlowTriggerOutput() { + return this.testTopology + .createOutputTopic( + kafkaAdminService.getTopicName(io.kestra.runner.kafka.streams.ExecutorFlowTrigger.class), + Serdes.String().deserializer(), + JsonSerde.of(io.kestra.runner.kafka.streams.ExecutorFlowTrigger.class).deserializer() + ); + } + private TestInputTopic executionKilledInput() { return this.testTopology .createInputTopic( diff --git a/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaRunnerTest.java b/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaRunnerTest.java index 7cdca9e4aa9..4ebf459ad7d 100644 --- a/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaRunnerTest.java +++ b/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaRunnerTest.java @@ -199,7 +199,7 @@ void workerRecordTooLarge() throws TimeoutException { ); assertThat(execution.getState().getCurrent(), is(State.Type.SUCCESS)); - assertThat(logs.stream().filter(logEntry -> logEntry.getExecutionId().equals(execution.getId())).count(), is(63L)); + assertThat(logs.stream().filter(logEntry -> logEntry.getExecutionId().equals(execution.getId())).count(), is(greaterThan(60L))); } @Test diff --git a/runner-kafka/src/test/resources/application.yml b/runner-kafka/src/test/resources/application.yml index 24dc4be5026..f00625cb7fb 100644 --- a/runner-kafka/src/test/resources/application.yml +++ b/runner-kafka/src/test/resources/application.yml @@ -77,7 +77,7 @@ kestra: segment.bytes: "10485760" executor: - name: "${kestra.kafka.defaults.topic-prefix}executor-executor-changelog" + name: "${kestra.kafka.defaults.topic-prefix}executor_main-executor-changelog" cls: io.kestra.core.runners.Executor properties: cleanup.policy: "delete,compact" diff --git a/runner-memory/src/main/java/io/kestra/runner/memory/MemoryExecutor.java b/runner-memory/src/main/java/io/kestra/runner/memory/MemoryExecutor.java index 9643c8fce7f..deaade72f83 100644 --- 
a/runner-memory/src/main/java/io/kestra/runner/memory/MemoryExecutor.java +++ b/runner-memory/src/main/java/io/kestra/runner/memory/MemoryExecutor.java @@ -21,58 +21,69 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; + +import io.micronaut.context.ApplicationContext; import jakarta.inject.Inject; import jakarta.inject.Named; import jakarta.inject.Singleton; +import lombok.extern.slf4j.Slf4j; @Singleton @MemoryQueueEnabled -public class MemoryExecutor extends AbstractExecutor { - private final FlowRepositoryInterface flowRepository; - private final QueueInterface executionQueue; - private final QueueInterface workerTaskQueue; - private final QueueInterface workerTaskResultQueue; - private final QueueInterface logQueue; - private final FlowService flowService; - private final TaskDefaultService taskDefaultService; - private final Template.TemplateExecutorInterface templateExecutorInterface; +@Slf4j +public class MemoryExecutor implements ExecutorInterface { private static final MemoryMultipleConditionStorage multipleConditionStorage = new MemoryMultipleConditionStorage(); - private static final ConcurrentHashMap EXECUTIONS = new ConcurrentHashMap<>(); private static final ConcurrentHashMap WORKERTASKEXECUTIONS_WATCHER = new ConcurrentHashMap<>(); - private List allFlows; @Inject - public MemoryExecutor( - RunContextFactory runContextFactory, - FlowRepositoryInterface flowRepository, - @Named(QueueFactoryInterface.EXECUTION_NAMED) QueueInterface executionQueue, - @Named(QueueFactoryInterface.WORKERTASK_NAMED) QueueInterface workerTaskQueue, - @Named(QueueFactoryInterface.WORKERTASKRESULT_NAMED) QueueInterface workerTaskResultQueue, - @Named(QueueFactoryInterface.WORKERTASKLOG_NAMED) QueueInterface logQueue, - MetricRegistry metricRegistry, - FlowService flowService, - ConditionService conditionService, - TaskDefaultService taskDefaultService, - Template.TemplateExecutorInterface templateExecutorInterface - ) { - super(runContextFactory, metricRegistry, conditionService); - - this.flowRepository = flowRepository; - this.executionQueue = executionQueue; - this.workerTaskQueue = workerTaskQueue; - this.workerTaskResultQueue = workerTaskResultQueue; - this.logQueue = logQueue; - this.flowService = flowService; - this.conditionService = conditionService; - this.taskDefaultService = taskDefaultService; - this.flowExecutorInterface = new MemoryFlowExecutor(this.flowRepository); - this.templateExecutorInterface = templateExecutorInterface; - } + private ApplicationContext applicationContext; + + @Inject + private FlowRepositoryInterface flowRepository; + + @Inject + @Named(QueueFactoryInterface.EXECUTION_NAMED) + private QueueInterface executionQueue; + + @Inject + @Named(QueueFactoryInterface.WORKERTASK_NAMED) + private QueueInterface workerTaskQueue; + + @Inject + @Named(QueueFactoryInterface.WORKERTASKRESULT_NAMED) + private QueueInterface workerTaskResultQueue; + + @Inject + @Named(QueueFactoryInterface.WORKERTASKLOG_NAMED) + private QueueInterface logQueue; + + @Inject + private FlowService flowService; + + @Inject + private TaskDefaultService taskDefaultService; + + @Inject + private Template.TemplateExecutorInterface templateExecutorInterface; + + @Inject + private ExecutorService executorService; + + @Inject + private ConditionService conditionService; + + @Inject + private RunContextFactory runContextFactory; + + @Inject + private MetricRegistry metricRegistry; @Override public void run() { + applicationContext.registerSingleton(new 
MemoryFlowExecutor(this.flowRepository));
+
         this.allFlows = this.flowRepository.findAll();
         this.executionQueue.receive(MemoryExecutor.class, this::executionQueue);
         this.workerTaskResultQueue.receive(MemoryExecutor.class, this::workerTaskResultQueue);
@@ -107,14 +118,14 @@ private void handleExecution(ExecutionState state) {
             Executor executor = new Executor(execution, null).withFlow(flow);
 
             if (log.isDebugEnabled()) {
-                log(log, true, executor);
+                executorService.log(log, true, executor);
             }
 
-            executor = this.process(executor);
+            executor = executorService.process(executor);
 
             if (executor.getNexts().size() > 0 && deduplicateNexts(execution, executor.getNexts())) {
                 executor.withExecution(
-                    this.onNexts(executor.getFlow(), executor.getExecution(), executor.getNexts()),
+                    executorService.onNexts(executor.getFlow(), executor.getExecution(), executor.getNexts()),
                     "onNexts"
                 );
             }
@@ -224,7 +235,7 @@ private ExecutionState saveExecution(Execution execution) {
 
     private void toExecution(Executor executor) {
         if (log.isDebugEnabled()) {
-            log(log, false, executor);
+            executorService.log(log, false, executor);
         }
 
         // emit for other consumer than executor
@@ -242,7 +253,7 @@ private void workerTaskResultQueue(WorkerTaskResult message) {
         synchronized (this) {
             if (log.isDebugEnabled()) {
-                log(log, true, message);
+                executorService.log(log, true, message);
             }
 
             metricRegistry

From 9fc78595c8874fadb501f9242f65b0a1433db70e Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Fri, 28 Jan 2022 13:12:39 +0100
Subject: [PATCH 10/44] feat(repository-elasticsearch): add metrics from elasticsearch client

---
 .../kestra/core/metrics/MetricRegistry.java  | 10 +++++
 .../services/ElasticsearchClientFactory.java | 38 +++++++++++++++++--
 2 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/core/src/main/java/io/kestra/core/metrics/MetricRegistry.java b/core/src/main/java/io/kestra/core/metrics/MetricRegistry.java
index 732cf650df6..6aea377c05a 100644
--- a/core/src/main/java/io/kestra/core/metrics/MetricRegistry.java
+++ b/core/src/main/java/io/kestra/core/metrics/MetricRegistry.java
@@ -215,6 +215,16 @@ public String[] tags(SchedulerExecutionWithTrigger schedulerExecutionWithTrigger
         );
     }
 
+
+    /**
+     * Return global tags
+     *
+     * @return tags to apply to metrics
+     */
+    public Tags tags(String... 
tags) { + return Tags.of(tags); + } + /** * Attach a {@link MeterBinder} to current registry * diff --git a/repository-elasticsearch/src/main/java/io/kestra/repository/elasticsearch/services/ElasticsearchClientFactory.java b/repository-elasticsearch/src/main/java/io/kestra/repository/elasticsearch/services/ElasticsearchClientFactory.java index fa9f9cf1b85..94ae4d9e385 100644 --- a/repository-elasticsearch/src/main/java/io/kestra/repository/elasticsearch/services/ElasticsearchClientFactory.java +++ b/repository-elasticsearch/src/main/java/io/kestra/repository/elasticsearch/services/ElasticsearchClientFactory.java @@ -1,20 +1,30 @@ package io.kestra.repository.elasticsearch.services; +import io.kestra.core.metrics.MetricRegistry; +import io.kestra.repository.elasticsearch.configs.ElasticsearchConfig; +import io.micrometer.core.instrument.MeterRegistry; +import io.micrometer.core.instrument.binder.httpcomponents.MicrometerHttpClientInterceptor; import io.micronaut.context.annotation.Bean; import io.micronaut.context.annotation.Factory; import io.micronaut.context.annotation.Requires; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientBuilder; import org.opensearch.client.RestHighLevelClient; -import io.kestra.repository.elasticsearch.configs.ElasticsearchConfig; -import jakarta.inject.Inject; -import jakarta.inject.Singleton; +import java.net.URI; @Requires(beans = ElasticsearchConfig.class) @Factory public class ElasticsearchClientFactory { + @Inject + MetricRegistry metricRegistry; + + @Inject + MeterRegistry meterRegistry; + /** * Create the {@link RestHighLevelClient} bean for the given configuration. * @@ -52,6 +62,28 @@ protected RestClientBuilder restClientBuilder(ElasticsearchConfig config) { }) .setHttpClientConfigCallback(httpClientBuilder -> { httpClientBuilder = config.httpAsyncClientBuilder(); + + MicrometerHttpClientInterceptor interceptor = new MicrometerHttpClientInterceptor( + meterRegistry, + request -> { + String path = URI.create(request.getRequestLine().getUri()).getPath(); + + int i = path.indexOf("/_doc/"); + + if (i < 0) { + return path; + } else { + return path.substring(0, i + 5) + "/{id}"; + } + }, + metricRegistry.tags("type", "elasticsearch"), + true + ); + + httpClientBuilder + .addInterceptorFirst(interceptor.getRequestInterceptor()) + .addInterceptorLast(interceptor.getResponseInterceptor()); + return httpClientBuilder; }); From 2c11418d43cfb8ce73370d22d915c720c9ae9687 Mon Sep 17 00:00:00 2001 From: Ludovic DEHON Date: Mon, 31 Jan 2022 18:45:58 +0100 Subject: [PATCH 11/44] chore(docker): add kafka plugins --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 5f417fc6902..94511b2e187 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -195,7 +195,7 @@ jobs: push: true tags: ${{ format('kestra/kestra:{0}-full', steps.vars.outputs.tag) }} build-args: | - KESTRA_PLUGINS=${{ steps.vars.outputs.plugins }} io.kestra.storage:storage-gcs:LATEST io.kestra.storage:storage-minio:LATEST io.kestra.plugin:plugin-aws:LATEST io.kestra.plugin:plugin-compress:LATEST io.kestra.plugin:plugin-crypto:LATEST io.kestra.plugin:plugin-elasticsearch:LATEST io.kestra.plugin:plugin-fs:LATEST io.kestra.plugin:plugin-gcp:LATEST io.kestra.plugin:plugin-googleworkspace:LATEST io.kestra.plugin:plugin-jdbc-clickhouse:LATEST io.kestra.plugin:plugin-jdbc-mysql:LATEST 
io.kestra.plugin:plugin-jdbc-oracle:LATEST io.kestra.plugin:plugin-jdbc-postgres:LATEST io.kestra.plugin:plugin-jdbc-redshift:LATEST io.kestra.plugin:plugin-jdbc-vertica:LATEST io.kestra.plugin:plugin-jdbc-vectorwise:LATEST io.kestra.plugin:plugin-kubernetes:LATEST io.kestra.plugin:plugin-mongodb:LATEST io.kestra.plugin:plugin-notifications:LATEST io.kestra.plugin:plugin-script-groovy:LATEST io.kestra.plugin:plugin-script-jython:LATEST io.kestra.plugin:plugin-script-nashorn:LATEST io.kestra.plugin:plugin-serdes:LATEST io.kestra.plugin:plugin-singer:LATEST + KESTRA_PLUGINS=${{ steps.vars.outputs.plugins }} io.kestra.storage:storage-gcs:LATEST io.kestra.storage:storage-minio:LATEST io.kestra.plugin:plugin-aws:LATEST io.kestra.plugin:plugin-compress:LATEST io.kestra.plugin:plugin-crypto:LATEST io.kestra.plugin:plugin-elasticsearch:LATEST io.kestra.plugin:plugin-fs:LATEST io.kestra.plugin:plugin-gcp:LATEST io.kestra.plugin:plugin-googleworkspace:LATEST io.kestra.plugin:plugin-jdbc-clickhouse:LATEST io.kestra.plugin:plugin-jdbc-mysql:LATEST io.kestra.plugin:plugin-jdbc-oracle:LATEST io.kestra.plugin:plugin-jdbc-postgres:LATEST io.kestra.plugin:plugin-jdbc-redshift:LATEST io.kestra.plugin:plugin-jdbc-vertica:LATEST io.kestra.plugin:plugin-jdbc-vectorwise:LATEST io.kestra.plugin:plugin-kafka:LATEST io.kestra.plugin:plugin-kubernetes:LATEST io.kestra.plugin:plugin-mongodb:LATEST io.kestra.plugin:plugin-notifications:LATEST io.kestra.plugin:plugin-script-groovy:LATEST io.kestra.plugin:plugin-script-jython:LATEST io.kestra.plugin:plugin-script-nashorn:LATEST io.kestra.plugin:plugin-serdes:LATEST io.kestra.plugin:plugin-singer:LATEST APT_PACKAGES=python3-pip python3-wheel python3-setuptools python3-virtualenv nodejs # Slack From e660ed843e7455eb3854b35aa95c15a7f4043ef6 Mon Sep 17 00:00:00 2001 From: Ludovic DEHON Date: Mon, 31 Jan 2022 18:46:29 +0100 Subject: [PATCH 12/44] feat(kafka-runner): create state dir early to avoid concurrent access --- .../java/io/kestra/runner/kafka/KafkaExecutor.java | 1 - .../runner/kafka/services/KafkaStreamService.java | 12 +++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java index 64902a86f5a..e4549a81a73 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java @@ -66,7 +66,6 @@ public void run() { this.streams = this.kafkaExecutors .stream() - .parallel() .map(executor -> { Properties properties = new Properties(); // build diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java index 28b54894212..359e9e6b5e6 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java @@ -22,6 +22,7 @@ import io.kestra.runner.kafka.configs.StreamDefaultsConfig; import org.slf4j.Logger; +import java.io.File; import java.time.Duration; import java.util.List; import java.util.Properties; @@ -97,8 +98,17 @@ public KafkaStreamService.Stream of(Class clientId, Class groupId, Topolog ); } + if (properties.containsKey(StreamsConfig.STATE_DIR_CONFIG)) { + File stateDir = new File((String) properties.get(StreamsConfig.STATE_DIR_CONFIG)); + + if (!stateDir.exists()) { + //noinspection 
ResultOfMethodCallIgnored
+                stateDir.mkdirs();
+            }
+        }
+
         Stream stream = new Stream(topology, properties, metricsEnabled ? metricRegistry : null, logger);
-        eventPublisher.publishEvent(new KafkaStreamEndpoint.Event(clientId.getName(), stream));
+        eventPublisher.publishEventAsync(new KafkaStreamEndpoint.Event(clientId.getName(), stream));
 
         return stream;
     }

From c2872084b12d62b62655fa6fb78fbd714b7e8582 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Tue, 1 Feb 2022 20:52:07 +0100
Subject: [PATCH 13/44] fix(docker): fix compose dependencies version

---
 docker-compose.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docker-compose.yml b/docker-compose.yml
index dea74fc4aba..b18739e6016 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -14,7 +14,7 @@ volumes:
 services:
   zookeeper:
-    image: confluentinc/cp-zookeeper
+    image: confluentinc/cp-zookeeper:7.0.1
     volumes:
       - zookeeper-data:/var/lib/zookeeper/data
       - zookeeper-log:/var/lib/zookeeper/log
@@ -24,7 +24,7 @@ services:
       ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: WARN
 
   kafka:
-    image: confluentinc/cp-kafka
+    image: confluentinc/cp-kafka:7.0.1
     volumes:
       - kafka-data:/var/lib/kafka
     environment:

From cc0bba2a547445a0bb106a4a0e77f5fa101e23a0 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Wed, 2 Feb 2022 18:04:36 +0100
Subject: [PATCH 14/44] fix(tasks): Python buffers logs by default, so we force unbuffered output to get all logs; relates to #456

---
 .../io/kestra/core/tasks/scripts/AbstractBash.java   |  6 +++++-
 .../io/kestra/core/tasks/scripts/AbstractPython.java | 11 +++++++++++
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/core/src/main/java/io/kestra/core/tasks/scripts/AbstractBash.java b/core/src/main/java/io/kestra/core/tasks/scripts/AbstractBash.java
index 5c45c845682..6c25035f405 100644
--- a/core/src/main/java/io/kestra/core/tasks/scripts/AbstractBash.java
+++ b/core/src/main/java/io/kestra/core/tasks/scripts/AbstractBash.java
@@ -143,6 +143,10 @@ protected Map finalInputFiles() throws IOException {
         return this.inputFiles != null ? new HashMap<>(this.inputFiles) : new HashMap<>();
     }
 
+    protected Map finalEnv() throws IOException {
+        return this.env != null ? new HashMap<>(this.env) : new HashMap<>();
+    }
+
     protected List finalCommandsWithInterpreter(String commandAsString) throws IOException {
         return BashService.finalCommandsWithInterpreter(
             this.interpreter,
@@ -198,7 +202,7 @@ protected ScriptOutput run(RunContext runContext, Supplier supplier) thr
             logger,
             workingDirectory,
             finalCommandsWithInterpreter(commandAsString),
-            this.env,
+            this.finalEnv(),
             (inputStream, isStdErr) -> {
                 AbstractLogThread thread = new LogThread(logger, inputStream, isStdErr, runContext);
                 thread.setName("bash-log-" + (isStdErr ? 
"-err" : "-out")); diff --git a/core/src/main/java/io/kestra/core/tasks/scripts/AbstractPython.java b/core/src/main/java/io/kestra/core/tasks/scripts/AbstractPython.java index a9624150258..ad3ec343dc8 100644 --- a/core/src/main/java/io/kestra/core/tasks/scripts/AbstractPython.java +++ b/core/src/main/java/io/kestra/core/tasks/scripts/AbstractPython.java @@ -15,6 +15,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import javax.validation.constraints.NotEmpty; import javax.validation.constraints.NotNull; @@ -117,4 +118,14 @@ protected String virtualEnvCommand(RunContext runContext, List requireme return String.join("\n", renderer); } + + @Override + protected Map finalEnv() throws IOException { + Map env = super.finalEnv(); + + // python buffer log by default, so we force unbuffer to have the whole log + env.put("PYTHONUNBUFFERED", "true"); + + return env; + } } From 7f36cb9924da3ce9ebdb038a7308b10bef923be1 Mon Sep 17 00:00:00 2001 From: Ludovic DEHON Date: Thu, 3 Feb 2022 08:16:05 +0100 Subject: [PATCH 15/44] feat(kafka-runner): add consumer on GlobalStateProcessor to be notified about every change --- .../kestra/runner/kafka/KafkaScheduler.java | 5 ++-- .../executors/ExecutorWorkerRunning.java | 2 +- .../kafka/streams/GlobalStateProcessor.java | 28 +++++++++++++++++-- 3 files changed, 30 insertions(+), 5 deletions(-) diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java index da66e1737ca..8d6e1344172 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java @@ -31,6 +31,7 @@ import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.kstream.*; +import org.apache.kafka.streams.state.KeyValueBytesStoreSupplier; import org.apache.kafka.streams.state.QueryableStoreTypes; import org.apache.kafka.streams.state.Stores; @@ -138,7 +139,7 @@ public StreamsBuilder topology() { JsonSerde.of(Executor.class) ), kafkaAdminService.getTopicName(Executor.class), - Consumed.with(Serdes.String(), JsonSerde.of(Executor.class)), + Consumed.with(Serdes.String(), JsonSerde.of(Executor.class)).withName("GlobalStore.Executor"), () -> new GlobalStateProcessor<>(STATESTORE_EXECUTOR) ); @@ -150,7 +151,7 @@ public StreamsBuilder topology() { JsonSerde.of(Trigger.class) ), kafkaAdminService.getTopicName(Trigger.class), - Consumed.with(Serdes.String(), JsonSerde.of(Trigger.class)), + Consumed.with(Serdes.String(), JsonSerde.of(Trigger.class)).withName("GlobalStore.Trigger"), () -> new GlobalStateLockProcessor<>(STATESTORE_TRIGGER, triggerLock) ); diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java index cad48486180..f630dcd261e 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java @@ -46,7 +46,7 @@ public StreamsBuilder topology() { JsonSerde.of(WorkerInstance.class) ), kafkaAdminService.getTopicName(KafkaStreamSourceService.TOPIC_EXECUTOR_WORKERINSTANCE), - Consumed.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("GlobalStore.ExecutorWorkerInstace"), + Consumed.with(Serdes.String(), 
From 7f36cb9924da3ce9ebdb038a7308b10bef923be1 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Thu, 3 Feb 2022 08:16:05 +0100
Subject: [PATCH 15/44] feat(kafka-runner): add consumer on GlobalStateProcessor to be notified about every change

---
 .../kestra/runner/kafka/KafkaScheduler.java  |  5 ++--
 .../executors/ExecutorWorkerRunning.java     |  2 +-
 .../kafka/streams/GlobalStateProcessor.java  | 28 +++++++++++++++++--
 3 files changed, 30 insertions(+), 5 deletions(-)

diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java
index da66e1737ca..8d6e1344172 100644
--- a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java
+++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java
@@ -31,6 +31,7 @@ import org.apache.kafka.streams.StreamsBuilder;
 import org.apache.kafka.streams.Topology;
 import org.apache.kafka.streams.kstream.*;
+import org.apache.kafka.streams.state.KeyValueBytesStoreSupplier;
 import org.apache.kafka.streams.state.QueryableStoreTypes;
 import org.apache.kafka.streams.state.Stores;
 
@@ -138,7 +139,7 @@ public StreamsBuilder topology() {
                     JsonSerde.of(Executor.class)
                 ),
                 kafkaAdminService.getTopicName(Executor.class),
-                Consumed.with(Serdes.String(), JsonSerde.of(Executor.class)),
+                Consumed.with(Serdes.String(), JsonSerde.of(Executor.class)).withName("GlobalStore.Executor"),
                 () -> new GlobalStateProcessor<>(STATESTORE_EXECUTOR)
             );
 
@@ -150,7 +151,7 @@ public StreamsBuilder topology() {
                     JsonSerde.of(Trigger.class)
                 ),
                 kafkaAdminService.getTopicName(Trigger.class),
-                Consumed.with(Serdes.String(), JsonSerde.of(Trigger.class)),
+                Consumed.with(Serdes.String(), JsonSerde.of(Trigger.class)).withName("GlobalStore.Trigger"),
                 () -> new GlobalStateLockProcessor<>(STATESTORE_TRIGGER, triggerLock)
             );
 
diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java
index cad48486180..f630dcd261e 100644
--- a/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java
+++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorWorkerRunning.java
@@ -46,7 +46,7 @@ public StreamsBuilder topology() {
                 JsonSerde.of(WorkerInstance.class)
             ),
             kafkaAdminService.getTopicName(KafkaStreamSourceService.TOPIC_EXECUTOR_WORKERINSTANCE),
-            Consumed.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("GlobalStore.ExecutorWorkerInstace"),
+            Consumed.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("GlobalStore.ExecutorWorkerInstance"),
             () -> new GlobalStateProcessor<>(WORKERINSTANCE_STATE_STORE_NAME)
         );
 
diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/GlobalStateProcessor.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/GlobalStateProcessor.java
index e0b05cfe531..097547c69d3 100644
--- a/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/GlobalStateProcessor.java
+++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/streams/GlobalStateProcessor.java
@@ -1,23 +1,36 @@
 package io.kestra.runner.kafka.streams;
 
+import com.google.common.collect.Streams;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.streams.processor.Processor;
 import org.apache.kafka.streams.processor.ProcessorContext;
+import org.apache.kafka.streams.state.KeyValueIterator;
 import org.apache.kafka.streams.state.KeyValueStore;
 
+import java.util.List;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+
 @Slf4j
 public class GlobalStateProcessor implements Processor {
     private final String storeName;
+    private final Consumer> consumer;
     private KeyValueStore store;
 
     public GlobalStateProcessor(String storeName) {
+        this(storeName, null);
+    }
+
+    public GlobalStateProcessor(String storeName, Consumer> consumer) {
         this.storeName = storeName;
+        this.consumer = consumer;
     }
 
-    @SuppressWarnings("unchecked")
     @Override
     public void init(ProcessorContext context) {
-        this.store = (KeyValueStore) context.getStateStore(this.storeName);
+        this.store = context.getStateStore(this.storeName);
+
+        this.send();
     }
 
     @Override
@@ -27,6 +40,17 @@ public void process(String key, T value) {
         } else {
             this.store.put(key, value);
         }
+
+        this.send();
     }
+
+    @SuppressWarnings("UnstableApiUsage")
+    private void send() {
+        if (consumer != null) {
+            try (KeyValueIterator all = this.store.all()) {
+                consumer.accept(Streams.stream(all).map(e -> e.value).collect(Collectors.toList()));
+            }
+        }
+    }
 
     @Override
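The optional consumer added here makes the backing global store observable: the processor snapshots the entire store after init and again after every processed record, handing the value list to the callback. A hedged wiring sketch using the two-argument constructor (the logging callback is illustrative, not code from the patch):

    builder.addGlobalStore(
        Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore(WORKERINSTANCE_STATE_STORE_NAME),
            Serdes.String(),
            JsonSerde.of(WorkerInstance.class)
        ),
        kafkaAdminService.getTopicName(KafkaStreamSourceService.TOPIC_EXECUTOR_WORKERINSTANCE),
        Consumed.with(Serdes.String(), JsonSerde.of(WorkerInstance.class)).withName("GlobalStore.ExecutorWorkerInstance"),
        () -> new GlobalStateProcessor<>(
            WORKERINSTANCE_STATE_STORE_NAME,
            workerInstances -> log.debug("{} worker instances registered", workerInstances.size())
        )
    );

Note that the snapshot iterates the whole store on every record, so the callback is best suited to small stores such as the worker-instance registry.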
From cba08932c9ba01e33e86fc0604eb244d6c462b70 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Thu, 3 Feb 2022 11:24:54 +0100
Subject: [PATCH 16/44] feat(core): add a keys pebble filter

---
 .../kestra/core/runners/pebble/Extension.java |  1 +
 .../runners/pebble/filters/KeysFilter.java    | 54 ++++++++++++++++++
 .../pebble/filters/KeysFilterTest.java        | 56 +++++++++++++++++++
 3 files changed, 111 insertions(+)
 create mode 100644 core/src/main/java/io/kestra/core/runners/pebble/filters/KeysFilter.java
 create mode 100644 core/src/test/java/io/kestra/core/runners/pebble/filters/KeysFilterTest.java

diff --git a/core/src/main/java/io/kestra/core/runners/pebble/Extension.java b/core/src/main/java/io/kestra/core/runners/pebble/Extension.java
index 5357f3ff5b1..bbf33461665 100644
--- a/core/src/main/java/io/kestra/core/runners/pebble/Extension.java
+++ b/core/src/main/java/io/kestra/core/runners/pebble/Extension.java
@@ -54,6 +54,7 @@ public Map getFilters() {
         filters.put("timestampNano", new TimestampNanoFilter());
         filters.put("jq", new JqFilter());
         filters.put("json", new JsonFilter());
+        filters.put("keys", new KeysFilter());
         filters.put("slugify", new SlugifyFilter());
         filters.put("substringBefore", new SubstringBeforeFilter());
         filters.put("substringBeforeLast", new SubstringBeforeLastFilter());

diff --git a/core/src/main/java/io/kestra/core/runners/pebble/filters/KeysFilter.java b/core/src/main/java/io/kestra/core/runners/pebble/filters/KeysFilter.java
new file mode 100644
index 00000000000..9eed613c121
--- /dev/null
+++ b/core/src/main/java/io/kestra/core/runners/pebble/filters/KeysFilter.java
@@ -0,0 +1,54 @@
+package io.kestra.core.runners.pebble.filters;
+
+import com.mitchellbosecke.pebble.error.PebbleException;
+import com.mitchellbosecke.pebble.extension.Filter;
+import com.mitchellbosecke.pebble.template.EvaluationContext;
+import com.mitchellbosecke.pebble.template.PebbleTemplate;
+import org.apache.commons.lang3.StringUtils;
+
+import java.lang.reflect.Array;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+public class KeysFilter implements Filter {
+    @Override
+    public List getArgumentNames() {
+        return null;
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    public Object apply(Object input, Map args, PebbleTemplate self, EvaluationContext context, int lineNumber) throws PebbleException {
+        if (input == null) {
+            return null;
+        }
+
+        if (input instanceof Map) {
+            Map inputMap = (Map) input;
+            return inputMap.keySet();
+        }
+
+        if (input instanceof List) {
+            List inputList = (List) input;
+            return IntStream
+                .rangeClosed(0, inputList.size() - 1)
+                .boxed()
+                .collect(Collectors.toList());
+        }
+
+        if (input.getClass().isArray()) {
+            int length = Array.getLength(input);
+            return IntStream
+                .rangeClosed(0, length - 1)
+                .boxed()
+                .collect(Collectors.toList());
+        }
+
+        throw new PebbleException(null, "'keys' filter can only be applied to List, Map, Array. Actual type was: " + input.getClass().getName(), lineNumber, self.getName());
+
+    }
+}

diff --git a/core/src/test/java/io/kestra/core/runners/pebble/filters/KeysFilterTest.java b/core/src/test/java/io/kestra/core/runners/pebble/filters/KeysFilterTest.java
new file mode 100644
index 00000000000..363dfc2e60e
--- /dev/null
+++ b/core/src/test/java/io/kestra/core/runners/pebble/filters/KeysFilterTest.java
@@ -0,0 +1,56 @@
+package io.kestra.core.runners.pebble.filters;
+
+import com.google.common.collect.ImmutableMap;
+import io.kestra.core.exceptions.IllegalVariableEvaluationException;
+import io.kestra.core.runners.VariableRenderer;
+import io.micronaut.test.extensions.junit5.annotation.MicronautTest;
+import jakarta.inject.Inject;
+import org.junit.jupiter.api.Test;
+
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.Arrays;
+import java.util.Map;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+@MicronautTest
+class KeysFilterTest {
+    @Inject
+    VariableRenderer variableRenderer;
+
+    @Test
+    void map() throws IllegalVariableEvaluationException {
+        ZonedDateTime date = ZonedDateTime.parse("2013-09-08T16:19:00+02").withZoneSameLocal(ZoneId.systemDefault());
+
+        ImmutableMap vars = ImmutableMap.of(
+            "vars", ImmutableMap.of("second", Map.of(
+                "string", "string",
+                "int", 1,
+                "float", 1.123F,
+                "list", Arrays.asList(
+                    "string",
+                    1,
+                    1.123F
+                ),
+                "bool", true,
+                "date", date,
+                "map", Map.of(
+                    "string", "string",
+                    "int", 1,
+                    "float", 1.123F
+                )
+            ))
+        );
+
+        String render = variableRenderer.render("{{ vars.second.list | keys }}", vars);
+        assertThat(render, is("[0,1,2]"));
+
+        render = variableRenderer.render("{{ vars.second | keys }}", vars);
+        assertThat(render, containsString("\"string\""));
+        assertThat(render, containsString("\"map\""));
+        assertThat(render.split(",").length, is(7));
+    }
+}
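As the test shows, the filter returns a map's key set and, for lists and arrays, the zero-based index positions. A hedged usage sketch through the same VariableRenderer API (variable names reuse the test fixture above; the rendered loop output is indicative):

    // iterate over a map's keys inside a template
    String render = variableRenderer.render(
        "{% for key in vars.second.map | keys %}{{ key }};{% endfor %}",
        vars
    ); // e.g. "string;int;float;"

    // index positions of a list, e.g. to zip it with another list of the same size
    variableRenderer.render("{{ vars.second.list | keys }}", vars); // "[0,1,2]"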
From 30ff6acb6e72eb0441ade39cf6dec97c58608980 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Fri, 4 Feb 2022 14:56:51 +0100
Subject: [PATCH 17/44] feat(kafka-runner): scheduler could recreate deleted trigger during race condition (#457)

---
 .../sys/FlowListenersRestoreCommand.java      |  12 +-
 .../sys/FlowListenersRestoreCommandTest.java  |   4 +-
 .../core/schedulers/AbstractScheduler.java    |  22 ++-
 .../io/kestra/runner/kafka/KafkaExecutor.java |   2 +
 .../runner/kafka/KafkaFlowListeners.java      |  98 +-----------
 .../kestra/runner/kafka/KafkaScheduler.java   |  44 ------
 .../kafka/executors/ExecutorFlowLast.java     | 148 ++++++++++++++++++
 .../executors/ExecutorTriggerCleaner.java     |  72 +++++++++
 .../runner/kafka/KafkaFlowListenersTest.java  |  13 +-
 .../runner/kafka/KafkaSchedulerTest.java      |   5 +
 10 files changed, 267 insertions(+), 153 deletions(-)
 create mode 100644 runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorFlowLast.java
 create mode 100644 runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorTriggerCleaner.java

diff --git a/cli/src/main/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommand.java b/cli/src/main/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommand.java
index ae05ccf7266..870a4f45cea 100644
--- a/cli/src/main/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommand.java
+++ b/cli/src/main/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommand.java
@@ -24,7 +24,7 @@ public class FlowListenersRestoreCommand extends AbstractCommand {
     private ApplicationContext applicationContext;
 
     @CommandLine.Option(names = {"--timeout"}, description = "Timeout before quit, considering we complete the restore")
-    private Duration timeout = Duration.ofSeconds(15);
+    private Duration timeout = Duration.ofSeconds(60);
 
     public FlowListenersRestoreCommand() {
         super(false);
@@ -39,12 +39,16 @@ public Integer call() throws Exception {
         AtomicReference lastTime = new AtomicReference<>(ZonedDateTime.now());
 
         flowListeners.listen(flows -> {
-            stdOut("Received {0} flows", flows.size());
+            long count = flows.stream().filter(flow -> !flow.isDeleted()).count();
 
-            lastTime.set(ZonedDateTime.now());
+            stdOut("Received {0} active flows", count);
+
+            if (count > 0) {
+                lastTime.set(ZonedDateTime.now());
+            }
         });
 
-        // we can't know when it's over to wait no more flow received
+        // we can't know when it's over, wait no more flow received
         Await.until(() -> lastTime.get().compareTo(ZonedDateTime.now().minus(this.timeout)) < 0);
 
         return 0;

diff --git a/cli/src/test/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommandTest.java b/cli/src/test/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommandTest.java
index 5806ab532c1..660251766b3 100644
--- a/cli/src/test/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommandTest.java
+++ b/cli/src/test/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommandTest.java
@@ -46,8 +46,8 @@ void run() throws InterruptedException {
 
         thread.join();
 
-        assertThat(out.toString(), containsString("Received 1 flows"));
-        assertThat(out.toString(), containsString("Received 5 flows"));
+        assertThat(out.toString(), containsString("Received 1 active flows"));
+        assertThat(out.toString(), containsString("Received 5 active flows"));
     }
 }
}
\ No newline at end of file

diff --git a/core/src/main/java/io/kestra/core/schedulers/AbstractScheduler.java b/core/src/main/java/io/kestra/core/schedulers/AbstractScheduler.java
index e3d833cee0a..7debd75305a 100644
--- a/core/src/main/java/io/kestra/core/schedulers/AbstractScheduler.java
+++ b/core/src/main/java/io/kestra/core/schedulers/AbstractScheduler.java
@@ -61,6 +61,7 @@ public abstract class 
AbstractScheduler implements Runnable, AutoCloseable { private final Map lastEvaluate = new ConcurrentHashMap<>(); private final Map evaluateRunning = new ConcurrentHashMap<>(); private final Map evaluateRunningCount = new ConcurrentHashMap<>(); + private final Map triggerStateSaved = new ConcurrentHashMap<>(); @Getter private List schedulable = new ArrayList<>(); @@ -392,10 +393,6 @@ private Trigger getLastTrigger(FlowWithPollingTrigger f, ZonedDateTime now) { return triggerState .findLast(f.getTriggerContext()) .orElseGet(() -> { - // we don't find, so never started execution, create a trigger context with previous date in the past. - // this allows some edge case when the evaluation loop of schedulers will change second - // between start and end - ZonedDateTime nextDate = f.getPollingTrigger().nextEvaluationDate(Optional.empty()); Trigger build = Trigger.builder() @@ -407,7 +404,22 @@ private Trigger getLastTrigger(FlowWithPollingTrigger f, ZonedDateTime now) { .updatedDate(Instant.now()) .build(); - triggerState.save(build); + // we don't find, so never started execution, create a trigger context with previous date in the past. + // this allows some edge case when the evaluation loop of schedulers will change second + // between start and end + // but since we could have some changed on the flow in meantime, we wait 1 min before saving them. + if (triggerStateSaved.containsKey(build.uid())) { + Trigger cachedTrigger = triggerStateSaved.get(build.uid()); + + if (cachedTrigger.getUpdatedDate() != null && Instant.now().isAfter(cachedTrigger.getUpdatedDate().plusSeconds(60))) { + triggerState.save(build); + triggerStateSaved.remove(build.uid()); + } + + return cachedTrigger; + } else { + triggerStateSaved.put(build.uid(), build); + } return build; }); diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java index e4549a81a73..f368d7b8128 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaExecutor.java @@ -53,6 +53,7 @@ public void run() { kafkaAdminService.createIfNotExist(WorkerTaskResult.class); kafkaAdminService.createIfNotExist(Execution.class); kafkaAdminService.createIfNotExist(Flow.class); + kafkaAdminService.createIfNotExist(KafkaStreamSourceService.TOPIC_FLOWLAST); kafkaAdminService.createIfNotExist(Executor.class); kafkaAdminService.createIfNotExist(KafkaStreamSourceService.TOPIC_EXECUTOR_WORKERINSTANCE); kafkaAdminService.createIfNotExist(ExecutionKilled.class); @@ -66,6 +67,7 @@ public void run() { this.streams = this.kafkaExecutors .stream() + .parallel() .map(executor -> { Properties properties = new Properties(); // build diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java index b9edc5cfb41..901e0c0af91 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java @@ -33,8 +33,6 @@ @KafkaQueueEnabled public class KafkaFlowListeners implements FlowListenersInterface { private final KafkaAdminService kafkaAdminService; - private final FlowService flowService; - private SafeKeyValueStore store; private final List>> consumers = new ArrayList<>(); private final KafkaStreamService.Stream stream; @@ -43,18 +41,12 @@ public class KafkaFlowListeners implements FlowListenersInterface { 
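// Note: the FlowListenerBuild sub-topology that used to aggregate every Flow revision
// locally is removed below; the last-revision view is now produced once by
// ExecutorFlowLast into KafkaStreamSourceService.TOPIC_FLOWLAST, and this class only
// materializes that topic and notifies registered consumers. A hedged usage sketch of
// the listener API this class exposes (as used by FlowListenersRestoreCommand above):
//
//     flowListeners.listen(flows -> {
//         // invoked with the complete list of last-revision flows on every change
//         long active = flows.stream().filter(flow -> !flow.isDeleted()).count();
//         log.info("tracking {} active flows", active);
//     });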
@Inject public KafkaFlowListeners( KafkaAdminService kafkaAdminService, - KafkaStreamService kafkaStreamService, - FlowService flowService + KafkaStreamService kafkaStreamService ) { this.kafkaAdminService = kafkaAdminService; - this.flowService = flowService; - kafkaAdminService.createIfNotExist(Flow.class); kafkaAdminService.createIfNotExist(KafkaStreamSourceService.TOPIC_FLOWLAST); - KafkaStreamService.Stream buillLastVersion = kafkaStreamService.of(FlowListenerBuild.class, FlowListenerBuild.class, new FlowListenerBuild().topology(), log); - buillLastVersion.start(); - stream = kafkaStreamService.of(FlowListener.class, FlowListener.class, new FlowListener().topology(), log); stream.start((newState, oldState) -> { if (newState == KafkaStreams.State.RUNNING) { @@ -79,94 +71,6 @@ public KafkaFlowListeners( }); } - public class FlowListenerBuild { - public Topology topology() { - StreamsBuilder builder = new KafkaStreamsBuilder(); - - KStream stream = builder - .stream( - kafkaAdminService.getTopicName(Flow.class), - Consumed.with(Serdes.String(), JsonSerde.of(Flow.class, false)) - ); - - KStream result = KafkaStreamSourceService.logIfEnabled( - log, - stream, - (key, value) -> log.trace( - "Flow in '{}.{}' with revision {}", - value.getNamespace(), - value.getId(), - value.getRevision() - ), - "flow-in" - ) - .filter((key, value) -> value != null, Named.as("notNull")) - .selectKey((key, value) -> value.uidWithoutRevision(), Named.as("rekey")) - .groupBy( - (String key, Flow value) -> value.uidWithoutRevision(), - Grouped.as("grouped") - .withKeySerde(Serdes.String()) - .withValueSerde(JsonSerde.of(Flow.class, false)) - ) - .aggregate( - AllFlowRevision::new, - (key, value, aggregate) -> { - aggregate.revisions.add(value); - - return aggregate; - }, - Materialized.>as("list") - .withKeySerde(Serdes.String()) - .withValueSerde(JsonSerde.of(AllFlowRevision.class, false)) - ) - .mapValues( - (readOnlyKey, value) -> { - List flows = new ArrayList<>(flowService - .keepLastVersion(value.revisions)); - - if (flows.size() > 1) { - throw new IllegalArgumentException("Too many flows (" + flows.size() + ")"); - } - - return flows.size() == 0 ? 
null : flows.get(0); - }, - Named.as("last") - ) - .toStream(); - - KafkaStreamSourceService.logIfEnabled( - log, - result, - (key, value) -> log.trace( - "Flow out '{}.{}' with revision {}", - value.getNamespace(), - value.getId(), - value.getRevision() - ), - "Flow-out" - ) - .to( - kafkaAdminService.getTopicName(KafkaStreamSourceService.TOPIC_FLOWLAST), - Produced.with(Serdes.String(), JsonSerde.of(Flow.class)) - ); - - Topology topology = builder.build(); - - if (log.isTraceEnabled()) { - log.trace(topology.describe().toString()); - } - - return topology; - } - - } - - @NoArgsConstructor - @Getter - public static class AllFlowRevision { - private final List revisions = new ArrayList<>(); - } - public class FlowListener { public Topology topology() { StreamsBuilder builder = new KafkaStreamsBuilder(); diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java index 8d6e1344172..8478729bfb0 100644 --- a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java +++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaScheduler.java @@ -51,8 +51,6 @@ public class KafkaScheduler extends AbstractScheduler { private final KafkaAdminService kafkaAdminService; private final KafkaStreamService kafkaStreamService; private final QueueInterface triggerQueue; - private final ConditionService conditionService; - private final KafkaStreamSourceService kafkaStreamSourceService; private final QueueService queueService; private final KafkaProducer kafkaProducer; private final TopicsConfig topicsConfigTrigger; @@ -76,8 +74,6 @@ public KafkaScheduler( this.triggerQueue = applicationContext.getBean(QueueInterface.class, Qualifiers.byName(QueueFactoryInterface.TRIGGER_NAMED)); this.kafkaAdminService = applicationContext.getBean(KafkaAdminService.class); this.kafkaStreamService = applicationContext.getBean(KafkaStreamService.class); - this.conditionService = applicationContext.getBean(ConditionService.class); - this.kafkaStreamSourceService = applicationContext.getBean(KafkaStreamSourceService.class); this.queueService = applicationContext.getBean(QueueService.class); this.kafkaProducer = applicationContext.getBean(KafkaProducerService.class).of( KafkaScheduler.class, @@ -90,43 +86,6 @@ public KafkaScheduler( this.kafkaProducer.initTransactions(); } - public class SchedulerCleaner { - private StreamsBuilder topology() { - StreamsBuilder builder = new KafkaStreamsBuilder(); - - KStream executorKStream = kafkaStreamSourceService.executorKStream(builder); - - kafkaStreamSourceService.flowGlobalKTable(builder); - kafkaStreamSourceService.templateGlobalKTable(builder); - KStream executionWithFlowKStream = kafkaStreamSourceService.executorWithFlow(executorKStream, false); - - GlobalKTable triggerGlobalKTable = kafkaStreamSourceService.triggerGlobalKTable(builder); - - executionWithFlowKStream - .filter( - (key, value) -> value.getExecution().getTrigger() != null, - Named.as("cleanTrigger-hasTrigger-filter") - ) - .filter( - (key, value) -> conditionService.isTerminatedWithListeners(value.getFlow(), value.getExecution()), - Named.as("cleanTrigger-terminated-filter") - ) - .join( - triggerGlobalKTable, - (key, executionWithFlow) -> Trigger.uid(executionWithFlow.getExecution()), - (execution, trigger) -> trigger.resetExecution(), - Named.as("cleanTrigger-join") - ) - .selectKey((key, value) -> queueService.key(value)) - .to( - kafkaAdminService.getTopicName(Trigger.class), - Produced.with(Serdes.String(), 

    public class SchedulerState {
        public StreamsBuilder topology() {
            StreamsBuilder builder = new KafkaStreamsBuilder();
@@ -221,9 +180,6 @@ public void initStream() {
        this.executionState = new KafkaSchedulerExecutionState(
            stateStream.store(StoreQueryParameters.fromNameAndType(STATESTORE_EXECUTOR, QueryableStoreTypes.keyValueStore()))
        );
-
-        this.cleanTriggerStream = this.init(SchedulerCleaner.class, new SchedulerCleaner().topology());
-        this.cleanTriggerStream.start();
    }

    @Override
diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorFlowLast.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorFlowLast.java
new file mode 100644
index 00000000000..352013becb9
--- /dev/null
+++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorFlowLast.java
@@ -0,0 +1,148 @@
+package io.kestra.runner.kafka.executors;
+
+import io.kestra.core.models.flows.Flow;
+import io.kestra.core.models.triggers.AbstractTrigger;
+import io.kestra.core.models.triggers.Trigger;
+import io.kestra.core.queues.QueueService;
+import io.kestra.core.services.FlowService;
+import io.kestra.core.utils.ListUtils;
+import io.kestra.runner.kafka.KafkaQueueEnabled;
+import io.kestra.runner.kafka.serializers.JsonSerde;
+import io.kestra.runner.kafka.services.KafkaAdminService;
+import io.kestra.runner.kafka.services.KafkaStreamSourceService;
+import io.kestra.runner.kafka.services.KafkaStreamsBuilder;
+import jakarta.inject.Inject;
+import jakarta.inject.Singleton;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+import lombok.extern.jackson.Jacksonized;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.common.utils.Bytes;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.StreamsBuilder;
+import org.apache.kafka.streams.kstream.*;
+import org.apache.kafka.streams.state.KeyValueStore;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+@KafkaQueueEnabled
+@Singleton
+@Slf4j
+public class ExecutorFlowLast implements KafkaExecutorInterface {
+    @Inject
+    private KafkaAdminService kafkaAdminService;
+
+    @Inject
+    private QueueService queueService;
+
+    public StreamsBuilder topology() {
+        StreamsBuilder builder = new KafkaStreamsBuilder();
+
+        // last global KTable
+        GlobalKTable<String, Flow> flowGlobalKTable = builder
+            .globalTable(
+                kafkaAdminService.getTopicName(KafkaStreamSourceService.TOPIC_FLOWLAST),
+                Consumed.with(Serdes.String(), JsonSerde.of(Flow.class)).withName("GlobalKTable.FlowLast"),
+                Materialized.<String, Flow, KeyValueStore<Bytes, byte[]>>as("last")
+                    .withKeySerde(Serdes.String())
+                    .withValueSerde(JsonSerde.of(Flow.class))
+            );
+
+        // stream
+        KStream<String, Flow> stream = builder
+            .stream(
+                kafkaAdminService.getTopicName(Flow.class),
+                Consumed.with(Serdes.String(), JsonSerde.of(Flow.class, false)).withName("Stream.Flow")
+            );
+
+        // logs
+        stream = KafkaStreamSourceService.logIfEnabled(
+            log,
+            stream,
+            (key, value) -> log.trace(
+                "Flow in '{}.{}' with revision {}",
+                value.getNamespace(),
+                value.getId(),
+                value.getRevision()
+            ),
+            "Main"
+        );
+
+        // join with previous if more recent revision
+        KStream<String, ExecutorFlowLast.FlowWithPrevious> streamWithPrevious = stream
+            .filter((key, value) -> value != null, Named.as("Main.notNull"))
+            .selectKey((key, value) -> value.uidWithoutRevision(), Named.as("Main.selectKey"))
+            .leftJoin(
+                flowGlobalKTable,
+                (key, value) -> key,
+                (readOnlyKey, current, previous) -> {
+                    if (previous == null) {
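+                        // editor's note: first revision seen for this flow uid, so there is no previous version to compare against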
+                        return new ExecutorFlowLast.FlowWithPrevious(current, null);
+                    } else if (current.getRevision() < previous.getRevision()) {
+                        return null;
+                    } else {
+                        return new ExecutorFlowLast.FlowWithPrevious(current, previous);
+                    }
+                },
+                Named.as("Main.join")
+            )
+            .filter((key, value) -> value != null, Named.as("Main.joinNotNull"));
+
+        // remove triggers
+        streamWithPrevious
+            .flatMap(
+                (key, value) -> {
+                    List<AbstractTrigger> deletedTriggers = new ArrayList<>();
+
+                    if (value.getFlow().isDeleted()) {
+                        deletedTriggers = ListUtils.emptyOnNull(value.getFlow().getTriggers());
+                    } else if (value.getPrevious() != null) {
+                        deletedTriggers = FlowService.findRemovedTrigger(
+                            value.getFlow(),
+                            value.getPrevious()
+                        );
+                    }
+
+                    return deletedTriggers
+                        .stream()
+                        .map(t -> new KeyValue<>(
+                            queueService.key(Trigger.of(value.getFlow(), t)),
+                            (Trigger) null
+                        ))
+                        .collect(Collectors.toList());
+                },
+                Named.as("DeleteTrigger.flatMap")
+            )
+            .to(
+                kafkaAdminService.getTopicName(Trigger.class),
+                Produced.with(Serdes.String(), JsonSerde.of(Trigger.class)).withName("To.Trigger")
+            );
+
+        // send to last and don't drop deleted flow in order to keep last version
+        streamWithPrevious
+            .map(
+                (key, value) -> new KeyValue<>(
+                    value.getFlow().uidWithoutRevision(),
+                    value.getFlow()
+                ),
+                Named.as("Main.Map")
+            )
+            .to(
+                kafkaAdminService.getTopicName(KafkaStreamSourceService.TOPIC_FLOWLAST),
+                Produced.with(Serdes.String(), JsonSerde.of(Flow.class)).withName("To.FlowLast")
+            );
+
+        return builder;
+    }
+
+    @Getter
+    @Jacksonized
+    @AllArgsConstructor
+    public static class FlowWithPrevious {
+        private Flow flow;
+        private Flow previous;
+    }
+}
diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorTriggerCleaner.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorTriggerCleaner.java
new file mode 100644
index 00000000000..8e02ee0467f
--- /dev/null
+++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/executors/ExecutorTriggerCleaner.java
@@ -0,0 +1,72 @@
+package io.kestra.runner.kafka.executors;
+
+import io.kestra.core.models.triggers.Trigger;
+import io.kestra.core.queues.QueueService;
+import io.kestra.core.runners.Executor;
+import io.kestra.core.services.ConditionService;
+import io.kestra.runner.kafka.KafkaQueueEnabled;
+import io.kestra.runner.kafka.serializers.JsonSerde;
+import io.kestra.runner.kafka.services.KafkaAdminService;
+import io.kestra.runner.kafka.services.KafkaStreamSourceService;
+import io.kestra.runner.kafka.services.KafkaStreamsBuilder;
+import jakarta.inject.Inject;
+import jakarta.inject.Singleton;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.streams.StreamsBuilder;
+import org.apache.kafka.streams.kstream.GlobalKTable;
+import org.apache.kafka.streams.kstream.KStream;
+import org.apache.kafka.streams.kstream.Named;
+import org.apache.kafka.streams.kstream.Produced;
+
+@KafkaQueueEnabled
+@Singleton
+@Slf4j
+public class ExecutorTriggerCleaner implements KafkaExecutorInterface {
+    @Inject
+    private KafkaAdminService kafkaAdminService;
+
+    @Inject
+    private KafkaStreamSourceService kafkaStreamSourceService;
+
+    @Inject
+    private QueueService queueService;
+
+    @Inject
+    private ConditionService conditionService;
+
+    public StreamsBuilder topology() {
+        StreamsBuilder builder = new KafkaStreamsBuilder();
+
+        KStream<String, Executor> executorKStream = kafkaStreamSourceService.executorKStream(builder);
+
+        kafkaStreamSourceService.flowGlobalKTable(builder);
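+        // editor's note (assumption): the flow and template GlobalKTables appear to be
+        // registered on the builder only for the side effect of materializing the state
+        // stores that executorWithFlow() below reads, hence the unused return values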
+        kafkaStreamSourceService.templateGlobalKTable(builder);
+        KStream<String, Executor> executionWithFlowKStream = kafkaStreamSourceService.executorWithFlow(executorKStream, false);
+
+        GlobalKTable<String, Trigger> triggerGlobalKTable = kafkaStreamSourceService.triggerGlobalKTable(builder);
+
+        executionWithFlowKStream
+            .filter(
+                (key, value) -> value.getExecution().getTrigger() != null,
+                Named.as("cleanTrigger-hasTrigger-filter")
+            )
+            .filter(
+                (key, value) -> conditionService.isTerminatedWithListeners(value.getFlow(), value.getExecution()),
+                Named.as("cleanTrigger-terminated-filter")
+            )
+            .join(
+                triggerGlobalKTable,
+                (key, executionWithFlow) -> Trigger.uid(executionWithFlow.getExecution()),
+                (execution, trigger) -> trigger.resetExecution(),
+                Named.as("cleanTrigger-join")
+            )
+            .selectKey((key, value) -> queueService.key(value))
+            .to(
+                kafkaAdminService.getTopicName(Trigger.class),
+                Produced.with(Serdes.String(), JsonSerde.of(Trigger.class))
+            );
+
+        return builder;
+    }
+}
diff --git a/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaFlowListenersTest.java b/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaFlowListenersTest.java
index b7035ecf044..e417c03377b 100644
--- a/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaFlowListenersTest.java
+++ b/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaFlowListenersTest.java
@@ -3,20 +3,22 @@
 import com.fasterxml.jackson.core.JsonProcessingException;
 import io.kestra.core.models.flows.Flow;
 import io.kestra.core.runners.FlowListenersTest;
+import io.kestra.core.runners.StandAloneRunner;
 import io.kestra.core.serializers.JacksonMapper;
 import io.kestra.core.utils.IdUtils;
 import io.kestra.runner.kafka.configs.TopicsConfig;
 import io.kestra.runner.kafka.services.KafkaProducerService;
 import io.micronaut.context.ApplicationContext;
+import jakarta.inject.Inject;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.serialization.Serdes;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;

 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
-import jakarta.inject.Inject;

 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.is;
@@ -31,6 +33,15 @@ class KafkaFlowListenersTest extends FlowListenersTest {
     @Inject
     KafkaProducerService kafkaProducerService;

+    @Inject
+    private StandAloneRunner runner;
+
+    @BeforeEach
+    void init() {
+        runner.setSchedulerEnabled(false);
+        runner.run();
+    }
+
     @Test
     public void all() {
         this.suite(flowListenersService);
diff --git a/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaSchedulerTest.java b/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaSchedulerTest.java
index 82037c5950e..56f865ea7dd 100644
--- a/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaSchedulerTest.java
+++ b/runner-kafka/src/test/java/io/kestra/runner/kafka/KafkaSchedulerTest.java
@@ -33,6 +33,9 @@ class KafkaSchedulerTest extends AbstractSchedulerTest {
     @Inject
     protected KafkaAdminService kafkaAdminService;

+    @Inject
+    protected KafkaExecutor kafkaExecutor;
+
     @Inject
     protected FlowRepositoryInterface flowRepositoryInterface;

@@ -58,6 +61,8 @@ void thread() throws Exception {

         flowRepositoryInterface.create(flow);

+        kafkaExecutor.run();
+
         // scheduler
         try (AbstractScheduler scheduler = new KafkaScheduler(
             applicationContext,

From 39d2b32649a62f11821ed1ef18a53dcf2e6309c2 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Fri, 4 Feb 2022 22:44:36 +0100
Subject: [PATCH 18/44] fix(core): flow listeners injection blocked the
 startup; use an explicit run() method instead

---
 .../commands/sys/FlowListenersRestoreCommand.java | 2 +-
 .../kestra/core/schedulers/AbstractScheduler.java | 4 +++-
 .../io/kestra/core/schedulers/DefaultScheduler.java | 2 ++
 .../core/services/FlowListenersInterface.java | 2 ++
 .../io/kestra/core/runners/FlowListenersTest.java | 2 ++
 .../io/kestra/runner/kafka/KafkaFlowListeners.java | 13 +++++++------
 .../runner/kafka/services/KafkaStreamService.java | 2 +-
 .../kestra/runner/memory/MemoryFlowListeners.java | 4 +++-
 8 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/cli/src/main/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommand.java b/cli/src/main/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommand.java
index 870a4f45cea..e628dcc2a81 100644
--- a/cli/src/main/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommand.java
+++ b/cli/src/main/java/io/kestra/cli/commands/sys/FlowListenersRestoreCommand.java
@@ -35,9 +35,9 @@ public Integer call() throws Exception {
         super.call();

         FlowListenersInterface flowListeners = applicationContext.getBean(FlowListenersInterface.class);
-
         AtomicReference<ZonedDateTime> lastTime = new AtomicReference<>(ZonedDateTime.now());

+        flowListeners.run();
         flowListeners.listen(flows -> {
             long count = flows.stream().filter(flow -> !flow.isDeleted()).count();
diff --git a/core/src/main/java/io/kestra/core/schedulers/AbstractScheduler.java b/core/src/main/java/io/kestra/core/schedulers/AbstractScheduler.java
index 7debd75305a..d12072ba40d 100644
--- a/core/src/main/java/io/kestra/core/schedulers/AbstractScheduler.java
+++ b/core/src/main/java/io/kestra/core/schedulers/AbstractScheduler.java
@@ -46,7 +46,7 @@ public abstract class AbstractScheduler implements Runnable, AutoCloseable {
     protected final ApplicationContext applicationContext;
     private final QueueInterface<Execution> executionQueue;
-    private final FlowListenersInterface flowListeners;
+    protected final FlowListenersInterface flowListeners;
     private final RunContextFactory runContextFactory;
     private final MetricRegistry metricRegistry;
     private final ConditionService conditionService;
@@ -91,6 +91,8 @@ public AbstractScheduler(

     @Override
     public void run() {
+        flowListeners.run();
+
         ScheduledFuture<?> handle = scheduleExecutor.scheduleAtFixedRate(
             this::handle,
             0,
diff --git a/core/src/main/java/io/kestra/core/schedulers/DefaultScheduler.java b/core/src/main/java/io/kestra/core/schedulers/DefaultScheduler.java
index f64b24166d5..2aae01cbb9a 100644
--- a/core/src/main/java/io/kestra/core/schedulers/DefaultScheduler.java
+++ b/core/src/main/java/io/kestra/core/schedulers/DefaultScheduler.java
@@ -35,6 +35,8 @@ public DefaultScheduler(
     @SuppressWarnings("unchecked")
     @Override
     public void run() {
+        flowListeners.run();
+
         QueueInterface<Execution> executionQueue = applicationContext.getBean(QueueInterface.class, Qualifiers.byName(QueueFactoryInterface.EXECUTION_NAMED));
         QueueInterface<Trigger> triggerQueue = applicationContext.getBean(QueueInterface.class, Qualifiers.byName(QueueFactoryInterface.TRIGGER_NAMED));
diff --git a/core/src/main/java/io/kestra/core/services/FlowListenersInterface.java b/core/src/main/java/io/kestra/core/services/FlowListenersInterface.java
index 89506c02ad7..30f054b4f6b 100644
--- a/core/src/main/java/io/kestra/core/services/FlowListenersInterface.java
+++ b/core/src/main/java/io/kestra/core/services/FlowListenersInterface.java
@@ -6,6 +6,8 @@
 import java.util.function.Consumer;

 public interface FlowListenersInterface {
+    void run();
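+    // editor's note: run() is where implementations now start listening (work previously
+    // done in their constructors), so it must be invoked once before flows() is used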
+
     void listen(Consumer<List<Flow>> consumer);

     List<Flow> flows();
diff --git a/core/src/test/java/io/kestra/core/runners/FlowListenersTest.java b/core/src/test/java/io/kestra/core/runners/FlowListenersTest.java
index d139292e699..b8c93f14749 100644
--- a/core/src/test/java/io/kestra/core/runners/FlowListenersTest.java
+++ b/core/src/test/java/io/kestra/core/runners/FlowListenersTest.java
@@ -37,6 +37,8 @@ protected static Flow create(String flowId, String taskId) {
     }

     public void suite(FlowListenersInterface flowListenersService) {
+        flowListenersService.run();
+
         AtomicInteger count = new AtomicInteger();
         var ref = new Ref();

diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java
index 901e0c0af91..f6ebe3349a4 100644
--- a/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java
+++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/KafkaFlowListeners.java
@@ -17,7 +17,6 @@
 import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
 import io.kestra.core.models.flows.Flow;
 import io.kestra.core.services.FlowListenersInterface;
-import io.kestra.core.services.FlowService;
 import io.kestra.runner.kafka.serializers.JsonSerde;

 import java.util.ArrayList;
@@ -33,18 +32,20 @@
 @KafkaQueueEnabled
 public class KafkaFlowListeners implements FlowListenersInterface {
     private final KafkaAdminService kafkaAdminService;
+    private final KafkaStreamService kafkaStreamService;

     private SafeKeyValueStore<String, Flow> store;
     private final List<Consumer<List<Flow>>> consumers = new ArrayList<>();
-    private final KafkaStreamService.Stream stream;
+    private KafkaStreamService.Stream stream;
     private List<Flow> flows;

     @Inject
-    public KafkaFlowListeners(
-        KafkaAdminService kafkaAdminService,
-        KafkaStreamService kafkaStreamService
-    ) {
+    public KafkaFlowListeners(KafkaAdminService kafkaAdminService, KafkaStreamService kafkaStreamService) {
         this.kafkaAdminService = kafkaAdminService;
+        this.kafkaStreamService = kafkaStreamService;
+    }

+    @Override
+    public void run() {
         kafkaAdminService.createIfNotExist(KafkaStreamSourceService.TOPIC_FLOWLAST);

         stream = kafkaStreamService.of(FlowListener.class, FlowListener.class, new FlowListener().topology(), log);
diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java
index 359e9e6b5e6..db444bf517d 100644
--- a/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java
+++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaStreamService.java
@@ -108,7 +108,7 @@ public KafkaStreamService.Stream of(Class<?> clientId, Class<?> groupId, Topolog
         }

         Stream stream = new Stream(topology, properties, metricsEnabled ?
metricRegistry : null, logger); - eventPublisher.publishEventAsync(new KafkaStreamEndpoint.Event(clientId.getName(), stream)); + eventPublisher.publishEvent(new KafkaStreamEndpoint.Event(clientId.getName(), stream)); return stream; } diff --git a/runner-memory/src/main/java/io/kestra/runner/memory/MemoryFlowListeners.java b/runner-memory/src/main/java/io/kestra/runner/memory/MemoryFlowListeners.java index 11c9223967b..2782acd63cd 100644 --- a/runner-memory/src/main/java/io/kestra/runner/memory/MemoryFlowListeners.java +++ b/runner-memory/src/main/java/io/kestra/runner/memory/MemoryFlowListeners.java @@ -37,9 +37,11 @@ public MemoryFlowListeners( @Named(QueueFactoryInterface.FLOW_NAMED) QueueInterface flowQueue ) { this.flowQueue = flowQueue; - this.flows = flowRepository.findAll(); + } + @Override + public void run() { this.flowQueue.receive(flow -> { if (flow.isDeleted()) { this.remove(flow); From 5e1e6df56dc9e89abb39cb6409fb169fb276e9cc Mon Sep 17 00:00:00 2001 From: Ludovic DEHON Date: Sat, 5 Feb 2022 21:26:56 +0100 Subject: [PATCH 19/44] feat(ui): update cytoscape and change dom node --- ui/package-lock.json | 26 +++++++++--------- ui/package.json | 6 ++-- ui/src/components/graph/TopologyTree.vue | 35 ++++++++++++++---------- 3 files changed, 36 insertions(+), 31 deletions(-) diff --git a/ui/package-lock.json b/ui/package-lock.json index d2b715032bc..2c30e9453d8 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -9548,26 +9548,26 @@ "dev": true }, "cytoscape": { - "version": "3.17.3", - "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.17.3.tgz", - "integrity": "sha512-gWbyNmmJ13vDujzdoBO/VuP3k8vjwGvdDyf9WVR8dQt1FP7+IHd38oGZrE1Bhmrgcb90ILNTvzm3dcimI9GtEQ==", + "version": "3.20.0", + "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.20.0.tgz", + "integrity": "sha512-V2OUoav8ltY9UPgdjuhQOqkkWT28IbJODioWCjcnLqmjym9lMpnD0ie0vdQCdAiZapjbTGAv8aoKmmSuVcC1gA==", "requires": { "heap": "^0.2.6", "lodash.debounce": "^4.0.8" } }, "cytoscape-dagre": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/cytoscape-dagre/-/cytoscape-dagre-2.3.2.tgz", - "integrity": "sha512-dL9+RvGkatSlIdOKXiFwHpnpTo8ydFMqIYzZFkImJXNbDci3feyYxR46wFoaG9GFiWimc6XD9Lm0x29b1wvWpw==", + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/cytoscape-dagre/-/cytoscape-dagre-2.4.0.tgz", + "integrity": "sha512-jfOtKzKduCnruBs3YMHS9kqWjZKqvp6loSJwlotPO5pcU4wLUhkx7ZBIyW3VWZXa8wfkGxv/zhWoBxWtYrUxKQ==", "requires": { "dagre": "^0.8.5" } }, - "cytoscape-node-html-label": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/cytoscape-node-html-label/-/cytoscape-node-html-label-1.2.2.tgz", - "integrity": "sha512-oUVwrlsIlaJJ8QrQFSMdv3uXVXPg6tMH/Tfofr8JuZIovqI4fPqBi6sQgCMcVpS6k9Td0TTjowBsNRw32CESWg==" + "cytoscape-dom-node": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/cytoscape-dom-node/-/cytoscape-dom-node-1.1.0.tgz", + "integrity": "sha512-t/nL5cm4UL4gvCz3ZSdh6JlnvVbbgx7l15tZmVvA3j8cXHjqPEs7qiUlf8X0XG7uUlfNGkQroH3fiTeHHWH4sQ==" }, "d": { "version": "1.0.1", @@ -12071,9 +12071,9 @@ "dev": true }, "heap": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/heap/-/heap-0.2.6.tgz", - "integrity": "sha1-CH4fELBGky/IWU3Z5tN4r8nR5aw=" + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/heap/-/heap-0.2.7.tgz", + "integrity": "sha512-2bsegYkkHO+h/9MGbn6KWcE45cHZgPANo5LXF7EvWdT0yT2EguSVO1nDgU5c8+ZOPwp2vMNa7YFsJhVcDR9Sdg==" }, "hex-color-regex": { "version": "1.1.0", diff --git a/ui/package.json b/ui/package.json index 
29087191c5c..afa405ee42b 100644 --- a/ui/package.json +++ b/ui/package.json @@ -18,9 +18,9 @@ "cron-parser": "^4.2.1", "cron-validator": "^1.2.1", "cronstrue": "^1.122.0", - "cytoscape": "3.17.3", - "cytoscape-dagre": "^2.3.2", - "cytoscape-node-html-label": "^1.2.1", + "cytoscape": "3.20.0", + "cytoscape-dagre": "^2.4.0", + "cytoscape-dom-node": "^1.1.0", "humanize-duration": "^3.27.1", "js-yaml": "^4.1.0", "lodash": "4.17.21", diff --git a/ui/src/components/graph/TopologyTree.vue b/ui/src/components/graph/TopologyTree.vue index 7301aa6c106..15be0bbb134 100644 --- a/ui/src/components/graph/TopologyTree.vue +++ b/ui/src/components/graph/TopologyTree.vue @@ -57,7 +57,7 @@ diff --git a/ui/src/components/inputs/Editor.vue b/ui/src/components/inputs/Editor.vue index 2dbf5d9bba5..fa1ec9ea210 100644 --- a/ui/src/components/inputs/Editor.vue +++ b/ui/src/components/inputs/Editor.vue @@ -269,6 +269,7 @@ }; diff --git a/ui/src/components/graph/TreeNode.vue b/ui/src/components/graph/TreeNode.vue index 46e9755089c..a392f62db43 100644 --- a/ui/src/components/graph/TreeNode.vue +++ b/ui/src/components/graph/TreeNode.vue @@ -246,7 +246,7 @@ cursor: pointer; display: flex; width: 200px; - border: 1px solid var(--gray-600); + background: var(--gray-100); .btn, .card-header { border-radius: 0 !important; @@ -262,14 +262,14 @@ width: 35px; height: 51px; background: var(--white); - border-right: 1px solid var(--gray-500); + border-right: 1px solid var(--table-border-color); position: relative; } .status-color { width: 10px; height: 51px; - border-right: 1px solid var(--gray-500); + border-right: 1px solid var(--table-border-color); } .is-success { @@ -290,18 +290,21 @@ .task-content { flex-grow: 1; - background-color: var(--light); width: 38px; .card-header { height: 25px; padding: 2px; margin: 0; - border-bottom: 1px solid var(--gray-500); - background: var(--gray-200); + border-bottom: 1px solid var(--table-border-color); flex: 1; flex-wrap: nowrap; + background-color: var(--gray-200); + .theme-dark & { + background-color: var(--gray-300); + } + .icon-wrapper { display: inline-block; flex-shrink: 2; diff --git a/ui/src/components/layout/Cytoscape.vue b/ui/src/components/layout/Cytoscape.vue new file mode 100644 index 00000000000..e875e90da65 --- /dev/null +++ b/ui/src/components/layout/Cytoscape.vue @@ -0,0 +1,116 @@ + + + diff --git a/ui/src/mixins/VueManual.js b/ui/src/mixins/VueManual.js new file mode 100644 index 00000000000..47cf6c9c9ec --- /dev/null +++ b/ui/src/mixins/VueManual.js @@ -0,0 +1,33 @@ +import Vue from "vue"; + +export default { + manualComponents: [], + methods: { + createManualComponent(parent, component, propsData) { + const vueComponent = Vue.extend({ + extends: component, + }); + + return new vueComponent({ + parent: parent, + propsData + }); + }, + mountManualComponent(vueInstance) { + const mount = vueInstance.$mount(); + + if (this.manualComponents === undefined) { + this.manualComponents = []; + } + + this.manualComponents.push(mount); + + return mount; + } + }, + destroyed() { + (this.manualComponents || []).forEach(value => { + value.$destroy(); + }); + } +} From cd45aa0a99d0aaaf04cd73a3fc636b587ec78aad Mon Sep 17 00:00:00 2001 From: Ludovic DEHON Date: Fri, 18 Feb 2022 13:37:43 +0100 Subject: [PATCH 40/44] chore(core): make public access on allTasksWithChilds --- core/src/main/java/io/kestra/core/models/flows/Flow.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/io/kestra/core/models/flows/Flow.java 
b/core/src/main/java/io/kestra/core/models/flows/Flow.java
index 7f60b48522d..fbf102e4ecc 100644
--- a/core/src/main/java/io/kestra/core/models/flows/Flow.java
+++ b/core/src/main/java/io/kestra/core/models/flows/Flow.java
@@ -141,7 +141,7 @@ public Stream<Task> allTasks() {
             .flatMap(Collection::stream);
     }

-    private List<Task> allTasksWithChilds() {
+    public List<Task> allTasksWithChilds() {
         return allTasks()
             .flatMap(this::allTasksWithChilds)
             .collect(Collectors.toList());

From 3b83ef759125d069efb3f261dd3a1b30e0a24e04 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Fri, 18 Feb 2022 17:12:02 +0100
Subject: [PATCH 41/44] chore(ui): refactor flow Edit

---
 ui/src/components/flows/FlowSource.vue | 2 +-
 ui/src/mixins/flowTemplateEdit.js | 33 ++++++++++++-------
 ui/src/override/components/flows/FlowEdit.vue | 6 ++++
 3 files changed, 28 insertions(+), 13 deletions(-)
 create mode 100644 ui/src/override/components/flows/FlowEdit.vue

diff --git a/ui/src/components/flows/FlowSource.vue b/ui/src/components/flows/FlowSource.vue
index 65dbab85857..4af8f8f48bc 100644
--- a/ui/src/components/flows/FlowSource.vue
+++ b/ui/src/components/flows/FlowSource.vue
@@ -7,7 +7,7 @@
\ No newline at end of file

From 6b23cc128305a9f1448c1377afca6825d871023c Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Fri, 18 Feb 2022 18:43:09 +0100
Subject: [PATCH 42/44] fix(kafka-runner): prevent first startup error on topic creation

---
 .../io/kestra/runner/kafka/services/KafkaAdminService.java | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaAdminService.java b/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaAdminService.java
index 7c06acdc5f9..ed85984fc82 100644
--- a/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaAdminService.java
+++ b/runner-kafka/src/main/java/io/kestra/runner/kafka/services/KafkaAdminService.java
@@ -145,9 +145,10 @@ public void createIfNotExist(TopicsConfig topicConfig) {
                 }}).all().get();

                 log.info("Topic Config '{}' updated", newTopic.name());
-            } catch (TopicExistsException ignored) {
-            } catch (InterruptedException | ExecutionException e1) {
-                throw new RuntimeException(e);
+            } catch (ExecutionException | InterruptedException exception) {
+                if (!(exception.getCause() instanceof TopicExistsException)) {
+                    log.warn("Unable to update topic '{}'", newTopic.name(), exception);
+                }
             }
         } else {
             throw new RuntimeException(e);

From d8763de6113090478a1e83dc3620715b0b833006 Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Sun, 20 Feb 2022 21:00:18 +0100
Subject: [PATCH 43/44] fix(core): fix namespace collector stats

---
 .../main/java/io/kestra/core/models/collectors/FlowUsage.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/core/src/main/java/io/kestra/core/models/collectors/FlowUsage.java b/core/src/main/java/io/kestra/core/models/collectors/FlowUsage.java
index 078c51023b6..ce4ac26bee4 100644
--- a/core/src/main/java/io/kestra/core/models/collectors/FlowUsage.java
+++ b/core/src/main/java/io/kestra/core/models/collectors/FlowUsage.java
@@ -32,6 +32,7 @@ public static FlowUsage of(FlowRepositoryInterface flowRepository) {
             .namespacesCount(allFlows
                 .stream()
                 .map(Flow::getNamespace)
+                .distinct()
                 .count()
             )
             .taskTypeCount(allFlows

From ee6004e39d3173edc3327335da534b493a30a05c Mon Sep 17 00:00:00 2001
From: Ludovic DEHON
Date: Tue, 22 Feb 2022 10:38:03 +0100
Subject: [PATCH 44/44] feat(cicd): create a release on tag

---
 .github/workflows/main.yml | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff
--git a/.github/workflows/main.yml b/.github/workflows/main.yml index 44c6188148d..880995333f7 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -200,6 +200,16 @@ jobs: KESTRA_PLUGINS=${{ steps.vars.outputs.plugins }} io.kestra.storage:storage-gcs:LATEST io.kestra.storage:storage-minio:LATEST io.kestra.plugin:plugin-aws:LATEST io.kestra.plugin:plugin-compress:LATEST io.kestra.plugin:plugin-crypto:LATEST io.kestra.plugin:plugin-elasticsearch:LATEST io.kestra.plugin:plugin-fs:LATEST io.kestra.plugin:plugin-gcp:LATEST io.kestra.plugin:plugin-googleworkspace:LATEST io.kestra.plugin:plugin-jdbc-clickhouse:LATEST io.kestra.plugin:plugin-jdbc-mysql:LATEST io.kestra.plugin:plugin-jdbc-oracle:LATEST io.kestra.plugin:plugin-jdbc-postgres:LATEST io.kestra.plugin:plugin-jdbc-redshift:LATEST io.kestra.plugin:plugin-jdbc-vertica:LATEST io.kestra.plugin:plugin-jdbc-vectorwise:LATEST io.kestra.plugin:plugin-kafka:LATEST io.kestra.plugin:plugin-kubernetes:LATEST io.kestra.plugin:plugin-mongodb:LATEST io.kestra.plugin:plugin-notifications:LATEST io.kestra.plugin:plugin-script-groovy:LATEST io.kestra.plugin:plugin-script-jython:LATEST io.kestra.plugin:plugin-script-nashorn:LATEST io.kestra.plugin:plugin-serdes:LATEST io.kestra.plugin:plugin-singer:LATEST APT_PACKAGES=python3-pip python3-wheel python3-setuptools python3-virtualenv nodejs + # GitHub Release + - name: Create GitHub release + uses: "marvinpinto/action-automatic-releases@latest" + if: startsWith(github.ref, 'refs/tags/v') + with: + repo_token: "${{ secrets.GITHUB_TOKEN }}" + draft: true + files: | + build/executable/* + # Slack - name: Slack notification uses: 8398a7/action-slack@v3
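        # editor's note (assumption): 8398a7/action-slack@v3 reads the SLACK_WEBHOOK_URL
        # secret from its env block, which is expected to follow in this step's configuration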