From 3ad2a57889f5e846cfeff61459983a2868a4b8e7 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 19 Dec 2018 09:42:48 +0100 Subject: [PATCH 01/64] NodeId available on all log lines Two aproaches. First with system variable and a plugin with atomic reference. Second with custom Marker, LoggerContext, Selector and Factory. --- distribution/src/config/jvm.options | 2 + distribution/src/config/log4j2.properties | 64 ++++++- plugins/discovery-ec2/build.gradle | 4 +- plugins/repository-s3/build.gradle | 4 +- server/build.gradle | 4 + .../common/logging/MarkerLogger.java | 181 ++++++++++++++++++ .../common/logging/MarkerLoggerContext.java | 42 ++++ .../logging/MarkerLoggerContextFactory.java | 37 ++++ .../logging/MarkerLoggerContextSelector.java | 40 ++++ .../common/logging/NodeIdListener.java | 55 ++++++ .../logging/NodeIdPatternConverter.java | 83 ++++++++ .../java/org/elasticsearch/node/Node.java | 12 ++ .../elasticsearch/bootstrap/security.policy | 5 + 13 files changed, 524 insertions(+), 9 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java create mode 100644 server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContext.java create mode 100644 server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextFactory.java create mode 100644 server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextSelector.java create mode 100644 server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java create mode 100644 server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 2b30d6a87b4a1..7c9f0ac029271 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -120,3 +120,5 @@ ${error.file} # temporary workaround for C2 bug with JDK 10 on hardware with AVX-512 10-:-XX:UseAVX=2 + 
+-Dlog4j2.loggerContextFactory=org.elasticsearch.common.logging.MarkerLoggerContextFactory diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 6de21cd48f67b..107563b0541b0 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -7,14 +7,37 @@ logger.action.level = debug appender.console.type = Console appender.console.name = console appender.console.layout.type = PatternLayout -appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +appender.console.layout.pattern ={\ + "type": "console", \ + "timestamp": "%d{ISO8601}", \ + "level": "%-5p", \ + "class": "%c{1.}", \ + "cluster_name": ${sys:es.logs.cluster_name}", \ + "node_name": "%node_name", \ + "node_id": "%node_id_raw", \ + %marker\ + "message": "%.-10000m"\ + }%n appender.rolling.type = RollingFile appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log appender.rolling.layout.type = PatternLayout -appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n +appender.rolling.layout.pattern ={\ + "type": "rolling", \ + "timestamp": "%d{ISO8601}", \ + "level": "%-5p", \ + "class": "%c{1.}", \ + "cluster_name": ${sys:es.logs.cluster_name}", \ + "node_name": "%node_name", \ + "node_id": "%node_id_raw", \ + %marker\ + "message": "%.-10000m"\ + }%n appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz + + + appender.rolling.policies.type = Policies appender.rolling.policies.time.type = TimeBasedTriggeringPolicy appender.rolling.policies.time.interval = 1 @@ -38,7 +61,18 @@ appender.deprecation_rolling.type = RollingFile appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.fileName = 
${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log appender.deprecation_rolling.layout.type = PatternLayout -appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n +appender.deprecation_rolling.layout.pattern = {\ + "type": "rolling", \ + "timestamp": "%d{ISO8601}", \ + "level": "%-5p", \ + "class": "%c{1.}", \ + "cluster_name": ${sys:es.logs.cluster_name}", \ + "node_name": "%node_name", \ + "node_id": "%node_id_raw", \ + %marker\ + "message": "%.-10000m"\ + }%n + appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz appender.deprecation_rolling.policies.type = Policies appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy @@ -55,7 +89,17 @@ appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log appender.index_search_slowlog_rolling.layout.type = PatternLayout -appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n +appender.index_search_slowlog_rolling.layout.pattern = {\ + "type": "rolling", \ + "timestamp": "%d{ISO8601}", \ + "level": "%-5p", \ + "class": "%c{1.}", \ + "cluster_name": ${sys:es.logs.cluster_name}", \ + "node_name": "%node_name", \ + "node_id": "%node_id_raw", \ + %marker\ + "message": "%.-10000m"\ + }%n appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz appender.index_search_slowlog_rolling.policies.type = Policies appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy @@ -72,7 +116,17 @@ appender.index_indexing_slowlog_rolling.type = 
RollingFile appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log appender.index_indexing_slowlog_rolling.layout.type = PatternLayout -appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n +appender.index_indexing_slowlog_rolling.layout.pattern = {\ + "type": "rolling", \ + "timestamp": "%d{ISO8601}", \ + "level": "%-5p", \ + "class": "%c{1.}", \ + "cluster_name": ${sys:es.logs.cluster_name}", \ + "node_name": "%node_name", \ + "node_id": "%node_id_raw", \ + %marker\ + "message": "%.-10000m"\ + }%n appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz appender.index_indexing_slowlog_rolling.policies.type = Policies appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 96fe8bb3fded4..84d81b142ad86 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -33,8 +33,8 @@ dependencies { compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" - compile 'com.fasterxml.jackson.core:jackson-databind:2.6.7.1' - compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0' + compile "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + compile "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" } dependencyLicenses { diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index a584ec2767b41..8c01e8524702b 100644 --- a/plugins/repository-s3/build.gradle +++ 
b/plugins/repository-s3/build.gradle @@ -46,8 +46,8 @@ dependencies { compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - compile 'com.fasterxml.jackson.core:jackson-databind:2.6.7.1' - compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0' + compile "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + compile "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" compile "joda-time:joda-time:${versions.joda}" diff --git a/server/build.gradle b/server/build.gradle index c3a8958f3d8a8..68e6b30036dd2 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -123,6 +123,10 @@ dependencies { // repackaged jna with native bits linked against all elastic supported platforms compile "org.elasticsearch:jna:${versions.jna}" + compile "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + compile("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") + + if (!isEclipse && !isIdea) { java9Compile sourceSets.main.output } diff --git a/server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java b/server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java new file mode 100644 index 0000000000000..4e4980156b85b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java @@ -0,0 +1,181 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Marker; +import org.apache.logging.log4j.MarkerManager; +import org.apache.logging.log4j.core.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.message.Message; +import org.apache.logging.log4j.message.MessageFactory; +import org.apache.logging.log4j.spi.ExtendedLogger; +import org.apache.logging.log4j.util.StringBuilderFormattable; +import org.apache.logging.log4j.util.Strings; + +import java.util.WeakHashMap; +import java.util.concurrent.atomic.AtomicReference; + +public class MarkerLogger extends Logger { + + + /* + * We can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds a permanent reference to the marker; + * however, we have transient markers from index-level and shard-level components so this would effectively be a memory leak. Since we + * can not tie into the lifecycle of these components, we have to use a mechanism that enables garbage collection of such markers when + * they are no longer in use. + */ + private static final WeakHashMap markers = new WeakHashMap<>(); + + /** + * Return the size of the cached markers. This size can vary as markers are cached but collected during GC activity when a given prefix + * is no longer in use. + * + * @return the size of the cached markers + */ + static int markersSize() { + return markers.size(); + } + + /** + * The marker for this prefix logger. 
+ */ + private final Marker marker; + + /** + * Obtain the prefix for this prefix logger. This can be used to create a logger with the same prefix as this one. + * + * @return the prefix + */ + public String prefix() { + return marker.getName(); + } + + /** + * The constructor. + * //TODO consider what happens when UNKOWN_NODE_ID are not in use anymore + */ + protected MarkerLogger(Logger logger, AtomicReference nodeIdListener) { + super(logger.getContext(), logger.getName(), logger.getMessageFactory()); + String prefix = getPrefix(nodeIdListener); + + final Marker actualMarker; + // markers is not thread-safe, so we synchronize access + synchronized (markers) { + final Marker maybeMarker = markers.get(prefix); + if (maybeMarker == null) { + if (nodeIdListener.get() != null && Strings.isNotEmpty(nodeIdListener.get().getNodeId().get())) { + actualMarker = new MarkerManager.Log4jMarker(prefix); + } else { + actualMarker = new AtomicRefMarker(nodeIdListener); + } + + /* + * We must create a new instance here as otherwise the marker will hold a reference to the key in the weak hash map; as + * those references are held strongly, this would give a strong reference back to the key preventing them from ever being + * collected. This also guarantees that no other strong reference can be held to the prefix anywhere. 
+ */ + // noinspection RedundantStringConstructorCall + markers.put(new String(prefix), actualMarker); + } else { + actualMarker = maybeMarker; + } + } + this.marker = actualMarker; + } + + private String getPrefix(AtomicReference nodeIdListenerRef) { + NodeIdListener nodeIdListener = nodeIdListenerRef.get(); + if (nodeIdListener != null) { + AtomicReference nodeId = nodeIdListener.getNodeId(); + return nodeId.get(); + } + return NodeIdListener.UNKOWN_NODE_ID; + } + + + @Override + public void logMessage(final String fqcn, final Level level, final Marker marker, final Message message, final Throwable t) { + super.logMessage(fqcn, level, this.marker, message, t); + } + + static class AtomicRefMarker implements Marker, StringBuilderFormattable { + + private AtomicReference nodeId; + + /** + * Constructs a new Marker. + */ + public AtomicRefMarker(AtomicReference nodeId) { + this.nodeId = nodeId; + } + + @Override + public String getName() { + if (nodeId.get() != null && Strings.isNotEmpty(nodeId.get().getNodeId().get())) { + return nodeId.get().getNodeId().get(); + } + return NodeIdListener.UNKOWN_NODE_ID; + } + + @Override + public void formatTo(StringBuilder buffer) { + buffer.append(getName()); + } + //just using the marker for its name in logs since other methos are unimplemented + + @Override + public Marker addParents(Marker... markers) { + return null; + } + + @Override + public Marker[] getParents() { + return new Marker[0]; + } + + @Override + public boolean hasParents() { + return false; + } + + @Override + public boolean isInstanceOf(Marker m) { + return false; + } + + @Override + public boolean isInstanceOf(String name) { + return false; + } + + @Override + public boolean remove(Marker marker) { + return false; + } + + @Override + public Marker setParents(Marker... 
markers) { + return null; + } + + + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContext.java b/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContext.java new file mode 100644 index 0000000000000..c5f7ee738bee1 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContext.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.core.Logger; +import org.apache.logging.log4j.core.LoggerContext; + +import java.util.concurrent.atomic.AtomicReference; + +public class MarkerLoggerContext extends LoggerContext { + + private final AtomicReference nodeIdListener; + + public MarkerLoggerContext(String name, AtomicReference nodeIdListener) { + super(name); + this.nodeIdListener = nodeIdListener; + } + + @Override + public Logger getLogger(final String name) { + Logger logger = getLogger(name, null); + MarkerLogger marker = new MarkerLogger(logger,nodeIdListener); + return marker; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextFactory.java b/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextFactory.java new file mode 100644 index 0000000000000..952d92f7f345d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextFactory.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.core.impl.Log4jContextFactory; + +import java.util.concurrent.atomic.AtomicReference; + +public class MarkerLoggerContextFactory extends Log4jContextFactory { + + private static final AtomicReference nodeIdListener = new AtomicReference<>(); + + public MarkerLoggerContextFactory() { + super(new MarkerLoggerContextSelector(nodeIdListener)); + } + + public void setNodeIdListener(NodeIdListener nodeIdListener) { + MarkerLoggerContextFactory.nodeIdListener.set(nodeIdListener); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextSelector.java b/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextSelector.java new file mode 100644 index 0000000000000..bb072dca66247 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextSelector.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.selector.ClassLoaderContextSelector; + +import java.net.URI; +import java.util.concurrent.atomic.AtomicReference; + +public class MarkerLoggerContextSelector extends ClassLoaderContextSelector { + + private final AtomicReference nodeIdListener ; + + public MarkerLoggerContextSelector(AtomicReference nodeIdListener) { + this.nodeIdListener = nodeIdListener; + } + + @Override + protected LoggerContext createContext(String name, URI configLocation) { + return new MarkerLoggerContext("Default", nodeIdListener); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java new file mode 100644 index 0000000000000..b2e3fcb637e7e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.node.DiscoveryNode; + +import java.util.concurrent.atomic.AtomicReference; + +public class NodeIdListener implements ClusterStateListener { + + public static final String UNKOWN_NODE_ID = "";//formatIds("unkown_id","unkown_id"); + private static final Logger LOGGER = LogManager.getLogger(NodeIdListener.class); + private AtomicReference nodeId = new AtomicReference<>(UNKOWN_NODE_ID); + + + @Override + public void clusterChanged(ClusterChangedEvent event) { + DiscoveryNode localNode = event.state().getNodes().getLocalNode(); + String clusterUUID = event.state().getMetaData().clusterUUID(); + String nodeId = localNode.getId(); + boolean wasSet = this.nodeId.compareAndSet(UNKOWN_NODE_ID, formatIds(clusterUUID,nodeId)); + if (wasSet) { + LOGGER.info("received first cluster state update. Setting nodeId={}", nodeId); + } + } + + private static String formatIds(String clusterUUID, String nodeId) { + return String.format("\"cluster_uuid\": \"%s\", \"node_id2\": \"%s\", ",clusterUUID,nodeId); + } + + public AtomicReference getNodeId() { + return nodeId; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java new file mode 100644 index 0000000000000..8288c2a2c4a3d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.pattern.ConverterKeys; +import org.apache.logging.log4j.core.pattern.LogEventPatternConverter; +import org.apache.logging.log4j.core.pattern.PatternConverter; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.util.LazyInitializable; + +import java.util.concurrent.atomic.AtomicReference; + + +@Plugin(category = PatternConverter.CATEGORY, name = "NodeIdPatternConverter") +@ConverterKeys({"node_id_raw", "node_id_es"}) +public final class NodeIdPatternConverter extends LogEventPatternConverter implements ClusterStateListener { + /** + * The name of this node. + */ + private static final SetOnce NODE_ID = new SetOnce<>(); + AtomicReference nodeId = new AtomicReference<>(); + + private static LazyInitializable INSTANCE = new LazyInitializable(() -> new NodeIdPatternConverter()); + + /** + * Called by log4j2 to initialize this converter. 
+ */ + public static NodeIdPatternConverter newInstance(final String[] options) { + try { + return INSTANCE.getOrCompute(); + } catch (Exception e) { + return null; + } + } + + public NodeIdPatternConverter() { + super("NodeName", "node_id_raw"); + } + + @Override + public void format(LogEvent event, StringBuilder toAppendTo) { + toAppendTo.append(nodeId.get()); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + DiscoveryNode localNode = event.state().getNodes().getLocalNode(); + String id = localNode.getId(); + boolean wasSet = nodeId.compareAndSet(null, id); + if (wasSet) { + System.setProperty("node_id_raw", id); + //TODO deregister as no longer the id will change ? + } + } + + @Override + public String toString() { + return nodeId.get(); + } + +} diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index cf564ebb6d898..0f15aa57cb8c7 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -67,6 +67,9 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.MarkerLoggerContextFactory; +import org.elasticsearch.common.logging.NodeIdListener; +import org.elasticsearch.common.logging.NodeIdPatternConverter; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; @@ -184,6 +187,7 @@ * in order to use a {@link Client} to perform actions/operations against the cluster. 
*/ public class Node implements Closeable { + public static final Setting WRITE_PORTS_FILE_SETTING = Setting.boolSetting("node.portsfile", false, Property.NodeScope); public static final Setting NODE_DATA_SETTING = Setting.boolSetting("node.data", true, Property.NodeScope); @@ -437,6 +441,14 @@ protected Node( namedWriteableRegistry).stream()) .collect(Collectors.toList()); + NodeIdPatternConverter nodeIdPatternConverter = NodeIdPatternConverter.newInstance(new String[]{}); + clusterService.addListener(nodeIdPatternConverter); + + NodeIdListener nodeIdListener = new NodeIdListener(); + clusterService.addListener(nodeIdListener); + //TODO any other way to pass cluster state listener to context factory? + ((MarkerLoggerContextFactory)LogManager.getFactory()).setNodeIdListener(nodeIdListener); + ActionModule actionModule = new ActionModule(false, settings, clusterModule.getIndexNameExpressionResolver(), settingsModule.getIndexScopedSettings(), settingsModule.getClusterSettings(), settingsModule.getSettingsFilter(), threadPool, pluginsService.filterPlugins(ActionPlugin.class), client, circuitBreakerService, usageService); diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy index 4df99ef6f8836..9205ed45d33bb 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -55,6 +55,11 @@ grant codeBase "${codebase.plugin-classloader}" { //// Everything else: grant { + permission java.util.PropertyPermission "*", "write"; + + + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // checked by scripting engines, and before hacks and other issues in // third party code, to safeguard these against unprivileged code like scripts. 
From 3ea4695bc30ff30e435927199ddbda355c36f278 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 19 Dec 2018 15:44:47 +0100 Subject: [PATCH 02/64] revert back changes due to use of json layout --- distribution/src/config/log4j2.properties | 10 +++++----- plugins/discovery-ec2/build.gradle | 4 ++-- plugins/repository-s3/build.gradle | 4 ++-- server/build.gradle | 4 ---- .../common/logging/NodeIdPatternConverter.java | 2 +- .../org/elasticsearch/bootstrap/security.policy | 6 +----- 6 files changed, 11 insertions(+), 19 deletions(-) diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 107563b0541b0..f1349eeb36e0b 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -14,7 +14,7 @@ appender.console.layout.pattern ={\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_raw", \ + "node_id": "%node_id_sys_prop", \ %marker\ "message": "%.-10000m"\ }%n @@ -30,7 +30,7 @@ appender.rolling.layout.pattern ={\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_raw", \ + "node_id": "%node_id_sys_prop", \ %marker\ "message": "%.-10000m"\ }%n @@ -68,7 +68,7 @@ appender.deprecation_rolling.layout.pattern = {\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_raw", \ + "node_id": "%node_id_sys_prop", \ %marker\ "message": "%.-10000m"\ }%n @@ -96,7 +96,7 @@ appender.index_search_slowlog_rolling.layout.pattern = {\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_raw", \ + "node_id": "%node_id_sys_prop", \ %marker\ "message": "%.-10000m"\ }%n @@ -123,7 +123,7 @@ appender.index_indexing_slowlog_rolling.layout.pattern = {\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ 
"node_name": "%node_name", \ - "node_id": "%node_id_raw", \ + "node_id": "%node_id_sys_prop", \ %marker\ "message": "%.-10000m"\ }%n diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 84d81b142ad86..96fe8bb3fded4 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -33,8 +33,8 @@ dependencies { compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" - compile "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" - compile "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + compile 'com.fasterxml.jackson.core:jackson-databind:2.6.7.1' + compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0' } dependencyLicenses { diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 8c01e8524702b..a584ec2767b41 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -46,8 +46,8 @@ dependencies { compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - compile "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" - compile "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + compile 'com.fasterxml.jackson.core:jackson-databind:2.6.7.1' + compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0' compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" compile "joda-time:joda-time:${versions.joda}" diff --git a/server/build.gradle b/server/build.gradle index 68e6b30036dd2..c3a8958f3d8a8 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -123,10 +123,6 @@ dependencies { // repackaged jna with native bits linked against all elastic supported 
platforms compile "org.elasticsearch:jna:${versions.jna}" - compile "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" - compile("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") - - if (!isEclipse && !isIdea) { java9Compile sourceSets.main.output } diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java index 8288c2a2c4a3d..1c9a595165021 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java @@ -70,7 +70,7 @@ public void clusterChanged(ClusterChangedEvent event) { String id = localNode.getId(); boolean wasSet = nodeId.compareAndSet(null, id); if (wasSet) { - System.setProperty("node_id_raw", id); + System.setProperty("node_id_sys_prop", id); //TODO deregister as no longer the id will change ? } } diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy index 9205ed45d33bb..bdff0c3b594a2 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -55,11 +55,6 @@ grant codeBase "${codebase.plugin-classloader}" { //// Everything else: grant { - permission java.util.PropertyPermission "*", "write"; - - - permission java.lang.RuntimePermission "accessDeclaredMembers"; - permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // checked by scripting engines, and before hacks and other issues in // third party code, to safeguard these against unprivileged code like scripts. 
@@ -83,6 +78,7 @@ grant { permission java.util.PropertyPermission "solr.solr.home", "write"; permission java.util.PropertyPermission "solr.data.dir", "write"; permission java.util.PropertyPermission "solr.directoryFactory", "write"; + permission java.util.PropertyPermission "node_id_sys_prop", "write"; // set by ESTestCase to improve test reproducibility // TODO: set this with gradle or some other way that repros with seed? From 5593c46679deffbedf8d2e2b1648c1feeaa1cd9f Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 19 Dec 2018 16:26:31 +0100 Subject: [PATCH 03/64] remove unused improts --- .../org/elasticsearch/common/logging/MarkerLogger.java | 5 +---- .../common/logging/NodeIdPatternConverter.java | 9 +++------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java b/server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java index 4e4980156b85b..1c6b92f6e87f4 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java +++ b/server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java @@ -23,10 +23,7 @@ import org.apache.logging.log4j.Marker; import org.apache.logging.log4j.MarkerManager; import org.apache.logging.log4j.core.Logger; -import org.apache.logging.log4j.core.LoggerContext; import org.apache.logging.log4j.message.Message; -import org.apache.logging.log4j.message.MessageFactory; -import org.apache.logging.log4j.spi.ExtendedLogger; import org.apache.logging.log4j.util.StringBuilderFormattable; import org.apache.logging.log4j.util.Strings; @@ -123,7 +120,7 @@ static class AtomicRefMarker implements Marker, StringBuilderFormattable { /** * Constructs a new Marker. 
*/ - public AtomicRefMarker(AtomicReference nodeId) { + AtomicRefMarker(AtomicReference nodeId) { this.nodeId = nodeId; } diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java index 1c9a595165021..4bff79b84f8e0 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java @@ -24,7 +24,6 @@ import org.apache.logging.log4j.core.pattern.ConverterKeys; import org.apache.logging.log4j.core.pattern.LogEventPatternConverter; import org.apache.logging.log4j.core.pattern.PatternConverter; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -36,13 +35,11 @@ @Plugin(category = PatternConverter.CATEGORY, name = "NodeIdPatternConverter") @ConverterKeys({"node_id_raw", "node_id_es"}) public final class NodeIdPatternConverter extends LogEventPatternConverter implements ClusterStateListener { - /** - * The name of this node. - */ - private static final SetOnce NODE_ID = new SetOnce<>(); + AtomicReference nodeId = new AtomicReference<>(); - private static LazyInitializable INSTANCE = new LazyInitializable(() -> new NodeIdPatternConverter()); + private static LazyInitializable INSTANCE = + new LazyInitializable(() -> new NodeIdPatternConverter()); /** * Called by log4j2 to initialize this converter. 
From aeb686e343d1ab3b96fe5445d0cdbcb824d64271 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 19 Dec 2018 17:10:53 +0100 Subject: [PATCH 04/64] cleanup in plugin --- distribution/src/config/log4j2.properties | 19 +++++++++++++------ .../common/logging/NodeIdListener.java | 3 ++- .../logging/NodeIdPatternConverter.java | 9 +++++++-- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index f1349eeb36e0b..84445168b48db 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -14,7 +14,8 @@ appender.console.layout.pattern ={\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_sys_prop", \ + "node_id": "%node_id_from_plugin", \ + "node_id_sys_prop": "_${sys:node_id_sys_prop}" , \ %marker\ "message": "%.-10000m"\ }%n @@ -30,10 +31,12 @@ appender.rolling.layout.pattern ={\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_sys_prop", \ + "node_id": "%node_id_from_plugin", \ + "node_id_sys_prop": "%node_id_sys_prop", \ %marker\ "message": "%.-10000m"\ }%n + appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz @@ -68,7 +71,8 @@ appender.deprecation_rolling.layout.pattern = {\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_sys_prop", \ + "node_id": "%node_id_from_plugin", \ + "node_id_sys_prop": "%node_id_sys_prop", \ %marker\ "message": "%.-10000m"\ }%n @@ -96,7 +100,8 @@ appender.index_search_slowlog_rolling.layout.pattern = {\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_sys_prop", \ + "node_id": "%node_id_from_plugin", \ + "node_id_sys_prop": 
"%node_id_sys_prop", \ %marker\ "message": "%.-10000m"\ }%n @@ -118,15 +123,17 @@ appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys appender.index_indexing_slowlog_rolling.layout.type = PatternLayout appender.index_indexing_slowlog_rolling.layout.pattern = {\ "type": "rolling", \ - "timestamp": "%d{ISO8601}", \ + "timestamp": "%d{ISO8601}", \ "level": "%-5p", \ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_sys_prop", \ + "node_id": "%node_id_from_plugin", \ + "node_id_sys_prop": "%node_id_sys_prop", \ %marker\ "message": "%.-10000m"\ }%n + appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz appender.index_indexing_slowlog_rolling.policies.type = Policies appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java index b2e3fcb637e7e..5d727889f36b0 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.node.DiscoveryNode; +import java.util.Locale; import java.util.concurrent.atomic.AtomicReference; public class NodeIdListener implements ClusterStateListener { @@ -46,7 +47,7 @@ public void clusterChanged(ClusterChangedEvent event) { } private static String formatIds(String clusterUUID, String nodeId) { - return String.format("\"cluster_uuid\": \"%s\", \"node_id2\": \"%s\", ",clusterUUID,nodeId); + return String.format(Locale.ROOT,"\"cluster_uuid\": \"%s\", \"node_id2\": \"%s\", ",clusterUUID,nodeId); } public AtomicReference getNodeId() { diff --git 
a/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java index 4bff79b84f8e0..cd1c568251c47 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java @@ -27,13 +27,14 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.util.LazyInitializable; import java.util.concurrent.atomic.AtomicReference; @Plugin(category = PatternConverter.CATEGORY, name = "NodeIdPatternConverter") -@ConverterKeys({"node_id_raw", "node_id_es"}) +@ConverterKeys({"node_id_from_plugin"}) public final class NodeIdPatternConverter extends LogEventPatternConverter implements ClusterStateListener { AtomicReference nodeId = new AtomicReference<>(); @@ -53,7 +54,7 @@ public static NodeIdPatternConverter newInstance(final String[] options) { } public NodeIdPatternConverter() { - super("NodeName", "node_id_raw"); + super("NodeName", "node_id_from_plugin"); } @Override @@ -62,11 +63,15 @@ public void format(LogEvent event, StringBuilder toAppendTo) { } @Override + @SuppressForbidden(reason = "sets system property for logging variable propagation") public void clusterChanged(ClusterChangedEvent event) { DiscoveryNode localNode = event.state().getNodes().getLocalNode(); String id = localNode.getId(); + //option 2 boolean wasSet = nodeId.compareAndSet(null, id); + if (wasSet) { + //option1 System.setProperty("node_id_sys_prop", id); //TODO deregister as no longer the id will change ? 
} From fe87cc566e9209b3096fdaf850009c10ac3fae97 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 20 Dec 2018 14:51:49 +0100 Subject: [PATCH 05/64] adding new option with clustestatelistener, pattern converter and threadlocal removing worse options --- distribution/src/config/jvm.options | 2 - distribution/src/config/log4j2.properties | 29 +-- .../common/logging/MarkerLogger.java | 178 ------------------ .../common/logging/MarkerLoggerContext.java | 42 ----- .../logging/MarkerLoggerContextFactory.java | 37 ---- .../logging/MarkerLoggerContextSelector.java | 40 ---- ...er.java => NodeAndClusterIdConverter.java} | 53 +++--- .../common/logging/NodeIdListener.java | 56 ------ .../java/org/elasticsearch/node/Node.java | 13 +- .../elasticsearch/bootstrap/security.policy | 1 - 10 files changed, 42 insertions(+), 409 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java delete mode 100644 server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContext.java delete mode 100644 server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextFactory.java delete mode 100644 server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextSelector.java rename server/src/main/java/org/elasticsearch/common/logging/{NodeIdPatternConverter.java => NodeAndClusterIdConverter.java} (50%) delete mode 100644 server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 7c9f0ac029271..2b30d6a87b4a1 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -120,5 +120,3 @@ ${error.file} # temporary workaround for C2 bug with JDK 10 on hardware with AVX-512 10-:-XX:UseAVX=2 - --Dlog4j2.loggerContextFactory=org.elasticsearch.common.logging.MarkerLoggerContextFactory diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties 
index 84445168b48db..0510c3be65822 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -8,15 +8,13 @@ appender.console.type = Console appender.console.name = console appender.console.layout.type = PatternLayout appender.console.layout.pattern ={\ - "type": "console", \ + "type": "rolling", \ "timestamp": "%d{ISO8601}", \ "level": "%-5p", \ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_from_plugin", \ - "node_id_sys_prop": "_${sys:node_id_sys_prop}" , \ - %marker\ + %node_and_cluster_id \ "message": "%.-10000m"\ }%n @@ -31,16 +29,11 @@ appender.rolling.layout.pattern ={\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_from_plugin", \ - "node_id_sys_prop": "%node_id_sys_prop", \ - %marker\ + %node_and_cluster_id \ "message": "%.-10000m"\ }%n appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz - - - appender.rolling.policies.type = Policies appender.rolling.policies.time.type = TimeBasedTriggeringPolicy appender.rolling.policies.time.interval = 1 @@ -71,9 +64,7 @@ appender.deprecation_rolling.layout.pattern = {\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_from_plugin", \ - "node_id_sys_prop": "%node_id_sys_prop", \ - %marker\ + %node_and_cluster_id \ "message": "%.-10000m"\ }%n @@ -100,9 +91,7 @@ appender.index_search_slowlog_rolling.layout.pattern = {\ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_from_plugin", \ - "node_id_sys_prop": "%node_id_sys_prop", \ - %marker\ + %node_and_cluster_id \ "message": "%.-10000m"\ }%n appender.index_search_slowlog_rolling.filePattern = 
${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz @@ -123,14 +112,12 @@ appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys appender.index_indexing_slowlog_rolling.layout.type = PatternLayout appender.index_indexing_slowlog_rolling.layout.pattern = {\ "type": "rolling", \ - "timestamp": "%d{ISO8601}", \ + "timestamp": "%d{ISO8601}", \ "level": "%-5p", \ "class": "%c{1.}", \ "cluster_name": ${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ - "node_id": "%node_id_from_plugin", \ - "node_id_sys_prop": "%node_id_sys_prop", \ - %marker\ + %node_and_cluster_id \ "message": "%.-10000m"\ }%n @@ -144,4 +131,4 @@ appender.index_indexing_slowlog_rolling.strategy.max = 4 logger.index_indexing_slowlog.name = index.indexing.slowlog.index logger.index_indexing_slowlog.level = trace logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling -logger.index_indexing_slowlog.additivity = false +logger.index_indexing_slowlog.additivity = false \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java b/server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java deleted file mode 100644 index 1c6b92f6e87f4..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/logging/MarkerLogger.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.logging; - -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Marker; -import org.apache.logging.log4j.MarkerManager; -import org.apache.logging.log4j.core.Logger; -import org.apache.logging.log4j.message.Message; -import org.apache.logging.log4j.util.StringBuilderFormattable; -import org.apache.logging.log4j.util.Strings; - -import java.util.WeakHashMap; -import java.util.concurrent.atomic.AtomicReference; - -public class MarkerLogger extends Logger { - - - /* - * We can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds a permanent reference to the marker; - * however, we have transient markers from index-level and shard-level components so this would effectively be a memory leak. Since we - * can not tie into the lifecycle of these components, we have to use a mechanism that enables garbage collection of such markers when - * they are no longer in use. - */ - private static final WeakHashMap markers = new WeakHashMap<>(); - - /** - * Return the size of the cached markers. This size can vary as markers are cached but collected during GC activity when a given prefix - * is no longer in use. - * - * @return the size of the cached markers - */ - static int markersSize() { - return markers.size(); - } - - /** - * The marker for this prefix logger. - */ - private final Marker marker; - - /** - * Obtain the prefix for this prefix logger. This can be used to create a logger with the same prefix as this one. 
- * - * @return the prefix - */ - public String prefix() { - return marker.getName(); - } - - /** - * The constructor. - * //TODO consider what happens when UNKOWN_NODE_ID are not in use anymore - */ - protected MarkerLogger(Logger logger, AtomicReference nodeIdListener) { - super(logger.getContext(), logger.getName(), logger.getMessageFactory()); - String prefix = getPrefix(nodeIdListener); - - final Marker actualMarker; - // markers is not thread-safe, so we synchronize access - synchronized (markers) { - final Marker maybeMarker = markers.get(prefix); - if (maybeMarker == null) { - if (nodeIdListener.get() != null && Strings.isNotEmpty(nodeIdListener.get().getNodeId().get())) { - actualMarker = new MarkerManager.Log4jMarker(prefix); - } else { - actualMarker = new AtomicRefMarker(nodeIdListener); - } - - /* - * We must create a new instance here as otherwise the marker will hold a reference to the key in the weak hash map; as - * those references are held strongly, this would give a strong reference back to the key preventing them from ever being - * collected. This also guarantees that no other strong reference can be held to the prefix anywhere. - */ - // noinspection RedundantStringConstructorCall - markers.put(new String(prefix), actualMarker); - } else { - actualMarker = maybeMarker; - } - } - this.marker = actualMarker; - } - - private String getPrefix(AtomicReference nodeIdListenerRef) { - NodeIdListener nodeIdListener = nodeIdListenerRef.get(); - if (nodeIdListener != null) { - AtomicReference nodeId = nodeIdListener.getNodeId(); - return nodeId.get(); - } - return NodeIdListener.UNKOWN_NODE_ID; - } - - - @Override - public void logMessage(final String fqcn, final Level level, final Marker marker, final Message message, final Throwable t) { - super.logMessage(fqcn, level, this.marker, message, t); - } - - static class AtomicRefMarker implements Marker, StringBuilderFormattable { - - private AtomicReference nodeId; - - /** - * Constructs a new Marker. 
- */ - AtomicRefMarker(AtomicReference nodeId) { - this.nodeId = nodeId; - } - - @Override - public String getName() { - if (nodeId.get() != null && Strings.isNotEmpty(nodeId.get().getNodeId().get())) { - return nodeId.get().getNodeId().get(); - } - return NodeIdListener.UNKOWN_NODE_ID; - } - - @Override - public void formatTo(StringBuilder buffer) { - buffer.append(getName()); - } - //just using the marker for its name in logs since other methos are unimplemented - - @Override - public Marker addParents(Marker... markers) { - return null; - } - - @Override - public Marker[] getParents() { - return new Marker[0]; - } - - @Override - public boolean hasParents() { - return false; - } - - @Override - public boolean isInstanceOf(Marker m) { - return false; - } - - @Override - public boolean isInstanceOf(String name) { - return false; - } - - @Override - public boolean remove(Marker marker) { - return false; - } - - @Override - public Marker setParents(Marker... markers) { - return null; - } - - - } -} diff --git a/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContext.java b/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContext.java deleted file mode 100644 index c5f7ee738bee1..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContext.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.logging; - -import org.apache.logging.log4j.core.Logger; -import org.apache.logging.log4j.core.LoggerContext; - -import java.util.concurrent.atomic.AtomicReference; - -public class MarkerLoggerContext extends LoggerContext { - - private final AtomicReference nodeIdListener; - - public MarkerLoggerContext(String name, AtomicReference nodeIdListener) { - super(name); - this.nodeIdListener = nodeIdListener; - } - - @Override - public Logger getLogger(final String name) { - Logger logger = getLogger(name, null); - MarkerLogger marker = new MarkerLogger(logger,nodeIdListener); - return marker; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextFactory.java b/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextFactory.java deleted file mode 100644 index 952d92f7f345d..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextFactory.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.logging; - -import org.apache.logging.log4j.core.impl.Log4jContextFactory; - -import java.util.concurrent.atomic.AtomicReference; - -public class MarkerLoggerContextFactory extends Log4jContextFactory { - - private static final AtomicReference nodeIdListener = new AtomicReference<>(); - - public MarkerLoggerContextFactory() { - super(new MarkerLoggerContextSelector(nodeIdListener)); - } - - public void setNodeIdListener(NodeIdListener nodeIdListener) { - MarkerLoggerContextFactory.nodeIdListener.set(nodeIdListener); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextSelector.java b/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextSelector.java deleted file mode 100644 index bb072dca66247..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/logging/MarkerLoggerContextSelector.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.logging; - -import org.apache.logging.log4j.core.LoggerContext; -import org.apache.logging.log4j.core.selector.ClassLoaderContextSelector; - -import java.net.URI; -import java.util.concurrent.atomic.AtomicReference; - -public class MarkerLoggerContextSelector extends ClassLoaderContextSelector { - - private final AtomicReference nodeIdListener ; - - public MarkerLoggerContextSelector(AtomicReference nodeIdListener) { - this.nodeIdListener = nodeIdListener; - } - - @Override - protected LoggerContext createContext(String name, URI configLocation) { - return new MarkerLoggerContext("Default", nodeIdListener); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java similarity index 50% rename from server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java rename to server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index cd1c568251c47..20ee1bead9423 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeIdPatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -24,28 +24,30 @@ import org.apache.logging.log4j.core.pattern.ConverterKeys; import org.apache.logging.log4j.core.pattern.LogEventPatternConverter; import org.apache.logging.log4j.core.pattern.PatternConverter; +import org.apache.lucene.util.CloseableThreadLocal; import 
org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.util.LazyInitializable; +import java.util.Locale; import java.util.concurrent.atomic.AtomicReference; -@Plugin(category = PatternConverter.CATEGORY, name = "NodeIdPatternConverter") -@ConverterKeys({"node_id_from_plugin"}) -public final class NodeIdPatternConverter extends LogEventPatternConverter implements ClusterStateListener { +@Plugin(category = PatternConverter.CATEGORY, name = "NodeAndClusterIdConverter") +@ConverterKeys({"node_and_cluster_id"}) +public final class NodeAndClusterIdConverter extends LogEventPatternConverter implements ClusterStateListener { - AtomicReference nodeId = new AtomicReference<>(); + private static LazyInitializable INSTANCE = + new LazyInitializable(() -> new NodeAndClusterIdConverter()); - private static LazyInitializable INSTANCE = - new LazyInitializable(() -> new NodeIdPatternConverter()); + private AtomicReference nodeAndClusterIdsReference = new AtomicReference<>(); + private CloseableThreadLocal nodeAndClusterIds = new CloseableThreadLocal(); /** * Called by log4j2 to initialize this converter. 
*/ - public static NodeIdPatternConverter newInstance(final String[] options) { + public static NodeAndClusterIdConverter newInstance(final String[] options) { try { return INSTANCE.getOrCompute(); } catch (Exception e) { @@ -53,33 +55,40 @@ public static NodeIdPatternConverter newInstance(final String[] options) { } } - public NodeIdPatternConverter() { - super("NodeName", "node_id_from_plugin"); + public NodeAndClusterIdConverter() { + super("NodeName", "node_and_cluster_id"); } @Override public void format(LogEvent event, StringBuilder toAppendTo) { - toAppendTo.append(nodeId.get()); + if (nodeAndClusterIds.get() == null && nodeAndClusterIdsReference.get() != null) { + //received a value from the listener + toAppendTo.append(nodeAndClusterIdsReference.get()); + nodeAndClusterIds.set(nodeAndClusterIdsReference.get()); + } else if (nodeAndClusterIds.get() != null) { + //using local value + toAppendTo.append(nodeAndClusterIds.get()); + } else { + // no value received yet + toAppendTo.append(""); + } } @Override - @SuppressForbidden(reason = "sets system property for logging variable propagation") public void clusterChanged(ClusterChangedEvent event) { DiscoveryNode localNode = event.state().getNodes().getLocalNode(); - String id = localNode.getId(); - //option 2 - boolean wasSet = nodeId.compareAndSet(null, id); + String clusterUUID = event.state().getMetaData().clusterUUID(); + String nodeId = localNode.getId(); + boolean wasSet = nodeAndClusterIdsReference.compareAndSet(null, formatIds(clusterUUID,nodeId)); if (wasSet) { - //option1 - System.setProperty("node_id_sys_prop", id); - //TODO deregister as no longer the id will change ? + LOGGER.info("received first cluster state update. 
Setting nodeId={}", nodeId); + } } - @Override - public String toString() { - return nodeId.get(); + private static String formatIds(String clusterUUID, String nodeId) { + return String.format(Locale.ROOT, "\"cluster_uuid\": \"%s\", \"node_id\": \"%s\", ", clusterUUID, nodeId); } - } + diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java deleted file mode 100644 index 5d727889f36b0..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeIdListener.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.logging; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.node.DiscoveryNode; - -import java.util.Locale; -import java.util.concurrent.atomic.AtomicReference; - -public class NodeIdListener implements ClusterStateListener { - - public static final String UNKOWN_NODE_ID = "";//formatIds("unkown_id","unkown_id"); - private static final Logger LOGGER = LogManager.getLogger(NodeIdListener.class); - private AtomicReference nodeId = new AtomicReference<>(UNKOWN_NODE_ID); - - - @Override - public void clusterChanged(ClusterChangedEvent event) { - DiscoveryNode localNode = event.state().getNodes().getLocalNode(); - String clusterUUID = event.state().getMetaData().clusterUUID(); - String nodeId = localNode.getId(); - boolean wasSet = this.nodeId.compareAndSet(UNKOWN_NODE_ID, formatIds(clusterUUID,nodeId)); - if (wasSet) { - LOGGER.info("received first cluster state update. 
Setting nodeId={}", nodeId); - } - } - - private static String formatIds(String clusterUUID, String nodeId) { - return String.format(Locale.ROOT,"\"cluster_uuid\": \"%s\", \"node_id2\": \"%s\", ",clusterUUID,nodeId); - } - - public AtomicReference getNodeId() { - return nodeId; - } -} diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 0f15aa57cb8c7..2d101aafe69ae 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -67,9 +67,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.MarkerLoggerContextFactory; -import org.elasticsearch.common.logging.NodeIdListener; -import org.elasticsearch.common.logging.NodeIdPatternConverter; +import org.elasticsearch.common.logging.NodeAndClusterIdConverter; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; @@ -441,13 +439,8 @@ protected Node( namedWriteableRegistry).stream()) .collect(Collectors.toList()); - NodeIdPatternConverter nodeIdPatternConverter = NodeIdPatternConverter.newInstance(new String[]{}); - clusterService.addListener(nodeIdPatternConverter); - - NodeIdListener nodeIdListener = new NodeIdListener(); - clusterService.addListener(nodeIdListener); - //TODO any other way to pass cluster state listener to context factory? 
- ((MarkerLoggerContextFactory)LogManager.getFactory()).setNodeIdListener(nodeIdListener); + NodeAndClusterIdConverter nodeAndClusterIdConverter = NodeAndClusterIdConverter.newInstance(new String[]{}); + clusterService.addListener(nodeAndClusterIdConverter); ActionModule actionModule = new ActionModule(false, settings, clusterModule.getIndexNameExpressionResolver(), settingsModule.getIndexScopedSettings(), settingsModule.getClusterSettings(), settingsModule.getSettingsFilter(), diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy index bdff0c3b594a2..4df99ef6f8836 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -78,7 +78,6 @@ grant { permission java.util.PropertyPermission "solr.solr.home", "write"; permission java.util.PropertyPermission "solr.data.dir", "write"; permission java.util.PropertyPermission "solr.directoryFactory", "write"; - permission java.util.PropertyPermission "node_id_sys_prop", "write"; // set by ESTestCase to improve test reproducibility // TODO: set this with gradle or some other way that repros with seed? 
From 73ce3023ce29f71553b822588df581a350269188 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 27 Dec 2018 08:51:31 +0100 Subject: [PATCH 06/64] exception handling --- distribution/src/config/log4j2.properties | 12 ++++++------ .../metadata/MetaDataCreateIndexService.java | 3 ++- .../elasticsearch/rest/BytesRestResponse.java | 18 ++++++++++++++---- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 0510c3be65822..870cddc42b9a5 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -12,7 +12,7 @@ appender.console.layout.pattern ={\ "timestamp": "%d{ISO8601}", \ "level": "%-5p", \ "class": "%c{1.}", \ - "cluster_name": ${sys:es.logs.cluster_name}", \ + "cluster_name": "${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ %node_and_cluster_id \ "message": "%.-10000m"\ @@ -23,11 +23,11 @@ appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log appender.rolling.layout.type = PatternLayout appender.rolling.layout.pattern ={\ - "type": "rolling", \ + "type": "console", \ "timestamp": "%d{ISO8601}", \ "level": "%-5p", \ "class": "%c{1.}", \ - "cluster_name": ${sys:es.logs.cluster_name}", \ + "cluster_name": "${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ %node_and_cluster_id \ "message": "%.-10000m"\ @@ -62,7 +62,7 @@ appender.deprecation_rolling.layout.pattern = {\ "timestamp": "%d{ISO8601}", \ "level": "%-5p", \ "class": "%c{1.}", \ - "cluster_name": ${sys:es.logs.cluster_name}", \ + "cluster_name": "${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ %node_and_cluster_id \ "message": "%.-10000m"\ @@ -89,7 +89,7 @@ appender.index_search_slowlog_rolling.layout.pattern = {\ "timestamp": "%d{ISO8601}", \ "level": "%-5p", \ "class": "%c{1.}", \ - "cluster_name": ${sys:es.logs.cluster_name}", \ + 
"cluster_name": "${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ %node_and_cluster_id \ "message": "%.-10000m"\ @@ -115,7 +115,7 @@ appender.index_indexing_slowlog_rolling.layout.pattern = {\ "timestamp": "%d{ISO8601}", \ "level": "%-5p", \ "class": "%c{1.}", \ - "cluster_name": ${sys:es.logs.cluster_name}", \ + "cluster_name": "${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ %node_and_cluster_id \ "message": "%.-10000m"\ diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index d9120342cf4cd..c9f577e0d3f73 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -146,7 +146,8 @@ public static void validateIndexName(String index, ClusterState state) { throw new InvalidIndexNameException(index, "must be lowercase"); } if (state.routingTable().hasIndex(index)) { - throw new ResourceAlreadyExistsException(state.routingTable().index(index).getIndex()); + throw new NullPointerException("asdf"); +// throw new ResourceAlreadyExistsException(state.routingTable().index(index).getIndex()); } if (state.metaData().hasIndex(index)) { throw new ResourceAlreadyExistsException(state.metaData().index(index).getIndex()); diff --git a/server/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/server/src/main/java/org/elasticsearch/rest/BytesRestResponse.java index d6ed68bcafa35..4f95412a9247a 100644 --- a/server/src/main/java/org/elasticsearch/rest/BytesRestResponse.java +++ b/server/src/main/java/org/elasticsearch/rest/BytesRestResponse.java @@ -124,13 +124,23 @@ private static XContentBuilder build(RestChannel channel, RestStatus status, Exc if (params.paramAsBoolean("error_trace", !REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) { params = new 
ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), params); } else if (e != null) { - Supplier messageSupplier = () -> new ParameterizedMessage("path: {}, params: {}", - channel.request().rawPath(), channel.request().params()); + + + XContentBuilder builder = channel.newErrorBuilder().startObject(); + ToXContent.DelegatingMapParams generateStacktraceParams = + new ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), params); + ElasticsearchException.generateFailureXContent(builder, generateStacktraceParams, e, channel.detailedErrorsEnabled()); + builder.endObject(); + String exceptionStacktrace = BytesReference.bytes(builder).utf8ToString(); + + Supplier messageSupplier = () -> new ParameterizedMessage("\", \"path\": \"{}\", \"params\": \"{}\", \n\"details\":{} ", + channel.request().rawPath(), channel.request().params(), exceptionStacktrace); if (status.getStatus() < 500) { - SUPPRESSED_ERROR_LOGGER.debug(messageSupplier, e); + SUPPRESSED_ERROR_LOGGER.debug(messageSupplier); } else { - SUPPRESSED_ERROR_LOGGER.warn(messageSupplier, e); + + SUPPRESSED_ERROR_LOGGER.warn(messageSupplier); } } From 3f838c7e83704c1951400c717fa8fadb5c684c80 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 27 Dec 2018 15:06:35 +0100 Subject: [PATCH 07/64] json message and exception works --- distribution/src/config/log4j2.properties | 8 +- .../metadata/MetaDataCreateIndexService.java | 3 +- .../JsonThrowablePatternConverter.java | 105 ++++++++++++++++++ .../elasticsearch/rest/BytesRestResponse.java | 18 +-- 4 files changed, 116 insertions(+), 18 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 870cddc42b9a5..263f908b92fe1 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -7,6 
+7,7 @@ logger.action.level = debug appender.console.type = Console appender.console.name = console appender.console.layout.type = PatternLayout +appender.console.layout.alwaysWriteExceptions = false appender.console.layout.pattern ={\ "type": "rolling", \ "timestamp": "%d{ISO8601}", \ @@ -15,13 +16,15 @@ appender.console.layout.pattern ={\ "cluster_name": "${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ %node_and_cluster_id \ - "message": "%.-10000m"\ + "message": "%enc{%.-10000m}{JSON}" \ + %replace{%cEx}{",}{",\n} \ }%n appender.rolling.type = RollingFile appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log appender.rolling.layout.type = PatternLayout +appender.rolling.layout.alwaysWriteExceptions = false appender.rolling.layout.pattern ={\ "type": "console", \ "timestamp": "%d{ISO8601}", \ @@ -30,7 +33,8 @@ appender.rolling.layout.pattern ={\ "cluster_name": "${sys:es.logs.cluster_name}", \ "node_name": "%node_name", \ %node_and_cluster_id \ - "message": "%.-10000m"\ + "message": "%enc{%.-10000m}{JSON}" \ + %replace{%cEx}{",}{",\n} \ }%n appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index c9f577e0d3f73..d9120342cf4cd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -146,8 +146,7 @@ public static void validateIndexName(String index, ClusterState state) { throw new InvalidIndexNameException(index, "must be lowercase"); } if (state.routingTable().hasIndex(index)) { - throw new NullPointerException("asdf"); -// throw new 
ResourceAlreadyExistsException(state.routingTable().index(index).getIndex()); + throw new ResourceAlreadyExistsException(state.routingTable().index(index).getIndex()); } if (state.metaData().hasIndex(index)) { throw new ResourceAlreadyExistsException(state.metaData().index(index).getIndex()); diff --git a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java new file mode 100644 index 0000000000000..97874b9c874c0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache license, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the license for the specific language governing permissions and + * limitations under the license. 
+ */ +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.pattern.ConverterKeys; +import org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter; +import org.apache.logging.log4j.core.pattern.PatternConverter; +import org.apache.logging.log4j.core.pattern.ThrowablePatternConverter; +import org.apache.logging.log4j.util.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; + +import java.io.IOException; + +/** + * This is a modification of a {@link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter} + *

+ * Outputs the Throwable portion of the LoggingEvent as a Json formatted field with array + * "exception": [ stacktrace... ] + */ +@Plugin(name = "JsonThrowablePatternConverter", category = PatternConverter.CATEGORY) +@ConverterKeys({"cEx"}) +public final class JsonThrowablePatternConverter extends ThrowablePatternConverter { + + private final ExtendedThrowablePatternConverter throwablePatternConverter; + + /** + * Private constructor. + * + * @param config TODO + * @param options options, may be null. + */ + private JsonThrowablePatternConverter(final Configuration config, final String[] options) { + super("CUstomExtendedThrowable", "throwable", options, config); + this.throwablePatternConverter = ExtendedThrowablePatternConverter.newInstance(config, options); + } + + /** + * Gets an instance of the class. + * + * @param config The current Configuration. + * @param options pattern options, may be null. If first element is "short", + * only the first line of the throwable will be formatted. + * @return instance of class. 
+ */ + public static JsonThrowablePatternConverter newInstance(final Configuration config, final String[] options) { + return new JsonThrowablePatternConverter(config, options); + } + + /** + * {@inheritDoc} + */ + @Override + public void format(final LogEvent event, final StringBuilder toAppendTo) { + String stacktrace = formatStacktrace(event); + if (Strings.isNotEmpty(stacktrace)) { + String jsonException = formatJson(stacktrace); + + toAppendTo.append(", "); + toAppendTo.append(jsonException); + } + } + + private String formatStacktrace(LogEvent event) { + StringBuilder stringBuilder = new StringBuilder(); + throwablePatternConverter.format(event, stringBuilder); + return stringBuilder.toString(); + } + + private String formatJson(String extStackTrace) { + String[] split = extStackTrace.split(options.getSeparator() + "\t|" + options.getSeparator()); + try { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.value(split); + String stacktraceAsArray = BytesReference.bytes(builder).utf8ToString(); + return "\"exception\": " + stacktraceAsArray; + } catch (IOException e) { + e.printStackTrace();//TODO + } + return "error"; + } + + @Override + public boolean handlesThrowable() { + return true; + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/server/src/main/java/org/elasticsearch/rest/BytesRestResponse.java index 4f95412a9247a..d6ed68bcafa35 100644 --- a/server/src/main/java/org/elasticsearch/rest/BytesRestResponse.java +++ b/server/src/main/java/org/elasticsearch/rest/BytesRestResponse.java @@ -124,23 +124,13 @@ private static XContentBuilder build(RestChannel channel, RestStatus status, Exc if (params.paramAsBoolean("error_trace", !REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) { params = new ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), params); } else if (e != null) { - - - XContentBuilder builder = channel.newErrorBuilder().startObject(); - 
ToXContent.DelegatingMapParams generateStacktraceParams = - new ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), params); - ElasticsearchException.generateFailureXContent(builder, generateStacktraceParams, e, channel.detailedErrorsEnabled()); - builder.endObject(); - String exceptionStacktrace = BytesReference.bytes(builder).utf8ToString(); - - Supplier messageSupplier = () -> new ParameterizedMessage("\", \"path\": \"{}\", \"params\": \"{}\", \n\"details\":{} ", - channel.request().rawPath(), channel.request().params(), exceptionStacktrace); + Supplier messageSupplier = () -> new ParameterizedMessage("path: {}, params: {}", + channel.request().rawPath(), channel.request().params()); if (status.getStatus() < 500) { - SUPPRESSED_ERROR_LOGGER.debug(messageSupplier); + SUPPRESSED_ERROR_LOGGER.debug(messageSupplier, e); } else { - - SUPPRESSED_ERROR_LOGGER.warn(messageSupplier); + SUPPRESSED_ERROR_LOGGER.warn(messageSupplier, e); } } From e699d2788c6fac2d6d0e61ccd2a250b34d69c26a Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 27 Dec 2018 17:03:05 +0100 Subject: [PATCH 08/64] common wrapper layout class --- .../src/docker/config/log4j2.properties | 4 +- distribution/src/config/log4j2.properties | 68 ++------------- .../metadata/MetaDataCreateIndexService.java | 6 +- .../common/logging/ESJsonLayout.java | 86 +++++++++++++++++++ 4 files changed, 102 insertions(+), 62 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties index 9ad290ad82679..f3e70aa8091b8 100644 --- a/distribution/docker/src/docker/config/log4j2.properties +++ b/distribution/docker/src/docker/config/log4j2.properties @@ -2,8 +2,8 @@ status = error appender.console.type = Console appender.console.name = console -appender.console.layout.type = PatternLayout 
-appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +appender.console.layout.type = ESJsonLayout +#TODO test thad rootLogger.level = info rootLogger.appenderRef.console.ref = console diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 263f908b92fe1..229d6b4af9123 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -6,36 +6,13 @@ logger.action.level = debug appender.console.type = Console appender.console.name = console -appender.console.layout.type = PatternLayout -appender.console.layout.alwaysWriteExceptions = false -appender.console.layout.pattern ={\ - "type": "rolling", \ - "timestamp": "%d{ISO8601}", \ - "level": "%-5p", \ - "class": "%c{1.}", \ - "cluster_name": "${sys:es.logs.cluster_name}", \ - "node_name": "%node_name", \ - %node_and_cluster_id \ - "message": "%enc{%.-10000m}{JSON}" \ - %replace{%cEx}{",}{",\n} \ - }%n +appender.console.layout.type = ESJsonLayout + appender.rolling.type = RollingFile appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log -appender.rolling.layout.type = PatternLayout -appender.rolling.layout.alwaysWriteExceptions = false -appender.rolling.layout.pattern ={\ - "type": "console", \ - "timestamp": "%d{ISO8601}", \ - "level": "%-5p", \ - "class": "%c{1.}", \ - "cluster_name": "${sys:es.logs.cluster_name}", \ - "node_name": "%node_name", \ - %node_and_cluster_id \ - "message": "%enc{%.-10000m}{JSON}" \ - %replace{%cEx}{",}{",\n} \ - }%n +appender.rolling.layout.type = ESJsonLayout appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz appender.rolling.policies.type = Policies @@ -60,17 +37,8 @@ rootLogger.appenderRef.rolling.ref = rolling appender.deprecation_rolling.type = RollingFile appender.deprecation_rolling.name = 
deprecation_rolling appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log -appender.deprecation_rolling.layout.type = PatternLayout -appender.deprecation_rolling.layout.pattern = {\ - "type": "rolling", \ - "timestamp": "%d{ISO8601}", \ - "level": "%-5p", \ - "class": "%c{1.}", \ - "cluster_name": "${sys:es.logs.cluster_name}", \ - "node_name": "%node_name", \ - %node_and_cluster_id \ - "message": "%.-10000m"\ - }%n +appender.deprecation_rolling.layout.type = ESJsonLayout + appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz appender.deprecation_rolling.policies.type = Policies @@ -87,17 +55,8 @@ logger.deprecation.additivity = false appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log -appender.index_search_slowlog_rolling.layout.type = PatternLayout -appender.index_search_slowlog_rolling.layout.pattern = {\ - "type": "rolling", \ - "timestamp": "%d{ISO8601}", \ - "level": "%-5p", \ - "class": "%c{1.}", \ - "cluster_name": "${sys:es.logs.cluster_name}", \ - "node_name": "%node_name", \ - %node_and_cluster_id \ - "message": "%.-10000m"\ - }%n +appender.index_search_slowlog_rolling.layout.type = ESJsonLayout + appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz appender.index_search_slowlog_rolling.policies.type = Policies appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy @@ -113,17 +72,8 @@ logger.index_search_slowlog_rolling.additivity = false appender.index_indexing_slowlog_rolling.type = RollingFile appender.index_indexing_slowlog_rolling.name = 
index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log -appender.index_indexing_slowlog_rolling.layout.type = PatternLayout -appender.index_indexing_slowlog_rolling.layout.pattern = {\ - "type": "rolling", \ - "timestamp": "%d{ISO8601}", \ - "level": "%-5p", \ - "class": "%c{1.}", \ - "cluster_name": "${sys:es.logs.cluster_name}", \ - "node_name": "%node_name", \ - %node_and_cluster_id \ - "message": "%.-10000m"\ - }%n +appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout + appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz appender.index_indexing_slowlog_rolling.policies.type = Policies diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index d9120342cf4cd..5f1c84724cb6e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -146,7 +146,11 @@ public static void validateIndexName(String index, ClusterState state) { throw new InvalidIndexNameException(index, "must be lowercase"); } if (state.routingTable().hasIndex(index)) { - throw new ResourceAlreadyExistsException(state.routingTable().index(index).getIndex()); + String x = "{ \"asdf\": \"console\", \"asdfasdf\": \"2018-12-27T15:25:47,213\", \"asdfdd\": [ \"java.lang" + + ".NullPointerException: asdf\" ]}"; + logger.warn("test "+x); + throw new NullPointerException("asdf"); +// throw new ResourceAlreadyExistsException(state.routingTable().index(index).getIndex()); } if (state.metaData().hasIndex(index)) { throw new ResourceAlreadyExistsException(state.metaData().index(index).getIndex()); 
diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java new file mode 100644 index 0000000000000..ac6d687ebc3ad --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.config.plugins.PluginAttribute; +import org.apache.logging.log4j.core.config.plugins.PluginFactory; +import org.apache.logging.log4j.core.layout.AbstractStringLayout; +import org.apache.logging.log4j.core.layout.ByteBufferDestination; +import org.apache.logging.log4j.core.layout.PatternLayout; + +import java.nio.charset.Charset; +import java.util.Map; + +@Plugin(name = "ESJsonLayout", category = "Core", elementType = "layout", printObject = true) +public class ESJsonLayout extends AbstractStringLayout { + + private static final String PATTERN = "{" + + "\"type\": \"console\", " + + "\"timestamp\": \"%d{ISO8601}\", " + + "\"level\": \"%-5p\", " + + "\"class\": \"%c{1.}\", " + + "\"cluster_name\": \"${sys:es.logs.cluster_name}\", " + + "\"node_name\": \"%node_name\", " + + "%node_and_cluster_id " + + "\"message\": \"%enc{%.-10000m}{JSON}\" " + + "%replace{%cEx}{\",}{\",\n} " + + "}%n"; + + private final PatternLayout patternLayout; + + protected ESJsonLayout(boolean locationInfo, boolean properties, boolean complete, + Charset charset) { + super(charset); + this.patternLayout = PatternLayout.newBuilder() + .withPattern(PATTERN) + .withAlwaysWriteExceptions(false) + .build(); + } + + @PluginFactory + public static ESJsonLayout createLayout(@PluginAttribute("locationInfo") boolean locationInfo, + @PluginAttribute("properties") boolean properties, + @PluginAttribute("complete") boolean complete, + @PluginAttribute(value = "charset", defaultString = "UTF-8") Charset charset) { + return new ESJsonLayout(locationInfo, properties, complete, charset); + } + + @Override + public String toSerializable(final LogEvent event) { + return patternLayout.toSerializable(event); + } + + @Override + public Map getContentFormat() { + return patternLayout.getContentFormat(); + } + + @Override 
+ public void encode(final LogEvent event, final ByteBufferDestination destination) { + patternLayout.encode(event, destination); + } + + @Override + public String toString() { + return patternLayout.toString(); + } +} From af54281b60bdf2bad958088babcc8ee7aa2dbeab Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 31 Dec 2018 09:00:35 +0100 Subject: [PATCH 09/64] removed debugging code --- .../cluster/metadata/MetaDataCreateIndexService.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 5f1c84724cb6e..d9120342cf4cd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -146,11 +146,7 @@ public static void validateIndexName(String index, ClusterState state) { throw new InvalidIndexNameException(index, "must be lowercase"); } if (state.routingTable().hasIndex(index)) { - String x = "{ \"asdf\": \"console\", \"asdfasdf\": \"2018-12-27T15:25:47,213\", \"asdfdd\": [ \"java.lang" + - ".NullPointerException: asdf\" ]}"; - logger.warn("test "+x); - throw new NullPointerException("asdf"); -// throw new ResourceAlreadyExistsException(state.routingTable().index(index).getIndex()); + throw new ResourceAlreadyExistsException(state.routingTable().index(index).getIndex()); } if (state.metaData().hasIndex(index)) { throw new ResourceAlreadyExistsException(state.metaData().index(index).getIndex()); From 3bb69eb031b8ceb24859ef6b1892a2867299258c Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 31 Dec 2018 17:48:05 +0100 Subject: [PATCH 10/64] passing dignity test --- .../qa/die_with_dignity/DieWithDignityIT.java | 59 +++-- .../qa/die_with_dignity/JsonLogLine.java | 216 ++++++++++++++++++ 
.../qa/die_with_dignity/JsonLogs.java | 79 +++++++ .../common/logging/ESJsonLayout.java | 4 +- .../JsonThrowablePatternConverter.java | 2 +- .../logging/NodeAndClusterIdConverter.java | 2 +- 6 files changed, 340 insertions(+), 22 deletions(-) create mode 100644 qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogLine.java create mode 100644 qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 9250122025c0a..91c746f1a5012 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matcher; +import org.hamcrest.Matchers; import java.io.BufferedReader; import java.io.IOException; @@ -38,6 +39,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; @@ -85,28 +87,49 @@ public void testDieWithDignity() throws Exception { } }); + // parse the logs and ensure that Elasticsearch died with the expected cause - final List lines = Files.readAllLines(PathUtils.get(System.getProperty("log"))); - - final Iterator it = lines.iterator(); - - boolean fatalError = false; - boolean fatalErrorInThreadExiting = false; - - while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { - final String line = it.next(); - if 
(line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error")) { - fatalError = true; - } else if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]" - + " fatal error in thread \\[Thread-\\d+\\], exiting$")) { - fatalErrorInThreadExiting = true; - assertTrue(it.hasNext()); - assertThat(it.next(), equalTo("java.lang.OutOfMemoryError: die with dignity")); + Path path = PathUtils.get(System.getProperty("log")); + try(InputStream inputStream = Files.newInputStream(path)){ + JsonLogs jsonLogs = new JsonLogs(inputStream); + final Iterator it = jsonLogs.iterator(); + + boolean fatalError = false; + boolean fatalErrorInThreadExiting = false; + + while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { + final JsonLogLine line = it.next(); + System.out.println("xxx "+ isFatalError(line) +" " + isFatalErrorInThreadExiting(line)+" "+line); + if (isFatalError(line)) { + fatalError = true; + } else if (isFatalErrorInThreadExiting(line)) { + fatalErrorInThreadExiting = true; + assertThat(line.exceptions(), + hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity"))); + } } + + assertTrue(fatalError); + assertTrue(fatalErrorInThreadExiting); } - assertTrue(fatalError); - assertTrue(fatalErrorInThreadExiting); + } + + private boolean isFatalErrorInThreadExiting(JsonLogLine line) { +// return line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]" +// + " fatal error in thread \\[Thread-\\d+\\], exiting$"); + return line.level().trim().equals("ERROR") //TODO remove trim + && line.clazz().equals("o.e.b.ElasticsearchUncaughtExceptionHandler") + && line.nodeName().equals("node-0") + && line.message().matches("fatal error in thread \\[Thread-\\d+\\], exiting$"); + } + + private boolean isFatalError(JsonLogLine line) { +// return line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error"); + 
return line.level().trim().equals("ERROR") //TODO remove trim + && line.clazz().equals("o.e.ExceptionsHelper") + && line.nodeName().equals("node-0") + && line.message().contains("fatal error"); } @Override diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogLine.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogLine.java new file mode 100644 index 0000000000000..fc5c31e980fc0 --- /dev/null +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogLine.java @@ -0,0 +1,216 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.qa.die_with_dignity; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; + +import java.util.List; + +public class JsonLogLine { + public static final ConstructingObjectParser PARSER = createParser(false); + private String type; + private String timestamp; + private String level; + private String clazz; + private String clusterName; + private String nodeName; + private String clusterUuid; + private String nodeId; + private String message; + private List exceptions; + + private JsonLogLine(String type, String timestamp, String level, String clazz, String clusterName, + String nodeName, String clusterUuid, String nodeId, + String message, + List exceptions) { + this.type = type; + this.timestamp = timestamp; + this.level = level; + this.clazz = clazz; + this.clusterName = clusterName; + this.nodeName = nodeName; + this.clusterUuid = clusterUuid; + this.nodeId = nodeId; + this.message = message; + this.exceptions = exceptions; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder("JsonLogLine{"); + sb.append("type='").append(type).append('\''); + sb.append(", timestamp='").append(timestamp).append('\''); + sb.append(", level='").append(level).append('\''); + sb.append(", clazz='").append(clazz).append('\''); + sb.append(", clusterName='").append(clusterName).append('\''); + sb.append(", nodeName='").append(nodeName).append('\''); + sb.append(", clusterUuid='").append(clusterUuid).append('\''); + sb.append(", nodeId='").append(nodeId).append('\''); + sb.append(", message='").append(message).append('\''); + sb.append(", exceptions=").append(exceptions); + sb.append('}'); + return sb.toString(); + } + + public String type() { + return type; + } + + public String timestamp() { + return timestamp; + } + + public String level() { + return level; + } + + public String clazz() { + return clazz; + } + + public String clusterName() { + return 
clusterName; + } + + public String nodeName() { + return nodeName; + } + + public String clusterUuid() { + return clusterUuid; + } + + public String nodeId() { + return nodeId; + } + + public String message() { + return message; + } + + public List exceptions() { + return exceptions; + } + + @SuppressWarnings("unchecked") + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>("jsong_log_line_parser", ignoreUnknownFields, + a -> JsonLogLine.builder() + .withType((String) a[0]) + .withTimestamp((String) a[1]) + .withLevel((String) a[2]) + .withClazz((String) a[3]) + .withClusterName((String) a[4]) + .withNodeName((String) a[5]) + .withClusterUuid((String) a[6]) + .withNodeId((String) a[7]) + .withMessage((String) a[8]) + .withExceptions((List) a[9]) + .build() + ); + + parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("type")); + parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("timestamp")); + parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("level")); + parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("class")); + parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster.name")); + parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("node.name")); + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("cluster.uuid")); + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("node.id")); + parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("message")); + parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("exception")); + + + return parser; + } + + public static Builder builder() { + return new Builder(); + } + + static class Builder { + String type; + String 
timestamp; + String level; + String clazz; + String clusterName; + String nodeName; + String clusterUuid; + String nodeId; + String message; + List exception; + + public Builder withType(String type) { + this.type = type; + return this; + } + + public Builder withTimestamp(String timestamp) { + this.timestamp = timestamp; + return this; + } + + public Builder withLevel(String level) { + this.level = level; + return this; + } + + public Builder withClazz(String clazz) { + this.clazz = clazz; + return this; + } + + public Builder withClusterName(String clusterName) { + this.clusterName = clusterName; + return this; + } + + public Builder withNodeName(String nodeName) { + this.nodeName = nodeName; + return this; + } + + public Builder withClusterUuid(String clusterUuid) { + this.clusterUuid = clusterUuid; + return this; + } + + public Builder withNodeId(String nodeId) { + this.nodeId = nodeId; + return this; + } + + public Builder withMessage(String message) { + this.message = message; + return this; + } + + public Builder withExceptions(List exception) { + this.exception = exception; + return this; + } + + public JsonLogLine build() { + return new JsonLogLine(type, timestamp, level, clazz, clusterName, + nodeName, clusterUuid, nodeId, message, exception); + } + } +} diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java new file mode 100644 index 0000000000000..9aa11de015f08 --- /dev/null +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.qa.die_with_dignity; + +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Iterator; + +public class JsonLogs implements Iterable { + + private final XContentParser parser; + + public JsonLogs(String file) throws IOException { + this.parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + new FileInputStream(file)); + } + + public JsonLogs(InputStream inputStream) throws IOException { + this.parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + inputStream); + } + + public static JsonLogs from(Path path) throws IOException { + return new JsonLogs(Files.newInputStream(path)); + } + + @Override + public Iterator iterator() { + return new JsonIterator(); + } + + private class JsonIterator implements Iterator { + + @Override + public boolean hasNext() { + return !parser.isClosed(); + } + + @Override + public JsonLogLine next() { + JsonLogLine apply = 
JsonLogLine.PARSER.apply(parser, null); + nextToken(); + return apply; + } + + private void nextToken() { + try { + parser.nextToken(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java index ac6d687ebc3ad..bf00ddc946820 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -38,8 +38,8 @@ public class ESJsonLayout extends AbstractStringLayout { "\"timestamp\": \"%d{ISO8601}\", " + "\"level\": \"%-5p\", " + "\"class\": \"%c{1.}\", " + - "\"cluster_name\": \"${sys:es.logs.cluster_name}\", " + - "\"node_name\": \"%node_name\", " + + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " + + "\"node.name\": \"%node_name\", " + "%node_and_cluster_id " + "\"message\": \"%enc{%.-10000m}{JSON}\" " + "%replace{%cEx}{\",}{\",\n} " + diff --git a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java index 97874b9c874c0..5b010795e3e90 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java @@ -49,7 +49,7 @@ public final class JsonThrowablePatternConverter extends ThrowablePatternConvert * @param options options, may be null. 
*/ private JsonThrowablePatternConverter(final Configuration config, final String[] options) { - super("CUstomExtendedThrowable", "throwable", options, config); + super("CustomExtendedThrowable", "throwable", options, config); this.throwablePatternConverter = ExtendedThrowablePatternConverter.newInstance(config, options); } diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index 20ee1bead9423..93c197aa3b3b0 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -88,7 +88,7 @@ public void clusterChanged(ClusterChangedEvent event) { } private static String formatIds(String clusterUUID, String nodeId) { - return String.format(Locale.ROOT, "\"cluster_uuid\": \"%s\", \"node_id\": \"%s\", ", clusterUUID, nodeId); + return String.format(Locale.ROOT, "\"cluster.uuid\": \"%s\", \"node.id\": \"%s\", ", clusterUUID, nodeId); } } From 5e1f125c7c8195ddb1237d9a47e279c79aaec696 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 2 Jan 2019 09:46:16 +0100 Subject: [PATCH 11/64] fixing build --- .../qa/die_with_dignity/DieWithDignityIT.java | 1 - .../elasticsearch/qa/die_with_dignity/JsonLogs.java | 12 ------------ .../logging/JsonThrowablePatternConverter.java | 3 +-- 3 files changed, 1 insertion(+), 15 deletions(-) diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 91c746f1a5012..bf1b576b56d63 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -99,7 +99,6 @@ public void testDieWithDignity() 
throws Exception { while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { final JsonLogLine line = it.next(); - System.out.println("xxx "+ isFatalError(line) +" " + isFatalErrorInThreadExiting(line)+" "+line); if (isFatalError(line)) { fatalError = true; } else if (isFatalErrorInThreadExiting(line)) { diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java index 9aa11de015f08..224f1f4730ee6 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java @@ -24,31 +24,19 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; -import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.Iterator; public class JsonLogs implements Iterable { private final XContentParser parser; - public JsonLogs(String file) throws IOException { - this.parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - new FileInputStream(file)); - } - public JsonLogs(InputStream inputStream) throws IOException { this.parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, inputStream); } - public static JsonLogs from(Path path) throws IOException { - return new JsonLogs(Files.newInputStream(path)); - } - @Override public Iterator iterator() { return new JsonIterator(); diff --git a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java index 5b010795e3e90..1c6c40a4665fd 100644 --- 
a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java @@ -93,9 +93,8 @@ private String formatJson(String extStackTrace) { String stacktraceAsArray = BytesReference.bytes(builder).utf8ToString(); return "\"exception\": " + stacktraceAsArray; } catch (IOException e) { - e.printStackTrace();//TODO + throw new RuntimeException(e); } - return "error"; } @Override From 2c2653cd572723cce5a1c9b645cf8adb7b6fdf2d Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 2 Jan 2019 12:46:36 +0100 Subject: [PATCH 12/64] fix failing test --- .../qa/die_with_dignity/DieWithDignityIT.java | 9 ++-- .../common/logging}/JsonLogLine.java | 2 +- .../common/logging}/JsonLogs.java | 24 ++++++++--- .../logging/NodeNameInLogsIntegTestCase.java | 42 +++++++------------ 4 files changed, 38 insertions(+), 39 deletions(-) rename {qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity => test/framework/src/main/java/org/elasticsearch/common/logging}/JsonLogLine.java (99%) rename {qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity => test/framework/src/main/java/org/elasticsearch/common/logging}/JsonLogs.java (77%) diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index bf1b576b56d63..2aea41d977177 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -23,6 +23,8 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.client.Request; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.JsonLogLine; +import org.elasticsearch.common.logging.JsonLogs; import 
org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -90,8 +92,7 @@ public void testDieWithDignity() throws Exception { // parse the logs and ensure that Elasticsearch died with the expected cause Path path = PathUtils.get(System.getProperty("log")); - try(InputStream inputStream = Files.newInputStream(path)){ - JsonLogs jsonLogs = new JsonLogs(inputStream); + try(JsonLogs jsonLogs = new JsonLogs(path)){ final Iterator it = jsonLogs.iterator(); boolean fatalError = false; @@ -111,12 +112,9 @@ public void testDieWithDignity() throws Exception { assertTrue(fatalError); assertTrue(fatalErrorInThreadExiting); } - } private boolean isFatalErrorInThreadExiting(JsonLogLine line) { -// return line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]" -// + " fatal error in thread \\[Thread-\\d+\\], exiting$"); return line.level().trim().equals("ERROR") //TODO remove trim && line.clazz().equals("o.e.b.ElasticsearchUncaughtExceptionHandler") && line.nodeName().equals("node-0") @@ -124,7 +122,6 @@ private boolean isFatalErrorInThreadExiting(JsonLogLine line) { } private boolean isFatalError(JsonLogLine line) { -// return line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error"); return line.level().trim().equals("ERROR") //TODO remove trim && line.clazz().equals("o.e.ExceptionsHelper") && line.nodeName().equals("node-0") diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogLine.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java similarity index 99% rename from qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogLine.java rename to test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java index fc5c31e980fc0..088489556fa66 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogLine.java +++ 
b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.qa.die_with_dignity; +package org.elasticsearch.common.logging; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogs.java similarity index 77% rename from qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java rename to test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogs.java index 224f1f4730ee6..cc7dd4bbc82be 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/JsonLogs.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogs.java @@ -17,24 +17,33 @@ * under the License. */ -package org.elasticsearch.qa.die_with_dignity; +package org.elasticsearch.common.logging; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import java.io.BufferedReader; +import java.io.Closeable; import java.io.IOException; -import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.Iterator; -public class JsonLogs implements Iterable { +public class JsonLogs implements Iterable, Closeable { private final XContentParser parser; + private final BufferedReader reader; - public JsonLogs(InputStream inputStream) throws IOException { + public JsonLogs(BufferedReader reader) throws IOException { + this.reader = reader; this.parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - inputStream); + reader); + } + + public 
JsonLogs(Path path) throws IOException { + this(Files.newBufferedReader(path)); } @Override @@ -42,6 +51,11 @@ public Iterator iterator() { return new JsonIterator(); } + @Override + public void close() throws IOException { + reader.close(); + } + private class JsonIterator implements Iterator { @Override diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java index a8a142096e3dd..e8f4e21bd3e7c 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java @@ -26,10 +26,9 @@ import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.regex.Pattern; -import java.util.regex.Matcher; +import java.util.Iterator; -import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; /** * Tests that extend this class verify that the node name appears in the first @@ -59,32 +58,21 @@ public abstract class NodeNameInLogsIntegTestCase extends ESRestTestCase { protected abstract BufferedReader openReader(Path logFile); public void testNodeNameIsOnAllLinesOfLog() throws IOException { - BufferedReader logReader = openReader(getLogFile()); - try { - String line = logReader.readLine(); - assertNotNull("no logs at all?!", line); - Matcher m = Pattern.compile("\\] \\[([^\\]]+)\\] ").matcher(line); - if (false == m.find()) { - fail("Didn't see the node name in [" + line + "]"); - } - String nodeName = m.group(1); + try (JsonLogs jsonLogs = new JsonLogs(openReader(getLogFile()))) { + Iterator it = jsonLogs.iterator(); + + assertTrue("no logs at all?!", it.hasNext()); + JsonLogLine firstLine = it.next(); + + String nodeName = firstLine.nodeName(); assertThat(nodeName, nodeNameMatcher()); - int lineNumber = 1; - while (true) { 
- if (lineNumber < LINES_TO_CHECK) { - break; - } - line = logReader.readLine(); - if (line == null) { - break; // eof - } - lineNumber++; - assertThat(line, containsString("] [" + nodeName + "] ")); + + for (int lineNumber = 1; lineNumber < LINES_TO_CHECK && it.hasNext(); lineNumber++) { + JsonLogLine logLine = it.next(); + assertThat(logLine.nodeName(), equalTo(nodeName)); } - } finally { - logReader.close(); } } @@ -93,8 +81,8 @@ private Path getLogFile() { String logFileString = System.getProperty("tests.logfile"); if (null == logFileString) { fail("tests.logfile must be set to run this test. It is automatically " - + "set by gradle. If you must set it yourself then it should be the absolute path to the " - + "log file."); + + "set by gradle. If you must set it yourself then it should be the absolute path to the " + + "log file."); } return Paths.get(logFileString); } From 97d2cec0d68503a5d55062227fb895aa4db5ba8c Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 2 Jan 2019 15:34:11 +0100 Subject: [PATCH 13/64] fix failing test --- .../common/logging/ESJsonLayout.java | 4 +- .../xpack/ccr/FollowIndexIT.java | 47 ++++++++++--------- 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java index bf00ddc946820..22f3d5be195b9 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -35,8 +35,8 @@ public class ESJsonLayout extends AbstractStringLayout { private static final String PATTERN = "{" + "\"type\": \"console\", " + - "\"timestamp\": \"%d{ISO8601}\", " + - "\"level\": \"%-5p\", " + + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}\", " + + "\"level\": \"%p\", " + "\"class\": \"%c{1.}\", " + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " + "\"node.name\": \"%node_name\", " + diff 
--git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 26d7825198d8a..54f6869e6d79d 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -11,18 +11,22 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.JsonLogLine; +import org.elasticsearch.common.logging.JsonLogs; import org.elasticsearch.common.settings.Settings; +import org.hamcrest.FeatureMatcher; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; import java.io.IOException; -import java.nio.file.Files; -import java.util.Iterator; -import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.ObjectPath.eval; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.core.Is.is; public class FollowIndexIT extends ESCCRRestTestCase { @@ -80,25 +84,10 @@ public void testDowngradeRemoteClusterToBasic() throws Exception { // (does not work on windows...) 
if (Constants.WINDOWS == false) { assertBusy(() -> { - final List lines = Files.readAllLines(PathUtils.get(System.getProperty("log"))); - final Iterator it = lines.iterator(); - boolean warn = false; - while (it.hasNext()) { - final String line = it.next(); - if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " + - "failure occurred while fetching cluster state for auto follow pattern \\[test_pattern\\]")) { - warn = true; - break; - } + try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(System.getProperty("log")))) { + assertThat(jsonLogs, hasItem(autoFollowCoordinatorWarn())); } - assertTrue(warn); - assertTrue(it.hasNext()); - final String lineAfterWarn = it.next(); - assertThat( - lineAfterWarn, - equalTo("org.elasticsearch.ElasticsearchStatusException: " + - "can not fetch remote cluster state as the remote cluster [leader_cluster] is not licensed for [ccr]; " + - "the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]")); + }); } }); @@ -108,6 +97,22 @@ public void testDowngradeRemoteClusterToBasic() throws Exception { assertThat(e.getMessage(), containsString("the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]")); } + private Matcher autoFollowCoordinatorWarn() { + return new FeatureMatcher(Matchers.is(true),"autoFollowCoordinatorWarn","autoFollowCoordinatorWarn") { + + @Override + protected Boolean featureValueOf(JsonLogLine actual) { + return actual.level().equals("WARN") && + actual.clazz().equals("o.e.x.c.a.AutoFollowCoordinator") && + actual.nodeName().equals("node-0") && + actual.message().contains("failure occurred while fetching cluster state for auto follow pattern [test_pattern]") && + actual.exceptions().contains("org.elasticsearch.ElasticsearchStatusException: can not fetch remote cluster state " + + "as the remote cluster [leader_cluster] is not licensed for [ccr]; the license mode [BASIC]" + + " on cluster [leader_cluster] does not enable [ccr]"); + } + 
}; + } + private void createNewIndexAndIndexDocs(RestClient client, String index) throws IOException { Settings settings = Settings.builder() .put("index.soft_deletes.enabled", true) From fb2f53149a8aa0192efe7350f872863e02e90ba8 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 2 Jan 2019 17:29:38 +0100 Subject: [PATCH 14/64] fix import --- .../src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 54f6869e6d79d..cd399f019fc67 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -22,7 +22,6 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.ObjectPath.eval; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; From 20ee653330b816085c5104f8c6a792941628557a Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 2 Jan 2019 19:05:34 +0100 Subject: [PATCH 15/64] extending logs test --- ...T.java => NodeAndClusterInfoInLogsIT.java} | 4 +- ...T.java => NodeAndClusterInfoInLogsIT.java} | 4 +- .../common/logging/JsonLogs.java | 8 ++++ ...a => NodeAndClusterInfoIntegTestCase.java} | 38 ++++++++++++++----- 4 files changed, 40 insertions(+), 14 deletions(-) rename distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/{NodeNameInLogsIT.java => NodeAndClusterInfoInLogsIT.java} (91%) rename qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/{NodeNameInLogsIT.java => 
NodeAndClusterInfoInLogsIT.java} (91%) rename test/framework/src/main/java/org/elasticsearch/common/logging/{NodeNameInLogsIntegTestCase.java => NodeAndClusterInfoIntegTestCase.java} (63%) diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeAndClusterInfoInLogsIT.java similarity index 91% rename from distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java rename to distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeAndClusterInfoInLogsIT.java index a854e6e66462a..60dfb7478a6e8 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeAndClusterInfoInLogsIT.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.rest; -import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase; +import org.elasticsearch.common.logging.NodeAndClusterInfoIntegTestCase; import org.hamcrest.Matcher; import java.io.IOException; @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.is; -public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase { +public class NodeAndClusterInfoInLogsIT extends NodeAndClusterInfoIntegTestCase { @Override protected Matcher nodeNameMatcher() { return is("node-0"); diff --git a/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java b/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeAndClusterInfoInLogsIT.java similarity index 91% rename from qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java rename to qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeAndClusterInfoInLogsIT.java index 44d5bb6c900f5..5a7761be3e8d4 
100644 --- a/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java +++ b/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeAndClusterInfoInLogsIT.java @@ -19,7 +19,7 @@ package org.elasticsearch.unconfigured_node_name; -import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase; +import org.elasticsearch.common.logging.NodeAndClusterInfoIntegTestCase; import org.hamcrest.Matcher; import java.io.IOException; @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.not; -public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase { +public class NodeAndClusterInfoInLogsIT extends NodeAndClusterInfoIntegTestCase { @Override protected Matcher nodeNameMatcher() { return not(""); diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogs.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogs.java index cc7dd4bbc82be..5ad4603b89933 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogs.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogs.java @@ -30,6 +30,8 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Iterator; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; public class JsonLogs implements Iterable, Closeable { @@ -56,6 +58,10 @@ public void close() throws IOException { reader.close(); } + public Stream stream() { + return StreamSupport.stream(spliterator(), false); + } + private class JsonIterator implements Iterator { @Override @@ -78,4 +84,6 @@ private void nextToken() { } } } + + } diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/NodeAndClusterInfoIntegTestCase.java similarity index 63% rename from test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java 
rename to test/framework/src/main/java/org/elasticsearch/common/logging/NodeAndClusterInfoIntegTestCase.java index e8f4e21bd3e7c..4d4ecee25aabc 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/NodeAndClusterInfoIntegTestCase.java @@ -26,9 +26,10 @@ import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.Iterator; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.isEmptyOrNullString; +import static org.hamcrest.Matchers.not; /** * Tests that extend this class verify that the node name appears in the first @@ -37,7 +38,7 @@ * DEBUG or TRACE level logging. Those nodes log a few lines before they * resolve the node name. */ -public abstract class NodeNameInLogsIntegTestCase extends ESRestTestCase { +public abstract class NodeAndClusterInfoIntegTestCase extends ESRestTestCase { /** * Number of lines in the log file to check for the node name. 
We don't * just check the entire log file because it could be quite long and @@ -59,23 +60,40 @@ public abstract class NodeNameInLogsIntegTestCase extends ESRestTestCase { public void testNodeNameIsOnAllLinesOfLog() throws IOException { try (JsonLogs jsonLogs = new JsonLogs(openReader(getLogFile()))) { - Iterator it = jsonLogs.iterator(); - assertTrue("no logs at all?!", it.hasNext()); - JsonLogLine firstLine = it.next(); + JsonLogLine firstLine = null; + String expectedNodeId = null; + String expectedClusterId = null; + for (JsonLogLine jsonLogLine : jsonLogs) { + if (firstLine == null) { + firstLine = jsonLogLine; + } + if (jsonLogLine.nodeId() != null && expectedNodeId == null) { + //nodeId and clusterid are set together + expectedNodeId = jsonLogLine.nodeId(); + expectedClusterId = jsonLogLine.clusterUuid(); + } - String nodeName = firstLine.nodeName(); - assertThat(nodeName, nodeNameMatcher()); + assertThat(jsonLogLine.type(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.timestamp(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.level(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.clazz(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.message(), not(isEmptyOrNullString())); - for (int lineNumber = 1; lineNumber < LINES_TO_CHECK && it.hasNext(); lineNumber++) { - JsonLogLine logLine = it.next(); - assertThat(logLine.nodeName(), equalTo(nodeName)); + //all lines should have the same nodeName and clusterName + assertThat(jsonLogLine.nodeName(), equalTo(firstLine.nodeName())); + assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName())); + + //initially empty, but once found all lines shoudl have same nodeId and clusterid + assertThat(jsonLogLine.nodeId(), equalTo(expectedNodeId)); + assertThat(jsonLogLine.clusterUuid(), equalTo(expectedClusterId)); } } } + @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") private Path getLogFile() { String logFileString = 
System.getProperty("tests.logfile"); From 55dc6eb44cbf1a4e48b7446f2d28d714d8b3a6ae Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 3 Jan 2019 17:51:58 +0100 Subject: [PATCH 16/64] fix parsing and exception formatting --- .../qa/die_with_dignity/DieWithDignityIT.java | 2 +- .../common/logging/ESJsonLayout.java | 2 +- .../JsonThrowablePatternConverter.java | 27 ++++---- .../JsonThrowablePatternConverterTests.java | 68 +++++++++++++++++++ .../common/logging/JsonLogLine.java | 24 +++---- .../NodeAndClusterInfoIntegTestCase.java | 2 +- .../xpack/ccr/FollowIndexIT.java | 2 +- 7 files changed, 99 insertions(+), 28 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 2aea41d977177..eeb92ed3032cd 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -104,7 +104,7 @@ public void testDieWithDignity() throws Exception { fatalError = true; } else if (isFatalErrorInThreadExiting(line)) { fatalErrorInThreadExiting = true; - assertThat(line.exceptions(), + assertThat(line.stacktrace(), hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity"))); } } diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java index 22f3d5be195b9..77b8b9f543eff 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -42,7 +42,7 @@ public class ESJsonLayout extends AbstractStringLayout { "\"node.name\": \"%node_name\", " + 
"%node_and_cluster_id " + "\"message\": \"%enc{%.-10000m}{JSON}\" " + - "%replace{%cEx}{\",}{\",\n} " + + "%exceptionAsJson " + "}%n"; private final PatternLayout patternLayout; diff --git a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java index 1c6c40a4665fd..4dd240a8167ed 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java @@ -16,6 +16,7 @@ */ package org.elasticsearch.common.logging; +import com.fasterxml.jackson.core.io.JsonStringEncoder; import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.config.Configuration; import org.apache.logging.log4j.core.config.plugins.Plugin; @@ -24,11 +25,8 @@ import org.apache.logging.log4j.core.pattern.PatternConverter; import org.apache.logging.log4j.core.pattern.ThrowablePatternConverter; import org.apache.logging.log4j.util.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import java.io.IOException; +import java.util.StringJoiner; /** * This is a modification of a @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter @@ -37,9 +35,10 @@ * "exception": [ stacktrace... 
] */ @Plugin(name = "JsonThrowablePatternConverter", category = PatternConverter.CATEGORY) -@ConverterKeys({"cEx"}) +@ConverterKeys({"exceptionAsJson"}) public final class JsonThrowablePatternConverter extends ThrowablePatternConverter { + private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private final ExtendedThrowablePatternConverter throwablePatternConverter; /** @@ -87,16 +86,20 @@ private String formatStacktrace(LogEvent event) { private String formatJson(String extStackTrace) { String[] split = extStackTrace.split(options.getSeparator() + "\t|" + options.getSeparator()); - try { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.value(split); - String stacktraceAsArray = BytesReference.bytes(builder).utf8ToString(); - return "\"exception\": " + stacktraceAsArray; - } catch (IOException e) { - throw new RuntimeException(e); + + StringJoiner stringJoiner = new StringJoiner(",\n", "\n\"stacktrace\": [", "]"); + for (String line : split) { + stringJoiner.add(wrapAsJson(line)); } + return stringJoiner.toString(); + } + + private String wrapAsJson(String line) { + char[] chars = JSON_STRING_ENCODER.quoteAsString(line); + return "\"" + new String(chars) + "\""; } + @Override public boolean handlesThrowable() { return true; diff --git a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java new file mode 100644 index 0000000000000..aaecca0a6e69c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -0,0 +1,68 @@ +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.impl.Log4jLogEvent; +import org.apache.logging.log4j.message.SimpleMessage; +import org.elasticsearch.test.ESTestCase; + +import 
java.io.BufferedReader; +import java.io.IOException; +import java.io.StringReader; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +public class JsonThrowablePatternConverterTests extends ESTestCase { + + //TODO To be extended and cleaned up + public void testStacktraceWithJson() throws IOException { + LogManager.getLogger().info("asdf"); + + String json = "{\n" + + " \"terms\" : {\n" + + " \"user\" : [\n" + + " \"u1\",\n" + + " \"u2\",\n" + + " \"u3\"\n" + + " ],\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + LogEvent event = Log4jLogEvent.newBuilder() + .setMessage(new SimpleMessage("message")) + .setThrown(new Exception(json)) + .build(); + + + JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null); + + StringBuilder builder = new StringBuilder(); + converter.format(event, builder); + + String x = "{\"type\": \"console\", \"timestamp\": \"2019-01-03T16:30:53,058+0100\", \"level\": \"DEBUG\", \"class\": \"o.e.a.s" + + ".TransportSearchAction\", \"cluster.name\": \"clustername\", \"node.name\": \"node-0\", \"cluster.uuid\": " + + "\"OG5MkvOrR9azuClJhWvy6Q\", \"node.id\": \"VTShUqmcQG6SzeKY5nn7qA\", \"message\": \"msg msg\" " + builder.toString() + "}"; + JsonLogs jsonLogs = new 
JsonLogs(new BufferedReader(new StringReader(x))); + +// for (JsonLogLine jsonLogLine : jsonLogs) { +// assertThat +// } + } + +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java index 088489556fa66..587ce3b00119a 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java @@ -35,12 +35,12 @@ public class JsonLogLine { private String clusterUuid; private String nodeId; private String message; - private List exceptions; + private List stacktrace; private JsonLogLine(String type, String timestamp, String level, String clazz, String clusterName, String nodeName, String clusterUuid, String nodeId, String message, - List exceptions) { + List stacktrace) { this.type = type; this.timestamp = timestamp; this.level = level; @@ -50,7 +50,7 @@ private JsonLogLine(String type, String timestamp, String level, String clazz, S this.clusterUuid = clusterUuid; this.nodeId = nodeId; this.message = message; - this.exceptions = exceptions; + this.stacktrace = stacktrace; } @Override @@ -65,7 +65,7 @@ public String toString() { sb.append(", clusterUuid='").append(clusterUuid).append('\''); sb.append(", nodeId='").append(nodeId).append('\''); sb.append(", message='").append(message).append('\''); - sb.append(", exceptions=").append(exceptions); + sb.append(", stacktrace=").append(stacktrace); sb.append('}'); return sb.toString(); } @@ -106,8 +106,8 @@ public String message() { return message; } - public List exceptions() { - return exceptions; + public List stacktrace() { + return stacktrace; } @SuppressWarnings("unchecked") @@ -123,7 +123,7 @@ private static ConstructingObjectParser createParser(boolean .withClusterUuid((String) a[6]) .withNodeId((String) a[7]) .withMessage((String) a[8]) - .withExceptions((List) a[9]) + .withStacktrace((List) a[9]) 
.build() ); @@ -136,7 +136,7 @@ private static ConstructingObjectParser createParser(boolean parser.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("cluster.uuid")); parser.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("node.id")); parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("message")); - parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("exception")); + parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("stacktrace")); return parser; @@ -156,7 +156,7 @@ static class Builder { String clusterUuid; String nodeId; String message; - List exception; + List stacktrace; public Builder withType(String type) { this.type = type; @@ -203,14 +203,14 @@ public Builder withMessage(String message) { return this; } - public Builder withExceptions(List exception) { - this.exception = exception; + public Builder withStacktrace(List stacktrace) { + this.stacktrace = stacktrace; return this; } public JsonLogLine build() { return new JsonLogLine(type, timestamp, level, clazz, clusterName, - nodeName, clusterUuid, nodeId, message, exception); + nodeName, clusterUuid, nodeId, message, stacktrace); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeAndClusterInfoIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/NodeAndClusterInfoIntegTestCase.java index 4d4ecee25aabc..7340302f8e3d4 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeAndClusterInfoIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/NodeAndClusterInfoIntegTestCase.java @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.not; /** - * Tests that extend this class verify that the node name appears in the first + * Tests that extend this class verify that the node name, cluster name, nodeId, clusterId appear in the 
first * few log lines on startup. Note that this won't pass for clusters that don't * the node name defined in elasticsearch.yml and start with * DEBUG or TRACE level logging. Those nodes log a few lines before they diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index cd399f019fc67..54768c2e61355 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -105,7 +105,7 @@ protected Boolean featureValueOf(JsonLogLine actual) { actual.clazz().equals("o.e.x.c.a.AutoFollowCoordinator") && actual.nodeName().equals("node-0") && actual.message().contains("failure occurred while fetching cluster state for auto follow pattern [test_pattern]") && - actual.exceptions().contains("org.elasticsearch.ElasticsearchStatusException: can not fetch remote cluster state " + + actual.stacktrace().contains("org.elasticsearch.ElasticsearchStatusException: can not fetch remote cluster state " + "as the remote cluster [leader_cluster] is not licensed for [ccr]; the license mode [BASIC]" + " on cluster [leader_cluster] does not enable [ccr]"); } From b7ad6507c02a7261b51ca29f78b4cf5ba6985e06 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 3 Jan 2019 18:25:29 +0100 Subject: [PATCH 17/64] fix failing test --- .../qa/die_with_dignity/DieWithDignityIT.java | 4 +- .../JsonThrowablePatternConverter.java | 5 +-- .../JsonThrowablePatternConverterTests.java | 42 ++++++++++++++----- 3 files changed, 35 insertions(+), 16 deletions(-) diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java 
b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index eeb92ed3032cd..009228ebb7a46 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -115,14 +115,14 @@ public void testDieWithDignity() throws Exception { } private boolean isFatalErrorInThreadExiting(JsonLogLine line) { - return line.level().trim().equals("ERROR") //TODO remove trim + return line.level().equals("ERROR") && line.clazz().equals("o.e.b.ElasticsearchUncaughtExceptionHandler") && line.nodeName().equals("node-0") && line.message().matches("fatal error in thread \\[Thread-\\d+\\], exiting$"); } private boolean isFatalError(JsonLogLine line) { - return line.level().trim().equals("ERROR") //TODO remove trim + return line.level().equals("ERROR") && line.clazz().equals("o.e.ExceptionsHelper") && line.nodeName().equals("node-0") && line.message().contains("fatal error"); diff --git a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java index 4dd240a8167ed..47177db243cee 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java @@ -95,11 +95,10 @@ private String formatJson(String extStackTrace) { } private String wrapAsJson(String line) { - char[] chars = JSON_STRING_ENCODER.quoteAsString(line); - return "\"" + new String(chars) + "\""; + byte[] bytes = JSON_STRING_ENCODER.quoteAsUTF8(line); + return "\"" + new String(bytes) + "\""; } - @Override public boolean handlesThrowable() { return true; diff --git a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java 
b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java index aaecca0a6e69c..abcfcd03bb39c 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -9,6 +9,9 @@ import java.io.BufferedReader; import java.io.IOException; import java.io.StringReader; + +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.isEmptyString; /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -30,7 +33,18 @@ public class JsonThrowablePatternConverterTests extends ESTestCase { - //TODO To be extended and cleaned up + JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null); + + public void testNoStacktrace() { + LogEvent event = Log4jLogEvent.newBuilder() + .build(); + + String result = format(event); + + assertThat(result, isEmptyString()); + } + + public void testStacktraceWithJson() throws IOException { LogManager.getLogger().info("asdf"); @@ -49,20 +63,26 @@ public void testStacktraceWithJson() throws IOException { .setThrown(new Exception(json)) .build(); + String result = format(event); - JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null); + String sampleLine = "{\"type\": \"console\", \"timestamp\": \"2019-01-03T16:30:53,058+0100\", \"level\": \"DEBUG\", \"class\": " + + "\"o.e.a.s.TransportSearchAction\", \"cluster.name\": \"clustername\", \"node.name\": \"node-0\", \"cluster.uuid\": " + + "\"OG5MkvOrR9azuClJhWvy6Q\", \"node.id\": \"VTShUqmcQG6SzeKY5nn7qA\", \"message\": \"msg msg\" " + result + "}"; - StringBuilder builder = new StringBuilder(); - converter.format(event, builder); + //confirms exception is correctly parsed + JsonLogs jsonLogs = new JsonLogs(new BufferedReader(new StringReader(sampleLine))); 
+ + JsonLogLine jsonLogLine = jsonLogs.stream().findFirst() + .orElseThrow(() -> new AssertionError("no logs parsed")); + + assertThat("stacktrace should formatted in multiple lines", jsonLogLine.stacktrace().size(), greaterThan(0)); + } - String x = "{\"type\": \"console\", \"timestamp\": \"2019-01-03T16:30:53,058+0100\", \"level\": \"DEBUG\", \"class\": \"o.e.a.s" + - ".TransportSearchAction\", \"cluster.name\": \"clustername\", \"node.name\": \"node-0\", \"cluster.uuid\": " + - "\"OG5MkvOrR9azuClJhWvy6Q\", \"node.id\": \"VTShUqmcQG6SzeKY5nn7qA\", \"message\": \"msg msg\" " + builder.toString() + "}"; - JsonLogs jsonLogs = new JsonLogs(new BufferedReader(new StringReader(x))); -// for (JsonLogLine jsonLogLine : jsonLogs) { -// assertThat -// } + private String format(LogEvent event) { + StringBuilder builder = new StringBuilder(); + converter.format(event, builder); + return builder.toString(); } } From 39a1ef7f5cbf83e59378a6ff70f22e5fb12fa7b4 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 3 Jan 2019 18:48:47 +0100 Subject: [PATCH 18/64] fix checkstyle --- .../logging/JsonThrowablePatternConverter.java | 3 ++- .../logging/JsonThrowablePatternConverterTests.java | 12 +++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java index 47177db243cee..ea9b14e60105d 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java @@ -26,6 +26,7 @@ import org.apache.logging.log4j.core.pattern.ThrowablePatternConverter; import org.apache.logging.log4j.util.Strings; +import java.nio.charset.Charset; import java.util.StringJoiner; /** @@ -96,7 +97,7 @@ private String formatJson(String extStackTrace) { private String wrapAsJson(String line) 
{ byte[] bytes = JSON_STRING_ENCODER.quoteAsUTF8(line); - return "\"" + new String(bytes) + "\""; + return "\"" + new String(bytes, Charset.defaultCharset()) + "\""; } @Override diff --git a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java index abcfcd03bb39c..de7d184b76cb0 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -10,7 +10,7 @@ import java.io.IOException; import java.io.StringReader; -import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isEmptyString; /* * Licensed to Elasticsearch under one or more contributor @@ -58,9 +58,10 @@ public void testStacktraceWithJson() throws IOException { " \"boost\" : 1.0\n" + " }\n" + "}"; + Exception thrown = new Exception(json); LogEvent event = Log4jLogEvent.newBuilder() .setMessage(new SimpleMessage("message")) - .setThrown(new Exception(json)) + .setThrown(thrown) .build(); String result = format(event); @@ -75,14 +76,15 @@ public void testStacktraceWithJson() throws IOException { JsonLogLine jsonLogLine = jsonLogs.stream().findFirst() .orElseThrow(() -> new AssertionError("no logs parsed")); - assertThat("stacktrace should formatted in multiple lines", jsonLogLine.stacktrace().size(), greaterThan(0)); + int jsonLength = json.split("\n").length; + int stacktraceLength = thrown.getStackTrace().length; + assertThat("stacktrace should formatted in multiple lines", + jsonLogLine.stacktrace().size(), equalTo(jsonLength + stacktraceLength)); } - private String format(LogEvent event) { StringBuilder builder = new StringBuilder(); converter.format(event, builder); return builder.toString(); } - } From dec2024b277e6a8034cdb3bfb2bac726f4007027 Mon Sep 17 
00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 4 Jan 2019 11:19:59 +0100 Subject: [PATCH 19/64] small cleanup --- ...sIT.java => JsonLogsFormatAndParseIT.java} | 8 +- .../qa/die_with_dignity/DieWithDignityIT.java | 4 +- ...sIT.java => JsonLogsFormatAndParseIT.java} | 6 +- .../common/logging/ESJsonLayout.java | 4 +- .../JsonThrowablePatternConverter.java | 20 +- .../JsonThrowablePatternConverterTests.java | 13 +- .../common/logging/JsonLogLine.java | 177 ++++++------------ ...stCase.java => JsonLogsIntegTestCase.java} | 11 +- .../xpack/ccr/FollowIndexIT.java | 2 +- 9 files changed, 92 insertions(+), 153 deletions(-) rename distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/{NodeAndClusterInfoInLogsIT.java => JsonLogsFormatAndParseIT.java} (88%) rename qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/{NodeAndClusterInfoInLogsIT.java => JsonLogsFormatAndParseIT.java} (91%) rename test/framework/src/main/java/org/elasticsearch/common/logging/{NodeAndClusterInfoIntegTestCase.java => JsonLogsIntegTestCase.java} (87%) diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeAndClusterInfoInLogsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java similarity index 88% rename from distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeAndClusterInfoInLogsIT.java rename to distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java index 60dfb7478a6e8..12c916946085b 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeAndClusterInfoInLogsIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java @@ -19,11 +19,11 @@ package org.elasticsearch.test.rest; -import org.elasticsearch.common.logging.NodeAndClusterInfoIntegTestCase; 
+import org.elasticsearch.common.logging.JsonLogsIntegTestCase; import org.hamcrest.Matcher; -import java.io.IOException; import java.io.BufferedReader; +import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.is; -public class NodeAndClusterInfoInLogsIT extends NodeAndClusterInfoIntegTestCase { +public class JsonLogsFormatAndParseIT extends JsonLogsIntegTestCase { @Override protected Matcher nodeNameMatcher() { return is("node-0"); @@ -41,7 +41,7 @@ protected Matcher nodeNameMatcher() { @Override protected BufferedReader openReader(Path logFile) { assumeFalse("Skipping test because it is being run against an external cluster.", - logFile.getFileName().toString().equals("--external--")); + logFile.getFileName().toString().equals("--external--")); return AccessController.doPrivileged((PrivilegedAction) () -> { try { return Files.newBufferedReader(logFile, StandardCharsets.UTF_8); diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 009228ebb7a46..26bf750e18437 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -116,14 +116,14 @@ public void testDieWithDignity() throws Exception { private boolean isFatalErrorInThreadExiting(JsonLogLine line) { return line.level().equals("ERROR") - && line.clazz().equals("o.e.b.ElasticsearchUncaughtExceptionHandler") + && line.component().equals("o.e.b.ElasticsearchUncaughtExceptionHandler") && line.nodeName().equals("node-0") && line.message().matches("fatal error in thread \\[Thread-\\d+\\], exiting$"); } private boolean isFatalError(JsonLogLine line) { return line.level().equals("ERROR") - && 
line.clazz().equals("o.e.ExceptionsHelper") + && line.component().equals("o.e.ExceptionsHelper") && line.nodeName().equals("node-0") && line.message().contains("fatal error"); } diff --git a/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeAndClusterInfoInLogsIT.java b/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java similarity index 91% rename from qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeAndClusterInfoInLogsIT.java rename to qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java index 5a7761be3e8d4..50cc20b0e5789 100644 --- a/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeAndClusterInfoInLogsIT.java +++ b/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java @@ -19,11 +19,11 @@ package org.elasticsearch.unconfigured_node_name; -import org.elasticsearch.common.logging.NodeAndClusterInfoIntegTestCase; +import org.elasticsearch.common.logging.JsonLogsIntegTestCase; import org.hamcrest.Matcher; -import java.io.IOException; import java.io.BufferedReader; +import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.not; -public class NodeAndClusterInfoInLogsIT extends NodeAndClusterInfoIntegTestCase { +public class JsonLogsFormatAndParseIT extends JsonLogsIntegTestCase { @Override protected Matcher nodeNameMatcher() { return not(""); diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java index 77b8b9f543eff..f0152ccc56000 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java +++ 
b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -35,9 +35,9 @@ public class ESJsonLayout extends AbstractStringLayout { private static final String PATTERN = "{" + "\"type\": \"console\", " + - "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}\", " + + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}\", " + // this is ISO8601 with additional timezone ID "\"level\": \"%p\", " + - "\"class\": \"%c{1.}\", " + + "\"component\": \"%c{1.}\", " + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " + "\"node.name\": \"%node_name\", " + "%node_and_cluster_id " + diff --git a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java index ea9b14e60105d..a0b0f60fccd65 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java @@ -33,7 +33,7 @@ * This is a modification of a @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter *

* Outputs the Throwable portion of the LoggingEvent as a Json formatted field with array - * "exception": [ stacktrace... ] + * "exception": [ "stacktrace", "lines", "as", "array", "elements" ] */ @Plugin(name = "JsonThrowablePatternConverter", category = PatternConverter.CATEGORY) @ConverterKeys({"exceptionAsJson"}) @@ -43,10 +43,7 @@ public final class JsonThrowablePatternConverter extends ThrowablePatternConvert private final ExtendedThrowablePatternConverter throwablePatternConverter; /** - * Private constructor. - * - * @param config TODO - * @param options options, may be null. + * Private constructor. Parameters only used to configure wrapped throwablePatternConverter */ private JsonThrowablePatternConverter(final Configuration config, final String[] options) { super("CustomExtendedThrowable", "throwable", options, config); @@ -70,12 +67,12 @@ public static JsonThrowablePatternConverter newInstance(final Configuration conf */ @Override public void format(final LogEvent event, final StringBuilder toAppendTo) { - String stacktrace = formatStacktrace(event); - if (Strings.isNotEmpty(stacktrace)) { - String jsonException = formatJson(stacktrace); + String consoleStacktrace = formatStacktrace(event); + if (Strings.isNotEmpty(consoleStacktrace)) { + String jsonStacktrace = formatJson(consoleStacktrace); toAppendTo.append(", "); - toAppendTo.append(jsonException); + toAppendTo.append(jsonStacktrace); } } @@ -85,8 +82,9 @@ private String formatStacktrace(LogEvent event) { return stringBuilder.toString(); } - private String formatJson(String extStackTrace) { - String[] split = extStackTrace.split(options.getSeparator() + "\t|" + options.getSeparator()); + private String formatJson(String consoleStacktrace) { + String lineSeparator = options.getSeparator() + "\t|" + options.getSeparator(); + String[] split = consoleStacktrace.split(lineSeparator); StringJoiner stringJoiner = new StringJoiner(",\n", "\n\"stacktrace\": [", "]"); for (String line : split) { diff --git 
a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java index de7d184b76cb0..1032e1455bef2 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -66,12 +66,8 @@ public void testStacktraceWithJson() throws IOException { String result = format(event); - String sampleLine = "{\"type\": \"console\", \"timestamp\": \"2019-01-03T16:30:53,058+0100\", \"level\": \"DEBUG\", \"class\": " + - "\"o.e.a.s.TransportSearchAction\", \"cluster.name\": \"clustername\", \"node.name\": \"node-0\", \"cluster.uuid\": " + - "\"OG5MkvOrR9azuClJhWvy6Q\", \"node.id\": \"VTShUqmcQG6SzeKY5nn7qA\", \"message\": \"msg msg\" " + result + "}"; - //confirms exception is correctly parsed - JsonLogs jsonLogs = new JsonLogs(new BufferedReader(new StringReader(sampleLine))); + JsonLogs jsonLogs = new JsonLogs(new BufferedReader(new StringReader(result))); JsonLogLine jsonLogLine = jsonLogs.stream().findFirst() .orElseThrow(() -> new AssertionError("no logs parsed")); @@ -85,6 +81,11 @@ public void testStacktraceWithJson() throws IOException { private String format(LogEvent event) { StringBuilder builder = new StringBuilder(); converter.format(event, builder); - return builder.toString(); + String jsonStacktraceElement = builder.toString(); + + return "{\"type\": \"console\", \"timestamp\": \"2019-01-03T16:30:53,058+0100\", \"level\": \"DEBUG\", " + + "\"component\": \"o.e.a.s.TransportSearchAction\", \"cluster.name\": \"clustername\", \"node.name\": \"node-0\", " + + "\"cluster.uuid\": \"OG5MkvOrR9azuClJhWvy6Q\", \"node.id\": \"VTShUqmcQG6SzeKY5nn7qA\", \"message\": \"msg msg\" " + + jsonStacktraceElement + "}"; } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java 
b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java index 587ce3b00119a..f31cae3dffbad 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java @@ -20,16 +20,17 @@ package org.elasticsearch.common.logging; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import java.util.List; public class JsonLogLine { - public static final ConstructingObjectParser PARSER = createParser(false); + public static final ObjectParser PARSER = createParser(false); + private String type; private String timestamp; private String level; - private String clazz; + private String component; private String clusterName; private String nodeName; private String clusterUuid; @@ -37,29 +38,13 @@ public class JsonLogLine { private String message; private List stacktrace; - private JsonLogLine(String type, String timestamp, String level, String clazz, String clusterName, - String nodeName, String clusterUuid, String nodeId, - String message, - List stacktrace) { - this.type = type; - this.timestamp = timestamp; - this.level = level; - this.clazz = clazz; - this.clusterName = clusterName; - this.nodeName = nodeName; - this.clusterUuid = clusterUuid; - this.nodeId = nodeId; - this.message = message; - this.stacktrace = stacktrace; - } - @Override public String toString() { final StringBuilder sb = new StringBuilder("JsonLogLine{"); sb.append("type='").append(type).append('\''); sb.append(", timestamp='").append(timestamp).append('\''); sb.append(", level='").append(level).append('\''); - sb.append(", clazz='").append(clazz).append('\''); + sb.append(", component='").append(component).append('\''); sb.append(", clusterName='").append(clusterName).append('\''); sb.append(", nodeName='").append(nodeName).append('\''); sb.append(", 
clusterUuid='").append(clusterUuid).append('\''); @@ -82,8 +67,8 @@ public String level() { return level; } - public String clazz() { - return clazz; + public String component() { + return component; } public String clusterName() { @@ -110,107 +95,59 @@ public List stacktrace() { return stacktrace; } - @SuppressWarnings("unchecked") - private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { - ConstructingObjectParser parser = new ConstructingObjectParser<>("jsong_log_line_parser", ignoreUnknownFields, - a -> JsonLogLine.builder() - .withType((String) a[0]) - .withTimestamp((String) a[1]) - .withLevel((String) a[2]) - .withClazz((String) a[3]) - .withClusterName((String) a[4]) - .withNodeName((String) a[5]) - .withClusterUuid((String) a[6]) - .withNodeId((String) a[7]) - .withMessage((String) a[8]) - .withStacktrace((List) a[9]) - .build() - ); - - parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("type")); - parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("timestamp")); - parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("level")); - parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("class")); - parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster.name")); - parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("node.name")); - parser.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("cluster.uuid")); - parser.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("node.id")); - parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("message")); - parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("stacktrace")); + public void setType(String type) { + this.type = type; + } + + public void setTimestamp(String timestamp) { + this.timestamp = 
timestamp; + } + public void setLevel(String level) { + this.level = level; + } - return parser; + public void setComponent(String component) { + this.component = component; } - public static Builder builder() { - return new Builder(); - } - - static class Builder { - String type; - String timestamp; - String level; - String clazz; - String clusterName; - String nodeName; - String clusterUuid; - String nodeId; - String message; - List stacktrace; - - public Builder withType(String type) { - this.type = type; - return this; - } - - public Builder withTimestamp(String timestamp) { - this.timestamp = timestamp; - return this; - } - - public Builder withLevel(String level) { - this.level = level; - return this; - } - - public Builder withClazz(String clazz) { - this.clazz = clazz; - return this; - } - - public Builder withClusterName(String clusterName) { - this.clusterName = clusterName; - return this; - } - - public Builder withNodeName(String nodeName) { - this.nodeName = nodeName; - return this; - } - - public Builder withClusterUuid(String clusterUuid) { - this.clusterUuid = clusterUuid; - return this; - } - - public Builder withNodeId(String nodeId) { - this.nodeId = nodeId; - return this; - } - - public Builder withMessage(String message) { - this.message = message; - return this; - } - - public Builder withStacktrace(List stacktrace) { - this.stacktrace = stacktrace; - return this; - } - - public JsonLogLine build() { - return new JsonLogLine(type, timestamp, level, clazz, clusterName, - nodeName, clusterUuid, nodeId, message, stacktrace); - } + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public void setNodeName(String nodeName) { + this.nodeName = nodeName; + } + + public void setClusterUuid(String clusterUuid) { + this.clusterUuid = clusterUuid; + } + + public void setNodeId(String nodeId) { + this.nodeId = nodeId; + } + + public void setMessage(String message) { + this.message = message; + } + + public void 
setStacktrace(List stacktrace) { + this.stacktrace = stacktrace; + } + + private static ObjectParser createParser(boolean ignoreUnknownFields) { + ObjectParser parser = new ObjectParser<>("search_template", ignoreUnknownFields, null); + parser.declareString(JsonLogLine::setType, new ParseField("type")); + parser.declareString(JsonLogLine::setTimestamp, new ParseField("timestamp")); + parser.declareString(JsonLogLine::setLevel, new ParseField("level")); + parser.declareString(JsonLogLine::setComponent, new ParseField("component")); + parser.declareString(JsonLogLine::setClusterName, new ParseField("cluster.name")); + parser.declareString(JsonLogLine::setNodeName, new ParseField("node.name")); + parser.declareString(JsonLogLine::setClusterUuid, new ParseField("cluster.uuid")); + parser.declareString(JsonLogLine::setNodeId, new ParseField("node.id")); + parser.declareString(JsonLogLine::setMessage, new ParseField("message")); + parser.declareStringArray(JsonLogLine::setStacktrace, new ParseField("stacktrace")); + + return parser; } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeAndClusterInfoIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java similarity index 87% rename from test/framework/src/main/java/org/elasticsearch/common/logging/NodeAndClusterInfoIntegTestCase.java rename to test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java index 7340302f8e3d4..1fd9b45c67a68 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeAndClusterInfoIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -32,13 +32,16 @@ import static org.hamcrest.Matchers.not; /** - * Tests that extend this class verify that the node name, cluster name, nodeId, clusterId appear in the first - * few log lines on startup. 
Note that this won't pass for clusters that don't + * Tests that extend this class verify that all json layout fields appear in the first few log lines after startup + * Fields available straight away : type, timestamp, level, component, message, nodeName, clusterName + * nodeId and clusterId are available later once the clusterState was received. + * NodeName, ClusterName, NodeId, ClusterId should be consistent across all log lines + * Note that this won't pass for clusters that don't have * the node name defined in elasticsearch.yml and start with * DEBUG or TRACE level logging. Those nodes log a few lines before they * resolve the node name. */ -public abstract class NodeAndClusterInfoIntegTestCase extends ESRestTestCase { +public abstract class JsonLogsIntegTestCase extends ESRestTestCase { /** * Number of lines in the log file to check for the node name. We don't * just check the entire log file because it could be quite long and @@ -79,7 +82,7 @@ public void testNodeNameIsOnAllLinesOfLog() throws IOException { assertThat(jsonLogLine.type(), not(isEmptyOrNullString())); assertThat(jsonLogLine.timestamp(), not(isEmptyOrNullString())); assertThat(jsonLogLine.level(), not(isEmptyOrNullString())); - assertThat(jsonLogLine.clazz(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.component(), not(isEmptyOrNullString())); assertThat(jsonLogLine.message(), not(isEmptyOrNullString())); //all lines should have the same nodeName and clusterName diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 54768c2e61355..b1cc6ccd565a8 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -102,7 +102,7 @@ 
private Matcher autoFollowCoordinatorWarn() { @Override protected Boolean featureValueOf(JsonLogLine actual) { return actual.level().equals("WARN") && - actual.clazz().equals("o.e.x.c.a.AutoFollowCoordinator") && + actual.component().equals("o.e.x.c.a.AutoFollowCoordinator") && actual.nodeName().equals("node-0") && actual.message().contains("failure occurred while fetching cluster state for auto follow pattern [test_pattern]") && actual.stacktrace().contains("org.elasticsearch.ElasticsearchStatusException: can not fetch remote cluster state " + From 1da7c971e4bf2f75611a21ac838bd1598d9d6513 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 4 Jan 2019 11:51:55 +0100 Subject: [PATCH 20/64] json logs cleanup --- .../common/logging/JsonLogsIntegTestCase.java | 75 +++++++++++-------- 1 file changed, 45 insertions(+), 30 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java index 1fd9b45c67a68..b43348c91e91f 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.logging; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.test.rest.ESRestTestCase; @@ -26,6 +27,7 @@ import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.function.Predicate; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isEmptyOrNullString; @@ -61,41 +63,54 @@ public abstract class JsonLogsIntegTestCase extends ESRestTestCase { */ protected abstract BufferedReader openReader(Path logFile); - public void testNodeNameIsOnAllLinesOfLog() throws IOException { + + public void testElementsPresentOnAllLinesOfLog() throws 
IOException { try (JsonLogs jsonLogs = new JsonLogs(openReader(getLogFile()))) { - JsonLogLine firstLine = null; - String expectedNodeId = null; - String expectedClusterId = null; - for (JsonLogLine jsonLogLine : jsonLogs) { - if (firstLine == null) { - firstLine = jsonLogLine; - } - - if (jsonLogLine.nodeId() != null && expectedNodeId == null) { - //nodeId and clusterid are set together - expectedNodeId = jsonLogLine.nodeId(); - expectedClusterId = jsonLogLine.clusterUuid(); - } - - - assertThat(jsonLogLine.type(), not(isEmptyOrNullString())); - assertThat(jsonLogLine.timestamp(), not(isEmptyOrNullString())); - assertThat(jsonLogLine.level(), not(isEmptyOrNullString())); - assertThat(jsonLogLine.component(), not(isEmptyOrNullString())); - assertThat(jsonLogLine.message(), not(isEmptyOrNullString())); - - //all lines should have the same nodeName and clusterName - assertThat(jsonLogLine.nodeName(), equalTo(firstLine.nodeName())); - assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName())); - - //initially empty, but once found all lines shoudl have same nodeId and clusterid - assertThat(jsonLogLine.nodeId(), equalTo(expectedNodeId)); - assertThat(jsonLogLine.clusterUuid(), equalTo(expectedClusterId)); - } + JsonLogLine firstLine = jsonLogs.stream() + .findFirst() + .orElseThrow(() -> new AssertionError("no logs at all?!")); + + jsonLogs.stream() + .limit(LINES_TO_CHECK) + .forEach(jsonLogLine -> { + assertThat(jsonLogLine.type(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.timestamp(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.level(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.component(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.message(), not(isEmptyOrNullString())); + + // all lines should have the same nodeName and clusterName + assertThat(jsonLogLine.nodeName(), equalTo(firstLine.nodeName())); + assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName())); + }); + } + } + + public void 
testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException { + try (JsonLogs jsonLogs = new JsonLogs(openReader(getLogFile()))) { + SetOnce firstLineWithIds = new SetOnce<>(); + + jsonLogs.stream() + .dropWhile(nodeIdNotPresent(firstLineWithIds)) + .limit(LINES_TO_CHECK) + .forEach(jsonLogLine -> { + //initially empty, but once found all lines should have same nodeId and clusterid + assertThat(jsonLogLine.nodeId(), equalTo(firstLineWithIds.get().nodeId())); + assertThat(jsonLogLine.clusterUuid(), equalTo(firstLineWithIds.get().clusterUuid())); + }); } } + private Predicate nodeIdNotPresent(SetOnce firstLine) { + return line -> { + if (line.nodeId() != null) { + firstLine.set(line); + return false; + } + return true; + }; + } @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") private Path getLogFile() { From 28c20c1d93cdc6b3b69967222883fb2d639cfc4c Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 4 Jan 2019 12:02:34 +0100 Subject: [PATCH 21/64] test cleanup ignore failing compilation error refactor test not to use dropwhile fix failing test adding debuging info to failing test --- .../qa/die_with_dignity/DieWithDignityIT.java | 56 ++++++++++++------- .../JsonThrowablePatternConverterTests.java | 11 +++- .../common/logging/JsonLogLine.java | 2 +- .../common/logging/JsonLogsIntegTestCase.java | 37 ++++++------ 4 files changed, 62 insertions(+), 44 deletions(-) diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 26bf750e18437..41e156d7ed676 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -21,6 +21,7 @@ import org.apache.http.ConnectionClosedException; import org.apache.lucene.util.Constants; +import 
org.elasticsearch.cli.Terminal; import org.elasticsearch.client.Request; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.JsonLogLine; @@ -89,28 +90,43 @@ public void testDieWithDignity() throws Exception { } }); - - // parse the logs and ensure that Elasticsearch died with the expected cause - Path path = PathUtils.get(System.getProperty("log")); - try(JsonLogs jsonLogs = new JsonLogs(path)){ - final Iterator it = jsonLogs.iterator(); - - boolean fatalError = false; - boolean fatalErrorInThreadExiting = false; - - while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { - final JsonLogLine line = it.next(); - if (isFatalError(line)) { - fatalError = true; - } else if (isFatalErrorInThreadExiting(line)) { - fatalErrorInThreadExiting = true; - assertThat(line.stacktrace(), - hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity"))); - } +try { + // parse the logs and ensure that Elasticsearch died with the expected cause + Path path = PathUtils.get(System.getProperty("log")); + try (JsonLogs jsonLogs = new JsonLogs(path)) { + final Iterator it = jsonLogs.iterator(); + + boolean fatalError = false; + boolean fatalErrorInThreadExiting = false; + + while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { + final JsonLogLine line = it.next(); + if (isFatalError(line)) { + fatalError = true; + } else if (isFatalErrorInThreadExiting(line)) { + fatalErrorInThreadExiting = true; + assertThat(line.stacktrace(), + hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity"))); } + } + //temporarily adding logging +// if(!fatalError || !fatalErrorInThreadExiting){ + +// } + assertTrue(fatalError); + assertTrue(fatalErrorInThreadExiting); + } +}catch(AssertionError ae){ + Path path = PathUtils.get(System.getProperty("log")); + debugLogs(path); + throw ae; +} + } - assertTrue(fatalError); - assertTrue(fatalErrorInThreadExiting); + private void 
debugLogs(Path path) throws IOException { + try(BufferedReader reader = Files.newBufferedReader(path)){ + Terminal terminal = Terminal.DEFAULT; + reader.lines().forEach(line ->terminal.println(line)); } } diff --git a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java index 1032e1455bef2..54a3a61940d84 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -5,13 +5,13 @@ import org.apache.logging.log4j.core.impl.Log4jLogEvent; import org.apache.logging.log4j.message.SimpleMessage; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; import java.io.BufferedReader; import java.io.IOException; import java.io.StringReader; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.isEmptyString; /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -35,13 +35,18 @@ public class JsonThrowablePatternConverterTests extends ESTestCase { JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null); - public void testNoStacktrace() { + public void testNoStacktrace() throws IOException { LogEvent event = Log4jLogEvent.newBuilder() .build(); String result = format(event); - assertThat(result, isEmptyString()); + JsonLogs jsonLogs = new JsonLogs(new BufferedReader(new StringReader(result))); + JsonLogLine jsonLogLine = jsonLogs.stream() + .findFirst() + .orElseThrow(() -> new AssertionError("no logs parsed")); + + assertThat(jsonLogLine.stacktrace(), Matchers.nullValue()); } diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java index f31cae3dffbad..1b55b6378c234 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java @@ -136,7 +136,7 @@ public void setStacktrace(List stacktrace) { } private static ObjectParser createParser(boolean ignoreUnknownFields) { - ObjectParser parser = new ObjectParser<>("search_template", ignoreUnknownFields, null); + ObjectParser parser = new ObjectParser<>("search_template", ignoreUnknownFields, JsonLogLine::new); parser.declareString(JsonLogLine::setType, new ParseField("type")); parser.declareString(JsonLogLine::setTimestamp, new ParseField("timestamp")); parser.declareString(JsonLogLine::setLevel, new ParseField("level")); diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java index b43348c91e91f..3614ede3dfab5 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.logging; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.test.rest.ESRestTestCase; @@ -27,7 +26,7 @@ import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.function.Predicate; +import java.util.Iterator; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isEmptyOrNullString; @@ -89,27 +88,25 @@ public void testElementsPresentOnAllLinesOfLog() throws IOException { public void testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException { try (JsonLogs jsonLogs = new JsonLogs(openReader(getLogFile()))) { - SetOnce firstLineWithIds = new SetOnce<>(); + Iterator iterator = jsonLogs.iterator(); + + JsonLogLine firstLine = null; + while (iterator.hasNext()) { + JsonLogLine jsonLogLine = iterator.next(); + if (jsonLogLine.nodeId() != null) { + firstLine = jsonLogLine; + } + } - jsonLogs.stream() - .dropWhile(nodeIdNotPresent(firstLineWithIds)) - .limit(LINES_TO_CHECK) - .forEach(jsonLogLine -> { - //initially empty, but once found all lines should have same nodeId and clusterid - assertThat(jsonLogLine.nodeId(), equalTo(firstLineWithIds.get().nodeId())); - assertThat(jsonLogLine.clusterUuid(), equalTo(firstLineWithIds.get().clusterUuid())); - }); - } - } + //once the nodeId and clusterId are received, they should be the same on remaining lines - private Predicate nodeIdNotPresent(SetOnce firstLine) { - return line -> { - if (line.nodeId() != null) { - firstLine.set(line); - return false; + int i = 0; + while (iterator.hasNext() && i++ < LINES_TO_CHECK) { + JsonLogLine jsonLogLine = iterator.next(); + assertThat(jsonLogLine.nodeId(), equalTo(firstLine.nodeId())); + assertThat(jsonLogLine.clusterUuid(), equalTo(firstLine.clusterUuid())); } - return true; - }; + } } 
@SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") From 0686fee10c1af1229bd9a955f321069c6d1239a3 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 4 Jan 2019 22:48:07 +0100 Subject: [PATCH 22/64] sometimes HttpServerTransport is logging first, and then the server dies. Not logging the exception from ElasticsearchUncaughtExceptionHandler --- .../qa/die_with_dignity/DieWithDignityIT.java | 70 ++++++++++--------- 1 file changed, 37 insertions(+), 33 deletions(-) diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 41e156d7ed676..aaf8a9ab25357 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -58,7 +58,7 @@ public void testDieWithDignity() throws Exception { final int pid = Integer.parseInt(pidFileLines.get(0)); Files.delete(pidFile); IOException e = expectThrows(IOException.class, - () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); + () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); Matcher failureMatcher = instanceOf(ConnectionClosedException.class); if (Constants.WINDOWS) { /* @@ -69,9 +69,9 @@ public void testDieWithDignity() throws Exception { * https://issues.apache.org/jira/browse/HTTPASYNC-134 * * So we catch it here and consider it "ok". 
- */ + */ failureMatcher = either(failureMatcher) - .or(hasToString(containsString("An existing connection was forcibly closed by the remote host"))); + .or(hasToString(containsString("An existing connection was forcibly closed by the remote host"))); } assertThat(e, failureMatcher); @@ -90,43 +90,47 @@ public void testDieWithDignity() throws Exception { } }); -try { - // parse the logs and ensure that Elasticsearch died with the expected cause - Path path = PathUtils.get(System.getProperty("log")); - try (JsonLogs jsonLogs = new JsonLogs(path)) { - final Iterator it = jsonLogs.iterator(); - - boolean fatalError = false; - boolean fatalErrorInThreadExiting = false; - - while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { - final JsonLogLine line = it.next(); - if (isFatalError(line)) { - fatalError = true; - } else if (isFatalErrorInThreadExiting(line)) { - fatalErrorInThreadExiting = true; - assertThat(line.stacktrace(), - hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity"))); + try { + // parse the logs and ensure that Elasticsearch died with the expected cause + Path path = PathUtils.get(System.getProperty("log")); + try (JsonLogs jsonLogs = new JsonLogs(path)) { + final Iterator it = jsonLogs.iterator(); + + boolean fatalError = false; + boolean fatalErrorInThreadExiting = false; + + while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { + final JsonLogLine line = it.next(); + if (isFatalError(line)) { + fatalError = true; + } else if (isFatalErrorInThreadExiting(line) || isWarnExceptionReceived(line)) { + fatalErrorInThreadExiting = true; + assertThat(line.stacktrace(), + hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity"))); + } + } + + assertTrue(fatalError); + assertTrue(fatalErrorInThreadExiting); } + } catch (AssertionError ae) { + Path path = PathUtils.get(System.getProperty("log")); + debugLogs(path); + throw ae; } - //temporarily 
adding logging -// if(!fatalError || !fatalErrorInThreadExiting){ - -// } - assertTrue(fatalError); - assertTrue(fatalErrorInThreadExiting); } -}catch(AssertionError ae){ - Path path = PathUtils.get(System.getProperty("log")); - debugLogs(path); - throw ae; -} + + private boolean isWarnExceptionReceived(JsonLogLine line) { + return line.level().equals("ERROR") + && line.component().equals("o.e.h.AbstractHttpServerTransport") + && line.nodeName().equals("node-0") + && line.message().contains("caught exception while handling client http traffic"); } private void debugLogs(Path path) throws IOException { - try(BufferedReader reader = Files.newBufferedReader(path)){ + try (BufferedReader reader = Files.newBufferedReader(path)) { Terminal terminal = Terminal.DEFAULT; - reader.lines().forEach(line ->terminal.println(line)); + reader.lines().forEach(line -> terminal.println(line)); } } From 669e9ecab04fdf7a7c64254e0cded5c7b46a1483 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 9 Jan 2019 17:36:17 +0100 Subject: [PATCH 23/64] additional json tests --- distribution/src/config/log4j2.properties | 5 + .../common/logging/JsonLoggerTests.java | 220 ++++++++++++++++++ .../logging/json_layout/log4j2.properties | 21 ++ .../common/logging/ESJsonLayout.java | 44 +++- 4 files changed, 278 insertions(+), 12 deletions(-) create mode 100644 qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java create mode 100644 qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 229d6b4af9123..909fc8247af26 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -7,12 +7,14 @@ logger.action.level = debug appender.console.type = Console appender.console.name = console appender.console.layout.type = ESJsonLayout +appender.console.layout.type_name = console 
appender.rolling.type = RollingFile appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log appender.rolling.layout.type = ESJsonLayout +appender.rolling.layout.type_name = rolling appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz appender.rolling.policies.type = Policies @@ -38,6 +40,7 @@ appender.deprecation_rolling.type = RollingFile appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log appender.deprecation_rolling.layout.type = ESJsonLayout +appender.deprecation_rolling.layout.type_name = deprecation_rolling appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz @@ -56,6 +59,7 @@ appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log appender.index_search_slowlog_rolling.layout.type = ESJsonLayout +appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz appender.index_search_slowlog_rolling.policies.type = Policies @@ -73,6 +77,7 @@ appender.index_indexing_slowlog_rolling.type = RollingFile appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout 
+appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java new file mode 100644 index 0000000000000..5c04b7ef2e2f1 --- /dev/null +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -0,0 +1,220 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configurator; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.FeatureMatcher; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; + +public class JsonLoggerTests extends ESTestCase { + + @BeforeClass + public static void initNodeName() { + LogConfigurator.setNodeName("sample-name"); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + LogConfigurator.registerErrorListener(); + } + + @Override + public void tearDown() throws Exception { + LoggerContext context = (LoggerContext) LogManager.getContext(false); + Configurator.shutdown(context); + super.tearDown(); + } + + @SuppressWarnings("unchecked") + public void testJsonLayout() throws IOException, UserException { + setupLogging("json_layout"); + + final Logger testLogger = LogManager.getLogger("test"); + + testLogger.error("This is an error message"); + testLogger.warn("This is a warning message"); + testLogger.info("This is an info message"); + testLogger.debug("This is a debug message"); + testLogger.trace("This is a trace message"); + final String path = + System.getProperty("es.logs.base_path") + + System.getProperty("file.separator") + + System.getProperty("es.logs.cluster_name") + + ".log"; + + try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { + assertThat(jsonLogs, Matchers.contains( + logLine("file", Level.ERROR, "sample-name", "test", "This is an error message"), + logLine("file", 
Level.WARN, "sample-name", "test", "This is a warning message"), + logLine("file", Level.INFO, "sample-name", "test", "This is an info message"), + logLine("file", Level.DEBUG, "sample-name", "test", "This is a debug message"), + logLine("file", Level.TRACE, "sample-name", "test", "This is a trace message") + )); + } + } + + public void testJsonInMessage() throws IOException, UserException { + setupLogging("json_layout"); + + final Logger testLogger = LogManager.getLogger("test"); + String json = "{\n" + + " \"terms\" : {\n" + + " \"user\" : [\n" + + " \"u1\",\n" + + " \"u2\",\n" + + " \"u3\"\n" + + " ],\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + + testLogger.info(json); + + final String path = + System.getProperty("es.logs.base_path") + + System.getProperty("file.separator") + + System.getProperty("es.logs.cluster_name") + + ".log"; + + try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { + assertThat(jsonLogs, Matchers.contains( + logLine("file", Level.INFO, "sample-name", "test", json) + )); + } + } + + public void testStacktrace() throws IOException, UserException { + setupLogging("json_layout"); + + final Logger testLogger = LogManager.getLogger("test"); + + + testLogger.error("error message", new Exception("exception message", new RuntimeException("cause message"))); + + final String path = + System.getProperty("es.logs.base_path") + + System.getProperty("file.separator") + + System.getProperty("es.logs.cluster_name") + + ".log"; + + try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { + assertThat(jsonLogs, Matchers.contains( + Matchers.allOf( + logLine("file", Level.ERROR, "sample-name", "test", "error message"), + stacktraceWith("java.lang.Exception: exception message"), + stacktraceWith("Caused by: java.lang.RuntimeException: cause message") + ) + )); + } + } + + public void testJsonInStacktraceMessageIsSplitted() throws IOException, UserException { + setupLogging("json_layout"); + + final Logger testLogger = 
LogManager.getLogger("test"); + + String json = "{\n" + + " \"terms\" : {\n" + + " \"user\" : [\n" + + " \"u1\",\n" + + " \"u2\",\n" + + " \"u3\"\n" + + " ],\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + testLogger.error("error message " + json, new Exception(json)); + + final String path = + System.getProperty("es.logs.base_path") + + System.getProperty("file.separator") + + System.getProperty("es.logs.cluster_name") + + ".log"; + + try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { + assertThat(jsonLogs, Matchers.contains( + Matchers.allOf( + //message field will have a single line with json escaped + logLine("file", Level.ERROR, "sample-name", "test", "error message " + json), + + //stacktrace field will have each json line will in a separate array element + stacktraceWith(("java.lang.Exception: " + json).split("\n")) + ) + )); + } + } + + private void setupLogging(final String config) throws IOException, UserException { + setupLogging(config, Settings.EMPTY); + } + + private void setupLogging(final String config, final Settings settings) throws IOException, UserException { + assert !Environment.PATH_HOME_SETTING.exists(settings); + final Path configDir = getDataPath(config); + final Settings mergedSettings = Settings.builder() + .put(settings) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + // need to use custom config path so we can use a custom log4j2.properties file for the test + final Environment environment = new Environment(mergedSettings, configDir); + LogConfigurator.configure(environment); + } + + private Matcher logLine(String type, Level level, String nodeName, String component, String message) { + return new FeatureMatcher(Matchers.is(true), "logLine", "logLine") { + + @Override + protected Boolean featureValueOf(JsonLogLine actual) { + return actual.type().equals(type) && + actual.level().equals(level.toString()) && + actual.nodeName().equals(nodeName) && + 
actual.component().equals(component) && + actual.message().equals(message); + } + }; + } + + private Matcher stacktraceWith(String... lines) { + return new FeatureMatcher>(Matchers.hasItems(lines), + "stacktrace", "stacktrace") { + + @Override + protected List featureValueOf(JsonLogLine actual) { + return actual.stacktrace(); + } + }; + } +} diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties new file mode 100644 index 0000000000000..4bbd0b038ab8a --- /dev/null +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties @@ -0,0 +1,21 @@ +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = ESJsonLayout +appender.console.layout.type_name = console + +appender.file.type = File +appender.file.name = file +appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log +appender.file.layout.type = ESJsonLayout +appender.file.layout.type_name = file + + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.file.ref = file + +logger.test.name = test +logger.test.level = trace +logger.test.appenderRef.console.ref = console +logger.test.appenderRef.file.ref = file +logger.test.additivity = false diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java index f0152ccc56000..e2941342f57e5 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -19,23 +19,39 @@ package org.elasticsearch.common.logging; +import org.apache.logging.log4j.core.Layout; import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.config.Node; import 
org.apache.logging.log4j.core.config.plugins.Plugin; import org.apache.logging.log4j.core.config.plugins.PluginAttribute; import org.apache.logging.log4j.core.config.plugins.PluginFactory; import org.apache.logging.log4j.core.layout.AbstractStringLayout; import org.apache.logging.log4j.core.layout.ByteBufferDestination; import org.apache.logging.log4j.core.layout.PatternLayout; +import org.elasticsearch.common.Strings; import java.nio.charset.Charset; import java.util.Map; -@Plugin(name = "ESJsonLayout", category = "Core", elementType = "layout", printObject = true) +@Plugin(name = "ESJsonLayout", category = Node.CATEGORY, elementType = Layout.ELEMENT_TYPE, printObject = true) public class ESJsonLayout extends AbstractStringLayout { + /** + * type - the name of the appender + * timestamp - ISO8601 with additional timezone ID + * level - INFO, WARN etc + * component - logger name, most of the times class name + * cluster.name - taken from sys:es.logs.cluster_name system property because it is always set + * node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml + * node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present + * once clusterStateUpdate is first received + * message - a json escaped message. Multiline messages will be converted to single line with new line explicitly replaced to \n + * exceptionAsJson - in json as stacktrace. Only present when throwable is passed as a parameter to a Logger. 
Taken from + * JsonThrowablePatternConverter + */ private static final String PATTERN = "{" + - "\"type\": \"console\", " + - "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}\", " + // this is ISO8601 with additional timezone ID + "\"type\": \"${TYPE}\", " + + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}\", " + "\"level\": \"%p\", " + "\"component\": \"%c{1.}\", " + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " + @@ -47,21 +63,25 @@ public class ESJsonLayout extends AbstractStringLayout { private final PatternLayout patternLayout; - protected ESJsonLayout(boolean locationInfo, boolean properties, boolean complete, - Charset charset) { + protected ESJsonLayout(String typeName, Charset charset) { super(charset); this.patternLayout = PatternLayout.newBuilder() - .withPattern(PATTERN) - .withAlwaysWriteExceptions(false) - .build(); + .withPattern(pattern(typeName)) + .withAlwaysWriteExceptions(false) + .build(); + } + + private String pattern(String type) { + if (Strings.isEmpty(type)) { + throw new IllegalArgumentException("layout parameter 'type_name' cannot be empty"); + } + return PATTERN.replace("${TYPE}", type); } @PluginFactory - public static ESJsonLayout createLayout(@PluginAttribute("locationInfo") boolean locationInfo, - @PluginAttribute("properties") boolean properties, - @PluginAttribute("complete") boolean complete, + public static ESJsonLayout createLayout(@PluginAttribute("type_name") String type, @PluginAttribute(value = "charset", defaultString = "UTF-8") Charset charset) { - return new ESJsonLayout(locationInfo, properties, complete, charset); + return new ESJsonLayout(type, charset); } @Override From de17fc1debfdb1172108379af2ffabcd61102013 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 10 Jan 2019 10:04:22 +0100 Subject: [PATCH 24/64] docker log4j config cleanup --- .../src/docker/config/log4j2.properties | 45 ++++++++++++++++--- distribution/src/config/log4j2.properties | 8 ++-- .../logging/NodeAndClusterIdConverter.java 
| 8 ++-- 3 files changed, 47 insertions(+), 14 deletions(-) diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties index f3e70aa8091b8..b25893b634dc7 100644 --- a/distribution/docker/src/docker/config/log4j2.properties +++ b/distribution/docker/src/docker/config/log4j2.properties @@ -1,9 +1,44 @@ status = error -appender.console.type = Console -appender.console.name = console -appender.console.layout.type = ESJsonLayout -#TODO test thad +# log action execution errors for easier debugging +logger.action.name = org.elasticsearch.action +logger.action.level = debug + +appender.rolling.type = Console +appender.rolling.name = rolling +appender.rolling.layout.type = ESJsonLayout +appender.rolling.layout.type_name = main rootLogger.level = info -rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.rolling.ref = rolling + +appender.deprecation_rolling.type = Console +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.layout.type = ESJsonLayout +appender.deprecation_rolling.layout.type_name = deprecation + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = warn +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.additivity = false + +appender.index_search_slowlog_rolling.type = Console +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.layout.type = ESJsonLayout +appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog + +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +appender.index_indexing_slowlog_rolling.type = Console 
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout +appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog + + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog +logger.index_indexing_slowlog.additivity = false \ No newline at end of file diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 909fc8247af26..015ae976e6c6c 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -14,7 +14,7 @@ appender.rolling.type = RollingFile appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log appender.rolling.layout.type = ESJsonLayout -appender.rolling.layout.type_name = rolling +appender.rolling.layout.type_name = main appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz appender.rolling.policies.type = Policies @@ -40,7 +40,7 @@ appender.deprecation_rolling.type = RollingFile appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log appender.deprecation_rolling.layout.type = ESJsonLayout -appender.deprecation_rolling.layout.type_name = deprecation_rolling +appender.deprecation_rolling.layout.type_name = deprecation appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz @@ -59,7 +59,7 @@ appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling 
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log appender.index_search_slowlog_rolling.layout.type = ESJsonLayout -appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz appender.index_search_slowlog_rolling.policies.type = Policies @@ -77,7 +77,7 @@ appender.index_indexing_slowlog_rolling.type = RollingFile appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout -appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index 93c197aa3b3b0..2d939559cadd6 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -68,10 +68,9 @@ public void format(LogEvent event, StringBuilder toAppendTo) { } else if (nodeAndClusterIds.get() != null) { //using local value toAppendTo.append(nodeAndClusterIds.get()); - } else { - // no value received yet - toAppendTo.append(""); } + // nodeId/clusterUuid not 
received yet, not appending + } @Override @@ -82,8 +81,7 @@ public void clusterChanged(ClusterChangedEvent event) { boolean wasSet = nodeAndClusterIdsReference.compareAndSet(null, formatIds(clusterUUID,nodeId)); if (wasSet) { - LOGGER.info("received first cluster state update. Setting nodeId={}", nodeId); - + LOGGER.info("received first cluster state update. Setting nodeId={} and clusterUuid={}", nodeId,clusterUUID); } } From 147ca9c36c57003dd43aef1236db5f14195e84c9 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 10 Jan 2019 15:20:42 +0100 Subject: [PATCH 25/64] incorrect docker appender ref --- distribution/docker/src/docker/config/log4j2.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties index b25893b634dc7..bc6d842dbc159 100644 --- a/distribution/docker/src/docker/config/log4j2.properties +++ b/distribution/docker/src/docker/config/log4j2.properties @@ -40,5 +40,5 @@ appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlo logger.index_indexing_slowlog.name = index.indexing.slowlog.index logger.index_indexing_slowlog.level = trace -logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling logger.index_indexing_slowlog.additivity = false \ No newline at end of file From 7a2b53750774bc447ea3e8681b7360fdda100214 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 10 Jan 2019 17:42:50 +0100 Subject: [PATCH 26/64] the right order of reading values from clusterListener --- .../common/logging/NodeAndClusterIdConverter.java | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java 
b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index 2d939559cadd6..eb90059509c3c 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -61,16 +61,15 @@ public NodeAndClusterIdConverter() { @Override public void format(LogEvent event, StringBuilder toAppendTo) { - if (nodeAndClusterIds.get() == null && nodeAndClusterIdsReference.get() != null) { - //received a value from the listener - toAppendTo.append(nodeAndClusterIdsReference.get()); - nodeAndClusterIds.set(nodeAndClusterIdsReference.get()); - } else if (nodeAndClusterIds.get() != null) { + if (nodeAndClusterIds.get() != null) { //using local value toAppendTo.append(nodeAndClusterIds.get()); + } else if (nodeAndClusterIdsReference.get() != null) { + //reading a value from the listener for the first time + toAppendTo.append(nodeAndClusterIdsReference.get()); + nodeAndClusterIds.set(nodeAndClusterIdsReference.get()); } // nodeId/clusterUuid not received yet, not appending - } @Override @@ -78,10 +77,10 @@ public void clusterChanged(ClusterChangedEvent event) { DiscoveryNode localNode = event.state().getNodes().getLocalNode(); String clusterUUID = event.state().getMetaData().clusterUUID(); String nodeId = localNode.getId(); - boolean wasSet = nodeAndClusterIdsReference.compareAndSet(null, formatIds(clusterUUID,nodeId)); + boolean wasSet = nodeAndClusterIdsReference.compareAndSet(null, formatIds(clusterUUID, nodeId)); if (wasSet) { - LOGGER.info("received first cluster state update. Setting nodeId={} and clusterUuid={}", nodeId,clusterUUID); + LOGGER.info("received first cluster state update. 
Setting nodeId={} and clusterUuid={}", nodeId, clusterUUID); } } From 6a01097ce6c49666998f4e387c4c790e8306bd96 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 11 Jan 2019 08:55:51 +0100 Subject: [PATCH 27/64] add missing marker in a pattern --- .../common/logging/JsonLoggerTests.java | 61 +++++++++++++------ .../common/logging/ESJsonLayout.java | 4 +- .../logging/NodeAndClusterIdConverter.java | 2 +- 3 files changed, 44 insertions(+), 23 deletions(-) diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 5c04b7ef2e2f1..cbfc6c7b5f47e 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.hamcrest.FeatureMatcher; import org.hamcrest.Matcher; @@ -69,11 +70,10 @@ public void testJsonLayout() throws IOException, UserException { testLogger.info("This is an info message"); testLogger.debug("This is a debug message"); testLogger.trace("This is a trace message"); - final String path = - System.getProperty("es.logs.base_path") + - System.getProperty("file.separator") + - System.getProperty("es.logs.cluster_name") + - ".log"; + final String path = System.getProperty("es.logs.base_path") + + System.getProperty("file.separator") + + System.getProperty("es.logs.cluster_name") + + ".log"; try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { assertThat(jsonLogs, Matchers.contains( @@ -86,6 +86,30 @@ public void testJsonLayout() throws IOException, UserException { } } + @SuppressWarnings("unchecked") + public void testPrefixLoggerInJson() throws 
IOException, UserException { + setupLogging("json_layout"); + + Logger shardIdLogger = Loggers.getLogger("shardIdLogger", ShardId.fromString("[indexName][123]")); + + shardIdLogger.info("This is an info message with a shardId"); + + Logger prefixLogger = new PrefixLogger(LogManager.getLogger("prefixLogger"), "PREFIX"); + prefixLogger.info("This is an info message with a prefix"); + + final String path = System.getProperty("es.logs.base_path") + + System.getProperty("file.separator") + + System.getProperty("es.logs.cluster_name") + + ".log"; + + try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { + assertThat(jsonLogs, Matchers.contains( + logLine("file", Level.INFO, "sample-name", "shardIdLogger", "[indexName][123] This is an info message with a shardId"), + logLine("file", Level.INFO, "sample-name", "prefixLogger", "PREFIX This is an info message with a prefix") + )); + } + } + public void testJsonInMessage() throws IOException, UserException { setupLogging("json_layout"); @@ -103,11 +127,10 @@ public void testJsonInMessage() throws IOException, UserException { testLogger.info(json); - final String path = - System.getProperty("es.logs.base_path") + - System.getProperty("file.separator") + - System.getProperty("es.logs.cluster_name") + - ".log"; + final String path = System.getProperty("es.logs.base_path") + + System.getProperty("file.separator") + + System.getProperty("es.logs.cluster_name") + + ".log"; try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { assertThat(jsonLogs, Matchers.contains( @@ -124,11 +147,10 @@ public void testStacktrace() throws IOException, UserException { testLogger.error("error message", new Exception("exception message", new RuntimeException("cause message"))); - final String path = - System.getProperty("es.logs.base_path") + - System.getProperty("file.separator") + - System.getProperty("es.logs.cluster_name") + - ".log"; + final String path = System.getProperty("es.logs.base_path") + + 
System.getProperty("file.separator") + + System.getProperty("es.logs.cluster_name") + + ".log"; try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { assertThat(jsonLogs, Matchers.contains( @@ -158,11 +180,10 @@ public void testJsonInStacktraceMessageIsSplitted() throws IOException, UserExce "}"; testLogger.error("error message " + json, new Exception(json)); - final String path = - System.getProperty("es.logs.base_path") + - System.getProperty("file.separator") + - System.getProperty("es.logs.cluster_name") + - ".log"; + final String path = System.getProperty("es.logs.base_path") + + System.getProperty("file.separator") + + System.getProperty("es.logs.cluster_name") + + ".log"; try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { assertThat(jsonLogs, Matchers.contains( diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java index e2941342f57e5..cfd558fc87026 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -56,8 +56,8 @@ public class ESJsonLayout extends AbstractStringLayout { "\"component\": \"%c{1.}\", " + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " + "\"node.name\": \"%node_name\", " + - "%node_and_cluster_id " + - "\"message\": \"%enc{%.-10000m}{JSON}\" " + + "%notEmpty{%node_and_cluster_id, } " + + "\"message\": \"%notEmpty{%enc{%marker}{JSON} }%enc{%.-10000m}{JSON}\" " + "%exceptionAsJson " + "}%n"; diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index eb90059509c3c..97eed798b0a5b 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -85,7 +85,7 
@@ public void clusterChanged(ClusterChangedEvent event) { } private static String formatIds(String clusterUUID, String nodeId) { - return String.format(Locale.ROOT, "\"cluster.uuid\": \"%s\", \"node.id\": \"%s\", ", clusterUUID, nodeId); + return String.format(Locale.ROOT, "\"cluster.uuid\": \"%s\", \"node.id\": \"%s\"", clusterUUID, nodeId); } } From 0af53c05e97b271e5e27bcaf977039a250b45068 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 11 Jan 2019 10:54:13 +0100 Subject: [PATCH 28/64] empty lines cleanup --- distribution/src/config/log4j2.properties | 3 --- .../elasticsearch/common/logging/JsonLoggerTests.java | 3 --- .../org/elasticsearch/common/logging/ESJsonLayout.java | 2 +- .../common/logging/NodeAndClusterIdConverter.java | 9 ++++++--- 4 files changed, 7 insertions(+), 10 deletions(-) diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 015ae976e6c6c..86d6f8c9e7623 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -9,7 +9,6 @@ appender.console.name = console appender.console.layout.type = ESJsonLayout appender.console.layout.type_name = console - appender.rolling.type = RollingFile appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log @@ -42,7 +41,6 @@ appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separ appender.deprecation_rolling.layout.type = ESJsonLayout appender.deprecation_rolling.layout.type_name = deprecation - appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz appender.deprecation_rolling.policies.type = Policies appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy @@ -79,7 +77,6 @@ appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys 
appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog - appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz appender.index_indexing_slowlog_rolling.policies.type = Policies appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index cbfc6c7b5f47e..a10387f34adbf 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -91,7 +91,6 @@ public void testPrefixLoggerInJson() throws IOException, UserException { setupLogging("json_layout"); Logger shardIdLogger = Loggers.getLogger("shardIdLogger", ShardId.fromString("[indexName][123]")); - shardIdLogger.info("This is an info message with a shardId"); Logger prefixLogger = new PrefixLogger(LogManager.getLogger("prefixLogger"), "PREFIX"); @@ -143,8 +142,6 @@ public void testStacktrace() throws IOException, UserException { setupLogging("json_layout"); final Logger testLogger = LogManager.getLogger("test"); - - testLogger.error("error message", new Exception("exception message", new RuntimeException("cause message"))); final String path = System.getProperty("es.logs.base_path") + diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java index cfd558fc87026..362d734510e52 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -37,7 +37,7 @@ public class ESJsonLayout extends 
AbstractStringLayout { /** - * type - the name of the appender + * type - the type of logs. These represent appenders and help docker distinguish log streams. * timestamp - ISO8601 with additional timezone ID * level - INFO, WARN etc * component - logger name, most of the times class name diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index 97eed798b0a5b..5520698a593e6 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.logging; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.config.plugins.Plugin; import org.apache.logging.log4j.core.pattern.ConverterKeys; @@ -41,8 +43,9 @@ public final class NodeAndClusterIdConverter extends LogEventPatternConverter im private static LazyInitializable INSTANCE = new LazyInitializable(() -> new NodeAndClusterIdConverter()); - private AtomicReference nodeAndClusterIdsReference = new AtomicReference<>(); - private CloseableThreadLocal nodeAndClusterIds = new CloseableThreadLocal(); + private final Logger logger = LogManager.getLogger(getClass()); + private final AtomicReference nodeAndClusterIdsReference = new AtomicReference<>(); + private final CloseableThreadLocal nodeAndClusterIds = new CloseableThreadLocal(); /** * Called by log4j2 to initialize this converter. @@ -80,7 +83,7 @@ public void clusterChanged(ClusterChangedEvent event) { boolean wasSet = nodeAndClusterIdsReference.compareAndSet(null, formatIds(clusterUUID, nodeId)); if (wasSet) { - LOGGER.info("received first cluster state update. 
Setting nodeId={} and clusterUuid={}", nodeId, clusterUUID); + logger.info("received first cluster state update. Setting nodeId={} and clusterUuid={}", nodeId, clusterUUID); } } From 4aa84d7970974fd7f9ae3d99ad99857f92812af8 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 14 Jan 2019 17:30:04 +0100 Subject: [PATCH 29/64] addressing Nik's comments --- .../qa/die_with_dignity/DieWithDignityIT.java | 7 +-- .../common/logging/JsonLoggerTests.java | 22 +++++--- .../logging/NodeAndClusterIdConverter.java | 47 +++++++++-------- .../NodeAndClusterIdStateListener.java | 48 +++++++++++++++++ .../java/org/elasticsearch/node/Node.java | 4 +- .../JsonThrowablePatternConverterTests.java | 21 ++++---- .../common/logging/JsonLogLine.java | 5 ++ .../common/logging/JsonLogsIntegTestCase.java | 51 ++++++++++--------- .../{JsonLogs.java => JsonLogsStream.java} | 41 +++++++++------ .../xpack/ccr/FollowIndexIT.java | 19 +++---- 10 files changed, 172 insertions(+), 93 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java rename test/framework/src/main/java/org/elasticsearch/common/logging/{JsonLogs.java => JsonLogsStream.java} (66%) diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index aaf8a9ab25357..14b8c5809386a 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -25,7 +25,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.JsonLogLine; -import org.elasticsearch.common.logging.JsonLogs; +import org.elasticsearch.common.logging.JsonLogsStream; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matcher; 
import org.hamcrest.Matchers; @@ -38,6 +38,7 @@ import java.nio.file.Path; import java.util.Iterator; import java.util.List; +import java.util.stream.Stream; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; @@ -93,8 +94,8 @@ public void testDieWithDignity() throws Exception { try { // parse the logs and ensure that Elasticsearch died with the expected cause Path path = PathUtils.get(System.getProperty("log")); - try (JsonLogs jsonLogs = new JsonLogs(path)) { - final Iterator it = jsonLogs.iterator(); + try (Stream stream = JsonLogsStream.from(path)) { + final Iterator it = stream.iterator(); boolean fatalError = false; boolean fatalErrorInThreadExiting = false; diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index a10387f34adbf..2180b7bb06e86 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -38,6 +38,8 @@ import java.io.IOException; import java.nio.file.Path; import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; public class JsonLoggerTests extends ESTestCase { @@ -74,8 +76,9 @@ public void testJsonLayout() throws IOException, UserException { System.getProperty("file.separator") + System.getProperty("es.logs.cluster_name") + ".log"; + try (Stream stream = JsonLogsStream.from(PathUtils.get(path))) { + List jsonLogs = stream.collect(Collectors.toList()); - try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { assertThat(jsonLogs, Matchers.contains( logLine("file", Level.ERROR, "sample-name", "test", "This is an error message"), logLine("file", Level.WARN, "sample-name", "test", "This is a warning message"), @@ -84,6 +87,7 @@ public void testJsonLayout() throws IOException, UserException { logLine("file", 
Level.TRACE, "sample-name", "test", "This is a trace message") )); } + } @SuppressWarnings("unchecked") @@ -101,11 +105,12 @@ public void testPrefixLoggerInJson() throws IOException, UserException { System.getProperty("es.logs.cluster_name") + ".log"; - try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { + try (Stream stream = JsonLogsStream.from(PathUtils.get(path))) { + List jsonLogs = stream.collect(Collectors.toList()); assertThat(jsonLogs, Matchers.contains( logLine("file", Level.INFO, "sample-name", "shardIdLogger", "[indexName][123] This is an info message with a shardId"), logLine("file", Level.INFO, "sample-name", "prefixLogger", "PREFIX This is an info message with a prefix") - )); + )); } } @@ -131,7 +136,9 @@ public void testJsonInMessage() throws IOException, UserException { System.getProperty("es.logs.cluster_name") + ".log"; - try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { + + try (Stream stream = JsonLogsStream.from(PathUtils.get(path))) { + List jsonLogs = stream.collect(Collectors.toList()); assertThat(jsonLogs, Matchers.contains( logLine("file", Level.INFO, "sample-name", "test", json) )); @@ -149,7 +156,8 @@ public void testStacktrace() throws IOException, UserException { System.getProperty("es.logs.cluster_name") + ".log"; - try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { + try (Stream stream = JsonLogsStream.from(PathUtils.get(path))) { + List jsonLogs = stream.collect(Collectors.toList()); assertThat(jsonLogs, Matchers.contains( Matchers.allOf( logLine("file", Level.ERROR, "sample-name", "test", "error message"), @@ -182,7 +190,9 @@ public void testJsonInStacktraceMessageIsSplitted() throws IOException, UserExce System.getProperty("es.logs.cluster_name") + ".log"; - try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(path))) { + try (Stream stream = JsonLogsStream.from(PathUtils.get(path))) { + List jsonLogs = stream.collect(Collectors.toList()); + assertThat(jsonLogs, Matchers.contains( Matchers.allOf( 
//message field will have a single line with json escaped diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index 5520698a593e6..3c6f3721ad80a 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -19,32 +19,30 @@ package org.elasticsearch.common.logging; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.config.plugins.Plugin; import org.apache.logging.log4j.core.pattern.ConverterKeys; import org.apache.logging.log4j.core.pattern.LogEventPatternConverter; import org.apache.logging.log4j.core.pattern.PatternConverter; import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.util.LazyInitializable; import java.util.Locale; import java.util.concurrent.atomic.AtomicReference; - +/** + * Pattern converter to format the node_and_cluster_id variable into a json fields node.id and cluster.uuid + * Keeping those two fields together assures that the will be atomically set and become visible in logs at the same time + */ @Plugin(category = PatternConverter.CATEGORY, name = "NodeAndClusterIdConverter") @ConverterKeys({"node_and_cluster_id"}) -public final class NodeAndClusterIdConverter extends LogEventPatternConverter implements ClusterStateListener { +public final class NodeAndClusterIdConverter extends LogEventPatternConverter { private static LazyInitializable INSTANCE = - new LazyInitializable(() -> new NodeAndClusterIdConverter()); + new LazyInitializable(NodeAndClusterIdConverter::new); + + 
private static final AtomicReference nodeAndClusterIdsReference = new AtomicReference<>(); - private final Logger logger = LogManager.getLogger(getClass()); - private final AtomicReference nodeAndClusterIdsReference = new AtomicReference<>(); private final CloseableThreadLocal nodeAndClusterIds = new CloseableThreadLocal(); /** @@ -62,6 +60,23 @@ public NodeAndClusterIdConverter() { super("NodeName", "node_and_cluster_id"); } + /** + * Updates only once the clusterID and nodeId + * @param clusterUUID a clusterId received from cluster state update + * @param nodeId a nodeId received from cluster state update + * @return true if the update was for the first time (successful) or false if for another calls (does not updates) + */ + public static boolean setOnce(String clusterUUID, String nodeId) { + return nodeAndClusterIdsReference.compareAndSet(null, formatIds(clusterUUID, nodeId)); + } + + /** + * Formats the node.id and cluster.uuid into json fields. + * If it reads these values for the first time - it will get them from AtomicReference nodeAndClusterIdsReference + * all succeeding calls of this method will read this from ThreadLocal nodeAndClusterIds - which is supposed to cache that value to + * avoid expensive AtomicReference read (TODO discuss the performance) + * @param event - a log event is ignored in this method as it uses the nodeId and clusterId to format + */ @Override public void format(LogEvent event, StringBuilder toAppendTo) { if (nodeAndClusterIds.get() != null) { @@ -75,18 +90,6 @@ public void format(LogEvent event, StringBuilder toAppendTo) { // nodeId/clusterUuid not received yet, not appending } - @Override - public void clusterChanged(ClusterChangedEvent event) { - DiscoveryNode localNode = event.state().getNodes().getLocalNode(); - String clusterUUID = event.state().getMetaData().clusterUUID(); - String nodeId = localNode.getId(); - boolean wasSet = nodeAndClusterIdsReference.compareAndSet(null, formatIds(clusterUUID, nodeId)); - - if (wasSet) 
{ - logger.info("received first cluster state update. Setting nodeId={} and clusterUuid={}", nodeId, clusterUUID); - } - } - private static String formatIds(String clusterUUID, String nodeId) { return String.format(Locale.ROOT, "\"cluster.uuid\": \"%s\", \"node.id\": \"%s\"", clusterUUID, nodeId); } diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java new file mode 100644 index 0000000000000..d096c4ee9c688 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.node.DiscoveryNode; + +/** + * The {@link NodeAndClusterIdStateListener} listens to cluster state changes and ONLY when receives the first update + * it sets the clusterUUID and nodeID in log4j pattern converter {@link NodeAndClusterIdConverter} + */ +public class NodeAndClusterIdStateListener implements ClusterStateListener { + + private final Logger logger = LogManager.getLogger(getClass()); + + @Override + public void clusterChanged(ClusterChangedEvent event) { + DiscoveryNode localNode = event.state().getNodes().getLocalNode(); + String clusterUUID = event.state().getMetaData().clusterUUID(); + String nodeId = localNode.getId(); + + boolean wasSet = NodeAndClusterIdConverter.setOnce(clusterUUID, nodeId); + + if (wasSet) { + logger.info("received first cluster state update. 
Setting nodeId={} and clusterUuid={}", nodeId, clusterUUID); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 2d101aafe69ae..94b78a2abceac 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -67,7 +67,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.NodeAndClusterIdConverter; +import org.elasticsearch.common.logging.NodeAndClusterIdStateListener; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; @@ -439,7 +439,7 @@ protected Node( namedWriteableRegistry).stream()) .collect(Collectors.toList()); - NodeAndClusterIdConverter nodeAndClusterIdConverter = NodeAndClusterIdConverter.newInstance(new String[]{}); + NodeAndClusterIdStateListener nodeAndClusterIdConverter = new NodeAndClusterIdStateListener(); clusterService.addListener(nodeAndClusterIdConverter); ActionModule actionModule = new ActionModule(false, settings, clusterModule.getIndexNameExpressionResolver(), diff --git a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java index 54a3a61940d84..a8a5b06d93ba9 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -37,14 +37,13 @@ public class JsonThrowablePatternConverterTests extends ESTestCase { public void testNoStacktrace() throws IOException { LogEvent event = Log4jLogEvent.newBuilder() - .build(); + .build(); String result 
= format(event); - JsonLogs jsonLogs = new JsonLogs(new BufferedReader(new StringReader(result))); - JsonLogLine jsonLogLine = jsonLogs.stream() - .findFirst() - .orElseThrow(() -> new AssertionError("no logs parsed")); + JsonLogLine jsonLogLine = JsonLogsStream.from(new BufferedReader(new StringReader(result))) + .findFirst() + .orElseThrow(() -> new AssertionError("no logs parsed")); assertThat(jsonLogLine.stacktrace(), Matchers.nullValue()); } @@ -65,17 +64,17 @@ public void testStacktraceWithJson() throws IOException { "}"; Exception thrown = new Exception(json); LogEvent event = Log4jLogEvent.newBuilder() - .setMessage(new SimpleMessage("message")) - .setThrown(thrown) - .build(); + .setMessage(new SimpleMessage("message")) + .setThrown(thrown) + .build(); String result = format(event); //confirms exception is correctly parsed - JsonLogs jsonLogs = new JsonLogs(new BufferedReader(new StringReader(result))); - JsonLogLine jsonLogLine = jsonLogs.stream().findFirst() - .orElseThrow(() -> new AssertionError("no logs parsed")); + JsonLogLine jsonLogLine = JsonLogsStream.from(new BufferedReader(new StringReader(result))) + .findFirst() + .orElseThrow(() -> new AssertionError("no logs parsed")); int jsonLength = json.split("\n").length; int stacktraceLength = thrown.getStackTrace().length; diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java index 1b55b6378c234..fa8f3d7d27018 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java @@ -24,6 +24,11 @@ import java.util.List; + +/** + * Represents a single log line in a json format. 
+ * Parsing log lines with this class confirms the json format of logs + */ public class JsonLogLine { public static final ObjectParser PARSER = createParser(false); diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java index 3614ede3dfab5..e05003793e36f 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -27,6 +27,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.Iterator; +import java.util.stream.Stream; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isEmptyOrNullString; @@ -44,9 +45,8 @@ */ public abstract class JsonLogsIntegTestCase extends ESRestTestCase { /** - * Number of lines in the log file to check for the node name. We don't - * just check the entire log file because it could be quite long and - * exceptions don't include the node name. + * Number of lines in the log file to check for the node_name, node.id or cluster.uuid. 
We don't + * just check the entire log file because it could be quite long */ private static final int LINES_TO_CHECK = 10; @@ -64,31 +64,34 @@ public abstract class JsonLogsIntegTestCase extends ESRestTestCase { public void testElementsPresentOnAllLinesOfLog() throws IOException { - try (JsonLogs jsonLogs = new JsonLogs(openReader(getLogFile()))) { - - JsonLogLine firstLine = jsonLogs.stream() - .findFirst() - .orElseThrow(() -> new AssertionError("no logs at all?!")); - - jsonLogs.stream() - .limit(LINES_TO_CHECK) - .forEach(jsonLogLine -> { - assertThat(jsonLogLine.type(), not(isEmptyOrNullString())); - assertThat(jsonLogLine.timestamp(), not(isEmptyOrNullString())); - assertThat(jsonLogLine.level(), not(isEmptyOrNullString())); - assertThat(jsonLogLine.component(), not(isEmptyOrNullString())); - assertThat(jsonLogLine.message(), not(isEmptyOrNullString())); - - // all lines should have the same nodeName and clusterName - assertThat(jsonLogLine.nodeName(), equalTo(firstLine.nodeName())); - assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName())); - }); + JsonLogLine firstLine = findFirstLine(); + + try (Stream stream = JsonLogsStream.from(openReader(getLogFile()))) { + stream.limit(LINES_TO_CHECK) + .forEach(jsonLogLine -> { + assertThat(jsonLogLine.type(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.timestamp(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.level(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.component(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.message(), not(isEmptyOrNullString())); + + // all lines should have the same nodeName and clusterName + assertThat(jsonLogLine.nodeName(), equalTo(firstLine.nodeName())); + assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName())); + }); + } + } + + private JsonLogLine findFirstLine() throws IOException { + try (Stream stream = JsonLogsStream.from(openReader(getLogFile()))) { + return stream.findFirst() + .orElseThrow(() -> new 
AssertionError("no logs at all?!")); } } public void testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException { - try (JsonLogs jsonLogs = new JsonLogs(openReader(getLogFile()))) { - Iterator iterator = jsonLogs.iterator(); + try (Stream stream = JsonLogsStream.from(openReader(getLogFile()))) { + Iterator iterator = stream.iterator(); JsonLogLine firstLine = null; while (iterator.hasNext()) { diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogs.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java similarity index 66% rename from test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogs.java rename to test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java index 5ad4603b89933..d274b36314e2f 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogs.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java @@ -25,41 +25,52 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import java.io.BufferedReader; -import java.io.Closeable; import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Iterator; +import java.util.Spliterator; +import java.util.Spliterators; import java.util.stream.Stream; import java.util.stream.StreamSupport; -public class JsonLogs implements Iterable, Closeable { +/** + * Returns a stream of json log lines. 
+ * This is intended to be used for easy and readable assertions for logger tests + */ +public class JsonLogsStream { private final XContentParser parser; private final BufferedReader reader; - public JsonLogs(BufferedReader reader) throws IOException { + private JsonLogsStream(BufferedReader reader) throws IOException { this.reader = reader; this.parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, reader); } - public JsonLogs(Path path) throws IOException { - this(Files.newBufferedReader(path)); + public static Stream from(BufferedReader reader) throws IOException { + return new JsonLogsStream(reader).stream(); } - @Override - public Iterator iterator() { - return new JsonIterator(); + public static Stream from(Path path) throws IOException { + return new JsonLogsStream(Files.newBufferedReader(path)).stream(); } - @Override - public void close() throws IOException { - reader.close(); + public Stream stream() { + Spliterator spliterator = Spliterators.spliteratorUnknownSize(new JsonIterator(), Spliterator.ORDERED); + return StreamSupport.stream(spliterator, false) + .onClose(this::close); } - public Stream stream() { - return StreamSupport.stream(spliterator(), false); + private void close() { + try { + parser.close(); + reader.close(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } } private class JsonIterator implements Iterator { @@ -80,10 +91,8 @@ private void nextToken() { try { parser.nextToken(); } catch (IOException e) { - throw new RuntimeException(e); + throw new UncheckedIOException(e); } } } - - } diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index b1cc6ccd565a8..bf298d182ea3f 100644 --- 
a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -12,20 +12,21 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.JsonLogLine; -import org.elasticsearch.common.logging.JsonLogs; +import org.elasticsearch.common.logging.JsonLogsStream; import org.elasticsearch.common.settings.Settings; import org.hamcrest.FeatureMatcher; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import java.io.IOException; +import java.nio.file.Path; import java.util.Map; +import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.ObjectPath.eval; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.core.Is.is; public class FollowIndexIT extends ESCCRRestTestCase { @@ -83,10 +84,10 @@ public void testDowngradeRemoteClusterToBasic() throws Exception { // (does not work on windows...) 
if (Constants.WINDOWS == false) { assertBusy(() -> { - try (JsonLogs jsonLogs = new JsonLogs(PathUtils.get(System.getProperty("log")))) { - assertThat(jsonLogs, hasItem(autoFollowCoordinatorWarn())); + Path path = PathUtils.get(System.getProperty("log")); + try (Stream stream = JsonLogsStream.from(path)) { + assertTrue(stream.anyMatch(autoFollowCoordinatorWarn()::matches)); } - }); } }); @@ -97,13 +98,13 @@ public void testDowngradeRemoteClusterToBasic() throws Exception { } private Matcher autoFollowCoordinatorWarn() { - return new FeatureMatcher(Matchers.is(true),"autoFollowCoordinatorWarn","autoFollowCoordinatorWarn") { + return new FeatureMatcher(Matchers.is(true), "autoFollowCoordinatorWarn", "autoFollowCoordinatorWarn") { @Override protected Boolean featureValueOf(JsonLogLine actual) { return actual.level().equals("WARN") && actual.component().equals("o.e.x.c.a.AutoFollowCoordinator") && - actual.nodeName().equals("node-0") && + actual.nodeName().equals("node-0") && actual.message().contains("failure occurred while fetching cluster state for auto follow pattern [test_pattern]") && actual.stacktrace().contains("org.elasticsearch.ElasticsearchStatusException: can not fetch remote cluster state " + "as the remote cluster [leader_cluster] is not licensed for [ccr]; the license mode [BASIC]" + @@ -114,8 +115,8 @@ protected Boolean featureValueOf(JsonLogLine actual) { private void createNewIndexAndIndexDocs(RestClient client, String index) throws IOException { Settings settings = Settings.builder() - .put("index.soft_deletes.enabled", true) - .build(); + .put("index.soft_deletes.enabled", true) + .build(); Request request = new Request("PUT", "/" + index); request.setJsonEntity("{\"settings\": " + Strings.toString(settings) + ", \"mappings\": {\"_doc\": {\"properties\": {\"field\": {\"type\": \"keyword\"}}}} }"); From 1d0d66aaa466ebc7255742bb1e7766acb219b66f Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 14 Jan 2019 18:41:38 +0100 Subject: [PATCH 
30/64] follow up after Daniel's comments --- .../src/docker/config/log4j2.properties | 2 +- distribution/src/config/log4j2.properties | 2 +- .../common/logging/JsonLoggerTests.java | 57 ++++++------------- .../common/logging/ESJsonLayout.java | 35 ++++++++---- .../JsonThrowablePatternConverter.java | 11 ++-- .../logging/NodeAndClusterIdConverter.java | 29 ++-------- .../NodeAndClusterIdStateListener.java | 3 +- .../java/org/elasticsearch/node/Node.java | 1 - .../JsonThrowablePatternConverterTests.java | 3 - .../common/logging/JsonLogsIntegTestCase.java | 21 ++++--- .../common/logging/JsonLogsStream.java | 6 +- 11 files changed, 66 insertions(+), 104 deletions(-) diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties index bc6d842dbc159..a870435ba6818 100644 --- a/distribution/docker/src/docker/config/log4j2.properties +++ b/distribution/docker/src/docker/config/log4j2.properties @@ -41,4 +41,4 @@ appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlo logger.index_indexing_slowlog.name = index.indexing.slowlog.index logger.index_indexing_slowlog.level = trace logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling -logger.index_indexing_slowlog.additivity = false \ No newline at end of file +logger.index_indexing_slowlog.additivity = false diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 86d6f8c9e7623..3c2ce5dce86f3 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -87,4 +87,4 @@ appender.index_indexing_slowlog_rolling.strategy.max = 4 logger.index_indexing_slowlog.name = index.indexing.slowlog.index logger.index_indexing_slowlog.level = trace logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling -logger.index_indexing_slowlog.additivity = false \ No 
newline at end of file +logger.index_indexing_slowlog.additivity = false diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 2180b7bb06e86..04c0144087d42 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -52,6 +52,7 @@ public static void initNodeName() { public void setUp() throws Exception { super.setUp(); LogConfigurator.registerErrorListener(); + setupLogging("json_layout"); } @Override @@ -63,8 +64,6 @@ public void tearDown() throws Exception { @SuppressWarnings("unchecked") public void testJsonLayout() throws IOException, UserException { - setupLogging("json_layout"); - final Logger testLogger = LogManager.getLogger("test"); testLogger.error("This is an error message"); @@ -72,11 +71,8 @@ public void testJsonLayout() throws IOException, UserException { testLogger.info("This is an info message"); testLogger.debug("This is a debug message"); testLogger.trace("This is a trace message"); - final String path = System.getProperty("es.logs.base_path") + - System.getProperty("file.separator") + - System.getProperty("es.logs.cluster_name") + - ".log"; - try (Stream stream = JsonLogsStream.from(PathUtils.get(path))) { + final Path path = clusterLogsPath(); + try (Stream stream = JsonLogsStream.from(path)) { List jsonLogs = stream.collect(Collectors.toList()); assertThat(jsonLogs, Matchers.contains( @@ -92,20 +88,14 @@ public void testJsonLayout() throws IOException, UserException { @SuppressWarnings("unchecked") public void testPrefixLoggerInJson() throws IOException, UserException { - setupLogging("json_layout"); - Logger shardIdLogger = Loggers.getLogger("shardIdLogger", ShardId.fromString("[indexName][123]")); shardIdLogger.info("This is an info message with a shardId"); Logger prefixLogger = new 
PrefixLogger(LogManager.getLogger("prefixLogger"), "PREFIX"); prefixLogger.info("This is an info message with a prefix"); - final String path = System.getProperty("es.logs.base_path") + - System.getProperty("file.separator") + - System.getProperty("es.logs.cluster_name") + - ".log"; - - try (Stream stream = JsonLogsStream.from(PathUtils.get(path))) { + final Path path = clusterLogsPath(); + try (Stream stream = JsonLogsStream.from(path)) { List jsonLogs = stream.collect(Collectors.toList()); assertThat(jsonLogs, Matchers.contains( logLine("file", Level.INFO, "sample-name", "shardIdLogger", "[indexName][123] This is an info message with a shardId"), @@ -115,8 +105,6 @@ public void testPrefixLoggerInJson() throws IOException, UserException { } public void testJsonInMessage() throws IOException, UserException { - setupLogging("json_layout"); - final Logger testLogger = LogManager.getLogger("test"); String json = "{\n" + " \"terms\" : {\n" + @@ -131,13 +119,8 @@ public void testJsonInMessage() throws IOException, UserException { testLogger.info(json); - final String path = System.getProperty("es.logs.base_path") + - System.getProperty("file.separator") + - System.getProperty("es.logs.cluster_name") + - ".log"; - - - try (Stream stream = JsonLogsStream.from(PathUtils.get(path))) { + final Path path = clusterLogsPath(); + try (Stream stream = JsonLogsStream.from(path)) { List jsonLogs = stream.collect(Collectors.toList()); assertThat(jsonLogs, Matchers.contains( logLine("file", Level.INFO, "sample-name", "test", json) @@ -146,17 +129,11 @@ public void testJsonInMessage() throws IOException, UserException { } public void testStacktrace() throws IOException, UserException { - setupLogging("json_layout"); - final Logger testLogger = LogManager.getLogger("test"); testLogger.error("error message", new Exception("exception message", new RuntimeException("cause message"))); - final String path = System.getProperty("es.logs.base_path") + - System.getProperty("file.separator") + 
- System.getProperty("es.logs.cluster_name") + - ".log"; - - try (Stream stream = JsonLogsStream.from(PathUtils.get(path))) { + final Path path = clusterLogsPath(); + try (Stream stream = JsonLogsStream.from(path)) { List jsonLogs = stream.collect(Collectors.toList()); assertThat(jsonLogs, Matchers.contains( Matchers.allOf( @@ -169,8 +146,6 @@ public void testStacktrace() throws IOException, UserException { } public void testJsonInStacktraceMessageIsSplitted() throws IOException, UserException { - setupLogging("json_layout"); - final Logger testLogger = LogManager.getLogger("test"); String json = "{\n" + @@ -185,12 +160,8 @@ public void testJsonInStacktraceMessageIsSplitted() throws IOException, UserExce "}"; testLogger.error("error message " + json, new Exception(json)); - final String path = System.getProperty("es.logs.base_path") + - System.getProperty("file.separator") + - System.getProperty("es.logs.cluster_name") + - ".log"; - - try (Stream stream = JsonLogsStream.from(PathUtils.get(path))) { + final Path path = clusterLogsPath(); + try (Stream stream = JsonLogsStream.from(path)) { List jsonLogs = stream.collect(Collectors.toList()); assertThat(jsonLogs, Matchers.contains( @@ -205,12 +176,16 @@ public void testJsonInStacktraceMessageIsSplitted() throws IOException, UserExce } } + private Path clusterLogsPath() { + return PathUtils.get(System.getProperty("es.logs.base_path"), System.getProperty("es.logs.cluster_name"), ".log"); + } + private void setupLogging(final String config) throws IOException, UserException { setupLogging(config, Settings.EMPTY); } private void setupLogging(final String config, final Settings settings) throws IOException, UserException { - assert !Environment.PATH_HOME_SETTING.exists(settings); + assertFalse("Environment path.home variable should not be set", Environment.PATH_HOME_SETTING.exists(settings)); final Path configDir = getDataPath(config); final Settings mergedSettings = Settings.builder() .put(settings) diff --git 
a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java index 362d734510e52..e42c260e3ab46 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -33,21 +33,29 @@ import java.nio.charset.Charset; import java.util.Map; +/** + * Formats log events as strings in a json format. + *

+ * The class is wrapping the {@link PatternLayout} with a pattern to format into json. This gives more flexibility and control over how the + * log messages are formatted in {@link org.apache.logging.log4j.core.layout.JsonLayout} + */ @Plugin(name = "ESJsonLayout", category = Node.CATEGORY, elementType = Layout.ELEMENT_TYPE, printObject = true) public class ESJsonLayout extends AbstractStringLayout { - /** - * type - the type of logs. These represent appenders and help docker distinguish log streams. - * timestamp - ISO8601 with additional timezone ID - * level - INFO, WARN etc - * component - logger name, most of the times class name - * cluster.name - taken from sys:es.logs.cluster_name system property because it is always set - * node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml - * node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present + * Fields used in a pattern to format a json log line: + *

    + *
  • type - the type of logs. These represent appenders and help docker distinguish log streams. + *
  • timestamp - ISO8601 with additional timezone ID + *
  • level - INFO, WARN etc + *
  • component - logger name, most of the times class name + *
  • cluster.name - taken from sys:es.logs.cluster_name system property because it is always set + *
  • node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml + *
  • node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present * once clusterStateUpdate is first received - * message - a json escaped message. Multiline messages will be converted to single line with new line explicitly replaced to \n - * exceptionAsJson - in json as stacktrace. Only present when throwable is passed as a parameter to a Logger. Taken from - * JsonThrowablePatternConverter + *
  • message - a json escaped message. Multiline messages will be converted to single line with new line explicitly replaced to \n + *
  • exceptionAsJson - in json as a stacktrace field. Only present when throwable is passed as a parameter when using a logger. + * Taken from JsonThrowablePatternConverter + *
*/ private static final String PATTERN = "{" + "\"type\": \"${TYPE}\", " + @@ -101,6 +109,9 @@ public void encode(final LogEvent event, final ByteBufferDestination destination @Override public String toString() { - return patternLayout.toString(); + final StringBuilder sb = new StringBuilder("ESJsonLayout{"); + sb.append("patternLayout=").append(patternLayout); + sb.append('}'); + return sb.toString(); } } diff --git a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java index a0b0f60fccd65..8cbff6bc21595 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java @@ -30,23 +30,24 @@ import java.util.StringJoiner; /** - * This is a modification of a @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter - *

+ * Outputs the Throwable portion of the LoggingEvent as a Json formatted field with array * "exception": [ "stacktrace", "lines", "as", "array", "elements" ] + * + * Reusing @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter which already converts a Throwable from + * LoggingEvent into a multiline string */ @Plugin(name = "JsonThrowablePatternConverter", category = PatternConverter.CATEGORY) @ConverterKeys({"exceptionAsJson"}) public final class JsonThrowablePatternConverter extends ThrowablePatternConverter { - private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private final ExtendedThrowablePatternConverter throwablePatternConverter; /** - * Private constructor. Parameters only used to configure wrapped throwablePatternConverter + * Private as only expected to be used by log4j2 newInstance method */ private JsonThrowablePatternConverter(final Configuration config, final String[] options) { - super("CustomExtendedThrowable", "throwable", options, config); + super("JsonThrowablePatternConverter", "throwable", options, config); this.throwablePatternConverter = ExtendedThrowablePatternConverter.newInstance(config, options); } diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index 3c6f3721ad80a..c1df659872ede 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -24,8 +24,6 @@ import org.apache.logging.log4j.core.pattern.ConverterKeys; import org.apache.logging.log4j.core.pattern.LogEventPatternConverter; import org.apache.logging.log4j.core.pattern.PatternConverter; -import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.common.util.LazyInitializable; import java.util.Locale; import 
java.util.concurrent.atomic.AtomicReference; @@ -37,23 +35,13 @@ @Plugin(category = PatternConverter.CATEGORY, name = "NodeAndClusterIdConverter") @ConverterKeys({"node_and_cluster_id"}) public final class NodeAndClusterIdConverter extends LogEventPatternConverter { - - private static LazyInitializable INSTANCE = - new LazyInitializable(NodeAndClusterIdConverter::new); - private static final AtomicReference nodeAndClusterIdsReference = new AtomicReference<>(); - private final CloseableThreadLocal nodeAndClusterIds = new CloseableThreadLocal(); - /** * Called by log4j2 to initialize this converter. */ public static NodeAndClusterIdConverter newInstance(final String[] options) { - try { - return INSTANCE.getOrCompute(); - } catch (Exception e) { - return null; - } + return new NodeAndClusterIdConverter(); } public NodeAndClusterIdConverter() { @@ -62,8 +50,9 @@ public NodeAndClusterIdConverter() { /** * Updates only once the clusterID and nodeId + * * @param clusterUUID a clusterId received from cluster state update - * @param nodeId a nodeId received from cluster state update + * @param nodeId a nodeId received from cluster state update * @return true if the update was for the first time (successful) or false if for another calls (does not updates) */ public static boolean setOnce(String clusterUUID, String nodeId) { @@ -72,20 +61,13 @@ public static boolean setOnce(String clusterUUID, String nodeId) { /** * Formats the node.id and cluster.uuid into json fields. 
- * If it reads these values for the first time - it will get them from AtomicReference nodeAndClusterIdsReference - * all succeeding calls of this method will read this from ThreadLocal nodeAndClusterIds - which is supposed to cache that value to - * avoid expensive AtomicReference read (TODO discuss the performance) + * * @param event - a log event is ignored in this method as it uses the nodeId and clusterId to format */ @Override public void format(LogEvent event, StringBuilder toAppendTo) { - if (nodeAndClusterIds.get() != null) { - //using local value - toAppendTo.append(nodeAndClusterIds.get()); - } else if (nodeAndClusterIdsReference.get() != null) { - //reading a value from the listener for the first time + if (nodeAndClusterIdsReference.get() != null) { toAppendTo.append(nodeAndClusterIdsReference.get()); - nodeAndClusterIds.set(nodeAndClusterIdsReference.get()); } // nodeId/clusterUuid not received yet, not appending } @@ -94,4 +76,3 @@ private static String formatIds(String clusterUUID, String nodeId) { return String.format(Locale.ROOT, "\"cluster.uuid\": \"%s\", \"node.id\": \"%s\"", clusterUUID, nodeId); } } - diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java index d096c4ee9c688..eec31246db5f6 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -30,7 +30,6 @@ * it sets the clusterUUID and nodeID in log4j pattern converter {@link NodeAndClusterIdConverter} */ public class NodeAndClusterIdStateListener implements ClusterStateListener { - private final Logger logger = LogManager.getLogger(getClass()); @Override @@ -42,7 +41,7 @@ public void clusterChanged(ClusterChangedEvent event) { boolean wasSet = NodeAndClusterIdConverter.setOnce(clusterUUID, nodeId); if (wasSet) { 
- logger.info("received first cluster state update. Setting nodeId={} and clusterUuid={}", nodeId, clusterUUID); + logger.info("Received first cluster state update. Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID); } } } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 94b78a2abceac..6ec23890c87a8 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -185,7 +185,6 @@ * in order to use a {@link Client} to perform actions/operations against the cluster. */ public class Node implements Closeable { - public static final Setting WRITE_PORTS_FILE_SETTING = Setting.boolSetting("node.portsfile", false, Property.NodeScope); public static final Setting NODE_DATA_SETTING = Setting.boolSetting("node.data", true, Property.NodeScope); diff --git a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java index a8a5b06d93ba9..b76d0dcf09c95 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -32,13 +32,11 @@ */ public class JsonThrowablePatternConverterTests extends ESTestCase { - JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null); public void testNoStacktrace() throws IOException { LogEvent event = Log4jLogEvent.newBuilder() .build(); - String result = format(event); JsonLogLine jsonLogLine = JsonLogsStream.from(new BufferedReader(new StringReader(result))) @@ -48,7 +46,6 @@ public void testNoStacktrace() throws IOException { assertThat(jsonLogLine.stacktrace(), Matchers.nullValue()); } - public void testStacktraceWithJson() throws IOException { LogManager.getLogger().info("asdf"); 
diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java index e05003793e36f..7e6151272a78d 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -35,13 +35,13 @@ /** * Tests that extend this class verify that all json layout fields appear in the first few log lines after startup - * Fields available straight away : type, timestamp, level, component, message, nodeName, clusterName - * nodeId and clusterId are available later once the clusterState was received. - * NodeName, ClusterName, NodeId, ClusterId should be consistent across all log lines - * Note that this won't pass for clusters that don't have - * the node name defined in elasticsearch.yml and start with - * DEBUG or TRACE level logging. Those nodes log a few lines before they - * resolve the node name. + * Fields available from the first log line: type, timestamp, level, component, message, nodeName, clusterName + * whereas: nodeId and clusterId are available later once the clusterState was received. + * + * NodeName, ClusterName, NodeId, ClusterId should not change across all log lines + * + * Note that this won't pass for nodes in clusters that don't have the node name defined in elasticsearch.yml and start + * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by LogConfigurator.setNodeName */ public abstract class JsonLogsIntegTestCase extends ESRestTestCase { /** @@ -51,7 +51,7 @@ public abstract class JsonLogsIntegTestCase extends ESRestTestCase { private static final int LINES_TO_CHECK = 10; /** - * The node name to expect in the logs file. + * The node name to expect in the log file. 
*/ protected abstract org.hamcrest.Matcher nodeNameMatcher(); @@ -62,7 +62,6 @@ public abstract class JsonLogsIntegTestCase extends ESRestTestCase { */ protected abstract BufferedReader openReader(Path logFile); - public void testElementsPresentOnAllLinesOfLog() throws IOException { JsonLogLine firstLine = findFirstLine(); @@ -76,7 +75,7 @@ public void testElementsPresentOnAllLinesOfLog() throws IOException { assertThat(jsonLogLine.message(), not(isEmptyOrNullString())); // all lines should have the same nodeName and clusterName - assertThat(jsonLogLine.nodeName(), equalTo(firstLine.nodeName())); + assertThat(jsonLogLine.nodeName(), equalTo(nodeNameMatcher())); assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName())); }); } @@ -115,7 +114,7 @@ public void testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException { @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") private Path getLogFile() { String logFileString = System.getProperty("tests.logfile"); - if (null == logFileString) { + if (logFileString == null) { fail("tests.logfile must be set to run this test. It is automatically " + "set by gradle. 
If you must set it yourself then it should be the absolute path to the " + "log file."); diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java index d274b36314e2f..1a8743c3af5a8 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java @@ -61,10 +61,10 @@ public static Stream from(Path path) throws IOException { public Stream stream() { Spliterator spliterator = Spliterators.spliteratorUnknownSize(new JsonIterator(), Spliterator.ORDERED); return StreamSupport.stream(spliterator, false) - .onClose(this::close); + .onClose(this::close); } - private void close() { + private void close() { try { parser.close(); reader.close(); @@ -77,7 +77,7 @@ private class JsonIterator implements Iterator { @Override public boolean hasNext() { - return !parser.isClosed(); + return parser.isClosed() == false; } @Override From 0e84d027377f8a84af98048d536ace7f4d30ef02 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 14 Jan 2019 19:42:15 +0100 Subject: [PATCH 31/64] failing test --- distribution/src/config/log4j2.properties | 2 +- .../common/logging/JsonLoggerTests.java | 19 ++++++++++++------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 3c2ce5dce86f3..ef54b60c5b88b 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -11,7 +11,7 @@ appender.console.layout.type_name = console appender.rolling.type = RollingFile appender.rolling.name = rolling -appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log 
appender.rolling.layout.type = ESJsonLayout appender.rolling.layout.type_name = main diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 04c0144087d42..7ba5f8610f922 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -73,7 +73,7 @@ public void testJsonLayout() throws IOException, UserException { testLogger.trace("This is a trace message"); final Path path = clusterLogsPath(); try (Stream stream = JsonLogsStream.from(path)) { - List jsonLogs = stream.collect(Collectors.toList()); + List jsonLogs = collectLines(stream); assertThat(jsonLogs, Matchers.contains( logLine("file", Level.ERROR, "sample-name", "test", "This is an error message"), @@ -83,7 +83,6 @@ public void testJsonLayout() throws IOException, UserException { logLine("file", Level.TRACE, "sample-name", "test", "This is a trace message") )); } - } @SuppressWarnings("unchecked") @@ -96,7 +95,7 @@ public void testPrefixLoggerInJson() throws IOException, UserException { final Path path = clusterLogsPath(); try (Stream stream = JsonLogsStream.from(path)) { - List jsonLogs = stream.collect(Collectors.toList()); + List jsonLogs = collectLines(stream); assertThat(jsonLogs, Matchers.contains( logLine("file", Level.INFO, "sample-name", "shardIdLogger", "[indexName][123] This is an info message with a shardId"), logLine("file", Level.INFO, "sample-name", "prefixLogger", "PREFIX This is an info message with a prefix") @@ -121,7 +120,7 @@ public void testJsonInMessage() throws IOException, UserException { final Path path = clusterLogsPath(); try (Stream stream = JsonLogsStream.from(path)) { - List jsonLogs = stream.collect(Collectors.toList()); + List jsonLogs = collectLines(stream); assertThat(jsonLogs, Matchers.contains( logLine("file", Level.INFO, 
"sample-name", "test", json) )); @@ -134,7 +133,7 @@ public void testStacktrace() throws IOException, UserException { final Path path = clusterLogsPath(); try (Stream stream = JsonLogsStream.from(path)) { - List jsonLogs = stream.collect(Collectors.toList()); + List jsonLogs = collectLines(stream); assertThat(jsonLogs, Matchers.contains( Matchers.allOf( logLine("file", Level.ERROR, "sample-name", "test", "error message"), @@ -162,7 +161,7 @@ public void testJsonInStacktraceMessageIsSplitted() throws IOException, UserExce final Path path = clusterLogsPath(); try (Stream stream = JsonLogsStream.from(path)) { - List jsonLogs = stream.collect(Collectors.toList()); + List jsonLogs = collectLines(stream); assertThat(jsonLogs, Matchers.contains( Matchers.allOf( @@ -176,8 +175,14 @@ public void testJsonInStacktraceMessageIsSplitted() throws IOException, UserExce } } + private List collectLines(Stream stream) { + return stream + .skip(1)//skip the first line from super class + .collect(Collectors.toList()); + } + private Path clusterLogsPath() { - return PathUtils.get(System.getProperty("es.logs.base_path"), System.getProperty("es.logs.cluster_name"), ".log"); + return PathUtils.get(System.getProperty("es.logs.base_path"), System.getProperty("es.logs.cluster_name") + ".log"); } private void setupLogging(final String config) throws IOException, UserException { From 490b56dbcf9615e4a931dc30f0bcd230393395af Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 14 Jan 2019 19:42:58 +0100 Subject: [PATCH 32/64] unused imports --- .../common/logging/JsonLoggerTests.java | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 7ba5f8610f922..6609d2731e379 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ 
b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -24,7 +24,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LoggerContext; import org.apache.logging.log4j.core.config.Configurator; -import org.elasticsearch.cli.UserException; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -63,7 +62,7 @@ public void tearDown() throws Exception { } @SuppressWarnings("unchecked") - public void testJsonLayout() throws IOException, UserException { + public void testJsonLayout() throws IOException { final Logger testLogger = LogManager.getLogger("test"); testLogger.error("This is an error message"); @@ -86,7 +85,7 @@ public void testJsonLayout() throws IOException, UserException { } @SuppressWarnings("unchecked") - public void testPrefixLoggerInJson() throws IOException, UserException { + public void testPrefixLoggerInJson() throws IOException { Logger shardIdLogger = Loggers.getLogger("shardIdLogger", ShardId.fromString("[indexName][123]")); shardIdLogger.info("This is an info message with a shardId"); @@ -103,7 +102,7 @@ public void testPrefixLoggerInJson() throws IOException, UserException { } } - public void testJsonInMessage() throws IOException, UserException { + public void testJsonInMessage() throws IOException { final Logger testLogger = LogManager.getLogger("test"); String json = "{\n" + " \"terms\" : {\n" + @@ -127,7 +126,7 @@ public void testJsonInMessage() throws IOException, UserException { } } - public void testStacktrace() throws IOException, UserException { + public void testStacktrace() throws IOException { final Logger testLogger = LogManager.getLogger("test"); testLogger.error("error message", new Exception("exception message", new RuntimeException("cause message"))); @@ -144,7 +143,7 @@ public void testStacktrace() throws IOException, UserException { } } - public void 
testJsonInStacktraceMessageIsSplitted() throws IOException, UserException { + public void testJsonInStacktraceMessageIsSplitted() throws IOException { final Logger testLogger = LogManager.getLogger("test"); String json = "{\n" + @@ -185,11 +184,11 @@ private Path clusterLogsPath() { return PathUtils.get(System.getProperty("es.logs.base_path"), System.getProperty("es.logs.cluster_name") + ".log"); } - private void setupLogging(final String config) throws IOException, UserException { + private void setupLogging(final String config) throws IOException { setupLogging(config, Settings.EMPTY); } - private void setupLogging(final String config, final Settings settings) throws IOException, UserException { + private void setupLogging(final String config, final Settings settings) throws IOException { assertFalse("Environment path.home variable should not be set", Environment.PATH_HOME_SETTING.exists(settings)); final Path configDir = getDataPath(config); final Settings mergedSettings = Settings.builder() From 1119f5e9606086ab7193f23384ef2be6019a7fae Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 14 Jan 2019 20:35:41 +0100 Subject: [PATCH 33/64] failing tests --- qa/die-with-dignity/build.gradle | 2 +- .../org/elasticsearch/common/logging/JsonLoggerTests.java | 5 +++-- x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle index 26d567ca3ef6c..ae1acf3285b73 100644 --- a/qa/die-with-dignity/build.gradle +++ b/qa/die-with-dignity/build.gradle @@ -28,7 +28,7 @@ integTestRunner { systemProperty 'tests.security.manager', 'false' systemProperty 'tests.system_call_filter', 'false' systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}" - systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}.log" + systemProperty 'log', "${-> 
integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.log" systemProperty 'runtime.java.home', "${project.runtimeJavaHome}" } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 6609d2731e379..c8bd8b55eea55 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -24,6 +24,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LoggerContext; import org.apache.logging.log4j.core.config.Configurator; +import org.elasticsearch.cli.UserException; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -184,11 +185,11 @@ private Path clusterLogsPath() { return PathUtils.get(System.getProperty("es.logs.base_path"), System.getProperty("es.logs.cluster_name") + ".log"); } - private void setupLogging(final String config) throws IOException { + private void setupLogging(final String config) throws IOException, UserException { setupLogging(config, Settings.EMPTY); } - private void setupLogging(final String config, final Settings settings) throws IOException { + private void setupLogging(final String config, final Settings settings) throws IOException, UserException { assertFalse("Environment path.home variable should not be set", Environment.PATH_HOME_SETTING.exists(settings)); final Path configDir = getDataPath(config); final Settings mergedSettings = Settings.builder() diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index 9147d5251b5be..e3fabfa8fba89 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ 
b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -32,7 +32,7 @@ task writeJavaPolicy { javaPolicy.write( [ "grant {", - " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log\", \"read\";", + " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}_server.log\", \"read\";", "};" ].join("\n")) } @@ -54,7 +54,7 @@ followClusterTestRunner { systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" systemProperty 'tests.target_cluster', 'follow' systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log" + systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}_server.log" finalizedBy 'leaderClusterTestCluster#stop' } From 66b14206d9dcdb0d2bd3b0b7a2aafa42a4c0662c Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 14 Jan 2019 21:29:56 +0100 Subject: [PATCH 34/64] rename test log name --- distribution/archives/integ-test-zip/build.gradle | 2 +- qa/unconfigured-node-name/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index 30fa4d3c03805..fde5345338463 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -27,7 +27,7 @@ integTestRunner { */ if (System.getProperty("tests.rest.cluster") == null) { systemProperty 'tests.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log" + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log" } else 
{ systemProperty 'tests.logfile', '--external--' } diff --git a/qa/unconfigured-node-name/build.gradle b/qa/unconfigured-node-name/build.gradle index f8fb696e8ca85..033a067ed885f 100644 --- a/qa/unconfigured-node-name/build.gradle +++ b/qa/unconfigured-node-name/build.gradle @@ -30,5 +30,5 @@ integTestCluster { integTestRunner { systemProperty 'tests.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log" + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log" } From 1f91bad17b59117a281cb7c237ae2830dd3100be Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 15 Jan 2019 08:22:43 +0100 Subject: [PATCH 35/64] method rename --- .../elasticsearch/qa/die_with_dignity/DieWithDignityIT.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 14b8c5809386a..e0dcc06cf573c 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -104,7 +104,7 @@ public void testDieWithDignity() throws Exception { final JsonLogLine line = it.next(); if (isFatalError(line)) { fatalError = true; - } else if (isFatalErrorInThreadExiting(line) || isWarnExceptionReceived(line)) { + } else if (isFatalErrorInThreadExiting(line) || isErrorExceptionReceived(line)) { fatalErrorInThreadExiting = true; assertThat(line.stacktrace(), hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity"))); @@ -121,7 +121,7 @@ public void testDieWithDignity() throws Exception { } } - private boolean isWarnExceptionReceived(JsonLogLine line) { + private boolean isErrorExceptionReceived(JsonLogLine line) { return line.level().equals("ERROR") && 
line.component().equals("o.e.h.AbstractHttpServerTransport") && line.nodeName().equals("node-0") From bcf5f859971b47d3f856de79055b71982edb46e4 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 15 Jan 2019 08:34:21 +0100 Subject: [PATCH 36/64] rename name to server --- distribution/docker/src/docker/config/log4j2.properties | 2 +- distribution/src/config/log4j2.properties | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties index a870435ba6818..69b97715b001c 100644 --- a/distribution/docker/src/docker/config/log4j2.properties +++ b/distribution/docker/src/docker/config/log4j2.properties @@ -7,7 +7,7 @@ logger.action.level = debug appender.rolling.type = Console appender.rolling.name = rolling appender.rolling.layout.type = ESJsonLayout -appender.rolling.layout.type_name = main +appender.rolling.layout.type_name = server rootLogger.level = info rootLogger.appenderRef.rolling.ref = rolling diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index ef54b60c5b88b..ca79f98b6c2b9 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -13,7 +13,7 @@ appender.rolling.type = RollingFile appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log appender.rolling.layout.type = ESJsonLayout -appender.rolling.layout.type_name = main +appender.rolling.layout.type_name = server appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz appender.rolling.policies.type = Policies From c7bd58a07f4633f621a7a0486b19907cf9d50a95 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 15 Jan 2019 09:00:44 +0100 Subject: [PATCH 37/64] rename revert and level corrected --- 
.../elasticsearch/qa/die_with_dignity/DieWithDignityIT.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index e0dcc06cf573c..16398b380cfe1 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -104,7 +104,7 @@ public void testDieWithDignity() throws Exception { final JsonLogLine line = it.next(); if (isFatalError(line)) { fatalError = true; - } else if (isFatalErrorInThreadExiting(line) || isErrorExceptionReceived(line)) { + } else if (isFatalErrorInThreadExiting(line) || isWarnExceptionReceived(line)) { fatalErrorInThreadExiting = true; assertThat(line.stacktrace(), hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity"))); @@ -121,8 +121,8 @@ public void testDieWithDignity() throws Exception { } } - private boolean isErrorExceptionReceived(JsonLogLine line) { - return line.level().equals("ERROR") + private boolean isWarnExceptionReceived(JsonLogLine line) { + return line.level().equals("WARN") && line.component().equals("o.e.h.AbstractHttpServerTransport") && line.nodeName().equals("node-0") && line.message().contains("caught exception while handling client http traffic"); From 66c19429ea1a77c0eb73b7614166cbe90fb0b780 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 15 Jan 2019 10:36:23 +0100 Subject: [PATCH 38/64] wrong assertion --- .../org/elasticsearch/common/logging/JsonLogsIntegTestCase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java index 
7e6151272a78d..c6631b312aa47 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -75,7 +75,7 @@ public void testElementsPresentOnAllLinesOfLog() throws IOException { assertThat(jsonLogLine.message(), not(isEmptyOrNullString())); // all lines should have the same nodeName and clusterName - assertThat(jsonLogLine.nodeName(), equalTo(nodeNameMatcher())); + assertThat(jsonLogLine.nodeName(), nodeNameMatcher()); assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName())); }); } From b84cf9ad2b1c3414a711f96aab1a0ab82bed6858 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 15 Jan 2019 11:02:15 +0100 Subject: [PATCH 39/64] rename log name files in package tests --- qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats | 2 +- qa/vagrant/src/test/resources/packaging/utils/utils.bash | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats index db062eb337e74..a0cc58f644b5d 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats @@ -98,7 +98,7 @@ setup() { systemctl start elasticsearch.service wait_for_elasticsearch_status assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - assert_file_exist "/var/log/elasticsearch/elasticsearch.log" + assert_file_exist "/var/log/elasticsearch/elasticsearch_server.log" # Converts the epoch back in a human readable format run date --date=@$epoch "+%Y-%m-%d %H:%M:%S" diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index 18363a5ac6241..5924a475b7c36 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ 
b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -439,7 +439,7 @@ describe_port() { } debug_collect_logs() { - local es_logfile="$ESLOG/elasticsearch.log" + local es_logfile="$ESLOG/elasticsearch_server.log" local system_logfile='/var/log/messages' if [ -e "$es_logfile" ]; then From 12677cf77a4c4956990069565448018880e52c22 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 15 Jan 2019 16:13:23 +0100 Subject: [PATCH 40/64] addressing Daniels' second round of comments --- .../common/logging/ESJsonLayout.java | 23 ++++++++++--------- .../JsonThrowablePatternConverter.java | 3 +-- .../logging/NodeAndClusterIdConverter.java | 16 ++++++------- .../NodeAndClusterIdStateListener.java | 2 +- .../common/logging/JsonLogsIntegTestCase.java | 2 +- .../common/logging/JsonLogsStream.java | 5 ++-- 6 files changed, 25 insertions(+), 26 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java index e42c260e3ab46..af7cd81f202e3 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -44,17 +44,18 @@ public class ESJsonLayout extends AbstractStringLayout { /** * Fields used in a pattern to format a json log line: *

    - *
  • type - the type of logs. These represent appenders and help docker distinguish log streams. - *
  • timestamp - ISO8601 with additional timezone ID - *
  • level - INFO, WARN etc - *
  • component - logger name, most of the times class name - *
  • cluster.name - taken from sys:es.logs.cluster_name system property because it is always set - *
  • node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml - *
  • node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present - * once clusterStateUpdate is first received - *
  • message - a json escaped message. Multiline messages will be converted to single line with new line explicitly replaced to \n - *
  • exceptionAsJson - in json as a stacktrace field. Only present when throwable is passed as a parameter when using a logger. - * Taken from JsonThrowablePatternConverter + *
  • type - the type of logs. These represent appenders and help docker distinguish log streams.
  • + *
  • timestamp - ISO8601 with additional timezone ID
  • + *
  • level - INFO, WARN etc
  • + *
  • component - logger name, most of the times class name
  • + *
  • cluster.name - taken from sys:es.logs.cluster_name system property because it is always set
  • + *
  • node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml
  • + *
  • node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present + * once clusterStateUpdate is first received
  • + *
  • message - a json escaped message. Multiline messages will be converted to single line with new line explicitly + * replaced to \n
  • + *
  • exceptionAsJson - in json as a stacktrace field. Only present when throwable is passed as a parameter when using a logger. + * Taken from JsonThrowablePatternConverter
  • *
*/ private static final String PATTERN = "{" + diff --git a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java index 8cbff6bc21595..97e712512317b 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java @@ -40,7 +40,6 @@ @Plugin(name = "JsonThrowablePatternConverter", category = PatternConverter.CATEGORY) @ConverterKeys({"exceptionAsJson"}) public final class JsonThrowablePatternConverter extends ThrowablePatternConverter { - private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private final ExtendedThrowablePatternConverter throwablePatternConverter; /** @@ -95,7 +94,7 @@ private String formatJson(String consoleStacktrace) { } private String wrapAsJson(String line) { - byte[] bytes = JSON_STRING_ENCODER.quoteAsUTF8(line); + byte[] bytes = JsonStringEncoder.getInstance().quoteAsUTF8(line); return "\"" + new String(bytes, Charset.defaultCharset()) + "\""; } diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index c1df659872ede..130a8e1aadf69 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -29,23 +29,23 @@ import java.util.concurrent.atomic.AtomicReference; /** - * Pattern converter to format the node_and_cluster_id variable into a json fields node.id and cluster.uuid - * Keeping those two fields together assures that the will be atomically set and become visible in logs at the same time + * Pattern converter to format the node_and_cluster_id variable into a json fields node.id and 
cluster.uuid. + * Keeping those two fields together assures that they will be atomically set and become visible in logs at the same time. */ @Plugin(category = PatternConverter.CATEGORY, name = "NodeAndClusterIdConverter") @ConverterKeys({"node_and_cluster_id"}) public final class NodeAndClusterIdConverter extends LogEventPatternConverter { - private static final AtomicReference nodeAndClusterIdsReference = new AtomicReference<>(); + private static final AtomicReference nodeAndClusterId = new AtomicReference<>(); /** * Called by log4j2 to initialize this converter. */ - public static NodeAndClusterIdConverter newInstance(final String[] options) { + public static NodeAndClusterIdConverter newInstance(@SuppressWarnings("unused") final String[] options) { return new NodeAndClusterIdConverter(); } public NodeAndClusterIdConverter() { - super("NodeName", "node_and_cluster_id"); + super("NodeAndClusterId", "node_and_cluster_id"); } /** @@ -56,7 +56,7 @@ public NodeAndClusterIdConverter() { * @return true if the update was for the first time (successful) or false if for another calls (does not updates) */ public static boolean setOnce(String clusterUUID, String nodeId) { - return nodeAndClusterIdsReference.compareAndSet(null, formatIds(clusterUUID, nodeId)); + return nodeAndClusterId.compareAndSet(null, formatIds(clusterUUID, nodeId)); } /** @@ -66,8 +66,8 @@ public static boolean setOnce(String clusterUUID, String nodeId) { */ @Override public void format(LogEvent event, StringBuilder toAppendTo) { - if (nodeAndClusterIdsReference.get() != null) { - toAppendTo.append(nodeAndClusterIdsReference.get()); + if (nodeAndClusterId.get() != null) { + toAppendTo.append(nodeAndClusterId.get()); } // nodeId/clusterUuid not received yet, not appending } diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java index eec31246db5f6..f42012174fe56 
100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -41,7 +41,7 @@ public void clusterChanged(ClusterChangedEvent event) { boolean wasSet = NodeAndClusterIdConverter.setOnce(clusterUUID, nodeId); if (wasSet) { - logger.info("Eeceived first cluster state update. Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID); + logger.debug("Received first cluster state update. Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java index c6631b312aa47..a611547d81324 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -45,7 +45,7 @@ */ public abstract class JsonLogsIntegTestCase extends ESRestTestCase { /** - * Number of lines in the log file to check for the node_name, node.id or cluster.uuid. We don't + * Number of lines in the log file to check for the node.name, node.id or cluster.uuid. 
We don't * just check the entire log file because it could be quite long */ private static final int LINES_TO_CHECK = 10; diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java index 1a8743c3af5a8..28ad649f55a79 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java @@ -40,7 +40,6 @@ * This is intended to be used for easy and readable assertions for logger tests */ public class JsonLogsStream { - private final XContentParser parser; private final BufferedReader reader; @@ -55,10 +54,10 @@ public static Stream from(BufferedReader reader) throws IOException } public static Stream from(Path path) throws IOException { - return new JsonLogsStream(Files.newBufferedReader(path)).stream(); + return from(Files.newBufferedReader(path)); } - public Stream stream() { + private Stream stream() { Spliterator spliterator = Spliterators.spliteratorUnknownSize(new JsonIterator(), Spliterator.ORDERED); return StreamSupport.stream(spliterator, false) .onClose(this::close); From 5d78edfcbf0c7bbc4f75e7057bf76c7b90028b55 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 16 Jan 2019 15:10:24 +0100 Subject: [PATCH 41/64] javadocs --- docs/reference/setup/logging-config.asciidoc | 88 +++++++++++++------ .../NodeAndClusterIdStateListener.java | 2 +- .../common/logging/JsonLogsIntegTestCase.java | 11 ++- 3 files changed, 71 insertions(+), 30 deletions(-) diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index f477a14bb6d3d..9bcae8953d062 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -22,41 +22,44 @@ will resolve to `/var/log/elasticsearch/production.log`. 
-------------------------------------------------- appender.rolling.type = RollingFile <1> appender.rolling.name = rolling -appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log <2> -appender.rolling.layout.type = PatternLayout -appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n -appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz <3> +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log <2> +appender.rolling.layout.type = ESJsonLayout <3> +appender.rolling.layout.type_name = server <4> +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz <5> appender.rolling.policies.type = Policies -appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <4> -appender.rolling.policies.time.interval = 1 <5> -appender.rolling.policies.time.modulate = true <6> -appender.rolling.policies.size.type = SizeBasedTriggeringPolicy <7> -appender.rolling.policies.size.size = 256MB <8> +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <6> +appender.rolling.policies.time.interval = 1 <7> +appender.rolling.policies.time.modulate = true <8> +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy <9> +appender.rolling.policies.size.size = 256MB <10> appender.rolling.strategy.type = DefaultRolloverStrategy appender.rolling.strategy.fileIndex = nomax -appender.rolling.strategy.action.type = Delete <9> +appender.rolling.strategy.action.type = Delete <11> appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path} -appender.rolling.strategy.action.condition.type = IfFileName <10> -appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <11> -appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize 
<12> -appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <13> +appender.rolling.strategy.action.condition.type = IfFileName <12> +appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <13> +appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize <14> +appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <15> -------------------------------------------------- <1> Configure the `RollingFile` appender <2> Log to `/var/log/elasticsearch/production.log` -<3> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.log`; logs +<3> Use JSON layout. +<4> `type_name` is a flag populating the `type` field in a `ESJsonLayout`. + It can be used to distinguish different types of logs more easily when parsing them. +<5> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.log`; logs will be compressed on each roll and `i` will be incremented -<4> Use a time-based roll policy -<5> Roll logs on a daily basis -<6> Align rolls on the day boundary (as opposed to rolling every twenty-four +<6> Use a time-based roll policy +<7> Roll logs on a daily basis +<8> Align rolls on the day boundary (as opposed to rolling every twenty-four hours) -<7> Using a size-based roll policy -<8> Roll logs after 256 MB -<9> Use a delete action when rolling logs -<10> Only delete logs matching a file pattern -<11> The pattern is to only delete the main logs -<12> Only delete if we have accumulated too many compressed logs -<13> The size condition on the compressed logs is 2 GB +<9> Using a size-based roll policy +<10> Roll logs after 256 MB +<11> Use a delete action when rolling logs +<12> Only delete logs matching a file pattern +<13> The pattern is to only delete the main logs +<14> Only delete if we have accumulated too many compressed logs +<15> The size condition on the compressed logs is 2 GB NOTE: Log4j's configuration parsing gets confused by any extraneous whitespace; if you copy and 
paste any Log4j settings on this page, or enter any Log4j @@ -194,3 +197,38 @@ files (four rolled logs, and the active log). You can disable it in the `config/log4j2.properties` file by setting the deprecation log level to `error`. + + +[float] +[[json-logging]] +=== JSON log format + +To make parsing Elasticsearch logs easier, logs are now printed in a JSON format. +This is configured by a Log4J layout property `appender.rolling.layout.type = ESJsonLayout`. +This layout requires a `type_name` attribute to be set which is used to distinguish +logs streams when parsing. +[source,properties] +-------------------------------------------------- +appender.rolling.layout.type = ESJsonLayout +appender.rolling.layout.type_name = server +-------------------------------------------------- +:es-json-layout-java-doc: {elasticsearch-javadoc}/org/elasticsearch/common/logging/ESJsonLayout.html + +Each line contains a single JSON document with the properties configured in `ESJsonLayout`. +See this class {es-json-layout-java-doc}[javadoc] for more details. +However if a JSON document contains exception, it will be formatted as a multiline. +First line will contain regular properties and subsequent lines will contain +stacktrace formatted as a JSON array. + + +NOTE: You can still use your own custom layout. To do that replace the line +`appender.rolling.layout.type` with a different layout. 
See sample below: +[source,properties] +-------------------------------------------------- +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz +-------------------------------------------------- \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java index f42012174fe56..c31038736d2b2 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -30,7 +30,7 @@ * it sets the clusterUUID and nodeID in log4j pattern converter {@link NodeAndClusterIdConverter} */ public class NodeAndClusterIdStateListener implements ClusterStateListener { - private final Logger logger = LogManager.getLogger(getClass()); + private final Logger logger = LogManager.getLogger(NodeAndClusterIdStateListener.class); @Override public void clusterChanged(ClusterChangedEvent event) { diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java index a611547d81324..77162316a28b6 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -35,13 +35,16 @@ /** * Tests that extend this class verify that all json layout fields appear in 
the first few log lines after startup - * Fields available from the first log line: type, timestamp, level, component, message, nodeName, clusterName - * whereas: nodeId and clusterId are available later once the clusterState was received. + * Fields available upon process startup: type, timestamp, level, component, + * message, node.name, cluster.name. + * Whereas node.id and cluster.uuid are available later once the first clusterState has been received. * - * NodeName, ClusterName, NodeId, ClusterId should not change across all log lines + * + * node.name, cluster.name, node.id, cluster.uuid + * should not change across all log lines * * Note that this won't pass for nodes in clusters that don't have the node name defined in elasticsearch.yml and start - * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by LogConfigurator.setNodeName + * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by LogConfigurator.setNodeName. 
 */ public abstract class JsonLogsIntegTestCase extends ESRestTestCase { /** From 4cbea2bc3359fa1e178bfea7a67fd15d2f523ccd Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 17 Jan 2019 16:57:59 +0100 Subject: [PATCH 42/64] additional test verifying old config move jsonLoggerTest to separate module to avoid clash with EvilLoggerTests --- .../test/rest/LogsWithCustomPatternIT.java | 48 ++++++++++++ qa/logging-config/build.gradle | 39 ++++++++++ qa/logging-config/custom-log4j2.properties | 31 ++++++++ .../common/logging/JsonLoggerTests.java | 4 + .../custom_logging/CustomLoggingConfigIT.java | 78 +++++++++++++++++++ .../logging/json_layout/log4j2.properties | 0 .../src/test/resources/plugin-security.policy | 4 + 7 files changed, 204 insertions(+) create mode 100644 distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java create mode 100644 qa/logging-config/build.gradle create mode 100644 qa/logging-config/custom-log4j2.properties rename qa/{evil-tests => logging-config}/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java (97%) create mode 100644 qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java rename qa/{evil-tests => logging-config}/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties (100%) create mode 100644 qa/logging-config/src/test/resources/plugin-security.policy diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java new file mode 100644 index 0000000000000..75b8a06d09568 --- /dev/null +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import org.elasticsearch.common.logging.JsonLogsIntegTestCase; +import org.hamcrest.Matcher; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; + +import static org.hamcrest.Matchers.is; + +public class LogsWithCustomPatternIT extends ESRestTestCase { + + protected BufferedReader openReader(Path logFile) { + assumeFalse("Skipping test because it is being run against an external cluster.", + logFile.getFileName().toString().equals("--external--")); + return AccessController.doPrivileged((PrivilegedAction) () -> { + try { + return Files.newBufferedReader(logFile, StandardCharsets.UTF_8); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } +} diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle new file mode 100644 index 0000000000000..ea5822c4c8226 --- /dev/null +++ b/qa/logging-config/build.gradle @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' +apply plugin: 'elasticsearch.standalone-test' + +integTestCluster { + autoSetInitialMasterNodes = false + autoSetHostsProvider = false + // overriding the distribution log4j configuration file with a custom configuration + extraConfigFile 'log4j2.properties', 'custom-log4j2.properties' +} + +integTestRunner { + systemProperty 'tests.logfile', + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log" +} + +unitTest { + systemProperty 'tests.security.manager', 'false' +} \ No newline at end of file diff --git a/qa/logging-config/custom-log4j2.properties b/qa/logging-config/custom-log4j2.properties new file mode 100644 index 0000000000000..e45fb641b51c8 --- /dev/null +++ b/qa/logging-config/custom-log4j2.properties @@ -0,0 +1,31 @@ + +status = error + +# log action execution errors for easier debugging +logger.action.name = org.elasticsearch.action +logger.action.level = debug + +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern 
=%notEmpty{%node_name} %notEmpty{%node_and_cluster_id} %notEmpty{${sys:es.logs.cluster_name}} %m%n + +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling.policies.size.size = 128MB +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.fileIndex = nomax +appender.rolling.strategy.action.type = Delete +appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path} +appender.rolling.strategy.action.condition.type = IfFileName +appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* +appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize +appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB + +rootLogger.level = info +rootLogger.appenderRef.rolling.ref = rolling \ No newline at end of file diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java similarity index 97% rename from qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java rename to qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index c8bd8b55eea55..b484ba90a4da3 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -41,6 +41,10 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +/** + * This test confirms JSON log structure is properly formatted and can be parsed. 
+ * It has to be in a org.elasticsearch.common.logging package to use PrefixLogger + */ public class JsonLoggerTests extends ESTestCase { @BeforeClass diff --git a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java new file mode 100644 index 0000000000000..0939336b8a1f4 --- /dev/null +++ b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.qa.custom_logging; + +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Stream; + +/** + * This test verifies that Elasticsearch can startup successfully with a custom logging config using variables introduced in + * ESJsonLayout + * The intention is to confirm that users can still run their Elasticsearch instances with previous configurations. + */ +public class CustomLoggingConfigIT extends ESRestTestCase { + + Pattern LINE_STARTED = Pattern.compile(".*node-0 \"cluster.uuid\": \"\\w*\", \"node.id\": \"\\w*\" \\w* started.*"); + + public void testSuccessfulStartupWithCustomConfig() { + Stream stringStream = openReader(getLogFile()); + + boolean startedLineFound = stringStream.anyMatch(line -> isStartupLine(line)); + assertTrue("Log line indicating successful startup not found", startedLineFound); + } + + private boolean isStartupLine(String line) { + Matcher matcher = LINE_STARTED.matcher(line); + return matcher.matches(); + } + + protected Stream openReader(Path logFile) { + return AccessController.doPrivileged((PrivilegedAction>) () -> { + try { + return Files.lines(logFile, StandardCharsets.UTF_8); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); + } + + @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") + private Path getLogFile() { + String logFileString = System.getProperty("tests.logfile"); + if (logFileString == null) { + fail("tests.logfile must be set to run this test. It is automatically " + + "set by gradle. 
If you must set it yourself then it should be the absolute path to the " + + "log file."); + } + return Paths.get(logFileString); + } +} diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties b/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties similarity index 100% rename from qa/evil-tests/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties rename to qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties diff --git a/qa/logging-config/src/test/resources/plugin-security.policy b/qa/logging-config/src/test/resources/plugin-security.policy new file mode 100644 index 0000000000000..d0d865c4ede16 --- /dev/null +++ b/qa/logging-config/src/test/resources/plugin-security.policy @@ -0,0 +1,4 @@ +grant { + // Needed to read the log file + permission java.io.FilePermission "${tests.logfile}", "read"; +}; From f780f749029f1198ca9060ce8580b15bb0e1f537 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 17 Jan 2019 17:26:40 +0100 Subject: [PATCH 43/64] unused import --- .../org/elasticsearch/test/rest/LogsWithCustomPatternIT.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java index 75b8a06d09568..39e6e9fceee25 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java @@ -19,9 +19,6 @@ package org.elasticsearch.test.rest; -import org.elasticsearch.common.logging.JsonLogsIntegTestCase; -import org.hamcrest.Matcher; - import java.io.BufferedReader; import java.io.IOException; import 
java.nio.charset.StandardCharsets; @@ -30,8 +27,6 @@ import java.security.AccessController; import java.security.PrivilegedAction; -import static org.hamcrest.Matchers.is; - public class LogsWithCustomPatternIT extends ESRestTestCase { protected BufferedReader openReader(Path logFile) { From 8c3c7660ae31c7d9d2e61e49dc8b6eb181c452e0 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 17 Jan 2019 17:46:37 +0100 Subject: [PATCH 44/64] empty unused test --- .../test/rest/LogsWithCustomPatternIT.java | 43 ------------------- 1 file changed, 43 deletions(-) delete mode 100644 distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java deleted file mode 100644 index 39e6e9fceee25..0000000000000 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/LogsWithCustomPatternIT.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.test.rest; - -import java.io.BufferedReader; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedAction; - -public class LogsWithCustomPatternIT extends ESRestTestCase { - - protected BufferedReader openReader(Path logFile) { - assumeFalse("Skipping test because it is being run against an external cluster.", - logFile.getFileName().toString().equals("--external--")); - return AccessController.doPrivileged((PrivilegedAction) () -> { - try { - return Files.newBufferedReader(logFile, StandardCharsets.UTF_8); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - } -} From c3ebfc071b97a40e3df4794b7ece10387bf5e43f Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 18 Jan 2019 09:19:28 +0100 Subject: [PATCH 45/64] small fixes after review --- qa/logging-config/build.gradle | 7 +++++-- qa/logging-config/custom-log4j2.properties | 2 +- .../qa/custom_logging/CustomLoggingConfigIT.java | 11 ++++++----- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle index ea5822c4c8226..5efcc474d009b 100644 --- a/qa/logging-config/build.gradle +++ b/qa/logging-config/build.gradle @@ -25,7 +25,10 @@ apply plugin: 'elasticsearch.standalone-test' integTestCluster { autoSetInitialMasterNodes = false autoSetHostsProvider = false - // overriding the distribution log4j configuration file with a custom configuration + /** + * The intention is to provide a custom log4j configuration where layout is an old style pattern and confirm that Elasticsearch + * can successfully startup. see CustomLoggingIT. 
+ */ extraConfigFile 'log4j2.properties', 'custom-log4j2.properties' } @@ -36,4 +39,4 @@ integTestRunner { unitTest { systemProperty 'tests.security.manager', 'false' -} \ No newline at end of file +} diff --git a/qa/logging-config/custom-log4j2.properties b/qa/logging-config/custom-log4j2.properties index e45fb641b51c8..b225d7cd550cf 100644 --- a/qa/logging-config/custom-log4j2.properties +++ b/qa/logging-config/custom-log4j2.properties @@ -28,4 +28,4 @@ appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulated appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB rootLogger.level = info -rootLogger.appenderRef.rolling.ref = rolling \ No newline at end of file +rootLogger.appenderRef.rolling.ref = rolling diff --git a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java index 0939336b8a1f4..3df9d36c013e1 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java @@ -41,21 +41,22 @@ */ public class CustomLoggingConfigIT extends ESRestTestCase { - Pattern LINE_STARTED = Pattern.compile(".*node-0 \"cluster.uuid\": \"\\w*\", \"node.id\": \"\\w*\" \\w* started.*"); + private static final Pattern NODE_STARTED = Pattern.compile( + ".*node-0 \"cluster.uuid\": \"\\w*\", \"node.id\": \"\\w*\" \\w* started.*"); public void testSuccessfulStartupWithCustomConfig() { Stream stringStream = openReader(getLogFile()); - boolean startedLineFound = stringStream.anyMatch(line -> isStartupLine(line)); - assertTrue("Log line indicating successful startup not found", startedLineFound); + assertTrue("Log line indicating successful startup not found", + stringStream.anyMatch(line -> isStartupLine(line))); } private boolean isStartupLine(String line) { - Matcher 
matcher = LINE_STARTED.matcher(line); + Matcher matcher = NODE_STARTED.matcher(line); return matcher.matches(); } - protected Stream openReader(Path logFile) { + private Stream openReader(Path logFile) { return AccessController.doPrivileged((PrivilegedAction>) () -> { try { return Files.lines(logFile, StandardCharsets.UTF_8); From 18aca44293df1a2d8b3b84a341a681ee1b5e7d5a Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 18 Jan 2019 10:46:43 +0100 Subject: [PATCH 46/64] comment cleanup after review --- qa/logging-config/build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle index 5efcc474d009b..0abdc1247514a 100644 --- a/qa/logging-config/build.gradle +++ b/qa/logging-config/build.gradle @@ -26,8 +26,8 @@ integTestCluster { autoSetInitialMasterNodes = false autoSetHostsProvider = false /** - * The intention is to provide a custom log4j configuration where layout is an old style pattern and confirm that Elasticsearch - * can successfully startup. see CustomLoggingIT. + * Provide a custom log4j configuration where layout is an old style pattern and confirm that Elasticsearch + * can successfully startup. 
*/ extraConfigFile 'log4j2.properties', 'custom-log4j2.properties' } From a4d9336740816067d4baad62e3bc41c862600369 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 18 Jan 2019 15:03:45 +0100 Subject: [PATCH 47/64] documentation and licence fix --- docs/reference/setup/logging-config.asciidoc | 4 +-- .../JsonThrowablePatternConverterTests.java | 29 ++++++++++--------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index 9bcae8953d062..360984e38fac6 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -216,8 +216,8 @@ appender.rolling.layout.type_name = server Each line contains a single JSON document with the properties configured in `ESJsonLayout`. See this class {es-json-layout-java-doc}[javadoc] for more details. -However if a JSON document contains exception, it will be formatted as a multiline. -First line will contain regular properties and subsequent lines will contain +However if a JSON document contains an exception, it will be printted over multiple lines. +The first line will contain regular properties and subsequent lines will contain the stacktrace formatted as a JSON array. 
diff --git a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java index b76d0dcf09c95..d72b598f02865 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -1,17 +1,3 @@ -package org.elasticsearch.common.logging; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.LogEvent; -import org.apache.logging.log4j.core.impl.Log4jLogEvent; -import org.apache.logging.log4j.message.SimpleMessage; -import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.StringReader; - -import static org.hamcrest.Matchers.equalTo; /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -31,6 +17,21 @@ * under the License. 
*/ +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.impl.Log4jLogEvent; +import org.apache.logging.log4j.message.SimpleMessage; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.StringReader; + +import static org.hamcrest.Matchers.equalTo; + public class JsonThrowablePatternConverterTests extends ESTestCase { JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null); From 6bc7d1cca9cc652aef8f65d5b17dd44be442b0dd Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 18 Jan 2019 16:08:33 +0100 Subject: [PATCH 48/64] typo printted -> printed --- docs/reference/setup/logging-config.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index 360984e38fac6..2054b69c9b919 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -216,7 +216,7 @@ appender.rolling.layout.type_name = server Each line contains a single JSON document with the properties configured in `ESJsonLayout`. See this class {es-json-layout-java-doc}[javadoc] for more details. -However if a JSON document contains an exception, it will be printted over multiple lines. +However if a JSON document contains an exception, it will be printed over multiple lines. The first line will contain regular properties and subsequent lines will contain the stacktrace formatted as a JSON array. 
From 7c208c8901c59e60a5f01aca1fd0606589df1b7d Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 21 Jan 2019 09:55:06 +0100 Subject: [PATCH 49/64] setOnce argument ordering test pattern simplification --- .../qa/custom_logging/CustomLoggingConfigIT.java | 3 +-- .../common/logging/NodeAndClusterIdConverter.java | 4 ++-- .../common/logging/NodeAndClusterIdStateListener.java | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java index 3df9d36c013e1..04144420e250e 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java @@ -40,9 +40,8 @@ * The intention is to confirm that users can still run their Elasticsearch instances with previous configurations. 
*/ public class CustomLoggingConfigIT extends ESRestTestCase { - private static final Pattern NODE_STARTED = Pattern.compile( - ".*node-0 \"cluster.uuid\": \"\\w*\", \"node.id\": \"\\w*\" \\w* started.*"); + ".*node-0 \"cluster.uuid\": \"\\w*\", \"node.id\": \"\\w*\".*started.*"); public void testSuccessfulStartupWithCustomConfig() { Stream stringStream = openReader(getLogFile()); diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index 130a8e1aadf69..f65acbc3689a8 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -51,11 +51,11 @@ public NodeAndClusterIdConverter() { /** * Updates only once the clusterID and nodeId * - * @param clusterUUID a clusterId received from cluster state update * @param nodeId a nodeId received from cluster state update + * @param clusterUUID a clusterId received from cluster state update * @return true if the update was for the first time (successful) or false if for another calls (does not updates) */ - public static boolean setOnce(String clusterUUID, String nodeId) { + public static boolean setOnce(String nodeId, String clusterUUID) { return nodeAndClusterId.compareAndSet(null, formatIds(clusterUUID, nodeId)); } diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java index c31038736d2b2..b130bc04e3a9a 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -38,7 +38,7 @@ public void clusterChanged(ClusterChangedEvent event) { String clusterUUID = event.state().getMetaData().clusterUUID(); 
String nodeId = localNode.getId(); - boolean wasSet = NodeAndClusterIdConverter.setOnce(clusterUUID, nodeId); + boolean wasSet = NodeAndClusterIdConverter.setOnce(nodeId, clusterUUID); if (wasSet) { logger.debug("Received first cluster state update. Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID); From a6e81fa7ac22426ab173b683f6e3ab915b87b613 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 22 Jan 2019 09:04:42 +0100 Subject: [PATCH 50/64] javadoc typo --- .../elasticsearch/common/logging/NodeAndClusterIdConverter.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index f65acbc3689a8..8a88face56756 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -29,7 +29,7 @@ import java.util.concurrent.atomic.AtomicReference; /** - * Pattern converter to format the node_and_cluster_id variable into a json fields node.id and cluster.uuid. + * Pattern converter to format the node_and_cluster_id variable into JSON fields node.id and cluster.uuid. * Keeping those two fields together assures that they will be atomically set and become visible in logs at the same time. 
*/ @Plugin(category = PatternConverter.CATEGORY, name = "NodeAndClusterIdConverter") From f01a4ff9bfaf3a8d548b944d3650418064304a5e Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 23 Jan 2019 15:41:02 +0100 Subject: [PATCH 51/64] refactor cluster state listeners to use setOnce --- .../logging/NodeAndClusterIdConverter.java | 11 ++-- .../NodeAndClusterIdStateListener.java | 54 ++++++++++++++----- .../java/org/elasticsearch/node/Node.java | 5 +- .../common/logging/JsonLogsIntegTestCase.java | 2 + 4 files changed, 50 insertions(+), 22 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index 8a88face56756..0a14351bfb7e7 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -24,6 +24,7 @@ import org.apache.logging.log4j.core.pattern.ConverterKeys; import org.apache.logging.log4j.core.pattern.LogEventPatternConverter; import org.apache.logging.log4j.core.pattern.PatternConverter; +import org.apache.lucene.util.SetOnce; import java.util.Locale; import java.util.concurrent.atomic.AtomicReference; @@ -35,7 +36,7 @@ @Plugin(category = PatternConverter.CATEGORY, name = "NodeAndClusterIdConverter") @ConverterKeys({"node_and_cluster_id"}) public final class NodeAndClusterIdConverter extends LogEventPatternConverter { - private static final AtomicReference nodeAndClusterId = new AtomicReference<>(); + private static final SetOnce nodeAndClusterId = new SetOnce<>(); /** * Called by log4j2 to initialize this converter. @@ -49,14 +50,14 @@ public NodeAndClusterIdConverter() { } /** - * Updates only once the clusterID and nodeId + * Updates only once the clusterID and nodeId. + * Note: Should only be called once. 
Subsequent executions will throw {@link org.apache.lucene.util.SetOnce.AlreadySetException}. * * @param nodeId a nodeId received from cluster state update * @param clusterUUID a clusterId received from cluster state update - * @return true if the update was for the first time (successful) or false if for another calls (does not updates) */ - public static boolean setOnce(String nodeId, String clusterUUID) { - return nodeAndClusterId.compareAndSet(null, formatIds(clusterUUID, nodeId)); + public static void setOnce(String nodeId, String clusterUUID) { + nodeAndClusterId.set(formatIds(clusterUUID, nodeId)); } /** diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java index b130bc04e3a9a..db260557ed1f2 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -21,27 +21,53 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.unit.TimeValue; /** - * The {@link NodeAndClusterIdStateListener} listens to cluster state changes and ONLY when receives the first update - * it sets the clusterUUID and nodeID in log4j pattern converter {@link NodeAndClusterIdConverter} + * The {@link NodeAndClusterIdStateListener} listens to cluster state changes and ONLY when receives the first update + * it sets the clusterUUID and nodeID in log4j pattern converter {@link NodeAndClusterIdConverter} + * Once the first update is received, it will automatically be de-registered from subsequent 
updates */ -public class NodeAndClusterIdStateListener implements ClusterStateListener { +public class NodeAndClusterIdStateListener implements ClusterStateObserver.Listener { private final Logger logger = LogManager.getLogger(NodeAndClusterIdStateListener.class); - @Override - public void clusterChanged(ClusterChangedEvent event) { - DiscoveryNode localNode = event.state().getNodes().getLocalNode(); - String clusterUUID = event.state().getMetaData().clusterUUID(); - String nodeId = localNode.getId(); + private NodeAndClusterIdStateListener() {} + + /** + * Subscribes for the first cluster state update where nodeId and clusterId is set. + */ + public static void subscribeTo(ClusterStateObserver observer) { + observer.waitForNextChange(new NodeAndClusterIdStateListener(), NodeAndClusterIdStateListener::nodeIdAndClusterIdSet); + } + + private static boolean nodeIdAndClusterIdSet(ClusterState clusterState) { + return getNodeId(clusterState) != null && getClusterUUID(clusterState) != null; + } + + private static String getClusterUUID(ClusterState state) { + return state.getMetaData().clusterUUID(); + } - boolean wasSet = NodeAndClusterIdConverter.setOnce(nodeId, clusterUUID); + private static String getNodeId(ClusterState state) { + DiscoveryNode localNode = state.getNodes().getLocalNode(); + return localNode.getId(); + } + + @Override + public void onNewClusterState(ClusterState state) { + String nodeId = getNodeId(state); + String clusterUUID = getClusterUUID(state); - if (wasSet) { - logger.debug("Received first cluster state update. Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID); - } + NodeAndClusterIdConverter.setOnce(nodeId, clusterUUID); + logger.debug("Received first cluster state update. 
Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID); } + + @Override + public void onClusterServiceClose() {} + + @Override + public void onTimeout(TimeValue timeout) {} } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 6ec23890c87a8..71bb5fd1c3ad1 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -438,9 +438,6 @@ protected Node( namedWriteableRegistry).stream()) .collect(Collectors.toList()); - NodeAndClusterIdStateListener nodeAndClusterIdConverter = new NodeAndClusterIdStateListener(); - clusterService.addListener(nodeAndClusterIdConverter); - ActionModule actionModule = new ActionModule(false, settings, clusterModule.getIndexNameExpressionResolver(), settingsModule.getIndexScopedSettings(), settingsModule.getClusterSettings(), settingsModule.getSettingsFilter(), threadPool, pluginsService.filterPlugins(ActionPlugin.class), client, circuitBreakerService, usageService); @@ -717,6 +714,8 @@ public void onTimeout(TimeValue timeout) { } catch (InterruptedException e) { throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state"); } + + NodeAndClusterIdStateListener.subscribeTo(observer); } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java index 77162316a28b6..d9ba80d6b35de 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -67,6 +67,7 @@ public abstract class JsonLogsIntegTestCase extends ESRestTestCase { public void testElementsPresentOnAllLinesOfLog() throws IOException { JsonLogLine firstLine = findFirstLine(); + assertNotNull(firstLine); try (Stream stream = 
JsonLogsStream.from(openReader(getLogFile()))) { stream.limit(LINES_TO_CHECK) @@ -102,6 +103,7 @@ public void testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException { firstLine = jsonLogLine; } } + assertNotNull(firstLine); //once the nodeId and clusterId are received, they should be the same on remaining lines From 53ead5986bdd83388a0678af3ba8b5a2a1aa9fb3 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 23 Jan 2019 15:59:50 +0100 Subject: [PATCH 52/64] removed empty line --- distribution/docker/src/docker/config/log4j2.properties | 1 - 1 file changed, 1 deletion(-) diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties index 69b97715b001c..73420a047edc5 100644 --- a/distribution/docker/src/docker/config/log4j2.properties +++ b/distribution/docker/src/docker/config/log4j2.properties @@ -37,7 +37,6 @@ appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog - logger.index_indexing_slowlog.name = index.indexing.slowlog.index logger.index_indexing_slowlog.level = trace logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling From 88d13687f5c14a2d61eb234ea3f4f0a94da9cd4f Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 23 Jan 2019 18:12:11 +0100 Subject: [PATCH 53/64] methods rename and cleanup --- .../logging/NodeAndClusterIdConverter.java | 5 ++--- .../logging/NodeAndClusterIdStateListener.java | 18 ++++++++++-------- .../main/java/org/elasticsearch/node/Node.java | 2 +- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java index 0a14351bfb7e7..27437947870b4 
100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -27,7 +27,6 @@ import org.apache.lucene.util.SetOnce; import java.util.Locale; -import java.util.concurrent.atomic.AtomicReference; /** * Pattern converter to format the node_and_cluster_id variable into JSON fields node.id and cluster.uuid. @@ -51,12 +50,12 @@ public NodeAndClusterIdConverter() { /** * Updates only once the clusterID and nodeId. - * Note: Should only be called once. Subsequent executions will throw {@link org.apache.lucene.util.SetOnce.AlreadySetException}. + * Subsequent executions will throw {@link org.apache.lucene.util.SetOnce.AlreadySetException}. * * @param nodeId a nodeId received from cluster state update * @param clusterUUID a clusterId received from cluster state update */ - public static void setOnce(String nodeId, String clusterUUID) { + public static void setNodeIdAndClusterId(String nodeId, String clusterUUID) { nodeAndClusterId.set(formatIds(clusterUUID, nodeId)); } diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java index db260557ed1f2..0cc5be0bff6eb 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -28,8 +28,8 @@ /** * The {@link NodeAndClusterIdStateListener} listens to cluster state changes and ONLY when receives the first update - * it sets the clusterUUID and nodeID in log4j pattern converter {@link NodeAndClusterIdConverter} - * Once the first update is received, it will automatically be de-registered from subsequent updates + * it sets the clusterUUID and nodeID in log4j pattern converter {@link NodeAndClusterIdConverter}. 
+ * Once the first update is received, it will automatically be de-registered from subsequent updates. */ public class NodeAndClusterIdStateListener implements ClusterStateObserver.Listener { private final Logger logger = LogManager.getLogger(NodeAndClusterIdStateListener.class); @@ -37,13 +37,15 @@ public class NodeAndClusterIdStateListener implements ClusterStateObserver.Liste private NodeAndClusterIdStateListener() {} /** - * Subscribes for the first cluster state update where nodeId and clusterId is set. + * Subscribes for the first cluster state update where nodeId and clusterId is present + * and sets these values in {@link NodeAndClusterIdConverter} + * @param observer - the observer that the listener subscribes for update */ - public static void subscribeTo(ClusterStateObserver observer) { - observer.waitForNextChange(new NodeAndClusterIdStateListener(), NodeAndClusterIdStateListener::nodeIdAndClusterIdSet); + public static void getAndSetNodeIdAndClusterId(ClusterStateObserver observer) { + observer.waitForNextChange(new NodeAndClusterIdStateListener(), NodeAndClusterIdStateListener::isNodeAndClusterIdPresent); } - private static boolean nodeIdAndClusterIdSet(ClusterState clusterState) { + private static boolean isNodeAndClusterIdPresent(ClusterState clusterState) { return getNodeId(clusterState) != null && getClusterUUID(clusterState) != null; } @@ -61,8 +63,8 @@ public void onNewClusterState(ClusterState state) { String nodeId = getNodeId(state); String clusterUUID = getClusterUUID(state); - NodeAndClusterIdConverter.setOnce(nodeId, clusterUUID); - logger.debug("Received first cluster state update. Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID); + logger.debug("Received cluster state update. 
Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID); + NodeAndClusterIdConverter.setNodeIdAndClusterId(nodeId, clusterUUID); } @Override diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 71bb5fd1c3ad1..6b6dd0ae342ef 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -715,8 +715,8 @@ public void onTimeout(TimeValue timeout) { throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state"); } - NodeAndClusterIdStateListener.subscribeTo(observer); } + NodeAndClusterIdStateListener.getAndSetNodeIdAndClusterId(observer); } injector.getInstance(HttpServerTransport.class).start(); From 72bd776e3cc1a58b950d7d55c8fc725f8fafc216 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 23 Jan 2019 18:12:48 +0100 Subject: [PATCH 54/64] javadoc typo --- .../common/logging/NodeAndClusterIdStateListener.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java index 0cc5be0bff6eb..ff9ce487f0dc3 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -38,8 +38,8 @@ private NodeAndClusterIdStateListener() {} /** * Subscribes for the first cluster state update where nodeId and clusterId is present - * and sets these values in {@link NodeAndClusterIdConverter} - * @param observer - the observer that the listener subscribes for update + * and sets these values in {@link NodeAndClusterIdConverter}. + * @param observer - the observer that the listener subscribes for an update. 
*/ public static void getAndSetNodeIdAndClusterId(ClusterStateObserver observer) { observer.waitForNextChange(new NodeAndClusterIdStateListener(), NodeAndClusterIdStateListener::isNodeAndClusterIdPresent); From c1a4206f33d7ee9f7c885710b71b6da822a28a9c Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 24 Jan 2019 11:59:28 +0100 Subject: [PATCH 55/64] keep the old appenders and let the nodeIDlistener start earlier --- distribution/src/config/log4j2.properties | 96 +++++++++++++++++-- .../NodeAndClusterIdStateListener.java | 14 +-- .../java/org/elasticsearch/node/Node.java | 9 +- 3 files changed, 104 insertions(+), 15 deletions(-) diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index ca79f98b6c2b9..2231d7c8a6729 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -6,9 +6,10 @@ logger.action.level = debug appender.console.type = Console appender.console.name = console -appender.console.layout.type = ESJsonLayout -appender.console.layout.type_name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +######## Server JSON ######## appender.rolling.type = RollingFile appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log @@ -30,11 +31,37 @@ appender.rolling.strategy.action.condition.type = IfFileName appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB +############################################## +######## Server - old style pattern ######## +appender.rolling_old.type = RollingFile +appender.rolling_old.name = rolling_old +appender.rolling_old.fileName = 
${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.old_log +appender.rolling_old.layout.type = PatternLayout +appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n + +appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.old_log.gz +appender.rolling_old.policies.type = Policies +appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling_old.policies.time.interval = 1 +appender.rolling_old.policies.time.modulate = true +appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling_old.policies.size.size = 128MB +appender.rolling_old.strategy.type = DefaultRolloverStrategy +appender.rolling_old.strategy.fileIndex = nomax +appender.rolling_old.strategy.action.type = Delete +appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path} +appender.rolling_old.strategy.action.condition.type = IfFileName +appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* +appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize +appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB +############################################## rootLogger.level = info rootLogger.appenderRef.console.ref = console rootLogger.appenderRef.rolling.ref = rolling +rootLogger.appenderRef.rolling_old.ref = rolling_old +######## Deprecation JSON ######## appender.deprecation_rolling.type = RollingFile appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log @@ -47,44 +74,101 @@ appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.deprecation_rolling.policies.size.size = 1GB appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy 
appender.deprecation_rolling.strategy.max = 4 +############################################## +######## Rolling - old style pattern ######## +appender.deprecation_rolling_old.type = RollingFile +appender.deprecation_rolling_old.name = deprecation_rolling_old +appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.old_log +appender.deprecation_rolling_old.layout.type = PatternLayout +appender.deprecation_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +appender.deprecation_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _deprecation-%i.old_log.gz +appender.deprecation_rolling_old.policies.type = Policies +appender.deprecation_rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling_old.policies.size.size = 1GB +appender.deprecation_rolling_old.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling_old.strategy.max = 4 +############################################## logger.deprecation.name = org.elasticsearch.deprecation logger.deprecation.level = warn logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling_old logger.deprecation.additivity = false +######## Search slowlog JSON ######## appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling -appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ + .cluster_name}_index_search_slowlog.log appender.index_search_slowlog_rolling.layout.type = ESJsonLayout appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog 
-appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ + .cluster_name}_index_search_slowlog-%i.log.gz appender.index_search_slowlog_rolling.policies.type = Policies appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.index_search_slowlog_rolling.policies.size.size = 1GB appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy appender.index_search_slowlog_rolling.strategy.max = 4 +############################################## +######## Search slowlog - old style pattern ######## +appender.index_search_slowlog_rolling_old.type = RollingFile +appender.index_search_slowlog_rolling_old.name = index_search_slowlog_rolling_old +appender.index_search_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_search_slowlog.old_log +appender.index_search_slowlog_rolling_old.layout.type = PatternLayout +appender.index_search_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +appender.index_search_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_search_slowlog-%i.old_log.gz +appender.index_search_slowlog_rolling_old.policies.type = Policies +appender.index_search_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.index_search_slowlog_rolling_old.policies.size.size = 1GB +appender.index_search_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy +appender.index_search_slowlog_rolling_old.strategy.max = 4 +############################################## logger.index_search_slowlog_rolling.name = index.search.slowlog logger.index_search_slowlog_rolling.level = trace 
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling_old.ref = index_search_slowlog_rolling_old logger.index_search_slowlog_rolling.additivity = false +######## Indexing slowlog JSON ######## appender.index_indexing_slowlog_rolling.type = RollingFile appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling -appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log +appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_indexing_slowlog.log appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog -appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_indexing_slowlog-%i.log.gz appender.index_indexing_slowlog_rolling.policies.type = Policies appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.index_indexing_slowlog_rolling.policies.size.size = 1GB appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy appender.index_indexing_slowlog_rolling.strategy.max = 4 +############################################## +######## Indexing slowlog - old style pattern ######## +appender.index_indexing_slowlog_rolling_old.type = RollingFile +appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + 
_index_indexing_slowlog.old_log +appender.index_indexing_slowlog_rolling_old.layout.type = PatternLayout +appender.index_indexing_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n + +appender.index_indexing_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_indexing_slowlog-%i.old_log.gz +appender.index_indexing_slowlog_rolling_old.policies.type = Policies +appender.index_indexing_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.index_indexing_slowlog_rolling_old.policies.size.size = 1GB +appender.index_indexing_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy +appender.index_indexing_slowlog_rolling_old.strategy.max = 4 +############################################## logger.index_indexing_slowlog.name = index.indexing.slowlog.index logger.index_indexing_slowlog.level = trace logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old logger.index_indexing_slowlog.additivity = false diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java index ff9ce487f0dc3..e8f636238447a 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -23,8 +23,9 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; -import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.unit.TimeValue; +import 
org.elasticsearch.common.util.concurrent.ThreadContext; /** * The {@link NodeAndClusterIdStateListener} listens to cluster state changes and ONLY when receives the first update @@ -32,16 +33,18 @@ * Once the first update is received, it will automatically be de-registered from subsequent updates. */ public class NodeAndClusterIdStateListener implements ClusterStateObserver.Listener { - private final Logger logger = LogManager.getLogger(NodeAndClusterIdStateListener.class); + private static final Logger logger = LogManager.getLogger(NodeAndClusterIdStateListener.class); private NodeAndClusterIdStateListener() {} /** * Subscribes for the first cluster state update where nodeId and clusterId is present * and sets these values in {@link NodeAndClusterIdConverter}. - * @param observer - the observer that the listener subscribes for an update. */ - public static void getAndSetNodeIdAndClusterId(ClusterStateObserver observer) { + public static void getAndSetNodeIdAndClusterId(ClusterService clusterService, ThreadContext threadContext) { + ClusterState clusterState = clusterService.state(); + ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, threadContext); + observer.waitForNextChange(new NodeAndClusterIdStateListener(), NodeAndClusterIdStateListener::isNodeAndClusterIdPresent); } @@ -54,8 +57,7 @@ private static String getClusterUUID(ClusterState state) { } private static String getNodeId(ClusterState state) { - DiscoveryNode localNode = state.getNodes().getLocalNode(); - return localNode.getId(); + return state.getNodes().getLocalNodeId(); } @Override diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 6b6dd0ae342ef..8c7d5e7059461 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -685,10 +685,15 @@ public Node start() throws NodeValidationException { 
transportService.acceptIncomingRequests(); discovery.startInitialJoin(); final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings); + NodeAndClusterIdStateListener.getAndSetNodeIdAndClusterId(clusterService, + injector.getInstance(ThreadPool.class).getThreadContext()); + if (initialStateTimeout.millis() > 0) { final ThreadPool thread = injector.getInstance(ThreadPool.class); ClusterState clusterState = clusterService.state(); - ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext()); + ClusterStateObserver observer = + new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext()); + if (clusterState.nodes().getMasterNodeId() == null) { logger.debug("waiting to join the cluster. timeout [{}]", initialStateTimeout); final CountDownLatch latch = new CountDownLatch(1); @@ -714,9 +719,7 @@ public void onTimeout(TimeValue timeout) { } catch (InterruptedException e) { throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state"); } - } - NodeAndClusterIdStateListener.getAndSetNodeIdAndClusterId(observer); } injector.getInstance(HttpServerTransport.class).start(); From b12f8ee6ad4860e824457156f909495c00bff48c Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 24 Jan 2019 16:04:44 +0100 Subject: [PATCH 56/64] improved documentation and more robust test --- distribution/src/config/log4j2.properties | 34 +++++++++---------- docs/reference/setup/logging-config.asciidoc | 27 +++++++++++++++ .../custom_logging/CustomLoggingConfigIT.java | 14 ++++---- 3 files changed, 52 insertions(+), 23 deletions(-) diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 2231d7c8a6729..0ebc99fdf44ca 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -9,7 +9,7 @@ appender.console.name = console 
appender.console.layout.type = PatternLayout appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n -######## Server JSON ######## +######## Server JSON ############################ appender.rolling.type = RollingFile appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log @@ -31,8 +31,8 @@ appender.rolling.strategy.action.condition.type = IfFileName appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB -############################################## -######## Server - old style pattern ######## +################################################ +######## Server - old style pattern ########### appender.rolling_old.type = RollingFile appender.rolling_old.name = rolling_old appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.old_log @@ -54,14 +54,14 @@ appender.rolling_old.strategy.action.condition.type = IfFileName appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB -############################################## +################################################ rootLogger.level = info rootLogger.appenderRef.console.ref = console rootLogger.appenderRef.rolling.ref = rolling rootLogger.appenderRef.rolling_old.ref = rolling_old -######## Deprecation JSON ######## +######## Deprecation JSON ####################### appender.deprecation_rolling.type = RollingFile appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.fileName = 
${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log @@ -74,8 +74,8 @@ appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.deprecation_rolling.policies.size.size = 1GB appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy appender.deprecation_rolling.strategy.max = 4 -############################################## -######## Rolling - old style pattern ######## +################################################# +######## Deprecation - old style pattern ####### appender.deprecation_rolling_old.type = RollingFile appender.deprecation_rolling_old.name = deprecation_rolling_old appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.old_log @@ -89,14 +89,14 @@ appender.deprecation_rolling_old.policies.size.type = SizeBasedTriggeringPolicy appender.deprecation_rolling_old.policies.size.size = 1GB appender.deprecation_rolling_old.strategy.type = DefaultRolloverStrategy appender.deprecation_rolling_old.strategy.max = 4 -############################################## +################################################# logger.deprecation.name = org.elasticsearch.deprecation logger.deprecation.level = warn logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling_old logger.deprecation.additivity = false -######## Search slowlog JSON ######## +######## Search slowlog JSON #################### appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ @@ -111,8 +111,8 @@ appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPo appender.index_search_slowlog_rolling.policies.size.size = 1GB 
appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy appender.index_search_slowlog_rolling.strategy.max = 4 -############################################## -######## Search slowlog - old style pattern ######## +################################################# +######## Search slowlog - old style pattern #### appender.index_search_slowlog_rolling_old.type = RollingFile appender.index_search_slowlog_rolling_old.name = index_search_slowlog_rolling_old appender.index_search_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ @@ -127,14 +127,14 @@ appender.index_search_slowlog_rolling_old.policies.size.type = SizeBasedTriggeri appender.index_search_slowlog_rolling_old.policies.size.size = 1GB appender.index_search_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy appender.index_search_slowlog_rolling_old.strategy.max = 4 -############################################## +################################################# logger.index_search_slowlog_rolling.name = index.search.slowlog logger.index_search_slowlog_rolling.level = trace logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling_old.ref = index_search_slowlog_rolling_old logger.index_search_slowlog_rolling.additivity = false -######## Indexing slowlog JSON ######## +######## Indexing slowlog JSON ################## appender.index_indexing_slowlog_rolling.type = RollingFile appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ @@ -149,10 +149,10 @@ appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggering appender.index_indexing_slowlog_rolling.policies.size.size = 1GB appender.index_indexing_slowlog_rolling.strategy.type = 
DefaultRolloverStrategy appender.index_indexing_slowlog_rolling.strategy.max = 4 -############################################## -######## Indexing slowlog - old style pattern ######## +################################################# +######## Indexing slowlog - old style pattern ## appender.index_indexing_slowlog_rolling_old.type = RollingFile -appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling_old appender.index_indexing_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ _index_indexing_slowlog.old_log appender.index_indexing_slowlog_rolling_old.layout.type = PatternLayout @@ -165,7 +165,7 @@ appender.index_indexing_slowlog_rolling_old.policies.size.type = SizeBasedTrigge appender.index_indexing_slowlog_rolling_old.policies.size.size = 1GB appender.index_indexing_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy appender.index_indexing_slowlog_rolling_old.strategy.max = 4 -############################################## +################################################# logger.index_indexing_slowlog.name = index.indexing.slowlog.index logger.index_indexing_slowlog.level = trace diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index 2054b69c9b919..b3bec68c0ca45 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -20,6 +20,7 @@ will resolve to `/var/log/elasticsearch/production.log`. 
[source,properties] -------------------------------------------------- +######## Server JSON ############################ appender.rolling.type = RollingFile <1> appender.rolling.name = rolling appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log <2> @@ -40,6 +41,30 @@ appender.rolling.strategy.action.condition.type = IfFileName <12> appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <13> appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize <14> appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <15> +################################################ +######## Server - old style pattern ########### +appender.rolling_old.type = RollingFile +appender.rolling_old.name = rolling_old +appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.old_log <16> +appender.rolling_old.layout.type = PatternLayout +appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n + +appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.old_log.gz +appender.rolling_old.policies.type = Policies +appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling_old.policies.time.interval = 1 +appender.rolling_old.policies.time.modulate = true +appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling_old.policies.size.size = 128MB +appender.rolling_old.strategy.type = DefaultRolloverStrategy +appender.rolling_old.strategy.fileIndex = nomax +appender.rolling_old.strategy.action.type = Delete +appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path} +appender.rolling_old.strategy.action.condition.type = IfFileName +appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* 
+appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize +appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB +################################################ -------------------------------------------------- <1> Configure the `RollingFile` appender @@ -60,6 +85,8 @@ appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <15> <13> The pattern is to only delete the main logs <14> Only delete if we have accumulated too many compressed logs <15> The size condition on the compressed logs is 2 GB +<16> The configuration for `old style` pattern appenders. These logs will be saved in `*.old_log` files and if archived will be in `* +.old_log.gz` files. Note that these should be considered deprecated and will be removed in the future. NOTE: Log4j's configuration parsing gets confused by any extraneous whitespace; if you copy and paste any Log4j settings on this page, or enter any Log4j diff --git a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java index 04144420e250e..9d5f6af59f6e2 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java @@ -43,11 +43,13 @@ public class CustomLoggingConfigIT extends ESRestTestCase { private static final Pattern NODE_STARTED = Pattern.compile( ".*node-0 \"cluster.uuid\": \"\\w*\", \"node.id\": \"\\w*\".*started.*"); - public void testSuccessfulStartupWithCustomConfig() { - Stream stringStream = openReader(getLogFile()); - - assertTrue("Log line indicating successful startup not found", - stringStream.anyMatch(line -> isStartupLine(line))); + public void testSuccessfulStartupWithCustomConfig() throws Exception { + assertBusy(() -> { + try (Stream lines = 
streamLogLines(getLogFile())) { + assertTrue("Log line indicating successful startup not found\n", + lines.anyMatch(line -> isStartupLine(line))); + } + }); } private boolean isStartupLine(String line) { @@ -55,7 +57,7 @@ private boolean isStartupLine(String line) { return matcher.matches(); } - private Stream openReader(Path logFile) { + private Stream streamLogLines(Path logFile) { return AccessController.doPrivileged((PrivilegedAction>) () -> { try { return Files.lines(logFile, StandardCharsets.UTF_8); From 1951e2afb9754d82d93f4a948295d3ffbbd2ba50 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Thu, 24 Jan 2019 17:53:09 +0100 Subject: [PATCH 57/64] split logging config in 2 for docs --- docs/reference/setup/logging-config.asciidoc | 36 +++++++------------- 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index b3bec68c0ca45..b8791ffaeec3d 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -42,29 +42,6 @@ appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize <14> appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <15> ################################################ -######## Server - old style pattern ########### -appender.rolling_old.type = RollingFile -appender.rolling_old.name = rolling_old -appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.old_log <16> -appender.rolling_old.layout.type = PatternLayout -appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n - -appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.old_log.gz -appender.rolling_old.policies.type = Policies 
-appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy -appender.rolling_old.policies.time.interval = 1 -appender.rolling_old.policies.time.modulate = true -appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy -appender.rolling_old.policies.size.size = 128MB -appender.rolling_old.strategy.type = DefaultRolloverStrategy -appender.rolling_old.strategy.fileIndex = nomax -appender.rolling_old.strategy.action.type = Delete -appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path} -appender.rolling_old.strategy.action.condition.type = IfFileName -appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* -appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize -appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB -################################################ -------------------------------------------------- <1> Configure the `RollingFile` appender @@ -85,7 +62,18 @@ appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB <13> The pattern is to only delete the main logs <14> Only delete if we have accumulated too many compressed logs <15> The size condition on the compressed logs is 2 GB -<16> The configuration for `old style` pattern appenders. 
These logs will be saved in `*.old_log` files and if archived will be in `* +[source,properties] +-------------------------------------------------- +######## Server - old style pattern ########### +appender.rolling_old.type = RollingFile +appender.rolling_old.name = rolling_old +appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.old_log <1> +appender.rolling_old.layout.type = PatternLayout +appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.old_log.gz + +-------------------------------------------------- +<1> The configuration for `old style` pattern appenders. These logs will be saved in `*.old_log` files and if archived will be in `* .old_log.gz` files. Note that these should be considered deprecated and will be removed in the future. NOTE: Log4j's configuration parsing gets confused by any extraneous whitespace; From 15021058c126fc482028a4fc20bfd2e5fcb57b1c Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 25 Jan 2019 08:52:06 +0100 Subject: [PATCH 58/64] enable log print out for this test --- .../qa/custom_logging/CustomLoggingConfigIT.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java index 9d5f6af59f6e2..b61a4a7391696 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java @@ -32,6 +32,7 @@ import java.security.PrivilegedAction; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; import java.util.stream.Stream; 
/** @@ -46,7 +47,8 @@ public class CustomLoggingConfigIT extends ESRestTestCase { public void testSuccessfulStartupWithCustomConfig() throws Exception { assertBusy(() -> { try (Stream lines = streamLogLines(getLogFile())) { - assertTrue("Log line indicating successful startup not found\n", + assertTrue("Log line indicating successful startup not found\n"+streamLogLines(getLogFile()) + .collect(Collectors.joining("\n")), lines.anyMatch(line -> isStartupLine(line))); } }); From 7eaaadac2e8c660f125544629f7df1258804d555 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 25 Jan 2019 10:03:50 +0100 Subject: [PATCH 59/64] rename logs to .json --- .../archives/integ-test-zip/build.gradle | 2 +- distribution/src/config/log4j2.properties | 32 +++++++++---------- docs/reference/setup/logging-config.asciidoc | 15 +++++---- qa/die-with-dignity/build.gradle | 2 +- qa/logging-config/build.gradle | 2 +- qa/logging-config/custom-log4j2.properties | 2 +- qa/unconfigured-node-name/build.gradle | 2 +- .../resources/packaging/tests/60_systemd.bats | 2 +- .../test/resources/packaging/utils/utils.bash | 2 +- .../downgrade-to-basic-license/build.gradle | 5 +-- 10 files changed, 34 insertions(+), 32 deletions(-) diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index fde5345338463..d79971907b50d 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -27,7 +27,7 @@ integTestRunner { */ if (System.getProperty("tests.rest.cluster") == null) { systemProperty 'tests.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log" + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json" } else { systemProperty 'tests.logfile', '--external--' } diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 0ebc99fdf44ca..293190da16afd 100644 
--- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -12,11 +12,11 @@ appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%ma ######## Server JSON ############################ appender.rolling.type = RollingFile appender.rolling.name = rolling -appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json appender.rolling.layout.type = ESJsonLayout appender.rolling.layout.type_name = server -appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz appender.rolling.policies.type = Policies appender.rolling.policies.time.type = TimeBasedTriggeringPolicy appender.rolling.policies.time.interval = 1 @@ -35,11 +35,11 @@ appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB ######## Server - old style pattern ########### appender.rolling_old.type = RollingFile appender.rolling_old.name = rolling_old -appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.old_log +appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log appender.rolling_old.layout.type = PatternLayout appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n -appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.old_log.gz +appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz appender.rolling_old.policies.type = Policies 
appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy appender.rolling_old.policies.time.interval = 1 @@ -64,11 +64,11 @@ rootLogger.appenderRef.rolling_old.ref = rolling_old ######## Deprecation JSON ####################### appender.deprecation_rolling.type = RollingFile appender.deprecation_rolling.name = deprecation_rolling -appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log +appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json appender.deprecation_rolling.layout.type = ESJsonLayout appender.deprecation_rolling.layout.type_name = deprecation -appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz +appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz appender.deprecation_rolling.policies.type = Policies appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.deprecation_rolling.policies.size.size = 1GB @@ -78,12 +78,12 @@ appender.deprecation_rolling.strategy.max = 4 ######## Deprecation - old style pattern ####### appender.deprecation_rolling_old.type = RollingFile appender.deprecation_rolling_old.name = deprecation_rolling_old -appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.old_log +appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log appender.deprecation_rolling_old.layout.type = PatternLayout appender.deprecation_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n appender.deprecation_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - 
_deprecation-%i.old_log.gz + _deprecation-%i.log.gz appender.deprecation_rolling_old.policies.type = Policies appender.deprecation_rolling_old.policies.size.type = SizeBasedTriggeringPolicy appender.deprecation_rolling_old.policies.size.size = 1GB @@ -100,12 +100,12 @@ logger.deprecation.additivity = false appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ - .cluster_name}_index_search_slowlog.log + .cluster_name}_index_search_slowlog.json appender.index_search_slowlog_rolling.layout.type = ESJsonLayout appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ - .cluster_name}_index_search_slowlog-%i.log.gz + .cluster_name}_index_search_slowlog-%i.json.gz appender.index_search_slowlog_rolling.policies.type = Policies appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.index_search_slowlog_rolling.policies.size.size = 1GB @@ -116,12 +116,12 @@ appender.index_search_slowlog_rolling.strategy.max = 4 appender.index_search_slowlog_rolling_old.type = RollingFile appender.index_search_slowlog_rolling_old.name = index_search_slowlog_rolling_old appender.index_search_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_search_slowlog.old_log + _index_search_slowlog.log appender.index_search_slowlog_rolling_old.layout.type = PatternLayout appender.index_search_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n appender.index_search_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_search_slowlog-%i.old_log.gz + _index_search_slowlog-%i.log.gz 
appender.index_search_slowlog_rolling_old.policies.type = Policies appender.index_search_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy appender.index_search_slowlog_rolling_old.policies.size.size = 1GB @@ -138,12 +138,12 @@ logger.index_search_slowlog_rolling.additivity = false appender.index_indexing_slowlog_rolling.type = RollingFile appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_indexing_slowlog.log + _index_indexing_slowlog.json appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_indexing_slowlog-%i.log.gz + _index_indexing_slowlog-%i.json.gz appender.index_indexing_slowlog_rolling.policies.type = Policies appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.index_indexing_slowlog_rolling.policies.size.size = 1GB @@ -154,12 +154,12 @@ appender.index_indexing_slowlog_rolling.strategy.max = 4 appender.index_indexing_slowlog_rolling_old.type = RollingFile appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling_old appender.index_indexing_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_indexing_slowlog.old_log + _index_indexing_slowlog.log appender.index_indexing_slowlog_rolling_old.layout.type = PatternLayout appender.index_indexing_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n appender.index_indexing_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ - _index_indexing_slowlog-%i.old_log.gz + 
_index_indexing_slowlog-%i.log.gz appender.index_indexing_slowlog_rolling_old.policies.type = Policies appender.index_indexing_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy appender.index_indexing_slowlog_rolling_old.policies.size.size = 1GB diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index b8791ffaeec3d..dcea83a7f5d67 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -23,10 +23,10 @@ will resolve to `/var/log/elasticsearch/production.log`. ######## Server JSON ############################ appender.rolling.type = RollingFile <1> appender.rolling.name = rolling -appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log <2> +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json <2> appender.rolling.layout.type = ESJsonLayout <3> appender.rolling.layout.type_name = server <4> -appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz <5> +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz <5> appender.rolling.policies.type = Policies appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <6> appender.rolling.policies.time.interval = 1 <7> @@ -45,11 +45,11 @@ appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <15> -------------------------------------------------- <1> Configure the `RollingFile` appender -<2> Log to `/var/log/elasticsearch/production.log` +<2> Log to `/var/log/elasticsearch/production.json` <3> Use JSON layout. <4> `type_name` is a flag populating the `type` field in a `ESJsonLayout`. It can be used to distinguish different types of logs more easily when parsing them. 
-<5> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.log`; logs +<5> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.json`; logs will be compressed on each roll and `i` will be incremented <6> Use a time-based roll policy <7> Roll logs on a daily basis @@ -62,19 +62,20 @@ appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <15> <13> The pattern is to only delete the main logs <14> Only delete if we have accumulated too many compressed logs <15> The size condition on the compressed logs is 2 GB + [source,properties] -------------------------------------------------- ######## Server - old style pattern ########### appender.rolling_old.type = RollingFile appender.rolling_old.name = rolling_old -appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.old_log <1> +appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log <1> appender.rolling_old.layout.type = PatternLayout appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.old_log.gz -------------------------------------------------- -<1> The configuration for `old style` pattern appenders. These logs will be saved in `*.old_log` files and if archived will be in `* -.old_log.gz` files. Note that these should be considered deprecated and will be removed in the future. +<1> The configuration for `old style` pattern appenders. These logs will be saved in `*.log` files and if archived will be in `* +.log.gz` files. Note that these should be considered deprecated and will be removed in the future. 
NOTE: Log4j's configuration parsing gets confused by any extraneous whitespace; if you copy and paste any Log4j settings on this page, or enter any Log4j diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle index ae1acf3285b73..3b2e21fd557e7 100644 --- a/qa/die-with-dignity/build.gradle +++ b/qa/die-with-dignity/build.gradle @@ -28,7 +28,7 @@ integTestRunner { systemProperty 'tests.security.manager', 'false' systemProperty 'tests.system_call_filter', 'false' systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}" - systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.log" + systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.json" systemProperty 'runtime.java.home', "${project.runtimeJavaHome}" } diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle index 0abdc1247514a..a7f64ccaa18b7 100644 --- a/qa/logging-config/build.gradle +++ b/qa/logging-config/build.gradle @@ -34,7 +34,7 @@ integTestCluster { integTestRunner { systemProperty 'tests.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log" + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json" } unitTest { diff --git a/qa/logging-config/custom-log4j2.properties b/qa/logging-config/custom-log4j2.properties index b225d7cd550cf..6836a58bd9092 100644 --- a/qa/logging-config/custom-log4j2.properties +++ b/qa/logging-config/custom-log4j2.properties @@ -7,7 +7,7 @@ logger.action.level = debug appender.rolling.type = RollingFile appender.rolling.name = rolling -appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json appender.rolling.layout.type = PatternLayout 
appender.rolling.layout.pattern =%notEmpty{%node_name} %notEmpty{%node_and_cluster_id} %notEmpty{${sys:es.logs.cluster_name}} %m%n diff --git a/qa/unconfigured-node-name/build.gradle b/qa/unconfigured-node-name/build.gradle index 033a067ed885f..5aba0562e03f6 100644 --- a/qa/unconfigured-node-name/build.gradle +++ b/qa/unconfigured-node-name/build.gradle @@ -30,5 +30,5 @@ integTestCluster { integTestRunner { systemProperty 'tests.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log" + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json" } diff --git a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats index f4c27fe6f7731..8baa75f38f5bc 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats @@ -98,7 +98,7 @@ setup() { systemctl start elasticsearch.service wait_for_elasticsearch_status assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - assert_file_exist "/var/log/elasticsearch/elasticsearch_server.log" + assert_file_exist "/var/log/elasticsearch/elasticsearch_server.json" # Converts the epoch back in a human readable format run date --date=@$epoch "+%Y-%m-%d %H:%M:%S" diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index 5924a475b7c36..409053ee47d17 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -439,7 +439,7 @@ describe_port() { } debug_collect_logs() { - local es_logfile="$ESLOG/elasticsearch_server.log" + local es_logfile="$ESLOG/elasticsearch_server.json" local system_logfile='/var/log/messages' if [ -e "$es_logfile" ]; then diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle 
b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index e3fabfa8fba89..bba9709087a56 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -32,7 +32,7 @@ task writeJavaPolicy { javaPolicy.write( [ "grant {", - " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}_server.log\", \"read\";", + " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}_server.json\", \"read\";", "};" ].join("\n")) } @@ -54,7 +54,8 @@ followClusterTestRunner { systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" systemProperty 'tests.target_cluster', 'follow' systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}_server.log" + systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/" + + "${-> followClusterTest.getNodes().get(0).clusterName}_server.json" finalizedBy 'leaderClusterTestCluster#stop' } From 1291ead42ec3741580795757fddcabafe154b058 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Fri, 25 Jan 2019 15:03:31 +0100 Subject: [PATCH 60/64] migration logging --- docs/reference/migration/migrate_7_0.asciidoc | 2 ++ docs/reference/migration/migrate_7_0/logging.asciidoc | 10 ++++++++++ 2 files changed, 12 insertions(+) create mode 100644 docs/reference/migration/migrate_7_0/logging.asciidoc diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index 9f99604318aa9..313fdfdfafbe5 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -25,6 +25,7 @@ See also <> and <>. 
* <> * <> * <> +* <> [float] === Indices created before 7.0 @@ -58,3 +59,4 @@ include::migrate_7_0/scripting.asciidoc[] include::migrate_7_0/snapshotstats.asciidoc[] include::migrate_7_0/restclient.asciidoc[] include::migrate_7_0/low_level_restclient.asciidoc[] +include::migrate_7_0/logging.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/logging.asciidoc b/docs/reference/migration/migrate_7_0/logging.asciidoc new file mode 100644 index 0000000000000..3a1e566e49257 --- /dev/null +++ b/docs/reference/migration/migrate_7_0/logging.asciidoc @@ -0,0 +1,10 @@ +[float] +[[breaking_70_logging_changes]] +=== Logging changes + +[float] +==== Elasticsearch server's logs have been renamed from `${sys:es.logs.cluster_name}.log` to +`${sys:es.logs.cluster_name}_server.log`. + +Log files ending with `*.log` are now considered deprecated and newly added JSON logs ending +`*.json` should be used. From 30d9675ff6077bdc6cdabf82c00b04f074b25ac5 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 28 Jan 2019 10:59:03 +0100 Subject: [PATCH 61/64] old log rename and documentation update --- distribution/src/config/log4j2.properties | 2 +- docs/reference/migration/migrate_7_0/logging.asciidoc | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 293190da16afd..45bf720902c1c 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -35,7 +35,7 @@ appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB ######## Server - old style pattern ########### appender.rolling_old.type = RollingFile appender.rolling_old.name = rolling_old -appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log +appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log appender.rolling_old.layout.type = 
PatternLayout appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n diff --git a/docs/reference/migration/migrate_7_0/logging.asciidoc b/docs/reference/migration/migrate_7_0/logging.asciidoc index 3a1e566e49257..fcd66f065751f 100644 --- a/docs/reference/migration/migrate_7_0/logging.asciidoc +++ b/docs/reference/migration/migrate_7_0/logging.asciidoc @@ -1,10 +1,10 @@ [float] -[[breaking_70_logging_changes]] +[[70_logging_changes]] === Logging changes [float] -==== Elasticsearch server's logs have been renamed from `${sys:es.logs.cluster_name}.log` to -`${sys:es.logs.cluster_name}_server.log`. +==== Log files ending with `*.log` deprecated -Log files ending with `*.log` are now considered deprecated and newly added JSON logs ending -`*.json` should be used. +Log files with `*.log` using old pattern layout format are now considered deprecated +and newly added JSON logs ending `*.json` should be used. +Note: GC logs in a file `gc.log` are not changed. 
From faa81fd07665eb466a2f0ad90cc6830b034ac7d0 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 28 Jan 2019 12:29:05 +0100 Subject: [PATCH 62/64] making test more stable deprecation docs --- .../migration/migrate_7_0/logging.asciidoc | 29 +++++++++++++++++-- qa/logging-config/build.gradle | 2 +- qa/logging-config/custom-log4j2.properties | 2 +- .../custom_logging/CustomLoggingConfigIT.java | 28 ++++++------------ 4 files changed, 38 insertions(+), 23 deletions(-) diff --git a/docs/reference/migration/migrate_7_0/logging.asciidoc b/docs/reference/migration/migrate_7_0/logging.asciidoc index fcd66f065751f..0741d155105cc 100644 --- a/docs/reference/migration/migrate_7_0/logging.asciidoc +++ b/docs/reference/migration/migrate_7_0/logging.asciidoc @@ -1,10 +1,35 @@ [float] -[[70_logging_changes]] +[[breaking_70_logging_changes]] === Logging changes +[float] +==== New JSON format log files in `log` directory + +Elasticsearch now will produce additional log files in JSON format. They will be stored in `*.json` suffix files. +Following files should be expected now in log directory: +* ${cluster_name}_server.json +* ${cluster_name}_deprecation.json +* ${cluster_name}_index_search_slowlog.json +* ${cluster_name}_index_indexing_slowlog.json +* ${cluster_name}.log +* ${cluster_name}_deprecation.log +* ${cluster_name}_index_search_slowlog.log +* ${cluster_name}_index_indexing_slowlog.log +* ${cluster_name}_audit.json +* gc.log +* http.ports +* transport.ports + +Note: You can override additional files by removing the unwanted appenders in `log4j2.properties`. + [float] ==== Log files ending with `*.log` deprecated Log files with `*.log` using old pattern layout format are now considered deprecated and newly added JSON logs ending `*.json` should be used. -Note: GC logs in a file `gc.log` are not changed. +Note: GC logs in a file `gc.log` are not going to be changed. + +[float] +==== Docker output in JSON format + +All docker console logs are now in JSON format. 
You can distinguish logs streams with a `type` field. diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle index a7f64ccaa18b7..0abdc1247514a 100644 --- a/qa/logging-config/build.gradle +++ b/qa/logging-config/build.gradle @@ -34,7 +34,7 @@ integTestCluster { integTestRunner { systemProperty 'tests.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json" + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log" } unitTest { diff --git a/qa/logging-config/custom-log4j2.properties b/qa/logging-config/custom-log4j2.properties index 6836a58bd9092..b225d7cd550cf 100644 --- a/qa/logging-config/custom-log4j2.properties +++ b/qa/logging-config/custom-log4j2.properties @@ -7,7 +7,7 @@ logger.action.level = debug appender.rolling.type = RollingFile appender.rolling.name = rolling -appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log appender.rolling.layout.type = PatternLayout appender.rolling.layout.pattern =%notEmpty{%node_name} %notEmpty{%node_and_cluster_id} %notEmpty{${sys:es.logs.cluster_name}} %m%n diff --git a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java index b61a4a7391696..407d23de99769 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java @@ -20,7 +20,9 @@ package org.elasticsearch.qa.custom_logging; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.test.hamcrest.RegexMatcher; import org.elasticsearch.test.rest.ESRestTestCase; +import org.hamcrest.Matchers; import 
java.io.IOException; import java.io.UncheckedIOException; @@ -30,10 +32,7 @@ import java.nio.file.Paths; import java.security.AccessController; import java.security.PrivilegedAction; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.Stream; +import java.util.List; /** * This test verifies that Elasticsearch can startup successfully with a custom logging config using variables introduced in @@ -41,28 +40,19 @@ * The intention is to confirm that users can still run their Elasticsearch instances with previous configurations. */ public class CustomLoggingConfigIT extends ESRestTestCase { - private static final Pattern NODE_STARTED = Pattern.compile( - ".*node-0 \"cluster.uuid\": \"\\w*\", \"node.id\": \"\\w*\".*started.*"); + private static final String NODE_STARTED = ".*node-0.*cluster.uuid.*node.id.*started.*"; public void testSuccessfulStartupWithCustomConfig() throws Exception { assertBusy(() -> { - try (Stream lines = streamLogLines(getLogFile())) { - assertTrue("Log line indicating successful startup not found\n"+streamLogLines(getLogFile()) - .collect(Collectors.joining("\n")), - lines.anyMatch(line -> isStartupLine(line))); - } + List lines = readAllLines(getLogFile()); + assertThat(lines, Matchers.hasItem(RegexMatcher.matches(NODE_STARTED))); }); } - private boolean isStartupLine(String line) { - Matcher matcher = NODE_STARTED.matcher(line); - return matcher.matches(); - } - - private Stream streamLogLines(Path logFile) { - return AccessController.doPrivileged((PrivilegedAction>) () -> { + private List readAllLines(Path logFile) { + return AccessController.doPrivileged((PrivilegedAction>) () -> { try { - return Files.lines(logFile, StandardCharsets.UTF_8); + return Files.readAllLines(logFile, StandardCharsets.UTF_8); } catch (IOException e) { throw new UncheckedIOException(e); } From 5503ad52d8224b99dd943d900325468453ba8d75 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 
28 Jan 2019 14:10:48 +0100 Subject: [PATCH 63/64] doc changes after review --- .../migration/migrate_7_0/logging.asciidoc | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/docs/reference/migration/migrate_7_0/logging.asciidoc b/docs/reference/migration/migrate_7_0/logging.asciidoc index 0741d155105cc..ed0cb990c7755 100644 --- a/docs/reference/migration/migrate_7_0/logging.asciidoc +++ b/docs/reference/migration/migrate_7_0/logging.asciidoc @@ -17,19 +17,17 @@ Following files should be expected now in log directory: * ${cluster_name}_index_indexing_slowlog.log * ${cluster_name}_audit.json * gc.log -* http.ports -* transport.ports -Note: You can override additional files by removing the unwanted appenders in `log4j2.properties`. +Note: You can configure which of these files are written by editing `log4j2.properties`. [float] ==== Log files ending with `*.log` deprecated - -Log files with `*.log` using old pattern layout format are now considered deprecated -and newly added JSON logs ending `*.json` should be used. -Note: GC logs in a file `gc.log` are not going to be changed. +Log files with the `.log` file extension using the old pattern layout format +are now considered deprecated and the newly added JSON log file format with +the .json file extension should be used instead. +Note: GC logs which are written to the file gc.log will not be changed. [float] ==== Docker output in JSON format -All docker console logs are now in JSON format. You can distinguish logs streams with a `type` field. +All Docker console logs are now in JSON format. You can distinguish log streams with the `type` field. 
From b563423890563a6352070f382925e49733c9e75f Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 28 Jan 2019 14:52:11 +0100 Subject: [PATCH 64/64] fix doc --- docs/reference/migration/migrate_7_0/logging.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/migration/migrate_7_0/logging.asciidoc b/docs/reference/migration/migrate_7_0/logging.asciidoc index ed0cb990c7755..0385397b31619 100644 --- a/docs/reference/migration/migrate_7_0/logging.asciidoc +++ b/docs/reference/migration/migrate_7_0/logging.asciidoc @@ -24,8 +24,8 @@ Note: You can configure which of these files are written by editing `log4j2.prop ==== Log files ending with `*.log` deprecated Log files with the `.log` file extension using the old pattern layout format are now considered deprecated and the newly added JSON log file format with -the .json file extension should be used instead. -Note: GC logs which are written to the file gc.log will not be changed. +the `.json` file extension should be used instead. +Note: GC logs which are written to the file `gc.log` will not be changed. [float] ==== Docker output in JSON format