diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle
index 30fa4d3c03805..d79971907b50d 100644
--- a/distribution/archives/integ-test-zip/build.gradle
+++ b/distribution/archives/integ-test-zip/build.gradle
@@ -27,7 +27,7 @@ integTestRunner {
    */
   if (System.getProperty("tests.rest.cluster") == null) {
     systemProperty 'tests.logfile',
-      "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log"
+      "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json"
   } else {
     systemProperty 'tests.logfile', '--external--'
   }
diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java
similarity index 88%
rename from distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java
rename to distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java
index a854e6e66462a..12c916946085b 100644
--- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java
+++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java
@@ -19,11 +19,11 @@
 package org.elasticsearch.test.rest;
 
-import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
+import org.elasticsearch.common.logging.JsonLogsIntegTestCase;
 import org.hamcrest.Matcher;
 
-import java.io.IOException;
 import java.io.BufferedReader;
+import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -32,7 +32,7 @@
 
 import static org.hamcrest.Matchers.is;
 
-public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
+public class JsonLogsFormatAndParseIT extends JsonLogsIntegTestCase {
     @Override
     protected Matcher<String> nodeNameMatcher() {
         return is("node-0");
@@ -41,7 +41,7 @@ protected Matcher<String> nodeNameMatcher() {
     @Override
     protected BufferedReader openReader(Path logFile) {
         assumeFalse("Skipping test because it is being run against an external cluster.",
-            logFile.getFileName().toString().equals("--external--"));
+                logFile.getFileName().toString().equals("--external--"));
         return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> {
             try {
                 return Files.newBufferedReader(logFile, StandardCharsets.UTF_8);
diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties
index 9ad290ad82679..73420a047edc5 100644
--- a/distribution/docker/src/docker/config/log4j2.properties
+++ b/distribution/docker/src/docker/config/log4j2.properties
@@ -1,9 +1,43 @@
 status = error
 
-appender.console.type = Console
-appender.console.name = console
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.rolling.type = Console
+appender.rolling.name = rolling
+appender.rolling.layout.type = ESJsonLayout
+appender.rolling.layout.type_name = server
 
 rootLogger.level = info
-rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.rolling.ref = rolling
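+
+# Every appender in this file writes JSON to the console; the "type" field
+# emitted by ESJsonLayout (taken from layout.type_name) is what distinguishes
+# the streams. An illustrative (not verbatim) server line:
+# {"type": "server", "timestamp": "2019-01-14T12:14:22,821+0100", "level": "INFO",
+#  "component": "o.e.n.Node", "cluster.name": "docker-cluster", "node.name": "node-1",
+#  "message": "started"}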
+
+appender.deprecation_rolling.type = Console
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.layout.type = ESJsonLayout
+appender.deprecation_rolling.layout.type_name = deprecation
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = warn
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.additivity = false
+
+appender.index_search_slowlog_rolling.type = Console
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
+appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
+
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+appender.index_indexing_slowlog_rolling.type = Console
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
+appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false
diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties
index 6de21cd48f67b..45bf720902c1c 100644
--- a/distribution/src/config/log4j2.properties
+++ b/distribution/src/config/log4j2.properties
@@ -9,12 +9,14 @@ appender.console.name = console
 appender.console.layout.type = PatternLayout
 appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
 
+######## Server JSON ############################
 appender.rolling.type = RollingFile
 appender.rolling.name = rolling
-appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
-appender.rolling.layout.type = PatternLayout
-appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
-appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json
+appender.rolling.layout.type = ESJsonLayout
+appender.rolling.layout.type_name = server
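+# ESJsonLayout rejects an empty type_name; the value set here ("server") is
+# what ends up in the "type" field of every line this appender writes.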
+
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz
 appender.rolling.policies.type = Policies
 appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
 appender.rolling.policies.time.interval = 1
@@ -29,58 +31,144 @@ appender.rolling.strategy.action.condition.type = IfFileName
 appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
 appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
 appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
+################################################
+
+######## Server - old style pattern ###########
+appender.rolling_old.type = RollingFile
+appender.rolling_old.name = rolling_old
+appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.rolling_old.layout.type = PatternLayout
+appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling_old.policies.type = Policies
+appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling_old.policies.time.interval = 1
+appender.rolling_old.policies.time.modulate = true
+appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling_old.policies.size.size = 128MB
+appender.rolling_old.strategy.type = DefaultRolloverStrategy
+appender.rolling_old.strategy.fileIndex = nomax
+appender.rolling_old.strategy.action.type = Delete
+appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling_old.strategy.action.condition.type = IfFileName
+appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB
+################################################
 
 rootLogger.level = info
 rootLogger.appenderRef.console.ref = console
 rootLogger.appenderRef.rolling.ref = rolling
+rootLogger.appenderRef.rolling_old.ref = rolling_old
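+# Note: the root logger now writes every event twice: once as JSON via
+# "rolling" and once in the deprecated plaintext format via "rolling_old".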
+
+######## Deprecation JSON #######################
 appender.deprecation_rolling.type = RollingFile
 appender.deprecation_rolling.name = deprecation_rolling
-appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
-appender.deprecation_rolling.layout.type = PatternLayout
-appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
-appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
+appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
+appender.deprecation_rolling.layout.type = ESJsonLayout
+appender.deprecation_rolling.layout.type_name = deprecation
+
+appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
 appender.deprecation_rolling.policies.type = Policies
 appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
 appender.deprecation_rolling.policies.size.size = 1GB
 appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
 appender.deprecation_rolling.strategy.max = 4
+#################################################
+
+######## Deprecation - old style pattern #######
+appender.deprecation_rolling_old.type = RollingFile
+appender.deprecation_rolling_old.name = deprecation_rolling_old
+appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
+appender.deprecation_rolling_old.layout.type = PatternLayout
+appender.deprecation_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+appender.deprecation_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _deprecation-%i.log.gz
+appender.deprecation_rolling_old.policies.type = Policies
+appender.deprecation_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling_old.policies.size.size = 1GB
+appender.deprecation_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling_old.strategy.max = 4
+#################################################
 
 logger.deprecation.name = org.elasticsearch.deprecation
 logger.deprecation.level = warn
 logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling_old
 logger.deprecation.additivity = false
 
+######## Search slowlog JSON ####################
 appender.index_search_slowlog_rolling.type = RollingFile
 appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
-appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
-appender.index_search_slowlog_rolling.layout.type = PatternLayout
-appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
-appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
+  .cluster_name}_index_search_slowlog.json
+appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
+appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
+
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
+  .cluster_name}_index_search_slowlog-%i.json.gz
 appender.index_search_slowlog_rolling.policies.type = Policies
 appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
 appender.index_search_slowlog_rolling.policies.size.size = 1GB
 appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy
 appender.index_search_slowlog_rolling.strategy.max = 4
+#################################################
+
+######## Search slowlog - old style pattern ####
+appender.index_search_slowlog_rolling_old.type = RollingFile
+appender.index_search_slowlog_rolling_old.name = index_search_slowlog_rolling_old
+appender.index_search_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_search_slowlog.log
+appender.index_search_slowlog_rolling_old.layout.type = PatternLayout
+appender.index_search_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+appender.index_search_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_search_slowlog-%i.log.gz
+appender.index_search_slowlog_rolling_old.policies.type = Policies
+appender.index_search_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling_old.policies.size.size = 1GB
+appender.index_search_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.index_search_slowlog_rolling_old.strategy.max = 4
+#################################################
 
 logger.index_search_slowlog_rolling.name = index.search.slowlog
 logger.index_search_slowlog_rolling.level = trace
 logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling_old.ref = index_search_slowlog_rolling_old
 logger.index_search_slowlog_rolling.additivity = false
 
+######## Indexing slowlog JSON ##################
 appender.index_indexing_slowlog_rolling.type = RollingFile
 appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
-appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
-appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
-appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
-appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog.json
+appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
+appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
+
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog-%i.json.gz
 appender.index_indexing_slowlog_rolling.policies.type = Policies
 appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
 appender.index_indexing_slowlog_rolling.policies.size.size = 1GB
 appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy
 appender.index_indexing_slowlog_rolling.strategy.max = 4
+#################################################
+
+######## Indexing slowlog - old style pattern ##
+appender.index_indexing_slowlog_rolling_old.type = RollingFile
+appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling_old
+appender.index_indexing_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling_old.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+
+appender.index_indexing_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
+  _index_indexing_slowlog-%i.log.gz
+appender.index_indexing_slowlog_rolling_old.policies.type = Policies
+appender.index_indexing_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling_old.policies.size.size = 1GB
+appender.index_indexing_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy
+appender.index_indexing_slowlog_rolling_old.strategy.max = 4
+#################################################
 
 logger.index_indexing_slowlog.name = index.indexing.slowlog.index
 logger.index_indexing_slowlog.level = trace
 logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old
 logger.index_indexing_slowlog.additivity = false
diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc
index 9f99604318aa9..313fdfdfafbe5 100644
--- a/docs/reference/migration/migrate_7_0.asciidoc
+++ b/docs/reference/migration/migrate_7_0.asciidoc
@@ -25,6 +25,7 @@ See also <> and <>.
 * <>
 * <>
 * <>
+* <<breaking_70_logging_changes>>
 
 [float]
 === Indices created before 7.0
@@ -58,3 +59,4 @@ include::migrate_7_0/scripting.asciidoc[]
 include::migrate_7_0/snapshotstats.asciidoc[]
 include::migrate_7_0/restclient.asciidoc[]
 include::migrate_7_0/low_level_restclient.asciidoc[]
+include::migrate_7_0/logging.asciidoc[]
diff --git a/docs/reference/migration/migrate_7_0/logging.asciidoc b/docs/reference/migration/migrate_7_0/logging.asciidoc
new file mode 100644
index 0000000000000..0385397b31619
--- /dev/null
+++ b/docs/reference/migration/migrate_7_0/logging.asciidoc
@@ -0,0 +1,33 @@
+[float]
+[[breaking_70_logging_changes]]
+=== Logging changes
+
+[float]
+==== New JSON-format log files in the `log` directory
+
+Elasticsearch will now produce additional log files in JSON format, written to
+files with a `.json` suffix. The following files should now be expected in the
+log directory:
+
+* `${cluster_name}_server.json`
+* `${cluster_name}_deprecation.json`
+* `${cluster_name}_index_search_slowlog.json`
+* `${cluster_name}_index_indexing_slowlog.json`
+* `${cluster_name}.log`
+* `${cluster_name}_deprecation.log`
+* `${cluster_name}_index_search_slowlog.log`
+* `${cluster_name}_index_indexing_slowlog.log`
+* `${cluster_name}_audit.json`
+* `gc.log`
+
+Note: You can configure which of these files are written by editing
+`log4j2.properties`.
+
+[float]
+==== Log files ending with `.log` deprecated
+Log files with the `.log` file extension, which use the old pattern layout
+format, are now considered deprecated; the newly added JSON log files with the
+`.json` file extension should be used instead.
+Note: GC logs, which are written to the file `gc.log`, will not be changed.
+
+[float]
+==== Docker output in JSON format
+
+All Docker console logs are now in JSON format. You can distinguish log
+streams with the `type` field.
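+
+As an illustration (the field values here are invented, and the exact set of
+fields depends on the appender), a single line of `${cluster_name}_server.json`
+looks similar to:
+
+[source,js]
+--------------------------------------------------
+{"type": "server", "timestamp": "2019-01-14T12:14:22,821+0100", "level": "INFO",
+"component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "node-1",
+"message": "started"}
+--------------------------------------------------
+// NOTCONSOLE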
diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc
index f477a14bb6d3d..dcea83a7f5d67 100644
--- a/docs/reference/setup/logging-config.asciidoc
+++ b/docs/reference/setup/logging-config.asciidoc
@@ -20,43 +20,62 @@ will resolve to `/var/log/elasticsearch/production.log`.
 
 [source,properties]
 --------------------------------------------------
+######## Server JSON ############################
 appender.rolling.type = RollingFile <1>
 appender.rolling.name = rolling
-appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log <2>
-appender.rolling.layout.type = PatternLayout
-appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
-appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz <3>
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json <2>
+appender.rolling.layout.type = ESJsonLayout <3>
+appender.rolling.layout.type_name = server <4>
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz <5>
 appender.rolling.policies.type = Policies
-appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <4>
-appender.rolling.policies.time.interval = 1 <5>
-appender.rolling.policies.time.modulate = true <6>
-appender.rolling.policies.size.type = SizeBasedTriggeringPolicy <7>
-appender.rolling.policies.size.size = 256MB <8>
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <6>
+appender.rolling.policies.time.interval = 1 <7>
+appender.rolling.policies.time.modulate = true <8>
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy <9>
+appender.rolling.policies.size.size = 256MB <10>
 appender.rolling.strategy.type = DefaultRolloverStrategy
 appender.rolling.strategy.fileIndex = nomax
-appender.rolling.strategy.action.type = Delete <9>
+appender.rolling.strategy.action.type = Delete <11>
 appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
-appender.rolling.strategy.action.condition.type = IfFileName <10>
-appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <11>
-appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize <12>
-appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <13>
+appender.rolling.strategy.action.condition.type = IfFileName <12>
+appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <13>
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize <14>
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <15>
+################################################
 --------------------------------------------------
 
 <1> Configure the `RollingFile` appender
-<2> Log to `/var/log/elasticsearch/production.log`
-<3> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.log`; logs
+<2> Log to `/var/log/elasticsearch/production_server.json`
+<3> Use the JSON layout.
+<4> `type_name` populates the `type` field of every line emitted by an `ESJsonLayout`.
+    It can be used to distinguish different types of logs more easily when parsing them.
+<5> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.json`; logs
     will be compressed on each roll and `i` will be incremented
-<4> Use a time-based roll policy
-<5> Roll logs on a daily basis
-<6> Align rolls on the day boundary (as opposed to rolling every twenty-four
+<6> Use a time-based roll policy
+<7> Roll logs on a daily basis
+<8> Align rolls on the day boundary (as opposed to rolling every twenty-four
     hours)
-<7> Using a size-based roll policy
-<8> Roll logs after 256 MB
-<9> Use a delete action when rolling logs
-<10> Only delete logs matching a file pattern
-<11> The pattern is to only delete the main logs
-<12> Only delete if we have accumulated too many compressed logs
-<13> The size condition on the compressed logs is 2 GB
+<9> Use a size-based roll policy
+<10> Roll logs after 256 MB
+<11> Use a delete action when rolling logs
+<12> Only delete logs matching a file pattern
+<13> The pattern is to only delete the main logs
+<14> Only delete if we have accumulated too many compressed logs
+<15> The size condition on the compressed logs is 2 GB
+
+[source,properties]
+--------------------------------------------------
+######## Server - old style pattern ###########
+appender.rolling_old.type = RollingFile
+appender.rolling_old.name = rolling_old
+appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log <1>
+appender.rolling_old.layout.type = PatternLayout
+appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
+appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.old_log.gz
+
+--------------------------------------------------
+<1> The configuration for `old style` pattern appenders. These logs will be
+    saved in `*.log` files and, if archived, in `*.log.gz` files. Note that
+    these should be considered deprecated and will be removed in the future.
 
 NOTE: Log4j's configuration parsing gets confused by any extraneous whitespace;
 if you copy and paste any Log4j settings on this page, or enter any Log4j
@@ -194,3 +213,38 @@ files (four rolled logs, and the active log).
 
 You can disable it in the `config/log4j2.properties` file by setting the
 deprecation log level to `error`.
+
+
+[float]
+[[json-logging]]
+=== JSON log format
+
+To make parsing Elasticsearch logs easier, logs are now printed in a JSON format.
+This is configured by a Log4j layout property `appender.rolling.layout.type = ESJsonLayout`.
+This layout requires a `type_name` attribute to be set, which is used to
+distinguish log streams when parsing.
+
+[source,properties]
+--------------------------------------------------
+appender.rolling.layout.type = ESJsonLayout
+appender.rolling.layout.type_name = server
+--------------------------------------------------
+:es-json-layout-java-doc: {elasticsearch-javadoc}/org/elasticsearch/common/logging/ESJsonLayout.html
+
+Each line contains a single JSON document with the properties configured in `ESJsonLayout`.
+See this class's {es-json-layout-java-doc}[javadoc] for more details.
+However, if a JSON document contains an exception, it will be printed over multiple lines.
+The first line will contain the regular properties and subsequent lines will contain the
+stacktrace formatted as a JSON array.
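+
+For example, an error logged together with an exception produces output shaped
+roughly like the following (abbreviated, with invented values):
+
+[source,js]
+--------------------------------------------------
+{"type": "server", "timestamp": "2019-01-14T12:14:22,821+0100", "level": "ERROR",
+"component": "o.e.ExceptionsHelper", "cluster.name": "production",
+"node.name": "node-0", "message": "fatal error",
+"stacktrace": ["java.lang.RuntimeException: example",
+"at org.elasticsearch.Example.method(Example.java:42)"]}
+--------------------------------------------------
+// NOTCONSOLE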
+
+NOTE: You can still use your own custom layout. To do that, replace the line
+`appender.rolling.layout.type` with a different layout, as in the sample below:
+[source,properties]
+--------------------------------------------------
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+--------------------------------------------------
\ No newline at end of file
diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle
index 26d567ca3ef6c..3b2e21fd557e7 100644
--- a/qa/die-with-dignity/build.gradle
+++ b/qa/die-with-dignity/build.gradle
@@ -28,7 +28,7 @@ integTestRunner {
   systemProperty 'tests.security.manager', 'false'
   systemProperty 'tests.system_call_filter', 'false'
   systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}"
-  systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}.log"
+  systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.json"
   systemProperty 'runtime.java.home', "${project.runtimeJavaHome}"
 }
diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java
index 9250122025c0a..16398b380cfe1 100644
--- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java
+++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java
@@ -21,10 +21,14 @@
 
 import org.apache.http.ConnectionClosedException;
 import org.apache.lucene.util.Constants;
+import org.elasticsearch.cli.Terminal;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.logging.JsonLogLine;
+import org.elasticsearch.common.logging.JsonLogsStream;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
 
 import java.io.BufferedReader;
 import java.io.IOException;
@@ -34,10 +38,12 @@
 import java.nio.file.Path;
 import java.util.Iterator;
 import java.util.List;
+import java.util.stream.Stream;
 
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.either;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.hasToString;
 import static org.hamcrest.Matchers.instanceOf;
@@ -53,7 +59,7 @@ public void testDieWithDignity() throws Exception {
         final int pid = Integer.parseInt(pidFileLines.get(0));
         Files.delete(pidFile);
         IOException e = expectThrows(IOException.class,
-            () -> client().performRequest(new Request("GET", "/_die_with_dignity")));
+                () -> client().performRequest(new Request("GET", "/_die_with_dignity")));
         Matcher failureMatcher = instanceOf(ConnectionClosedException.class);
         if (Constants.WINDOWS) {
             /*
             *
@@ -64,9 +70,9 @@ public void testDieWithDignity() throws Exception {
              * https://issues.apache.org/jira/browse/HTTPASYNC-134
              *
              * So we catch it here and consider it "ok".
-             */
+             */
             failureMatcher = either(failureMatcher)
-                .or(hasToString(containsString("An existing connection was forcibly closed by the remote host")));
+                    .or(hasToString(containsString("An existing connection was forcibly closed by the remote host")));
         }
 
         assertThat(e, failureMatcher);
@@ -85,28 +91,62 @@ public void testDieWithDignity() throws Exception {
             }
         });
 
-        // parse the logs and ensure that Elasticsearch died with the expected cause
-        final List<String> lines = Files.readAllLines(PathUtils.get(System.getProperty("log")));
+        try {
+            // parse the logs and ensure that Elasticsearch died with the expected cause
+            Path path = PathUtils.get(System.getProperty("log"));
+            try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+                final Iterator<JsonLogLine> it = stream.iterator();
 
-        final Iterator<String> it = lines.iterator();
+                boolean fatalError = false;
+                boolean fatalErrorInThreadExiting = false;
 
-        boolean fatalError = false;
-        boolean fatalErrorInThreadExiting = false;
+                while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) {
+                    final JsonLogLine line = it.next();
+                    if (isFatalError(line)) {
+                        fatalError = true;
+                    } else if (isFatalErrorInThreadExiting(line) || isWarnExceptionReceived(line)) {
+                        fatalErrorInThreadExiting = true;
+                        assertThat(line.stacktrace(),
+                            hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity")));
+                    }
+                }
 
-        while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) {
-            final String line = it.next();
-            if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error")) {
-                fatalError = true;
-            } else if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]"
-                + " fatal error in thread \\[Thread-\\d+\\], exiting$")) {
-                fatalErrorInThreadExiting = true;
-                assertTrue(it.hasNext());
-                assertThat(it.next(), equalTo("java.lang.OutOfMemoryError: die with dignity"));
+                assertTrue(fatalError);
+                assertTrue(fatalErrorInThreadExiting);
             }
+        } catch (AssertionError ae) {
+            Path path = PathUtils.get(System.getProperty("log"));
+            debugLogs(path);
+            throw ae;
         }
+    }
+
+    private boolean isWarnExceptionReceived(JsonLogLine line) {
+        return line.level().equals("WARN")
+            && line.component().equals("o.e.h.AbstractHttpServerTransport")
+            && line.nodeName().equals("node-0")
+            && line.message().contains("caught exception while handling client http traffic");
+    }
+
+    private void debugLogs(Path path) throws IOException {
+        try (BufferedReader reader = Files.newBufferedReader(path)) {
+            Terminal terminal = Terminal.DEFAULT;
+            reader.lines().forEach(line -> terminal.println(line));
+        }
+    }
+
+    private boolean isFatalErrorInThreadExiting(JsonLogLine line) {
+        return line.level().equals("ERROR")
+            && line.component().equals("o.e.b.ElasticsearchUncaughtExceptionHandler")
+            && line.nodeName().equals("node-0")
+            && line.message().matches("fatal error in thread \\[Thread-\\d+\\], exiting$");
+    }
 
-        assertTrue(fatalError);
-        assertTrue(fatalErrorInThreadExiting);
+    private boolean isFatalError(JsonLogLine line) {
+        return line.level().equals("ERROR")
+            && line.component().equals("o.e.ExceptionsHelper")
+            && line.nodeName().equals("node-0")
+            && line.message().contains("fatal error");
     }
 
     @Override
diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle
new file mode 100644
index 0000000000000..0abdc1247514a
--- /dev/null
+++ b/qa/logging-config/build.gradle
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+apply plugin: 'elasticsearch.standalone-rest-test'
+apply plugin: 'elasticsearch.rest-test'
+apply plugin: 'elasticsearch.standalone-test'
+
+integTestCluster {
+  autoSetInitialMasterNodes = false
+  autoSetHostsProvider = false
+  /**
+   * Provide a custom log4j configuration where the layout is an old style pattern and confirm that Elasticsearch
+   * can successfully start up.
+   */
+  extraConfigFile 'log4j2.properties', 'custom-log4j2.properties'
+}
+
+integTestRunner {
+  systemProperty 'tests.logfile',
+      "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log"
+}
+
+unitTest {
+  systemProperty 'tests.security.manager', 'false'
+}
diff --git a/qa/logging-config/custom-log4j2.properties b/qa/logging-config/custom-log4j2.properties
new file mode 100644
index 0000000000000..b225d7cd550cf
--- /dev/null
+++ b/qa/logging-config/custom-log4j2.properties
@@ -0,0 +1,31 @@
+
+status = error
+
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = %notEmpty{%node_name} %notEmpty{%node_and_cluster_id} %notEmpty{${sys:es.logs.cluster_name}} %m%n
+
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size = 128MB
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.fileIndex = nomax
+appender.rolling.strategy.action.type = Delete
+appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
+appender.rolling.strategy.action.condition.type = IfFileName
+appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
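+
+# Note: %node_name and %node_and_cluster_id are pattern converters registered
+# by Elasticsearch itself; %node_and_cluster_id stays empty (hence the
+# %notEmpty wrappers) until the node receives its first cluster state.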
+
+rootLogger.level = info
+rootLogger.appenderRef.rolling.ref = rolling
diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java
new file mode 100644
index 0000000000000..b484ba90a4da3
--- /dev/null
+++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configurator;
+import org.elasticsearch.cli.UserException;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ESTestCase;
+import org.hamcrest.FeatureMatcher;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * This test confirms that the JSON log structure is properly formatted and can be parsed.
+ * It has to be in the org.elasticsearch.common.logging package to use PrefixLogger.
+ */
+public class JsonLoggerTests extends ESTestCase {
+
+    @BeforeClass
+    public static void initNodeName() {
+        LogConfigurator.setNodeName("sample-name");
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        LogConfigurator.registerErrorListener();
+        setupLogging("json_layout");
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        LoggerContext context = (LoggerContext) LogManager.getContext(false);
+        Configurator.shutdown(context);
+        super.tearDown();
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testJsonLayout() throws IOException {
+        final Logger testLogger = LogManager.getLogger("test");
+
+        testLogger.error("This is an error message");
+        testLogger.warn("This is a warning message");
+        testLogger.info("This is an info message");
+        testLogger.debug("This is a debug message");
+        testLogger.trace("This is a trace message");
+        final Path path = clusterLogsPath();
+        try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+            List<JsonLogLine> jsonLogs = collectLines(stream);
+
+            assertThat(jsonLogs, Matchers.contains(
+                logLine("file", Level.ERROR, "sample-name", "test", "This is an error message"),
+                logLine("file", Level.WARN, "sample-name", "test", "This is a warning message"),
+                logLine("file", Level.INFO, "sample-name", "test", "This is an info message"),
+                logLine("file", Level.DEBUG, "sample-name", "test", "This is a debug message"),
+                logLine("file", Level.TRACE, "sample-name", "test", "This is a trace message")
+            ));
+        }
+    }
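+
+    // For reference, the ERROR entry above is written by the "file" appender
+    // roughly as (timestamp and cluster fields vary by environment):
+    // {"type": "file", "timestamp": "...", "level": "ERROR", "component": "test",
+    //  "cluster.name": "...", "node.name": "sample-name", "message": "This is an error message"}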
+
+    @SuppressWarnings("unchecked")
+    public void testPrefixLoggerInJson() throws IOException {
+        Logger shardIdLogger = Loggers.getLogger("shardIdLogger", ShardId.fromString("[indexName][123]"));
+        shardIdLogger.info("This is an info message with a shardId");
+
+        Logger prefixLogger = new PrefixLogger(LogManager.getLogger("prefixLogger"), "PREFIX");
+        prefixLogger.info("This is an info message with a prefix");
+
+        final Path path = clusterLogsPath();
+        try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+            List<JsonLogLine> jsonLogs = collectLines(stream);
+            assertThat(jsonLogs, Matchers.contains(
+                logLine("file", Level.INFO, "sample-name", "shardIdLogger",
+                    "[indexName][123] This is an info message with a shardId"),
+                logLine("file", Level.INFO, "sample-name", "prefixLogger", "PREFIX This is an info message with a prefix")
+            ));
+        }
+    }
+
+    public void testJsonInMessage() throws IOException {
+        final Logger testLogger = LogManager.getLogger("test");
+        String json = "{\n" +
+            "  \"terms\" : {\n" +
+            "    \"user\" : [\n" +
+            "      \"u1\",\n" +
+            "      \"u2\",\n" +
+            "      \"u3\"\n" +
+            "    ],\n" +
+            "    \"boost\" : 1.0\n" +
+            "  }\n" +
+            "}";
+
+        testLogger.info(json);
+
+        final Path path = clusterLogsPath();
+        try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+            List<JsonLogLine> jsonLogs = collectLines(stream);
+            assertThat(jsonLogs, Matchers.contains(
+                logLine("file", Level.INFO, "sample-name", "test", json)
+            ));
+        }
+    }
+
+    public void testStacktrace() throws IOException {
+        final Logger testLogger = LogManager.getLogger("test");
+        testLogger.error("error message", new Exception("exception message", new RuntimeException("cause message")));
+
+        final Path path = clusterLogsPath();
+        try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+            List<JsonLogLine> jsonLogs = collectLines(stream);
+            assertThat(jsonLogs, Matchers.contains(
+                Matchers.allOf(
+                    logLine("file", Level.ERROR, "sample-name", "test", "error message"),
+                    stacktraceWith("java.lang.Exception: exception message"),
+                    stacktraceWith("Caused by: java.lang.RuntimeException: cause message")
+                )
+            ));
+        }
+    }
+
+    public void testJsonInStacktraceMessageIsSplitted() throws IOException {
+        final Logger testLogger = LogManager.getLogger("test");
+
+        String json = "{\n" +
+            "  \"terms\" : {\n" +
+            "    \"user\" : [\n" +
+            "      \"u1\",\n" +
+            "      \"u2\",\n" +
+            "      \"u3\"\n" +
+            "    ],\n" +
+            "    \"boost\" : 1.0\n" +
+            "  }\n" +
+            "}";
+        testLogger.error("error message " + json, new Exception(json));
+
+        final Path path = clusterLogsPath();
+        try (Stream<JsonLogLine> stream = JsonLogsStream.from(path)) {
+            List<JsonLogLine> jsonLogs = collectLines(stream);
+
+            assertThat(jsonLogs, Matchers.contains(
+                Matchers.allOf(
+                    // the message field is a single line with the JSON escaped
+                    logLine("file", Level.ERROR, "sample-name", "test", "error message " + json),
+
+                    // the stacktrace field carries each line of the JSON as a separate array element
+                    stacktraceWith(("java.lang.Exception: " + json).split("\n"))
+                )
+            ));
+        }
+    }
+
+    private List<JsonLogLine> collectLines(Stream<JsonLogLine> stream) {
+        return stream
+            .skip(1) // skip the first line, which comes from the super class
+            .collect(Collectors.toList());
+    }
+
+    private Path clusterLogsPath() {
+        return PathUtils.get(System.getProperty("es.logs.base_path"), System.getProperty("es.logs.cluster_name") + ".log");
+    }
+
+    private void setupLogging(final String config) throws IOException, UserException {
+        setupLogging(config, Settings.EMPTY);
+    }
+
+    private void setupLogging(final String config, final Settings settings) throws IOException, UserException {
+        assertFalse("Environment path.home variable should not be set", Environment.PATH_HOME_SETTING.exists(settings));
+        final Path configDir = getDataPath(config);
+        final Settings mergedSettings = Settings.builder()
+            .put(settings)
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+            .build();
+        // need to use a custom config path so we can use a custom log4j2.properties file for the test
+        final Environment environment = new Environment(mergedSettings, configDir);
+        LogConfigurator.configure(environment);
+    }
+
+    private Matcher<JsonLogLine> logLine(String type, Level level, String nodeName, String component, String message) {
+        return new FeatureMatcher<JsonLogLine, Boolean>(Matchers.is(true), "logLine", "logLine") {
+
+            @Override
+            protected Boolean featureValueOf(JsonLogLine actual) {
+                return actual.type().equals(type) &&
+                    actual.level().equals(level.toString()) &&
+                    actual.nodeName().equals(nodeName) &&
+                    actual.component().equals(component) &&
+                    actual.message().equals(message);
+            }
+        };
+    }
+
+    private Matcher<JsonLogLine> stacktraceWith(String... lines) {
+        return new FeatureMatcher<JsonLogLine, List<String>>(Matchers.hasItems(lines),
+            "stacktrace", "stacktrace") {
+
+            @Override
+            protected List<String> featureValueOf(JsonLogLine actual) {
+                return actual.stacktrace();
+            }
+        };
+    }
+}
diff --git a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java
new file mode 100644
index 0000000000000..407d23de99769
--- /dev/null
+++ b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.qa.custom_logging;
+
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.test.hamcrest.RegexMatcher;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.hamcrest.Matchers;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.List;
+
+/**
+ * This test verifies that Elasticsearch can start up successfully with a custom logging config that uses the
+ * pattern variables introduced for ESJsonLayout.
+ * The intention is to confirm that users can still run their Elasticsearch instances with previous configurations.
+ */
+public class CustomLoggingConfigIT extends ESRestTestCase {
+    private static final String NODE_STARTED = ".*node-0.*cluster.uuid.*node.id.*started.*";
+
+    public void testSuccessfulStartupWithCustomConfig() throws Exception {
+        assertBusy(() -> {
+            List<String> lines = readAllLines(getLogFile());
+            assertThat(lines, Matchers.hasItem(RegexMatcher.matches(NODE_STARTED)));
+        });
+    }
+
+    private List<String> readAllLines(Path logFile) {
+        return AccessController.doPrivileged((PrivilegedAction<List<String>>) () -> {
+            try {
+                return Files.readAllLines(logFile, StandardCharsets.UTF_8);
+            } catch (IOException e) {
+                throw new UncheckedIOException(e);
+            }
+        });
+    }
+
+    @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file")
+    private Path getLogFile() {
+        String logFileString = System.getProperty("tests.logfile");
+        if (logFileString == null) {
+            fail("tests.logfile must be set to run this test. It is automatically "
+                + "set by gradle. If you must set it yourself then it should be the absolute path to the "
+                + "log file.");
+        }
+        return Paths.get(logFileString);
+    }
+}
diff --git a/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties b/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties
new file mode 100644
index 0000000000000..4bbd0b038ab8a
--- /dev/null
+++ b/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties
@@ -0,0 +1,21 @@
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = ESJsonLayout
+appender.console.layout.type_name = console
+
+appender.file.type = File
+appender.file.name = file
+appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.file.layout.type = ESJsonLayout
+appender.file.layout.type_name = file
+
+
+rootLogger.level = info
+rootLogger.appenderRef.console.ref = console
+rootLogger.appenderRef.file.ref = file
+
+logger.test.name = test
+logger.test.level = trace
+logger.test.appenderRef.console.ref = console
+logger.test.appenderRef.file.ref = file
+logger.test.additivity = false
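+# The type_name values above ("console"/"file") become the "type" field of each
+# line; JsonLoggerTests matches on the "file" stream, e.g. logLine("file", ...).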
diff --git a/qa/logging-config/src/test/resources/plugin-security.policy b/qa/logging-config/src/test/resources/plugin-security.policy
new file mode 100644
index 0000000000000..d0d865c4ede16
--- /dev/null
+++ b/qa/logging-config/src/test/resources/plugin-security.policy
@@ -0,0 +1,4 @@
+grant {
+  // Needed to read the log file
+  permission java.io.FilePermission "${tests.logfile}", "read";
+};
diff --git a/qa/unconfigured-node-name/build.gradle b/qa/unconfigured-node-name/build.gradle
index f8fb696e8ca85..5aba0562e03f6 100644
--- a/qa/unconfigured-node-name/build.gradle
+++ b/qa/unconfigured-node-name/build.gradle
@@ -30,5 +30,5 @@ integTestCluster {
 
 integTestRunner {
   systemProperty 'tests.logfile',
-      "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log"
+      "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json"
 }
diff --git a/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java b/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java
similarity index 92%
rename from qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java
rename to qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java
index 44d5bb6c900f5..50cc20b0e5789 100644
--- a/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java
+++ b/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java
@@ -19,11 +19,11 @@
 package org.elasticsearch.unconfigured_node_name;
 
-import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
+import org.elasticsearch.common.logging.JsonLogsIntegTestCase;
 import org.hamcrest.Matcher;
 
-import java.io.IOException;
 import java.io.BufferedReader;
+import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -32,7 +32,7 @@
 
 import static org.hamcrest.Matchers.not;
 
-public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
+public class JsonLogsFormatAndParseIT extends JsonLogsIntegTestCase {
     @Override
     protected Matcher<String> nodeNameMatcher() {
         return not("");
diff --git a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats
index 3cf495939aff9..8baa75f38f5bc 100644
--- a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats
+++ b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats
@@ -98,7 +98,7 @@ setup() {
     systemctl start elasticsearch.service
     wait_for_elasticsearch_status
     assert_file_exist "/var/run/elasticsearch/elasticsearch.pid"
-    assert_file_exist "/var/log/elasticsearch/elasticsearch.log"
+    assert_file_exist "/var/log/elasticsearch/elasticsearch_server.json"
 
     # Converts the epoch back in a human readable format
     run date --date=@$epoch "+%Y-%m-%d %H:%M:%S"
diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash
index 92363d4d4e348..3f577668bf1ec 100644
--- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash
+++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash
@@ -428,7 +428,7 @@ describe_port() {
 }
 
 debug_collect_logs() {
-    local es_logfile="$ESLOG/elasticsearch.log"
+    local es_logfile="$ESLOG/elasticsearch_server.json"
     local system_logfile='/var/log/messages'
 
     if [ -e "$es_logfile" ]; then
diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java
new file mode 100644
index 0000000000000..af7cd81f202e3
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging;
+
+import org.apache.logging.log4j.core.Layout;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.config.Node;
+import org.apache.logging.log4j.core.config.plugins.Plugin;
+import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
+import org.apache.logging.log4j.core.config.plugins.PluginFactory;
+import org.apache.logging.log4j.core.layout.AbstractStringLayout;
+import org.apache.logging.log4j.core.layout.ByteBufferDestination;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+import org.elasticsearch.common.Strings;
+
+import java.nio.charset.Charset;
+import java.util.Map;
+
+/**
+ * Formats log events as strings in a json format.
+ * <p>
+ * The class is wrapping the {@link PatternLayout} with a pattern to format into json. This gives more flexibility and control over how the
+ * log messages are formatted in {@link org.apache.logging.log4j.core.layout.JsonLayout}
+ */
+@Plugin(name = "ESJsonLayout", category = Node.CATEGORY, elementType = Layout.ELEMENT_TYPE, printObject = true)
+public class ESJsonLayout extends AbstractStringLayout {
+    /**
+     * Fields used in a pattern to format a json log line:
+     * <ul>
+     * <li>type - the type of logs. These represent appenders and help docker distinguish log streams.</li>
+     * <li>timestamp - ISO8601 with additional timezone ID</li>
+     * <li>level - INFO, WARN etc</li>
+     * <li>component - logger name, most of the times class name</li>
+     * <li>cluster.name - taken from sys:es.logs.cluster_name system property because it is always set</li>
+     * <li>node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml</li>
+     * <li>node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present
+     * once clusterStateUpdate is first received</li>
+     * <li>message - a json escaped message. Multiline messages will be converted to single line with new line explicitly
+     * replaced to \n</li>
+     * <li>exceptionAsJson - in json as a stacktrace field. Only present when throwable is passed as a parameter when using a logger.
+     * Taken from JsonThrowablePatternConverter</li>
+     * </ul>
+     */
+    private static final String PATTERN = "{" +
+        "\"type\": \"${TYPE}\", " +
+        "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}\", " +
+        "\"level\": \"%p\", " +
+        "\"component\": \"%c{1.}\", " +
+        "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " +
+        "\"node.name\": \"%node_name\", " +
+        "%notEmpty{%node_and_cluster_id, } " +
+        "\"message\": \"%notEmpty{%enc{%marker}{JSON} }%enc{%.-10000m}{JSON}\" " +
+        "%exceptionAsJson " +
+        "}%n";
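+
+    // Illustrative shape of a rendered line (values are invented; node.id and
+    // cluster.uuid appear only once the first cluster state has been received):
+    // {"type": "server", "timestamp": "2019-01-14T12:14:22,821+0100", "level": "INFO",
+    //  "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "node-1",
+    //  "cluster.uuid": "...", "node.id": "...", "message": "started" }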
+ */ +package org.elasticsearch.common.logging; + +import com.fasterxml.jackson.core.io.JsonStringEncoder; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.pattern.ConverterKeys; +import org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter; +import org.apache.logging.log4j.core.pattern.PatternConverter; +import org.apache.logging.log4j.core.pattern.ThrowablePatternConverter; +import org.apache.logging.log4j.util.Strings; + +import java.nio.charset.Charset; +import java.util.StringJoiner; + +/** + + * Outputs the Throwable portion of the LoggingEvent as a Json formatted field with array + * "exception": [ "stacktrace", "lines", "as", "array", "elements" ] + * + * Reusing @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter which already converts a Throwable from + * LoggingEvent into a multiline string + */ +@Plugin(name = "JsonThrowablePatternConverter", category = PatternConverter.CATEGORY) +@ConverterKeys({"exceptionAsJson"}) +public final class JsonThrowablePatternConverter extends ThrowablePatternConverter { + private final ExtendedThrowablePatternConverter throwablePatternConverter; + + /** + * Private as only expected to be used by log4j2 newInstance method + */ + private JsonThrowablePatternConverter(final Configuration config, final String[] options) { + super("JsonThrowablePatternConverter", "throwable", options, config); + this.throwablePatternConverter = ExtendedThrowablePatternConverter.newInstance(config, options); + } + + /** + * Gets an instance of the class. + * + * @param config The current Configuration. + * @param options pattern options, may be null. If first element is "short", + * only the first line of the throwable will be formatted. + * @return instance of class. 
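Before the factory and format methods in the next hunk, a standalone sketch of the transformation this converter performs may help: render the throwable to a multi-line string, JSON-escape each line, and join the lines into a "stacktrace" array. The class name and sample input below are made up for illustration; the real converter obtains the rendered text from ExtendedThrowablePatternConverter and splits on the configured separator rather than a bare newline:

```java
import com.fasterxml.jackson.core.io.JsonStringEncoder;

import java.nio.charset.StandardCharsets;
import java.util.StringJoiner;

// Hypothetical demo class: mimics how the converter turns a rendered
// multi-line stacktrace into a JSON string array.
public class StacktraceToJsonDemo {
    public static void main(String[] args) {
        String rendered = "java.lang.Exception: boom\n\tat Demo.run(Demo.java:12)";
        StringJoiner joiner = new StringJoiner(",\n", "\n\"stacktrace\": [", "]");
        for (String line : rendered.split("\n")) {
            // quoteAsUTF8 JSON-escapes the line (tabs become \t, quotes become \", etc.)
            byte[] quoted = JsonStringEncoder.getInstance().quoteAsUTF8(line);
            joiner.add("\"" + new String(quoted, StandardCharsets.UTF_8) + "\"");
        }
        // Prints: "stacktrace": ["java.lang.Exception: boom", "\tat Demo.run(Demo.java:12)"]
        System.out.println(joiner);
    }
}
```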
+ */ + public static JsonThrowablePatternConverter newInstance(final Configuration config, final String[] options) { + return new JsonThrowablePatternConverter(config, options); + } + + /** + * {@inheritDoc} + */ + @Override + public void format(final LogEvent event, final StringBuilder toAppendTo) { + String consoleStacktrace = formatStacktrace(event); + if (Strings.isNotEmpty(consoleStacktrace)) { + String jsonStacktrace = formatJson(consoleStacktrace); + + toAppendTo.append(", "); + toAppendTo.append(jsonStacktrace); + } + } + + private String formatStacktrace(LogEvent event) { + StringBuilder stringBuilder = new StringBuilder(); + throwablePatternConverter.format(event, stringBuilder); + return stringBuilder.toString(); + } + + private String formatJson(String consoleStacktrace) { + String lineSeparator = options.getSeparator() + "\t|" + options.getSeparator(); + String[] split = consoleStacktrace.split(lineSeparator); + + StringJoiner stringJoiner = new StringJoiner(",\n", "\n\"stacktrace\": [", "]"); + for (String line : split) { + stringJoiner.add(wrapAsJson(line)); + } + return stringJoiner.toString(); + } + + private String wrapAsJson(String line) { + byte[] bytes = JsonStringEncoder.getInstance().quoteAsUTF8(line); + return "\"" + new String(bytes, Charset.defaultCharset()) + "\""; + } + + @Override + public boolean handlesThrowable() { + return true; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java new file mode 100644 index 0000000000000..27437947870b4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.pattern.ConverterKeys; +import org.apache.logging.log4j.core.pattern.LogEventPatternConverter; +import org.apache.logging.log4j.core.pattern.PatternConverter; +import org.apache.lucene.util.SetOnce; + +import java.util.Locale; + +/** + * Pattern converter to format the node_and_cluster_id variable into JSON fields node.id and cluster.uuid. + * Keeping those two fields together assures that they will be atomically set and become visible in logs at the same time. + */ +@Plugin(category = PatternConverter.CATEGORY, name = "NodeAndClusterIdConverter") +@ConverterKeys({"node_and_cluster_id"}) +public final class NodeAndClusterIdConverter extends LogEventPatternConverter { + private static final SetOnce nodeAndClusterId = new SetOnce<>(); + + /** + * Called by log4j2 to initialize this converter. 
+ */ + public static NodeAndClusterIdConverter newInstance(@SuppressWarnings("unused") final String[] options) { + return new NodeAndClusterIdConverter(); + } + + public NodeAndClusterIdConverter() { + super("NodeAndClusterId", "node_and_cluster_id"); + } + + /** + * Updates only once the clusterID and nodeId. + * Subsequent executions will throw {@link org.apache.lucene.util.SetOnce.AlreadySetException}. + * + * @param nodeId a nodeId received from cluster state update + * @param clusterUUID a clusterId received from cluster state update + */ + public static void setNodeIdAndClusterId(String nodeId, String clusterUUID) { + nodeAndClusterId.set(formatIds(clusterUUID, nodeId)); + } + + /** + * Formats the node.id and cluster.uuid into json fields. + * + * @param event - a log event is ignored in this method as it uses the nodeId and clusterId to format + */ + @Override + public void format(LogEvent event, StringBuilder toAppendTo) { + if (nodeAndClusterId.get() != null) { + toAppendTo.append(nodeAndClusterId.get()); + } + // nodeId/clusterUuid not received yet, not appending + } + + private static String formatIds(String clusterUUID, String nodeId) { + return String.format(Locale.ROOT, "\"cluster.uuid\": \"%s\", \"node.id\": \"%s\"", clusterUUID, nodeId); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java new file mode 100644 index 0000000000000..e8f636238447a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +/** + * The {@link NodeAndClusterIdStateListener} listens to cluster state changes and ONLY when receives the first update + * it sets the clusterUUID and nodeID in log4j pattern converter {@link NodeAndClusterIdConverter}. + * Once the first update is received, it will automatically be de-registered from subsequent updates. 
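The SetOnce holding the formatted ids is what makes the converter safe to call on every log line both before and after the cluster state arrives: get() simply returns null until the single set() happens. A minimal sketch of those semantics, with hypothetical id values:

```java
import org.apache.lucene.util.SetOnce;

// Hypothetical demo class illustrating the SetOnce semantics the converter relies on.
public class SetOnceDemo {
    public static void main(String[] args) {
        SetOnce<String> ids = new SetOnce<>();
        System.out.println(ids.get());   // null - the converter appends nothing yet
        ids.set("\"cluster.uuid\": \"hypothetical-uuid\", \"node.id\": \"hypothetical-id\"");
        System.out.println(ids.get());   // the formatted fragment, appended to all later lines
        // ids.set("again");             // would throw SetOnce.AlreadySetException
    }
}
```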
+ */ +public class NodeAndClusterIdStateListener implements ClusterStateObserver.Listener { + private static final Logger logger = LogManager.getLogger(NodeAndClusterIdStateListener.class); + + private NodeAndClusterIdStateListener() {} + + /** + * Subscribes for the first cluster state update where nodeId and clusterId is present + * and sets these values in {@link NodeAndClusterIdConverter}. + */ + public static void getAndSetNodeIdAndClusterId(ClusterService clusterService, ThreadContext threadContext) { + ClusterState clusterState = clusterService.state(); + ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, threadContext); + + observer.waitForNextChange(new NodeAndClusterIdStateListener(), NodeAndClusterIdStateListener::isNodeAndClusterIdPresent); + } + + private static boolean isNodeAndClusterIdPresent(ClusterState clusterState) { + return getNodeId(clusterState) != null && getClusterUUID(clusterState) != null; + } + + private static String getClusterUUID(ClusterState state) { + return state.getMetaData().clusterUUID(); + } + + private static String getNodeId(ClusterState state) { + return state.getNodes().getLocalNodeId(); + } + + @Override + public void onNewClusterState(ClusterState state) { + String nodeId = getNodeId(state); + String clusterUUID = getClusterUUID(state); + + logger.debug("Received cluster state update. Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID); + NodeAndClusterIdConverter.setNodeIdAndClusterId(nodeId, clusterUUID); + } + + @Override + public void onClusterServiceClose() {} + + @Override + public void onTimeout(TimeValue timeout) {} +} diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index dfc538c23260f..0ea5f1e78cf8b 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -67,6 +67,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.NodeAndClusterIdStateListener; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; @@ -683,10 +684,15 @@ public Node start() throws NodeValidationException { transportService.acceptIncomingRequests(); discovery.startInitialJoin(); final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings); + NodeAndClusterIdStateListener.getAndSetNodeIdAndClusterId(clusterService, + injector.getInstance(ThreadPool.class).getThreadContext()); + if (initialStateTimeout.millis() > 0) { final ThreadPool thread = injector.getInstance(ThreadPool.class); ClusterState clusterState = clusterService.state(); - ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext()); + ClusterStateObserver observer = + new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext()); + if (clusterState.nodes().getMasterNodeId() == null) { logger.debug("waiting to join the cluster. 
timeout [{}]", initialStateTimeout); final CountDownLatch latch = new CountDownLatch(1); diff --git a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java new file mode 100644 index 0000000000000..d72b598f02865 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.impl.Log4jLogEvent; +import org.apache.logging.log4j.message.SimpleMessage; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.StringReader; + +import static org.hamcrest.Matchers.equalTo; + +public class JsonThrowablePatternConverterTests extends ESTestCase { + JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null); + + public void testNoStacktrace() throws IOException { + LogEvent event = Log4jLogEvent.newBuilder() + .build(); + String result = format(event); + + JsonLogLine jsonLogLine = JsonLogsStream.from(new BufferedReader(new StringReader(result))) + .findFirst() + .orElseThrow(() -> new AssertionError("no logs parsed")); + + assertThat(jsonLogLine.stacktrace(), Matchers.nullValue()); + } + + public void testStacktraceWithJson() throws IOException { + LogManager.getLogger().info("asdf"); + + String json = "{\n" + + " \"terms\" : {\n" + + " \"user\" : [\n" + + " \"u1\",\n" + + " \"u2\",\n" + + " \"u3\"\n" + + " ],\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + Exception thrown = new Exception(json); + LogEvent event = Log4jLogEvent.newBuilder() + .setMessage(new SimpleMessage("message")) + .setThrown(thrown) + .build(); + + String result = format(event); + + //confirms exception is correctly parsed + + JsonLogLine jsonLogLine = JsonLogsStream.from(new BufferedReader(new StringReader(result))) + .findFirst() + .orElseThrow(() -> new AssertionError("no logs parsed")); + + int jsonLength = json.split("\n").length; + int stacktraceLength = thrown.getStackTrace().length; + assertThat("stacktrace should formatted in multiple lines", + jsonLogLine.stacktrace().size(), equalTo(jsonLength + stacktraceLength)); + } + + private String format(LogEvent event) { + StringBuilder builder = new StringBuilder(); + converter.format(event, builder); + String jsonStacktraceElement = builder.toString(); + + return "{\"type\": \"console\", \"timestamp\": \"2019-01-03T16:30:53,058+0100\", \"level\": \"DEBUG\", " + + "\"component\": 
\"o.e.a.s.TransportSearchAction\", \"cluster.name\": \"clustername\", \"node.name\": \"node-0\", " + + "\"cluster.uuid\": \"OG5MkvOrR9azuClJhWvy6Q\", \"node.id\": \"VTShUqmcQG6SzeKY5nn7qA\", \"message\": \"msg msg\" " + + jsonStacktraceElement + "}"; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java new file mode 100644 index 0000000000000..fa8f3d7d27018 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; + +import java.util.List; + + +/** + * Represents a single log line in a json format. + * Parsing log lines with this class confirms the json format of logs + */ +public class JsonLogLine { + public static final ObjectParser PARSER = createParser(false); + + private String type; + private String timestamp; + private String level; + private String component; + private String clusterName; + private String nodeName; + private String clusterUuid; + private String nodeId; + private String message; + private List stacktrace; + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder("JsonLogLine{"); + sb.append("type='").append(type).append('\''); + sb.append(", timestamp='").append(timestamp).append('\''); + sb.append(", level='").append(level).append('\''); + sb.append(", component='").append(component).append('\''); + sb.append(", clusterName='").append(clusterName).append('\''); + sb.append(", nodeName='").append(nodeName).append('\''); + sb.append(", clusterUuid='").append(clusterUuid).append('\''); + sb.append(", nodeId='").append(nodeId).append('\''); + sb.append(", message='").append(message).append('\''); + sb.append(", stacktrace=").append(stacktrace); + sb.append('}'); + return sb.toString(); + } + + public String type() { + return type; + } + + public String timestamp() { + return timestamp; + } + + public String level() { + return level; + } + + public String component() { + return component; + } + + public String clusterName() { + return clusterName; + } + + public String nodeName() { + return nodeName; + } + + public String clusterUuid() { + return clusterUuid; + } + + public String nodeId() { + return nodeId; + } + + public String message() { + return message; + } + + public List stacktrace() { + return stacktrace; + } + + public void setType(String type) { + this.type = type; + } + + public void setTimestamp(String timestamp) { + this.timestamp = timestamp; + } + + public void setLevel(String level) { + this.level = level; + 
} + + public void setComponent(String component) { + this.component = component; + } + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public void setNodeName(String nodeName) { + this.nodeName = nodeName; + } + + public void setClusterUuid(String clusterUuid) { + this.clusterUuid = clusterUuid; + } + + public void setNodeId(String nodeId) { + this.nodeId = nodeId; + } + + public void setMessage(String message) { + this.message = message; + } + + public void setStacktrace(List stacktrace) { + this.stacktrace = stacktrace; + } + + private static ObjectParser createParser(boolean ignoreUnknownFields) { + ObjectParser parser = new ObjectParser<>("search_template", ignoreUnknownFields, JsonLogLine::new); + parser.declareString(JsonLogLine::setType, new ParseField("type")); + parser.declareString(JsonLogLine::setTimestamp, new ParseField("timestamp")); + parser.declareString(JsonLogLine::setLevel, new ParseField("level")); + parser.declareString(JsonLogLine::setComponent, new ParseField("component")); + parser.declareString(JsonLogLine::setClusterName, new ParseField("cluster.name")); + parser.declareString(JsonLogLine::setNodeName, new ParseField("node.name")); + parser.declareString(JsonLogLine::setClusterUuid, new ParseField("cluster.uuid")); + parser.declareString(JsonLogLine::setNodeId, new ParseField("node.id")); + parser.declareString(JsonLogLine::setMessage, new ParseField("message")); + parser.declareStringArray(JsonLogLine::setStacktrace, new ParseField("stacktrace")); + + return parser; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java new file mode 100644 index 0000000000000..d9ba80d6b35de --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Iterator; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.isEmptyOrNullString; +import static org.hamcrest.Matchers.not; + +/** + * Tests that extend this class verify that all json layout fields appear in the first few log lines after startup + * Fields available upon process startup: type, timestamp, level, component, + * message, node.name, cluster.name. 
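For reference, the ObjectParser wiring used by JsonLogLine.PARSER above follows a standard shape: each declareString call binds a JSON field name to a setter on a freshly supplied instance. A minimal sketch with a hypothetical POJO:

```java
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser;

// Hypothetical POJO, present only to show the declare/bind shape.
class Point {
    private String label;

    void setLabel(String label) { this.label = label; }

    String label() { return label; }

    // name, lenient flag (ignore unknown fields), and a supplier of new instances
    static final ObjectParser<Point, Void> PARSER = new ObjectParser<>("point", true, Point::new);
    static {
        PARSER.declareString(Point::setLabel, new ParseField("label"));
    }
}
```

Parsing is then a single PARSER.apply(parser, null) call, which is exactly how JsonLogsStream below consumes JsonLogLine.PARSER.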
+ * Whereas node.id and cluster.uuid are available later once the first clusterState has been received. + * + * + * node.name, cluster.name, node.id, cluster.uuid + * should not change across all log lines + * + * Note that this won't pass for nodes in clusters that don't have the node name defined in elasticsearch.yml and start + * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by LogConfigurator.setNodeName. + */ +public abstract class JsonLogsIntegTestCase extends ESRestTestCase { + /** + * Number of lines in the log file to check for the node.name, node.id or cluster.uuid. We don't + * just check the entire log file because it could be quite long + */ + private static final int LINES_TO_CHECK = 10; + + /** + * The node name to expect in the log file. + */ + protected abstract org.hamcrest.Matcher nodeNameMatcher(); + + /** + * Open the log file. This is delegated to subclasses because the test + * framework doesn't have permission to read from the log file but + * subclasses can grant themselves that permission. + */ + protected abstract BufferedReader openReader(Path logFile); + + public void testElementsPresentOnAllLinesOfLog() throws IOException { + JsonLogLine firstLine = findFirstLine(); + assertNotNull(firstLine); + + try (Stream stream = JsonLogsStream.from(openReader(getLogFile()))) { + stream.limit(LINES_TO_CHECK) + .forEach(jsonLogLine -> { + assertThat(jsonLogLine.type(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.timestamp(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.level(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.component(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.message(), not(isEmptyOrNullString())); + + // all lines should have the same nodeName and clusterName + assertThat(jsonLogLine.nodeName(), nodeNameMatcher()); + assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName())); + }); + } + } + + private JsonLogLine findFirstLine() throws IOException { + try (Stream stream = JsonLogsStream.from(openReader(getLogFile()))) { + return stream.findFirst() + .orElseThrow(() -> new AssertionError("no logs at all?!")); + } + } + + public void testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException { + try (Stream stream = JsonLogsStream.from(openReader(getLogFile()))) { + Iterator iterator = stream.iterator(); + + JsonLogLine firstLine = null; + while (iterator.hasNext()) { + JsonLogLine jsonLogLine = iterator.next(); + if (jsonLogLine.nodeId() != null) { + firstLine = jsonLogLine; + } + } + assertNotNull(firstLine); + + //once the nodeId and clusterId are received, they should be the same on remaining lines + + int i = 0; + while (iterator.hasNext() && i++ < LINES_TO_CHECK) { + JsonLogLine jsonLogLine = iterator.next(); + assertThat(jsonLogLine.nodeId(), equalTo(firstLine.nodeId())); + assertThat(jsonLogLine.clusterUuid(), equalTo(firstLine.clusterUuid())); + } + } + } + + @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") + private Path getLogFile() { + String logFileString = System.getProperty("tests.logfile"); + if (logFileString == null) { + fail("tests.logfile must be set to run this test. It is automatically " + + "set by gradle. 
If you must set it yourself then it should be the absolute path to the " + + "log file."); + } + return Paths.get(logFileString); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java new file mode 100644 index 0000000000000..28ad649f55a79 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Iterator; +import java.util.Spliterator; +import java.util.Spliterators; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +/** + * Returns a stream of json log lines. 
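The class that follows turns a pull-based XContentParser into a lazy Stream via the standard iterator-to-spliterator idiom. A self-contained sketch of that idiom, with plain strings standing in for parsed log lines:

```java
import java.util.Arrays;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

// Hypothetical demo class: wrap an Iterator in a Spliterator of unknown size,
// then expose it as a sequential Stream, as JsonLogsStream does below.
public class IteratorToStreamDemo {
    public static void main(String[] args) {
        Iterator<String> it = Arrays.asList("first json line", "second json line").iterator();
        Spliterator<String> spliterator = Spliterators.spliteratorUnknownSize(it, Spliterator.ORDERED);
        // false => sequential, not parallel - matching JsonLogsStream
        Stream<String> stream = StreamSupport.stream(spliterator, false);
        stream.forEach(System.out::println);
    }
}
```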
+ * This is intended to be used for easy and readable assertions for logger tests + */ +public class JsonLogsStream { + private final XContentParser parser; + private final BufferedReader reader; + + private JsonLogsStream(BufferedReader reader) throws IOException { + this.reader = reader; + this.parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + reader); + } + + public static Stream from(BufferedReader reader) throws IOException { + return new JsonLogsStream(reader).stream(); + } + + public static Stream from(Path path) throws IOException { + return from(Files.newBufferedReader(path)); + } + + private Stream stream() { + Spliterator spliterator = Spliterators.spliteratorUnknownSize(new JsonIterator(), Spliterator.ORDERED); + return StreamSupport.stream(spliterator, false) + .onClose(this::close); + } + + private void close() { + try { + parser.close(); + reader.close(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private class JsonIterator implements Iterator { + + @Override + public boolean hasNext() { + return parser.isClosed() == false; + } + + @Override + public JsonLogLine next() { + JsonLogLine apply = JsonLogLine.PARSER.apply(parser, null); + nextToken(); + return apply; + } + + private void nextToken() { + try { + parser.nextToken(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java deleted file mode 100644 index a8a142096e3dd..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.logging; - -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.test.rest.ESRestTestCase; - -import java.io.BufferedReader; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.regex.Pattern; -import java.util.regex.Matcher; - -import static org.hamcrest.Matchers.containsString; - -/** - * Tests that extend this class verify that the node name appears in the first - * few log lines on startup. Note that this won't pass for clusters that don't - * the node name defined in elasticsearch.yml and start with - * DEBUG or TRACE level logging. Those nodes log a few lines before they - * resolve the node name. - */ -public abstract class NodeNameInLogsIntegTestCase extends ESRestTestCase { - /** - * Number of lines in the log file to check for the node name. 
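As a usage note for JsonLogsStream above: callers should hold the stream in try-with-resources so the onClose hook can release the underlying reader and parser. A minimal sketch, assuming a hypothetical log path:

```java
import org.elasticsearch.common.logging.JsonLogLine;
import org.elasticsearch.common.logging.JsonLogsStream;

import java.io.IOException;
import java.nio.file.Paths;
import java.util.stream.Stream;

// Hypothetical demo class; the log file path is made up for illustration.
public class JsonLogsStreamDemo {
    public static void main(String[] args) throws IOException {
        try (Stream<JsonLogLine> stream = JsonLogsStream.from(Paths.get("/tmp/mycluster_server.json"))) {
            stream.limit(5).forEach(line ->
                System.out.println(line.level() + " " + line.message()));
        }
    }
}
```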
We don't - * just check the entire log file because it could be quite long and - * exceptions don't include the node name. - */ - private static final int LINES_TO_CHECK = 10; - - /** - * The node name to expect in the logs file. - */ - protected abstract org.hamcrest.Matcher nodeNameMatcher(); - - /** - * Open the log file. This is delegated to subclasses because the test - * framework doesn't have permission to read from the log file but - * subclasses can grant themselves that permission. - */ - protected abstract BufferedReader openReader(Path logFile); - - public void testNodeNameIsOnAllLinesOfLog() throws IOException { - BufferedReader logReader = openReader(getLogFile()); - try { - String line = logReader.readLine(); - assertNotNull("no logs at all?!", line); - Matcher m = Pattern.compile("\\] \\[([^\\]]+)\\] ").matcher(line); - if (false == m.find()) { - fail("Didn't see the node name in [" + line + "]"); - } - String nodeName = m.group(1); - - assertThat(nodeName, nodeNameMatcher()); - - int lineNumber = 1; - while (true) { - if (lineNumber < LINES_TO_CHECK) { - break; - } - line = logReader.readLine(); - if (line == null) { - break; // eof - } - lineNumber++; - assertThat(line, containsString("] [" + nodeName + "] ")); - } - } finally { - logReader.close(); - } - } - - @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") - private Path getLogFile() { - String logFileString = System.getProperty("tests.logfile"); - if (null == logFileString) { - fail("tests.logfile must be set to run this test. It is automatically " - + "set by gradle. If you must set it yourself then it should be the absolute path to the " - + "log file."); - } - return Paths.get(logFileString); - } -} diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index 9147d5251b5be..bba9709087a56 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -32,7 +32,7 @@ task writeJavaPolicy { javaPolicy.write( [ "grant {", - " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log\", \"read\";", + " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}_server.json\", \"read\";", "};" ].join("\n")) } @@ -54,7 +54,8 @@ followClusterTestRunner { systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" systemProperty 'tests.target_cluster', 'follow' systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log" + systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/" + + "${-> followClusterTest.getNodes().get(0).clusterName}_server.json" finalizedBy 'leaderClusterTestCluster#stop' } diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index b612d8822437e..8fb305ba06ee6 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ 
b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -11,13 +11,17 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.JsonLogLine; +import org.elasticsearch.common.logging.JsonLogsStream; import org.elasticsearch.common.settings.Settings; +import org.hamcrest.FeatureMatcher; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; import java.io.IOException; -import java.nio.file.Files; -import java.util.Iterator; -import java.util.List; +import java.nio.file.Path; import java.util.Map; +import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.ObjectPath.eval; import static org.hamcrest.Matchers.containsString; @@ -80,25 +84,10 @@ public void testDowngradeRemoteClusterToBasic() throws Exception { // (does not work on windows...) if (Constants.WINDOWS == false) { assertBusy(() -> { - final List lines = Files.readAllLines(PathUtils.get(System.getProperty("log"))); - final Iterator it = lines.iterator(); - boolean warn = false; - while (it.hasNext()) { - final String line = it.next(); - if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " + - "failure occurred while fetching cluster state for auto follow pattern \\[test_pattern\\]")) { - warn = true; - break; - } + Path path = PathUtils.get(System.getProperty("log")); + try (Stream stream = JsonLogsStream.from(path)) { + assertTrue(stream.anyMatch(autoFollowCoordinatorWarn()::matches)); } - assertTrue(warn); - assertTrue(it.hasNext()); - final String lineAfterWarn = it.next(); - assertThat( - lineAfterWarn, - equalTo("org.elasticsearch.ElasticsearchStatusException: " + - "can not fetch remote cluster state as the remote cluster [leader_cluster] is not licensed for [ccr]; " + - "the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]")); }); } }); @@ -108,10 +97,26 @@ public void testDowngradeRemoteClusterToBasic() throws Exception { assertThat(e.getMessage(), containsString("the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]")); } + private Matcher autoFollowCoordinatorWarn() { + return new FeatureMatcher(Matchers.is(true), "autoFollowCoordinatorWarn", "autoFollowCoordinatorWarn") { + + @Override + protected Boolean featureValueOf(JsonLogLine actual) { + return actual.level().equals("WARN") && + actual.component().equals("o.e.x.c.a.AutoFollowCoordinator") && + actual.nodeName().equals("node-0") && + actual.message().contains("failure occurred while fetching cluster state for auto follow pattern [test_pattern]") && + actual.stacktrace().contains("org.elasticsearch.ElasticsearchStatusException: can not fetch remote cluster state " + + "as the remote cluster [leader_cluster] is not licensed for [ccr]; the license mode [BASIC]" + + " on cluster [leader_cluster] does not enable [ccr]"); + } + }; + } + private void createNewIndexAndIndexDocs(RestClient client, String index) throws IOException { Settings settings = Settings.builder() - .put("index.soft_deletes.enabled", true) - .build(); + .put("index.soft_deletes.enabled", true) + .build(); Request request = new Request("PUT", "/" + index); request.setJsonEntity("{\"settings\": " + Strings.toString(settings) + ", \"mappings\": {\"properties\": {\"field\": {\"type\": \"keyword\"}}}}");
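The FeatureMatcher returned by autoFollowCoordinatorWarn() bundles several field checks into one hamcrest matcher. The same idiom can be split out per field for reuse across tests; a minimal sketch, with the helper class and method name being hypothetical:

```java
import org.elasticsearch.common.logging.JsonLogLine;
import org.hamcrest.FeatureMatcher;
import org.hamcrest.Matcher;
import org.hamcrest.Matchers;

// Hypothetical helper: extracts one feature of a JsonLogLine and matches it,
// mirroring the anonymous FeatureMatcher in FollowIndexIT above.
public class LogLineMatchers {
    public static Matcher<JsonLogLine> hasLevel(String expected) {
        return new FeatureMatcher<JsonLogLine, String>(Matchers.equalTo(expected), "log level", "level") {
            @Override
            protected String featureValueOf(JsonLogLine actual) {
                return actual.level();
            }
        };
    }
}
```

A test would then read assertThat(line, LogLineMatchers.hasLevel("WARN")), and a mismatch reports the extracted feature rather than the whole log line.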