forked from elastic/elasticsearch
Address test failures for SmokeTestWatcherWithSecurityIT
There are likely multiple root causes to the seemingly random failures generated by SmokeTestWatcherWithSecurityIT. This commit un-mutes the test, addresses one known cause, and adds debug logging for the test. The known root cause for one failure is that a watch can be running that reads data from an index, and the test deletes that index before it stops Watcher. If the watch happens to execute after the index is deleted but before Watcher is stopped, the test can fail. The fix is simply to move the index deletion to after Watcher is stopped.

Related elastic#35361
Related elastic#30777
Related elastic#33291
Related elastic#29893
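To make the ordering fix concrete, below is a minimal, hypothetical teardown sketch using only the low-level REST client. It is not the actual SmokeTestWatcherWithSecurityIT code: the index name my_test_index and the localhost:9200 address are made up, and the Watcher stop endpoint is /_xpack/watcher/_stop on 6.x but /_watcher/_stop on 7.x.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

// Hypothetical sketch: stop Watcher before deleting the index a watch reads from,
// so that no watch execution can race against the index deletion.
public class WatcherTeardownSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // 1. Stop Watcher first; after this no new watch execution should start.
            //    (Endpoint path shown is the 6.x one; 7.x uses /_watcher/_stop.)
            client.performRequest(new Request("POST", "/_xpack/watcher/_stop"));

            // 2. Only then delete the index the watch was reading from. The opposite order is
            //    the race described above: a watch firing between the delete and the stop sees
            //    a missing index and fails the test.
            client.performRequest(new Request("DELETE", "/my_test_index"));
        }
    }
}

Since the stop call only requests a stop, a real test would typically also poll the Watcher stats until the state reports stopped before continuing with the rest of the teardown.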
1 parent afba887, commit f735ae5
Showing 3 changed files with 281 additions and 2 deletions.
x-pack/qa/smoke-test-watcher-with-security/custom-log4j2.properties: 278 additions & 0 deletions
@@ -0,0 +1,278 @@
#######################################
# custom properties
#######################################

logger.watcher.name = org.elasticsearch.xpack.watcher
logger.watcher.level = debug

logger.core-watcher.name = org.elasticsearch.xpack.core.watcher
logger.core-watcher.level = debug

logger.node.name = org.elasticsearch.node
logger.node.level = debug

#######################################
# copied from distribution/src/config/log4j2.properties
#######################################

status = error

# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug

appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n

######## Server JSON ############################
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json
appender.rolling.layout.type = ESJsonLayout
appender.rolling.layout.type_name = server

appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 128MB
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.fileIndex = nomax
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
################################################
######## Server - old style pattern ###########
appender.rolling_old.type = RollingFile
appender.rolling_old.name = rolling_old
appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.rolling_old.layout.type = PatternLayout
appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n

appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
appender.rolling_old.policies.type = Policies
appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling_old.policies.time.interval = 1
appender.rolling_old.policies.time.modulate = true
appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling_old.policies.size.size = 128MB
appender.rolling_old.strategy.type = DefaultRolloverStrategy
appender.rolling_old.strategy.fileIndex = nomax
appender.rolling_old.strategy.action.type = Delete
appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path}
appender.rolling_old.strategy.action.condition.type = IfFileName
appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB
################################################

rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.rolling.ref = rolling
rootLogger.appenderRef.rolling_old.ref = rolling_old

######## Deprecation JSON #######################
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
appender.deprecation_rolling.layout.type = ESJsonLayout
appender.deprecation_rolling.layout.type_name = deprecation

appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4
#################################################
######## Deprecation - old style pattern #######
appender.deprecation_rolling_old.type = RollingFile
appender.deprecation_rolling_old.name = deprecation_rolling_old
appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
appender.deprecation_rolling_old.layout.type = PatternLayout
appender.deprecation_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n

appender.deprecation_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
  _deprecation-%i.log.gz
appender.deprecation_rolling_old.policies.type = Policies
appender.deprecation_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling_old.policies.size.size = 1GB
appender.deprecation_rolling_old.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling_old.strategy.max = 4
#################################################
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling_old
logger.deprecation.additivity = false

######## Search slowlog JSON ####################
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
  .cluster_name}_index_search_slowlog.json
appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog

appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
  .cluster_name}_index_search_slowlog-%i.json.gz
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.size.size = 1GB
appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy
appender.index_search_slowlog_rolling.strategy.max = 4
#################################################
######## Search slowlog - old style pattern ####
appender.index_search_slowlog_rolling_old.type = RollingFile
appender.index_search_slowlog_rolling_old.name = index_search_slowlog_rolling_old
appender.index_search_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
  _index_search_slowlog.log
appender.index_search_slowlog_rolling_old.layout.type = PatternLayout
appender.index_search_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n

appender.index_search_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
  _index_search_slowlog-%i.log.gz
appender.index_search_slowlog_rolling_old.policies.type = Policies
appender.index_search_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
appender.index_search_slowlog_rolling_old.policies.size.size = 1GB
appender.index_search_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy
appender.index_search_slowlog_rolling_old.strategy.max = 4
#################################################
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling_old.ref = index_search_slowlog_rolling_old
logger.index_search_slowlog_rolling.additivity = false

######## Indexing slowlog JSON ##################
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
  _index_indexing_slowlog.json
appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog

appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
  _index_indexing_slowlog-%i.json.gz
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.size.size = 1GB
appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy
appender.index_indexing_slowlog_rolling.strategy.max = 4
#################################################
######## Indexing slowlog - old style pattern ##
appender.index_indexing_slowlog_rolling_old.type = RollingFile
appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling_old
appender.index_indexing_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
  _index_indexing_slowlog.log
appender.index_indexing_slowlog_rolling_old.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n

appender.index_indexing_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
  _index_indexing_slowlog-%i.log.gz
appender.index_indexing_slowlog_rolling_old.policies.type = Policies
appender.index_indexing_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling_old.policies.size.size = 1GB
appender.index_indexing_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy
appender.index_indexing_slowlog_rolling_old.strategy.max = 4
#################################################

logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old
logger.index_indexing_slowlog.additivity = false

#######################################
# copied from x-pack/plugin/core/src/main/config/log4j2.properties
#######################################
appender.audit_rolling.type = RollingFile
appender.audit_rolling.name = audit_rolling
appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.json
appender.audit_rolling.layout.type = PatternLayout
appender.audit_rolling.layout.pattern = {\
                "@timestamp":"%d{ISO8601}"\
                %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
                %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
                %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
                %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
                %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
                %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
                %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
                %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
                %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
                %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
                %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
                %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
                %varsNotEmpty{, "user.roles":%map{user.roles}}\
                %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
                %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
                %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
                %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
                %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
                %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
                %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
                %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
                %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
                %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
                %varsNotEmpty{, "indices":%map{indices}}\
                %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
                %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
                %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
                %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
                %varsNotEmpty{, "event.category":"%enc{%map{event.category}}{JSON}"}\
                }%n
# "node.name" node name from the `elasticsearch.yml` settings | ||
# "node.id" node id which should not change between cluster restarts | ||
# "host.name" unresolved hostname of the local node | ||
# "host.ip" the local bound ip (i.e. the ip listening for connections) | ||
# "event.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal) | ||
# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. | ||
# "user.name" the subject name as authenticated by a realm | ||
# "user.run_by.name" the original authenticated subject name that is impersonating another one. | ||
# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. | ||
# "user.realm" the name of the realm that authenticated "user.name" | ||
# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name") | ||
# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from | ||
# "user.roles" the roles array of the user; these are the roles that are granting privileges | ||
# "origin.type" it is "rest" if the event is originating (is in relation to) a REST request; possible other values are "transport" and "ip_filter" | ||
# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node | ||
# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated | ||
# "url.path" the URI component between the port and the query string; it is percent (URL) encoded | ||
# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded | ||
# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT | ||
# "request.body" the content of the request body entity, JSON escaped | ||
# "request.id" a synthentic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request | ||
# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal) | ||
# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) | ||
# "indices" the array of indices that the "action" is acting upon | ||
# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header | ||
# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) | ||
# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event | ||
# "rule" name of the applied rulee if the "origin.type" is "ip_filter" | ||
# "event.category" fixed value "elasticsearch-audit" | ||

appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}.json
appender.audit_rolling.policies.type = Policies
appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.audit_rolling.policies.time.interval = 1
appender.audit_rolling.policies.time.modulate = true

logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
logger.xpack_security_audit_logfile.level = info
logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
logger.xpack_security_audit_logfile.additivity = false

logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
logger.xmlsig.level = error
logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
logger.samlxml_decrypt.level = fatal
logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
logger.saml2_decrypt.level = fatal
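A note on the three custom loggers at the top of this file: Log4j2 resolves a logger's effective level from its most specific configured ancestor, so with rootLogger.level = info and logger.watcher.level = debug every class under org.elasticsearch.xpack.watcher logs at debug while unrelated packages stay at info. The sketch below is illustrative only: the class names are invented, and it assumes a configuration with these logger levels has been supplied to Log4j2 (for example via -Dlog4j.configurationFile); loading this exact file outside Elasticsearch would additionally require the Elasticsearch Log4j plugins such as ESJsonLayout and %node_name.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Illustrative only: shows how the logger hierarchy configured above resolves.
public class LoggerHierarchySketch {
    public static void main(String[] args) {
        // Inherits debug from the "org.elasticsearch.xpack.watcher" logger definition above.
        Logger watcherLogger = LogManager.getLogger("org.elasticsearch.xpack.watcher.execution.SomeWatcherClass");
        // No more specific logger is configured for this name, so it falls back to the root logger (info).
        Logger otherLogger = LogManager.getLogger("org.elasticsearch.some.other.SomeClass");

        // With the levels above in effect, the first line prints true and the second prints false.
        System.out.println("watcher debug enabled: " + watcherLogger.isDebugEnabled());
        System.out.println("other debug enabled:   " + otherLogger.isDebugEnabled());
    }
}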