From 863fd58c693f3e6d6b8db3c84e27a7dae6eeb394 Mon Sep 17 00:00:00 2001 From: marko-bekhta Date: Wed, 20 Nov 2024 17:46:21 +0100 Subject: [PATCH] HSEARCH-798 Remove inline logging statements --- .../impl/AwsSigningRequestInterceptor.java | 10 +- .../ElasticsearchAwsHttpClientConfigurer.java | 5 +- .../aws/logging/impl/AwsLog.java | 39 +++++ .../impl/ElasticsearchBackendFactory.java | 5 +- .../index/layout/impl/IndexNames.java | 2 +- .../logging/impl/ConfigurationLog.java | 11 +- .../logging/impl/ElasticsearchLog.java | 2 +- .../logging/impl/MappingLog.java | 9 +- .../lucene/impl/LuceneBackendFactory.java | 2 +- .../lucene/logging/impl/ConfigurationLog.java | 4 + .../lucene/logging/impl/LuceneLog.java | 2 +- .../logging/impl/LuceneSpecificLog.java | 23 +++ .../impl/HibernateSearchMultiReader.java | 4 +- .../writer/impl/IndexWriterDelegatorImpl.java | 2 +- .../writer/impl/IndexWriterProvider.java | 2 +- .../writer/impl/IndexWriterSettings.java | 3 +- .../orchestration/spi/BatchingExecutor.java | 4 +- .../orchestration/spi/SingletonTask.java | 6 +- .../search/engine/logging/impl/EngineLog.java | 2 +- .../engine/logging/impl/ExecutorLog.java | 21 +++ .../core/logging/impl/JakartaBatchLog.java | 7 +- .../impl/HibernateSearchPartitionMapper.java | 2 +- .../cluster/impl/AgentPersister.java | 12 +- .../event/impl/AbstractAgentClusterLink.java | 9 +- .../impl/OutboxPollingEventProcessor.java | 3 +- ...utboxPollingEventProcessorClusterLink.java | 78 ++++----- ...boxPollingMassIndexerAgentClusterLink.java | 16 +- .../OutboxPollingOutboxEventSendingPlan.java | 2 +- .../logging/impl/OutboxPollingEventsLog.java | 155 +++++++++++++++++- .../logging/impl/OutboxPollingLog.java | 2 +- ...HibernateOrmBeanContainerBeanProvider.java | 2 +- .../HibernateSearchPreIntegrationService.java | 2 +- .../orm/common/spi/TransactionHelper.java | 9 +- .../impl/HibernateSearchEventListener.java | 2 +- .../HibernateOrmMassIdentifierLoader.java | 2 +- .../orm/logging/impl/ConfigurationLog.java | 26 
++- .../mapper/orm/logging/impl/IndexingLog.java | 19 ++- .../mapper/orm/logging/impl/LoadingLog.java | 6 + .../mapper/orm/logging/impl/OrmLog.java | 2 +- .../orm/logging/impl/OrmSpecificLog.java | 35 +++- ...fterCommitIndexingPlanSynchronization.java | 11 +- ...foreCommitIndexingPlanSynchronization.java | 9 +- .../ConfiguredAutomaticIndexingStrategy.java | 4 +- .../session/impl/SynchronizationAdapter.java | 14 +- .../mapper/pojo/logging/impl/MappingLog.java | 3 +- .../pojo/logging/impl/MassIndexingLog.java | 31 +++- .../pojo/logging/impl/PojoMapperLog.java | 2 +- ...ojoMassIndexingBatchIndexingWorkspace.java | 2 +- ...dexingEntityIdentifierLoadingRunnable.java | 6 +- ...PojoMassIndexingEntityLoadingRunnable.java | 6 +- .../PojoMassIndexingIndexedTypeGroup.java | 6 +- ...CancellableExecutionCompletableFuture.java | 8 +- .../util/common/jar/impl/JandexUtils.java | 4 +- .../logging/impl/CommonFailuresLog.java | 28 +++- 54 files changed, 507 insertions(+), 176 deletions(-) diff --git a/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/impl/AwsSigningRequestInterceptor.java b/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/impl/AwsSigningRequestInterceptor.java index 951fd30a3d5..d854ce58be6 100644 --- a/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/impl/AwsSigningRequestInterceptor.java +++ b/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/impl/AwsSigningRequestInterceptor.java @@ -58,12 +58,12 @@ private void sign(HttpRequest request, HttpContext context, HttpEntityContentStr SdkHttpFullRequest awsRequest = toAwsRequest( request, context, contentStreamProvider ); if ( AWS_LOGGER.isTraceEnabled() ) { - AWS_LOGGER.tracef( "HTTP request (before signing): %s", request ); - AWS_LOGGER.tracef( "AWS request (before signing): %s", awsRequest ); + AWS_LOGGER.httpRequestBeforeSigning( request ); + 
AWS_LOGGER.awsRequestBeforeSigning( awsRequest ); } AwsCredentials credentials = credentialsProvider.resolveCredentials(); - AWS_LOGGER.tracef( "AWS credentials: %s", credentials ); + AWS_LOGGER.awsCredentials( credentials ); SignedRequest signedRequest = signer.sign( r -> r.identity( credentials ) .request( awsRequest ) @@ -89,8 +89,8 @@ private void sign(HttpRequest request, HttpContext context, HttpEntityContentStr } if ( AWS_LOGGER.isTraceEnabled() ) { - AWS_LOGGER.tracef( "AWS request (after signing): %s", signedRequest ); - AWS_LOGGER.tracef( "HTTP request (after signing): %s", request ); + AWS_LOGGER.awsRequestAfterSigning( signedRequest ); + AWS_LOGGER.httpRequestAfterSigning( request ); } } diff --git a/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/impl/ElasticsearchAwsHttpClientConfigurer.java b/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/impl/ElasticsearchAwsHttpClientConfigurer.java index ff957501d00..8e744bcad5a 100644 --- a/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/impl/ElasticsearchAwsHttpClientConfigurer.java +++ b/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/impl/ElasticsearchAwsHttpClientConfigurer.java @@ -58,7 +58,7 @@ public void configure(ElasticsearchHttpClientConfigurationContext context) { ConfigurationPropertySource propertySource = context.configurationPropertySource(); if ( !SIGNING_ENABLED.get( propertySource ) ) { - AWS_LOGGER.debug( "AWS request signing is disabled."
); + AWS_LOGGER.signingDisabled(); return; } @@ -77,8 +77,7 @@ public void configure(ElasticsearchHttpClientConfigurationContext context) { } AwsCredentialsProvider credentialsProvider = createCredentialsProvider( context.beanResolver(), propertySource ); - AWS_LOGGER.debugf( "AWS request signing is enabled [region = '%s', service = '%s', credentialsProvider = '%s'].", - region, service, credentialsProvider ); + AWS_LOGGER.signingEnabled( region, service, credentialsProvider ); AwsSigningRequestInterceptor signingInterceptor = new AwsSigningRequestInterceptor( region, service, credentialsProvider ); diff --git a/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/logging/impl/AwsLog.java b/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/logging/impl/AwsLog.java index 7f0581ce814..b369c93ade2 100644 --- a/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/logging/impl/AwsLog.java +++ b/backend/elasticsearch-aws/src/main/java/org/hibernate/search/backend/elasticsearch/aws/logging/impl/AwsLog.java @@ -5,6 +5,9 @@ package org.hibernate.search.backend.elasticsearch.aws.logging.impl; +import static org.jboss.logging.Logger.Level.DEBUG; +import static org.jboss.logging.Logger.Level.TRACE; + import java.lang.invoke.MethodHandles; import org.hibernate.search.util.common.SearchException; @@ -13,11 +16,19 @@ import org.hibernate.search.util.common.logging.impl.MessageConstants; import org.jboss.logging.BasicLogger; +import org.jboss.logging.annotations.LogMessage; import org.jboss.logging.annotations.Message; import org.jboss.logging.annotations.MessageLogger; import org.jboss.logging.annotations.ValidIdRange; import org.jboss.logging.annotations.ValidIdRanges; +import org.apache.http.HttpRequest; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import 
software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.auth.spi.signer.SignedRequest; +import software.amazon.awssdk.regions.Region; + @CategorizedLogger( category = AwsLog.CATEGORY_NAME, description = """ @@ -63,4 +74,32 @@ SearchException obsoleteAccessKeyIdOrSecretAccessKeyForSigning(String legacyAcce String credentialsTypePropertyKey, String credentialsTypePropertyValueStatic, String accessKeyIdPropertyKey, String secretAccessKeyPropertyKey); + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 3, value = "HTTP request (before signing): %s") + void httpRequestBeforeSigning(HttpRequest request); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 4, value = "AWS request (before signing): %s") + void awsRequestBeforeSigning(SdkHttpFullRequest awsRequest); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 5, value = "AWS credentials: %s") + void awsCredentials(AwsCredentials credentials); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 6, value = "AWS request (after signing): %s") + void awsRequestAfterSigning(SignedRequest signedRequest); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 7, value = "HTTP request (after signing): %s") + void httpRequestAfterSigning(HttpRequest request); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 8, value = "AWS request signing is disabled.") + void signingDisabled(); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 9, + value = "AWS request signing is enabled [region = '%s', service = '%s', credentialsProvider = '%s'].") + void signingEnabled(Region region, String service, AwsCredentialsProvider credentialsProvider); } diff --git a/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/impl/ElasticsearchBackendFactory.java b/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/impl/ElasticsearchBackendFactory.java index 32000e22f84..b0371627a1a 100644 ---
a/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/impl/ElasticsearchBackendFactory.java +++ b/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/impl/ElasticsearchBackendFactory.java @@ -92,10 +92,7 @@ public BackendImplementor create(EventContext eventContext, BackendBuildContext CLIENT_FACTORY.getAndMap( propertySource, beanResolver::resolve ); if ( customClientFactoryHolderOptional.isPresent() ) { clientFactoryHolder = customClientFactoryHolderOptional.get(); - CONFIGURATION_LOGGER.debugf( - "Elasticsearch backend will use client factory '%s'. Context: %s", - clientFactoryHolder, eventContext.render() - ); + CONFIGURATION_LOGGER.backendClientFactory( clientFactoryHolder, eventContext.render() ); } else { clientFactoryHolder = BeanHolder.of( new ElasticsearchClientFactoryImpl() ); diff --git a/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/index/layout/impl/IndexNames.java b/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/index/layout/impl/IndexNames.java index f4ec1693e00..ca145990aa9 100644 --- a/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/index/layout/impl/IndexNames.java +++ b/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/index/layout/impl/IndexNames.java @@ -15,7 +15,7 @@ public final class IndexNames { public static String normalizeName(String indexName) { String esIndexName = indexName.toLowerCase( Locale.ENGLISH ); if ( !esIndexName.equals( indexName ) ) { - MAPPING_LOGGER.debugf( "Normalizing index name from '%1$s' to '%2$s'", indexName, esIndexName ); + MAPPING_LOGGER.normalizeIndexName( indexName, esIndexName ); } return esIndexName; } diff --git a/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/ConfigurationLog.java 
b/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/ConfigurationLog.java index 4e4ff7583ac..e1f62e66b79 100644 --- a/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/ConfigurationLog.java +++ b/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/ConfigurationLog.java @@ -6,19 +6,21 @@ import static org.hibernate.search.backend.elasticsearch.logging.impl.ElasticsearchLog.ID_OFFSET; import static org.hibernate.search.backend.elasticsearch.logging.impl.ElasticsearchLog.ID_OFFSET_LEGACY_ES; +import static org.jboss.logging.Logger.Level.DEBUG; import java.lang.invoke.MethodHandles; import java.util.List; import java.util.Set; +import org.hibernate.search.engine.environment.bean.BeanHolder; import org.hibernate.search.util.common.SearchException; import org.hibernate.search.util.common.logging.CategorizedLogger; import org.hibernate.search.util.common.logging.impl.LoggerFactory; import org.hibernate.search.util.common.logging.impl.MessageConstants; import org.hibernate.search.util.common.reporting.EventContext; -import org.jboss.logging.BasicLogger; import org.jboss.logging.annotations.Cause; +import org.jboss.logging.annotations.LogMessage; import org.jboss.logging.annotations.Message; import org.jboss.logging.annotations.MessageLogger; import org.jboss.logging.annotations.Param; @@ -30,7 +32,7 @@ """ ) @MessageLogger(projectCode = MessageConstants.PROJECT_CODE) -public interface ConfigurationLog extends BasicLogger { +public interface ConfigurationLog { String CATEGORY_NAME = "org.hibernate.search.configuration"; ConfigurationLog CONFIGURATION_LOGGER = LoggerFactory.make( ConfigurationLog.class, CATEGORY_NAME, MethodHandles.lookup() ); @@ -114,4 +116,9 @@ public interface ConfigurationLog extends BasicLogger { value = "Invalid backend configuration: mapping requires single-tenancy" + " but multi-tenancy strategy is set.") SearchException 
multiTenancyNotRequiredButExplicitlyEnabledByTheBackend(); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 192, + value = "Elasticsearch backend will use client factory '%s'. Context: %s") + void backendClientFactory(BeanHolder clientFactoryHolder, String eventContext); } diff --git a/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/ElasticsearchLog.java b/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/ElasticsearchLog.java index 6e6455ab5fb..bd9664ba7f9 100644 --- a/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/ElasticsearchLog.java +++ b/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/ElasticsearchLog.java @@ -41,6 +41,6 @@ public interface ElasticsearchLog * here to the next value. */ @LogMessage(level = TRACE) - @Message(id = ID_OFFSET + 191, value = "") + @Message(id = ID_OFFSET + 193, value = "") void nextLoggerIdForConvenience(); } diff --git a/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/MappingLog.java b/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/MappingLog.java index bdb32e54c55..c5db5b1a4d4 100644 --- a/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/MappingLog.java +++ b/backend/elasticsearch/src/main/java/org/hibernate/search/backend/elasticsearch/logging/impl/MappingLog.java @@ -5,6 +5,7 @@ package org.hibernate.search.backend.elasticsearch.logging.impl; import static org.hibernate.search.backend.elasticsearch.logging.impl.ElasticsearchLog.ID_OFFSET; +import static org.jboss.logging.Logger.Level.DEBUG; import java.lang.invoke.MethodHandles; import java.util.Set; @@ -20,9 +21,9 @@ import org.hibernate.search.util.common.logging.impl.MessageConstants; import org.hibernate.search.util.common.reporting.EventContext; -import 
org.jboss.logging.BasicLogger; import org.jboss.logging.annotations.Cause; import org.jboss.logging.annotations.FormatWith; +import org.jboss.logging.annotations.LogMessage; import org.jboss.logging.annotations.Message; import org.jboss.logging.annotations.MessageLogger; import org.jboss.logging.annotations.Param; @@ -34,7 +35,7 @@ """ ) @MessageLogger(projectCode = MessageConstants.PROJECT_CODE) -public interface MappingLog extends BasicLogger { +public interface MappingLog { String CATEGORY_NAME = "org.hibernate.search.mapping"; MappingLog MAPPING_LOGGER = LoggerFactory.make( MappingLog.class, CATEGORY_NAME, MethodHandles.lookup() ); @@ -153,4 +154,8 @@ SearchException cannotGuessVectorFieldType(@FormatWith(ClassFormatter.class) Cla value = "The OpenSearch distribution does not allow using %1$s as a space type for a Lucene engine." + " Try using a different similarity type and refer to the OpenSearch documentation for more details.") SearchException vectorSimilarityNotSupportedByOpenSearchBackend(VectorSimilarity vectorSimilarity); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 191, value = "Normalizing index name from '%1$s' to '%2$s'") + void normalizeIndexName(String indexName, String esIndexName); } diff --git a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/impl/LuceneBackendFactory.java b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/impl/LuceneBackendFactory.java index 4526bf76ed6..be78a8df86d 100644 --- a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/impl/LuceneBackendFactory.java +++ b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/impl/LuceneBackendFactory.java @@ -113,7 +113,7 @@ private Version getLuceneVersion(EventContext backendContext, ConfigurationPrope if ( luceneVersionOptional.isPresent() ) { luceneVersion = luceneVersionOptional.get(); if ( CONFIGURATION_LOGGER.isDebugEnabled() ) { - CONFIGURATION_LOGGER.debug( "Setting Lucene compatibility to Version " + 
luceneVersion ); + CONFIGURATION_LOGGER.luceneCompatibilityVersion( luceneVersion ); } } else { diff --git a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/ConfigurationLog.java b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/ConfigurationLog.java index d144a641dc0..746f54c6453 100644 --- a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/ConfigurationLog.java +++ b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/ConfigurationLog.java @@ -21,6 +21,7 @@ import org.hibernate.search.util.common.reporting.EventContext; import org.jboss.logging.BasicLogger; +import org.jboss.logging.Logger; import org.jboss.logging.annotations.Cause; import org.jboss.logging.annotations.FormatWith; import org.jboss.logging.annotations.LogMessage; @@ -124,4 +125,7 @@ void recommendConfiguringLuceneVersion(String key, Version latest, + " but multi-tenancy strategy is set.") SearchException multiTenancyNotRequiredButExplicitlyEnabledByTheBackend(); + @LogMessage(level = Logger.Level.DEBUG) + @Message(id = ID_OFFSET + 188, value = "Setting Lucene compatibility to Version %s") + void luceneCompatibilityVersion(Version luceneVersion); } diff --git a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/LuceneLog.java b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/LuceneLog.java index 9eab96528e4..58eedec979c 100644 --- a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/LuceneLog.java +++ b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/LuceneLog.java @@ -63,6 +63,6 @@ public interface LuceneLog * here to the next value. 
*/ @LogMessage(level = TRACE) - @Message(id = ID_OFFSET + 188, value = "") + @Message(id = ID_OFFSET + 194, value = "") void nextLoggerIdForConvenience(); } diff --git a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/LuceneSpecificLog.java b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/LuceneSpecificLog.java index 1f755d19ed0..83cc23cbeb8 100644 --- a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/LuceneSpecificLog.java +++ b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/logging/impl/LuceneSpecificLog.java @@ -6,12 +6,15 @@ import static org.hibernate.search.backend.lucene.logging.impl.LuceneLog.ID_OFFSET; import static org.hibernate.search.backend.lucene.logging.impl.LuceneLog.ID_OFFSET_LEGACY_ENGINE; +import static org.jboss.logging.Logger.Level.DEBUG; import static org.jboss.logging.Logger.Level.INFO; +import static org.jboss.logging.Logger.Level.TRACE; import static org.jboss.logging.Logger.Level.WARN; import java.lang.invoke.MethodHandles; import java.nio.file.Path; +import org.hibernate.search.backend.lucene.lowlevel.reader.impl.HibernateSearchMultiReader; import org.hibernate.search.util.common.SearchException; import org.hibernate.search.util.common.logging.CategorizedLogger; import org.hibernate.search.util.common.logging.impl.EventContextFormatter; @@ -137,4 +140,24 @@ SearchException uncommittedOperationsBecauseOfFailure(String causeMessage, @Message(id = ID_OFFSET + 187, value = "Unable to refresh an index reader: %1$s") SearchException unableToRefresh(String causeMessage, @Param EventContext context, @Cause Exception cause); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 189, value = "Closing MultiReader: %s") + void closingMultiReader(HibernateSearchMultiReader hibernateSearchMultiReader); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 190, value = "MultiReader closed: %s") + void 
closedMultiReader(HibernateSearchMultiReader hibernateSearchMultiReader); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 191, value = "IndexWriter closed") + void closedIndexWriter(); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 192, value = "IndexWriter opened") + void openedIndexWriter(); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 193, value = "Set index writer parameter %s to value : %s. %s") + void indexWriterSetParameter(String settingName, Object value, String context); } diff --git a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/reader/impl/HibernateSearchMultiReader.java b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/reader/impl/HibernateSearchMultiReader.java index f9da201fdd9..c6635350fc2 100644 --- a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/reader/impl/HibernateSearchMultiReader.java +++ b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/reader/impl/HibernateSearchMultiReader.java @@ -76,7 +76,7 @@ public IndexReaderMetadataResolver getMetadataResolver() { protected synchronized void doClose() throws IOException { final boolean traceEnabled = LUCENE_SPECIFIC_LOGGER.isTraceEnabled(); if ( traceEnabled ) { - LUCENE_SPECIFIC_LOGGER.tracef( "Closing MultiReader: %s", this ); + LUCENE_SPECIFIC_LOGGER.closingMultiReader( this ); } try ( Closer closer = new Closer<>() ) { /* @@ -87,7 +87,7 @@ protected synchronized void doClose() throws IOException { closer.pushAll( DirectoryReader::decRef, directoryReaders ); } if ( traceEnabled ) { - LUCENE_SPECIFIC_LOGGER.trace( "MultiReader closed." 
); + LUCENE_SPECIFIC_LOGGER.closedMultiReader( this ); } } diff --git a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterDelegatorImpl.java b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterDelegatorImpl.java index e4cf11f0d83..8aa4ec1bbe3 100644 --- a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterDelegatorImpl.java +++ b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterDelegatorImpl.java @@ -150,7 +150,7 @@ void close() throws IOException { finally { commitLock.unlock(); } - LUCENE_SPECIFIC_LOGGER.trace( "IndexWriter closed" ); + LUCENE_SPECIFIC_LOGGER.closedIndexWriter(); } } diff --git a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterProvider.java b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterProvider.java index 529ea797bb9..d727b76b74d 100644 --- a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterProvider.java +++ b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterProvider.java @@ -123,7 +123,7 @@ public IndexWriterDelegatorImpl getOrCreate() throws IOException { failureHandler, this::clearAfterFailure ); - LUCENE_SPECIFIC_LOGGER.trace( "IndexWriter opened" ); + LUCENE_SPECIFIC_LOGGER.openedIndexWriter(); currentWriter.set( indexWriterDelegator ); } } diff --git a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterSettings.java b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterSettings.java index 4a11767fb1e..03b17981044 100644 --- a/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterSettings.java +++ 
b/backend/lucene/src/main/java/org/hibernate/search/backend/lucene/lowlevel/writer/impl/IndexWriterSettings.java @@ -137,8 +137,7 @@ private IndexWriterSettingValue createValueOrNull(T value, EventContext event return null; } if ( LUCENE_SPECIFIC_LOGGER.isDebugEnabled() ) { - LUCENE_SPECIFIC_LOGGER.debugf( "Set index writer parameter %s to value : %s. %s", - settingName, value, eventContext.renderWithPrefix() ); + LUCENE_SPECIFIC_LOGGER.indexWriterSetParameter( settingName, value, eventContext.renderWithPrefix() ); } R processedValue = processor.apply( value ); return new IndexWriterSettingValue<>( settingName, processedValue, diff --git a/engine/src/main/java/org/hibernate/search/engine/backend/orchestration/spi/BatchingExecutor.java b/engine/src/main/java/org/hibernate/search/engine/backend/orchestration/spi/BatchingExecutor.java index b492894899b..efa9e26e747 100644 --- a/engine/src/main/java/org/hibernate/search/engine/backend/orchestration/spi/BatchingExecutor.java +++ b/engine/src/main/java/org/hibernate/search/engine/backend/orchestration/spi/BatchingExecutor.java @@ -176,7 +176,7 @@ public CompletableFuture work() { int workCount = workBuffer.size(); boolean traceEnabled = EXECUTOR_LOGGER.isTraceEnabled(); if ( traceEnabled ) { - EXECUTOR_LOGGER.tracef( "Processing %d works in executor '%s'", workCount, name ); + EXECUTOR_LOGGER.numberOfWorksInExecutor( workCount, name ); } processor.beginBatch(); @@ -193,7 +193,7 @@ public CompletableFuture work() { CompletableFuture future = processor.endBatch(); if ( traceEnabled ) { future.whenComplete( (result, throwable) -> { - EXECUTOR_LOGGER.tracef( "Processed %d works in executor '%s'", workCount, name ); + EXECUTOR_LOGGER.numberOfProcessedWorksInExecutor( workCount, name ); } ); } diff --git a/engine/src/main/java/org/hibernate/search/engine/backend/orchestration/spi/SingletonTask.java b/engine/src/main/java/org/hibernate/search/engine/backend/orchestration/spi/SingletonTask.java index 0ce0b9ddf9f..c234d8ace7d 
100644 --- a/engine/src/main/java/org/hibernate/search/engine/backend/orchestration/spi/SingletonTask.java +++ b/engine/src/main/java/org/hibernate/search/engine/backend/orchestration/spi/SingletonTask.java @@ -68,7 +68,7 @@ public void ensureScheduled() { return; } - EXECUTOR_LOGGER.tracef( "Scheduling task '%s'.", name ); + EXECUTOR_LOGGER.schedulingTask( name ); /* * Our thread successfully switched the status: @@ -191,7 +191,7 @@ public void run() { needsRun = false; nextExecutionFuture = null; try { - EXECUTOR_LOGGER.tracef( "Running task '%s'", name ); + EXECUTOR_LOGGER.runningTask( name ); worker.work().handle( workFinishedHandler ); } catch (Throwable e) { @@ -220,7 +220,7 @@ private void afterRun() { // First, tell the worker that we're done. try { - EXECUTOR_LOGGER.tracef( "Completed task '%s'", name ); + EXECUTOR_LOGGER.completedTask( name ); worker.complete(); } catch (Throwable e) { diff --git a/engine/src/main/java/org/hibernate/search/engine/logging/impl/EngineLog.java b/engine/src/main/java/org/hibernate/search/engine/logging/impl/EngineLog.java index a28b58dc927..e886a51a9f2 100644 --- a/engine/src/main/java/org/hibernate/search/engine/logging/impl/EngineLog.java +++ b/engine/src/main/java/org/hibernate/search/engine/logging/impl/EngineLog.java @@ -45,6 +45,6 @@ public interface EngineLog * here to the next value. 
*/ @LogMessage(level = TRACE) - @Message(id = ID_OFFSET + 130, value = "") + @Message(id = ID_OFFSET + 135, value = "") void nextLoggerIdForConvenience(); } diff --git a/engine/src/main/java/org/hibernate/search/engine/logging/impl/ExecutorLog.java b/engine/src/main/java/org/hibernate/search/engine/logging/impl/ExecutorLog.java index 538f20d195f..306c6051d26 100644 --- a/engine/src/main/java/org/hibernate/search/engine/logging/impl/ExecutorLog.java +++ b/engine/src/main/java/org/hibernate/search/engine/logging/impl/ExecutorLog.java @@ -7,6 +7,7 @@ import static org.hibernate.search.engine.logging.impl.EngineLog.ID_OFFSET; import static org.hibernate.search.engine.logging.impl.EngineLog.ID_OFFSET_LEGACY; import static org.jboss.logging.Logger.Level.DEBUG; +import static org.jboss.logging.Logger.Level.TRACE; import java.lang.invoke.MethodHandles; @@ -59,4 +60,24 @@ public interface ExecutorLog extends BasicLogger { + " The work has been discarded.") SearchException submittedWorkToStoppedOrchestrator(String orchestratorName); + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 130, value = "Processing %d works in executor '%s'") + void numberOfWorksInExecutor(int workCount, String name); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 131, value = "Processed %d works in executor '%s'") + void numberOfProcessedWorksInExecutor(int workCount, String name); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 132, value = "Scheduling task '%s'.") + void schedulingTask(String name); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 133, value = "Running task '%s'") + void runningTask(String name); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 134, value = "Completed task '%s'") + void completedTask(String name); + } diff --git a/mapper/orm-jakarta-batch/core/src/main/java/org/hibernate/search/jakarta/batch/core/logging/impl/JakartaBatchLog.java 
b/mapper/orm-jakarta-batch/core/src/main/java/org/hibernate/search/jakarta/batch/core/logging/impl/JakartaBatchLog.java index b7a975f7b58..9f821fe754a 100644 --- a/mapper/orm-jakarta-batch/core/src/main/java/org/hibernate/search/jakarta/batch/core/logging/impl/JakartaBatchLog.java +++ b/mapper/orm-jakarta-batch/core/src/main/java/org/hibernate/search/jakarta/batch/core/logging/impl/JakartaBatchLog.java @@ -13,7 +13,6 @@ import org.hibernate.search.util.common.logging.impl.LoggerFactory; import org.hibernate.search.util.common.logging.impl.MessageConstants; -import org.jboss.logging.BasicLogger; import org.jboss.logging.Logger; import org.jboss.logging.Logger.Level; import org.jboss.logging.annotations.Cause; @@ -39,7 +38,7 @@ @ValidIdRange(min = MessageConstants.JAKARTA_BATCH_CORE_ID_RANGE_MIN, max = MessageConstants.JAKARTA_BATCH_CORE_ID_RANGE_MAX), }) -public interface JakartaBatchLog extends BasicLogger { +public interface JakartaBatchLog { String CATEGORY_NAME = "org.hibernate.search.mapper.jakarta.batch"; @@ -199,4 +198,8 @@ SearchException unableToParseJobParameter(String parameterName, Object parameter value = "Both \"dropAndCreateSchemaOnStart\" and \"purgeAllOnStart\" are enabled. 
" + "Consider having just one setting enabled as after the index is recreated there is nothing to purge.") void redundantPurgeAfterDrop(); + + @LogMessage(level = Level.DEBUG) + @Message(id = ID_OFFSET + 38, value = "Partitions: %s") + void listPartitions(String partitions); } diff --git a/mapper/orm-jakarta-batch/core/src/main/java/org/hibernate/search/jakarta/batch/core/massindexing/step/impl/HibernateSearchPartitionMapper.java b/mapper/orm-jakarta-batch/core/src/main/java/org/hibernate/search/jakarta/batch/core/massindexing/step/impl/HibernateSearchPartitionMapper.java index d20cad737be..5511f19954e 100644 --- a/mapper/orm-jakarta-batch/core/src/main/java/org/hibernate/search/jakarta/batch/core/massindexing/step/impl/HibernateSearchPartitionMapper.java +++ b/mapper/orm-jakarta-batch/core/src/main/java/org/hibernate/search/jakarta/batch/core/massindexing/step/impl/HibernateSearchPartitionMapper.java @@ -162,7 +162,7 @@ public PartitionPlan mapPartitions() throws Exception { ); } - JAKARTA_BATCH_LOGGER.debugf( "Partitions: %s", Arrays.toString( props ) ); + JAKARTA_BATCH_LOGGER.listPartitions( Arrays.toString( props ) ); PartitionPlan partitionPlan = new PartitionPlanImpl(); partitionPlan.setPartitionProperties( props ); diff --git a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/cluster/impl/AgentPersister.java b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/cluster/impl/AgentPersister.java index 2b04142862d..2da6bfc0e69 100644 --- a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/cluster/impl/AgentPersister.java +++ b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/cluster/impl/AgentPersister.java @@ -59,7 +59,7 @@ public void createSelf(AgentRepository agentRepository, Instant expiration) { Agent self = new Agent( type, name, expiration, AgentState.SUSPENDED, staticShardAssignment ); agentRepository.create( 
self ); selfReference = self.getReference(); - OUTBOX_POLLING_EVENTS_LOGGER.infof( "Agent '%s': registering", selfReference ); + OUTBOX_POLLING_EVENTS_LOGGER.agentRegistering( selfReference ); } public void leaveCluster(AgentRepository store) { @@ -67,7 +67,7 @@ public void leaveCluster(AgentRepository store) { // We never even joined the cluster return; } - OUTBOX_POLLING_EVENTS_LOGGER.infof( "Agent '%s': leaving cluster", selfReference ); + OUTBOX_POLLING_EVENTS_LOGGER.agentLeaving( selfReference ); Agent agent = store.find( selfReference.id ); if ( agent != null ) { store.delete( Collections.singletonList( agent ) ); @@ -76,7 +76,7 @@ public void leaveCluster(AgentRepository store) { public void setSuspended(Agent self) { if ( self.getState() != AgentState.SUSPENDED ) { - OUTBOX_POLLING_EVENTS_LOGGER.infof( "Agent '%s': suspending", selfReference ); + OUTBOX_POLLING_EVENTS_LOGGER.agentSuspending( selfReference ); self.setState( AgentState.SUSPENDED ); } if ( staticShardAssignment == null ) { @@ -88,8 +88,7 @@ public void setSuspended(Agent self) { public void setWaiting(Agent self, ClusterDescriptor clusterDescriptor, ShardAssignmentDescriptor shardAssignment) { if ( self.getState() != AgentState.WAITING ) { - OUTBOX_POLLING_EVENTS_LOGGER.infof( "Agent '%s': waiting for cluster changes. Shard assignment: %s. Cluster: %s", - selfReference, shardAssignment, clusterDescriptor ); + OUTBOX_POLLING_EVENTS_LOGGER.agentWaiting( selfReference, shardAssignment, clusterDescriptor ); self.setState( AgentState.WAITING ); } if ( staticShardAssignment == null && shardAssignment != null ) { @@ -100,8 +99,7 @@ public void setWaiting(Agent self, ClusterDescriptor clusterDescriptor, public void setRunning(Agent self, ClusterDescriptor clusterDescriptor) { if ( self.getState() != AgentState.RUNNING ) { - OUTBOX_POLLING_EVENTS_LOGGER.infof( "Agent '%s': running. Shard assignment: %s. 
Cluster: %s", - selfReference, self.getShardAssignment(), clusterDescriptor ); + OUTBOX_POLLING_EVENTS_LOGGER.agentRunning( selfReference, self.getShardAssignment(), clusterDescriptor ); self.setState( AgentState.RUNNING ); } } diff --git a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/AbstractAgentClusterLink.java b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/AbstractAgentClusterLink.java index d7a9a5d6f39..58e7b9120d3 100644 --- a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/AbstractAgentClusterLink.java +++ b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/AbstractAgentClusterLink.java @@ -63,8 +63,7 @@ public final R pulse(AgentClusterLinkContext context) { List allAgentsInIdOrder = context.agentRepository().findAllOrderById(); Instant now = clock.instant(); - OUTBOX_POLLING_EVENTS_LOGGER.tracef( "Agent '%s': starting pulse at %s with self = %s, all agents = %s", - selfReference(), now, self, allAgentsInIdOrder ); + OUTBOX_POLLING_EVENTS_LOGGER.agentPulseStarting( selfReference(), now, self, allAgentsInIdOrder ); // In order to avoid transaction deadlocks with some RDBMS (and this time I mean Oracle), // we make sure that if we need to delete expired agents, @@ -84,8 +83,7 @@ public final R pulse(AgentClusterLinkContext context) { if ( !timedOutAgents.isEmpty() ) { OUTBOX_POLLING_EVENTS_LOGGER.removingTimedOutAgents( selfReference(), timedOutAgents ); context.agentRepository().delete( timedOutAgents ); - OUTBOX_POLLING_EVENTS_LOGGER.infof( "Agent '%s': reassessing the new situation in the next pulse", - selfReference() ); + OUTBOX_POLLING_EVENTS_LOGGER.agentReassessing( selfReference() ); return instructCommitAndRetryPulseAfterDelay( now, pollingInterval ); } @@ -103,8 +101,7 @@ public final R pulse(AgentClusterLinkContext context) { // Delay expiration with 
each write self.setExpiration( now.plus( pulseExpiration ) ); R instructions = pulseResult.applyAndReturnInstructions( now, self, agentPersister ); - OUTBOX_POLLING_EVENTS_LOGGER.tracef( "Agent '%s': ending pulse at %s with self = %s", - selfReference(), now, self ); + OUTBOX_POLLING_EVENTS_LOGGER.agentPulseEnded( selfReference(), now, self ); return instructions; } diff --git a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingEventProcessor.java b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingEventProcessor.java index 658d3f853ab..fb705ba0a09 100644 --- a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingEventProcessor.java +++ b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingEventProcessor.java @@ -299,8 +299,7 @@ public CompletableFuture work() { // See the Scheduler class below. 
ensureScheduled(); - OUTBOX_POLLING_EVENTS_LOGGER.tracef( "Processing %d outbox events for '%s': '%s'", events.size(), name, - events ); + OUTBOX_POLLING_EVENTS_LOGGER.processingOutboxEvents( events.size(), name, events ); // Process the events eventProcessing.processEvents( events ); diff --git a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingEventProcessorClusterLink.java b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingEventProcessorClusterLink.java index 69fa0504db5..1a0205428d1 100644 --- a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingEventProcessorClusterLink.java +++ b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingEventProcessorClusterLink.java @@ -25,8 +25,6 @@ import org.hibernate.search.util.common.SearchException; import org.hibernate.search.util.common.spi.ToStringTreeAppender; -import org.jboss.logging.Logger; - public final class OutboxPollingEventProcessorClusterLink extends AbstractAgentClusterLink { @@ -73,10 +71,12 @@ public void appendTo(ToStringTreeAppender appender) { protected WriteAction doPulse(List allAgentsInIdOrder, Agent currentSelf) { for ( Agent agent : allAgentsInIdOrder ) { if ( AgentType.MASS_INDEXING.equals( agent.getType() ) ) { - OUTBOX_POLLING_EVENTS_LOGGER.logf( - currentSelf.getState() != AgentState.SUSPENDED ? 
Logger.Level.INFO : Logger.Level.TRACE, - "Agent '%s': another agent '%s' is currently mass indexing", - selfReference(), agent ); + if ( currentSelf.getState() != AgentState.SUSPENDED ) { + OUTBOX_POLLING_EVENTS_LOGGER.agentOtherAgentIsIndexingInfo( selfReference(), agent ); + } + else { + OUTBOX_POLLING_EVENTS_LOGGER.agentOtherAgentIsIndexingTrace( selfReference(), agent ); + } return (now, self, agentPersister) -> { agentPersister.setSuspended( self ); return instructCommitAndRetryPulseAfterDelay( now, pulseInterval ); @@ -107,13 +107,13 @@ protected WriteAction doPulse(List shardAssignmentOptional = ShardAssignmentDescriptor.fromClusterMemberList( clusterTarget.descriptor.memberIdsInShardOrder, selfReference().id ); - if ( !shardAssignmentOptional.isPresent() ) { - OUTBOX_POLLING_EVENTS_LOGGER.logf( - currentSelf.getState() != AgentState.SUSPENDED ? Logger.Level.INFO : Logger.Level.TRACE, - "Agent '%s': this agent is superfluous and will not perform event processing," - + " because other agents are enough to handle all the shards." 
- + " Target cluster: %s.", - selfReference(), clusterTarget.descriptor ); + if ( shardAssignmentOptional.isEmpty() ) { + if ( currentSelf.getState() != AgentState.SUSPENDED ) { + OUTBOX_POLLING_EVENTS_LOGGER.agentSuperfluousInfo( selfReference(), clusterTarget.descriptor ); + } + else { + OUTBOX_POLLING_EVENTS_LOGGER.agentSuperfluousTrace( selfReference(), clusterTarget.descriptor ); + } return (now, self, agentPersister) -> { agentPersister.setSuspended( self ); return instructCommitAndRetryPulseAfterDelay( now, pulseInterval ); @@ -123,11 +123,12 @@ protected WriteAction doPulse(List { agentPersister.setSuspended( self ); return instructCommitAndRetryPulseAfterDelay( now, pollingInterval ); @@ -137,9 +138,7 @@ protected WriteAction doPulse(List { @@ -182,7 +181,7 @@ protected WriteAction doPulse(List { @@ -199,14 +198,13 @@ private boolean excludedAgentsAreOutOfCluster(List excludedAgents) { AgentState expectedState = AgentState.SUSPENDED; for ( Agent agent : excludedAgents ) { if ( !expectedState.equals( agent.getState() ) ) { - OUTBOX_POLLING_EVENTS_LOGGER.tracef( "Agent '%s': waiting for agent '%s', which has not reached state '%s' yet", - selfReference(), agent.getReference(), expectedState ); + OUTBOX_POLLING_EVENTS_LOGGER.agentWaitingAgentReachState( selfReference(), agent.getReference(), + expectedState ); return false; } } - OUTBOX_POLLING_EVENTS_LOGGER.tracef( "Agent '%s': agents excluded from the cluster reached the expected state %s", - selfReference(), expectedState ); + OUTBOX_POLLING_EVENTS_LOGGER.agentExcluded( selfReference(), expectedState ); return true; } @@ -218,48 +216,40 @@ private boolean clusterMembersAreInCluster(List clusterMembersInShardOrde for ( Agent agent : clusterMembersInShardOrder ) { AgentState state = agent.getState(); if ( !expectedStates.contains( agent.getState() ) ) { - OUTBOX_POLLING_EVENTS_LOGGER.tracef( - "Agent '%s': waiting for agent '%s', whose state %s is not in the expected %s yet", - selfReference(), 
agent.getReference(), state, expectedStates ); + OUTBOX_POLLING_EVENTS_LOGGER.clusterMembersAreInClusterWaitingForState( selfReference(), agent.getReference(), + state, expectedStates ); return false; } Integer totalShardCount = agent.getTotalShardCount(); if ( totalShardCount == null || expectedTotalShardCount != totalShardCount ) { - OUTBOX_POLLING_EVENTS_LOGGER.tracef( - "Agent '%s': waiting for agent '%s', whose total shard count %s is not the expected %s yet", - selfReference(), agent.getReference(), totalShardCount, expectedTotalShardCount ); + OUTBOX_POLLING_EVENTS_LOGGER.clusterMembersAreInClusterShardCountExpectation( selfReference(), + agent.getReference(), totalShardCount, expectedTotalShardCount ); return false; } Integer assignedShardIndex = agent.getAssignedShardIndex(); if ( assignedShardIndex == null || expectedAssignedShardIndex != assignedShardIndex ) { - OUTBOX_POLLING_EVENTS_LOGGER.tracef( - "Agent '%s': waiting for agent '%s', whose assigned shard index %s is not the expected %s yet", - selfReference(), agent.getReference(), assignedShardIndex, expectedAssignedShardIndex ); + OUTBOX_POLLING_EVENTS_LOGGER.clusterMembersAreInClusterSharIndexExpectation( selfReference(), + agent.getReference(), assignedShardIndex, expectedAssignedShardIndex ); return false; } ++expectedAssignedShardIndex; } - OUTBOX_POLLING_EVENTS_LOGGER.tracef( - "Agent '%s': all cluster members reached the expected states %s and shard assignment %s", - selfReference(), expectedStates, clusterDescriptor ); + OUTBOX_POLLING_EVENTS_LOGGER.clusterMembersAreInClusterReachedExpectedStates( selfReference(), expectedStates, + clusterDescriptor ); return true; } @Override protected OutboxPollingEventProcessingInstructions instructCommitAndRetryPulseAfterDelay(Instant now, Duration delay) { Instant expiration = now.plus( delay ); - OUTBOX_POLLING_EVENTS_LOGGER.tracef( - "Agent '%s': instructions are to not process events and to retry a pulse in %s, around %s", - selfReference(), delay, 
expiration ); + OUTBOX_POLLING_EVENTS_LOGGER.instructCommitAndRetryPulseAfterDelay( selfReference(), delay, expiration ); return new OutboxPollingEventProcessingInstructions( clock, expiration, Optional.empty() ); } private OutboxPollingEventProcessingInstructions instructProceedWithEventProcessing(Instant now) { Instant expiration = now.plus( pulseInterval ); - OUTBOX_POLLING_EVENTS_LOGGER.tracef( - "Agent '%s': instructions are to process events and to retry a pulse in %s, around %s", - selfReference(), pulseInterval, expiration ); + OUTBOX_POLLING_EVENTS_LOGGER.instructProceedWithEventProcessing( selfReference(), pulseInterval, expiration ); return new OutboxPollingEventProcessingInstructions( clock, expiration, Optional.of( lastShardAssignment.eventFinder ) ); } diff --git a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingMassIndexerAgentClusterLink.java b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingMassIndexerAgentClusterLink.java index 320bafd2a82..f213d28d77e 100644 --- a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingMassIndexerAgentClusterLink.java +++ b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingMassIndexerAgentClusterLink.java @@ -77,32 +77,26 @@ private boolean eventProcessorsAreSuspended(List eventProcessors) { AgentState expectedState = AgentState.SUSPENDED; for ( Agent eventProcessor : eventProcessors ) { if ( !expectedState.equals( eventProcessor.getState() ) ) { - OUTBOX_POLLING_EVENTS_LOGGER.tracef( - "Agent '%s': waiting for event processor '%s', which has not reached state '%s' yet", - selfReference(), eventProcessor.getReference(), expectedState ); + OUTBOX_POLLING_EVENTS_LOGGER.agentWaitingForEvents( selfReference(), eventProcessor.getReference(), + expectedState ); return false; } } - 
OUTBOX_POLLING_EVENTS_LOGGER.tracef( "Agent '%s': all event processors reached the expected state %s", - selfReference(), expectedState ); + OUTBOX_POLLING_EVENTS_LOGGER.agentProcessorsExpired( selfReference(), expectedState ); return true; } @Override protected OutboxPollingMassIndexingInstructions instructCommitAndRetryPulseAfterDelay(Instant now, Duration delay) { Instant expiration = now.plus( delay ); - OUTBOX_POLLING_EVENTS_LOGGER.tracef( - "Agent '%s': instructions are to hold off mass indexing and to retry a pulse in %s, around %s", - selfReference(), delay, expiration ); + OUTBOX_POLLING_EVENTS_LOGGER.agentHoldMassIndexing( selfReference(), delay, expiration ); return new OutboxPollingMassIndexingInstructions( clock, expiration, false ); } private OutboxPollingMassIndexingInstructions instructProceedWithMassIndexing(Instant now) { Instant expiration = now.plus( pulseInterval ); - OUTBOX_POLLING_EVENTS_LOGGER.tracef( - "Agent '%s': instructions are to proceed with mass indexing and to retry a pulse in %s, around %s", - selfReference(), pulseInterval, expiration ); + OUTBOX_POLLING_EVENTS_LOGGER.agentProceedMassIndexing( selfReference(), pulseInterval, expiration ); return new OutboxPollingMassIndexingInstructions( clock, expiration, true ); } diff --git a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingOutboxEventSendingPlan.java b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingOutboxEventSendingPlan.java index 5acc8b95153..eb970a22d14 100644 --- a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingOutboxEventSendingPlan.java +++ b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/event/impl/OutboxPollingOutboxEventSendingPlan.java @@ -89,7 +89,7 @@ private CompletableFuture sendAndReportOnSe } } currentSession.flush(); - 
OUTBOX_POLLING_EVENTS_LOGGER.tracef( "Persisted %d outbox events: '%s'", events.size(), events ); + OUTBOX_POLLING_EVENTS_LOGGER.eventPlanNumberOfPersistedEvents( events.size(), events ); return CompletableFuture.completedFuture( builder.build() ); } finally { diff --git a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/logging/impl/OutboxPollingEventsLog.java b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/logging/impl/OutboxPollingEventsLog.java index 39ef9130ff1..e93d5e9dc96 100644 --- a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/logging/impl/OutboxPollingEventsLog.java +++ b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/logging/impl/OutboxPollingEventsLog.java @@ -7,11 +7,15 @@ import static org.hibernate.search.mapper.orm.outboxpolling.logging.impl.OutboxPollingLog.ID_OFFSET; import static org.jboss.logging.Logger.Level.DEBUG; +import static org.jboss.logging.Logger.Level.INFO; +import static org.jboss.logging.Logger.Level.TRACE; import static org.jboss.logging.Logger.Level.WARN; import java.lang.invoke.MethodHandles; +import java.time.Duration; import java.time.Instant; import java.util.List; +import java.util.Set; import java.util.UUID; import jakarta.persistence.PersistenceException; @@ -19,6 +23,10 @@ import org.hibernate.search.engine.logging.impl.EngineLog; import org.hibernate.search.mapper.orm.outboxpolling.cluster.impl.Agent; import org.hibernate.search.mapper.orm.outboxpolling.cluster.impl.AgentReference; +import org.hibernate.search.mapper.orm.outboxpolling.cluster.impl.AgentState; +import org.hibernate.search.mapper.orm.outboxpolling.cluster.impl.ClusterDescriptor; +import org.hibernate.search.mapper.orm.outboxpolling.cluster.impl.ShardAssignmentDescriptor; +import org.hibernate.search.mapper.orm.outboxpolling.event.impl.OutboxEvent; import org.hibernate.search.util.common.SearchException; 
import org.hibernate.search.util.common.logging.CategorizedLogger; import org.hibernate.search.util.common.logging.impl.LoggerFactory; @@ -26,7 +34,6 @@ import org.hibernate.search.util.common.logging.impl.ToStringTreeMultilineFormatter; import org.hibernate.search.util.common.spi.ToStringTreeAppendable; -import org.jboss.logging.BasicLogger; import org.jboss.logging.annotations.Cause; import org.jboss.logging.annotations.FormatWith; import org.jboss.logging.annotations.LogMessage; @@ -43,7 +50,7 @@ """ ) @MessageLogger(projectCode = MessageConstants.PROJECT_CODE) -public interface OutboxPollingEventsLog extends BasicLogger { +public interface OutboxPollingEventsLog { String CATEGORY_NAME = "org.hibernate.search.mapper.outboxpolling"; @@ -119,4 +126,148 @@ void startingOutboxMassIndexerAgent(String name, // If you update this message, make sure to also update OutboxPollingAutomaticIndexingConcurrencyIT. value = "'%1$s' failed to retrieve events to process due to a locking failure; will try again later.") void eventProcessorFindEventsUnableToLock(String name, @Cause PersistenceException lockException); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 38, value = "Agent '%s': registering.") + void agentRegistering(AgentReference agentReference); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 39, value = "Agent '%s': leaving cluster.") + void agentLeaving(AgentReference agentReference); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 40, value = "Agent '%s': suspending.") + void agentSuspending(AgentReference agentReference); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 41, value = "Agent '%s': waiting for cluster changes. Shard assignment: %s. Cluster: %s") + void agentWaiting(AgentReference agentReference, ShardAssignmentDescriptor shardAssignment, + ClusterDescriptor clusterDescriptor); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 42, value = "Agent '%s': running. Shard assignment: %s. 
Cluster: %s") + void agentRunning(AgentReference agentReference, ShardAssignmentDescriptor shardAssignment, + ClusterDescriptor clusterDescriptor); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 43, value = "Agent '%s': starting pulse at %s with self = %s, all agents = %s") + void agentPulseStarting(AgentReference agentReference, Instant now, Agent self, List allAgentsInIdOrder); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 44, value = "Agent '%s': ending pulse at %s with self = %s") + void agentPulseEnded(AgentReference agentReference, Instant now, Agent self); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 45, value = "Agent '%s': reassessing the new situation in the next pulse") + void agentReassessing(AgentReference agentReference); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 46, value = "Processing %d outbox events for '%s': '%s'") + void processingOutboxEvents(int size, String name, List events); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 47, value = "Agent '%s': waiting for event processor '%s', which has not reached state '%s' yet") + void agentWaitingForEvents(AgentReference agentReference, AgentReference reference, AgentState expectedState); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 48, value = "Agent '%s': all event processors reached the expected state %s") + void agentProcessorsExpired(AgentReference agentReference, AgentState expectedState); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 49, + value = "Agent '%s': instructions are to hold off mass indexing and to retry a pulse in %s, around %s") + void agentHoldMassIndexing(AgentReference agentReference, Duration delay, Instant expiration); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 50, + value = "Agent '%s': instructions are to proceed with mass indexing and to retry a pulse in %s, around %s") + void agentProceedMassIndexing(AgentReference agentReference, Duration pulseInterval, Instant 
expiration); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 51, value = "Agent '%s': the persisted shard assignment (%s) does not match the target." + + " Target assignment: %s." + + " Cluster: %s.") + void agentAssignmentDoesNotMatchTarget(AgentReference agentReference, ShardAssignmentDescriptor persistedShardAssignment, + ShardAssignmentDescriptor targetShardAssignment, ClusterDescriptor descriptor); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 52, value = "Agent '%s': assigning to %s") + void agentAssignment(AgentReference agentReference, ShardAssignmentDescriptor targetShardAssignment); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 53, value = "Agent '%s': waiting for agent '%s', which has not reached state '%s' yet") + void agentWaitingAgentReachState(AgentReference agentReference, AgentReference reference, AgentState expectedState); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 54, value = "Agent '%s': agents excluded from the cluster reached the expected state %s") + void agentExcluded(AgentReference agentReference, AgentState expectedState); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 55, + value = "Agent '%s': instructions are to not process events and to retry a pulse in %s, around %s") + void instructCommitAndRetryPulseAfterDelay(AgentReference agentReference, Duration delay, Instant expiration); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 56, + value = "Agent '%s': instructions are to process events and to retry a pulse in %s, around %s") + void instructProceedWithEventProcessing(AgentReference agentReference, Duration pulseInterval, Instant expiration); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 57, value = "Agent '%s': waiting for agent '%s', whose state %s is not in the expected %s yet") + void clusterMembersAreInClusterWaitingForState(AgentReference agentReference, AgentReference reference, AgentState state, + Set expectedStates); + + @LogMessage(level = 
TRACE) + @Message(id = ID_OFFSET + 58, + value = "Agent '%s': waiting for agent '%s', whose total shard count %s is not the expected %s yet") + void clusterMembersAreInClusterShardCountExpectation(AgentReference agentReference, AgentReference reference, + Integer totalShardCount, int expectedTotalShardCount); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 59, + value = "Agent '%s': waiting for agent '%s', whose assigned shard index %s is not the expected %s yet") + void clusterMembersAreInClusterSharIndexExpectation(AgentReference agentReference, AgentReference reference, + Integer assignedShardIndex, int expectedAssignedShardIndex); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 60, + value = "Agent '%s': all cluster members reached the expected states %s and shard assignment %s") + void clusterMembersAreInClusterReachedExpectedStates(AgentReference agentReference, Set expectedStates, + ClusterDescriptor clusterDescriptor); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 61, + value = "Agent '%s': some cluster members are missing; this agent will wait until they are present." + + " Target cluster: %s.") + void agentClusterMembersMissingInfo(AgentReference agentReference, ClusterDescriptor descriptor); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 62, + value = "Agent '%s': some cluster members are missing; this agent will wait until they are present." + + " Target cluster: %s.") + void agentClusterMembersMissingTrace(AgentReference agentReference, ClusterDescriptor descriptor); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 63, value = "Agent '%s': this agent is superfluous and will not perform event processing," + + " because other agents are enough to handle all the shards." 
+ + " Target cluster: %s.") + void agentSuperfluousInfo(AgentReference agentReference, ClusterDescriptor descriptor); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 64, value = "Agent '%s': this agent is superfluous and will not perform event processing," + + " because other agents are enough to handle all the shards." + + " Target cluster: %s.") + void agentSuperfluousTrace(AgentReference agentReference, ClusterDescriptor descriptor); + + @LogMessage(level = INFO) + @Message(id = ID_OFFSET + 65, value = "Agent '%s': another agent '%s' is currently mass indexing") + void agentOtherAgentIsIndexingInfo(AgentReference agentReference, Agent agent); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 66, value = "Agent '%s': another agent '%s' is currently mass indexing") + void agentOtherAgentIsIndexingTrace(AgentReference agentReference, Agent agent); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 67, value = "Persisted %d outbox events: '%s'") + void eventPlanNumberOfPersistedEvents(int size, List events); } diff --git a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/logging/impl/OutboxPollingLog.java b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/logging/impl/OutboxPollingLog.java index 96865d54e4b..6f77bd37e4d 100644 --- a/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/logging/impl/OutboxPollingLog.java +++ b/mapper/orm-outbox-polling/src/main/java/org/hibernate/search/mapper/orm/outboxpolling/logging/impl/OutboxPollingLog.java @@ -30,6 +30,6 @@ public interface OutboxPollingLog extends ConfigurationLog, DeprecationLog, Outb * here to the next value. 
*/ @LogMessage(level = TRACE) - @Message(id = ID_OFFSET + 38, value = "") + @Message(id = ID_OFFSET + 68, value = "") void nextLoggerIdForConvenience(); } diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/bootstrap/impl/HibernateOrmBeanContainerBeanProvider.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/bootstrap/impl/HibernateOrmBeanContainerBeanProvider.java index 99569b0d132..f8f8b11637f 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/bootstrap/impl/HibernateOrmBeanContainerBeanProvider.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/bootstrap/impl/HibernateOrmBeanContainerBeanProvider.java @@ -72,7 +72,7 @@ public BeanHolder forType(Class typeReference) { } catch (Exception e) { new SuppressingCloser( e ).push( result ); - CONFIGURATION_LOGGER.debugf( e, "Error resolving bean of type [%s] - using fallback", typeReference ); + CONFIGURATION_LOGGER.errorResolvingBean( typeReference, e ); try { result = BeanHolder.of( fallbackInstanceProducer.produceBeanInstance( typeReference ) ); } diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/bootstrap/impl/HibernateSearchPreIntegrationService.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/bootstrap/impl/HibernateSearchPreIntegrationService.java index c9ca562f2dc..c5c23b408a7 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/bootstrap/impl/HibernateSearchPreIntegrationService.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/bootstrap/impl/HibernateSearchPreIntegrationService.java @@ -110,7 +110,7 @@ public HibernateSearchPreIntegrationService initiateService(Map configurationVal Optional partialBuildState = HibernateOrmIntegrationPartialBuildState.get( propertySource ); if ( !enabled ) { - CONFIGURATION_LOGGER.debug( "Hibernate Search is disabled through configuration properties." 
); + CONFIGURATION_LOGGER.hibernateSearchDisabled(); // The partial build state won't get used. partialBuildState.ifPresent( HibernateOrmIntegrationPartialBuildState::closeOnFailure ); // Hibernate Search will not boot. diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/common/spi/TransactionHelper.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/common/spi/TransactionHelper.java index 088747cdcb6..96abba60330 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/common/spi/TransactionHelper.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/common/spi/TransactionHelper.java @@ -131,18 +131,17 @@ private static boolean shouldUseJta(TransactionManager transactionManager, TransactionCoordinatorBuilder transactionCoordinatorBuilder) { if ( !transactionCoordinatorBuilder.isJta() ) { //Today we only require a TransactionManager on JTA based transaction factories - ORM_SPECIFIC_LOGGER - .trace( "TransactionFactory does not require a TransactionManager: don't wrap in a JTA transaction" ); + ORM_SPECIFIC_LOGGER.transactionManagerNotRequired(); return false; } if ( transactionManager == null ) { //no TM, nothing to do OR configuration mistake - ORM_SPECIFIC_LOGGER.trace( "No TransactionManager found, do not start a surrounding JTA transaction" ); + ORM_SPECIFIC_LOGGER.transactionManagerNotFound(); return false; } try { if ( transactionManager.getStatus() == Status.STATUS_NO_TRANSACTION ) { - ORM_SPECIFIC_LOGGER.trace( "No Transaction in progress, needs to start a JTA transaction" ); + ORM_SPECIFIC_LOGGER.noInProgressTransaction(); return true; } } @@ -150,7 +149,7 @@ private static boolean shouldUseJta(TransactionManager transactionManager, ORM_SPECIFIC_LOGGER.cannotGuessTransactionStatus( e ); return false; } - ORM_SPECIFIC_LOGGER.trace( "Transaction in progress, no need to start a JTA transaction" ); + ORM_SPECIFIC_LOGGER.transactionAlreadyInProgress(); return false; } } diff --git 
a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/event/impl/HibernateSearchEventListener.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/event/impl/HibernateSearchEventListener.java index 0f752f78a13..1cf50e9c7a9 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/event/impl/HibernateSearchEventListener.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/event/impl/HibernateSearchEventListener.java @@ -64,7 +64,7 @@ public HibernateSearchEventListener(HibernateOrmListenerContextProvider contextP boolean dirtyCheckingEnabled) { this.contextProvider = contextProvider; this.dirtyCheckingEnabled = dirtyCheckingEnabled; - CONFIGURATION_LOGGER.debug( "Hibernate Search dirty checks " + ( dirtyCheckingEnabled ? "enabled" : "disabled" ) ); + CONFIGURATION_LOGGER.dirtyChecksEnabled( dirtyCheckingEnabled ); } public void registerTo(SessionFactoryImplementor sessionFactory) { diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/loading/impl/HibernateOrmMassIdentifierLoader.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/loading/impl/HibernateOrmMassIdentifierLoader.java index aaf62a294ab..ec6481b90d8 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/loading/impl/HibernateOrmMassIdentifierLoader.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/loading/impl/HibernateOrmMassIdentifierLoader.java @@ -53,7 +53,7 @@ public HibernateOrmMassIdentifierLoader(HibernateOrmQueryLoader typeQueryL } if ( LOADING_LOGGER.isDebugEnabled() ) { - LOADING_LOGGER.debugf( "going to fetch %d primary keys", totalCount ); + LOADING_LOGGER.numberOfKeysToFetch( totalCount ); } results = typeQueryLoader.createIdentifiersQuery( session ) diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/ConfigurationLog.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/ConfigurationLog.java index 3867f70b40d..1563b8c5373 100644 --- 
a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/ConfigurationLog.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/ConfigurationLog.java @@ -17,7 +17,9 @@ import org.hibernate.search.util.common.logging.impl.LoggerFactory; import org.hibernate.search.util.common.logging.impl.MessageConstants; -import org.jboss.logging.BasicLogger; +import org.jboss.logging.Logger; +import org.jboss.logging.annotations.Cause; +import org.jboss.logging.annotations.LogMessage; import org.jboss.logging.annotations.Message; import org.jboss.logging.annotations.MessageLogger; @@ -28,7 +30,7 @@ """ ) @MessageLogger(projectCode = MessageConstants.PROJECT_CODE) -public interface ConfigurationLog extends BasicLogger { +public interface ConfigurationLog { String CATEGORY_NAME = "org.hibernate.search.configuration"; ConfigurationLog CONFIGURATION_LOGGER = LoggerFactory.make( ConfigurationLog.class, CATEGORY_NAME, MethodHandles.lookup() ); @@ -75,4 +77,24 @@ SearchException invalidSchemaManagementStrategyName(String invalidRepresentation value = "Unable to apply the given filter at the session level with the outbox polling coordination strategy. 
" + "With this coordination strategy, applying a session-level indexing plan filter is only allowed if it excludes all types.") SearchException cannotApplySessionFilterWhenAsyncProcessingIsUsed(); + + @LogMessage(level = Logger.Level.DEBUG) + @Message(id = ID_OFFSET + 138, value = "Error resolving bean of type [%s] - using fallback") + void errorResolvingBean(Class typeReference, @Cause Exception e); + + @LogMessage(level = Logger.Level.DEBUG) + @Message(id = ID_OFFSET + 139, value = "Hibernate Search is disabled through configuration properties.") + void hibernateSearchDisabled(); + + @LogMessage(level = Logger.Level.DEBUG) + @Message(id = ID_OFFSET + 140, value = "Hibernate Search dirty checks: %s") + void dirtyChecksEnabled(boolean dirtyCheckingEnabled); + + @LogMessage(level = Logger.Level.DEBUG) + @Message(id = ID_OFFSET + 141, value = "Hibernate Search event listeners activated") + void hibernateSearchListenerEnabled(); + + @LogMessage(level = Logger.Level.DEBUG) + @Message(id = ID_OFFSET + 142, value = "Hibernate Search event listeners deactivated") + void hibernateSearchListenerDisabled(); } diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/IndexingLog.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/IndexingLog.java index 6487d8c376b..8dd3f691da2 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/IndexingLog.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/IndexingLog.java @@ -9,13 +9,15 @@ import java.lang.invoke.MethodHandles; import java.util.List; +import org.hibernate.Transaction; import org.hibernate.search.util.common.SearchException; import org.hibernate.search.util.common.logging.CategorizedLogger; import org.hibernate.search.util.common.logging.impl.LoggerFactory; import org.hibernate.search.util.common.logging.impl.MessageConstants; -import org.jboss.logging.BasicLogger; +import org.jboss.logging.Logger; import 
org.jboss.logging.annotations.Cause; +import org.jboss.logging.annotations.LogMessage; import org.jboss.logging.annotations.Message; import org.jboss.logging.annotations.MessageLogger; @@ -26,7 +28,7 @@ Logged messages may notify when the particular indexing plan synchronization sta """ ) @MessageLogger(projectCode = MessageConstants.PROJECT_CODE) -public interface IndexingLog extends BasicLogger { +public interface IndexingLog { String CATEGORY_NAME = "org.hibernate.search.mapper.indexing"; IndexingLog INDEXING_LOGGER = LoggerFactory.make( IndexingLog.class, CATEGORY_NAME, MethodHandles.lookup() ); @@ -34,4 +36,17 @@ public interface IndexingLog extends BasicLogger { @Message(id = ID_OFFSET + 22, value = "Indexing failure: %1$s.\nThe following entities may not have been updated correctly in the index: %2$s.") SearchException indexingFailure(String causeMessage, List failingEntities, @Cause Throwable cause); + + @LogMessage(level = Logger.Level.TRACE) + @Message(id = ID_OFFSET + 135, value = "Processing Transaction's beforeCompletion() phase for %s.") + void beforeCompletion(Transaction transactionIdentifier); + + @LogMessage(level = Logger.Level.TRACE) + @Message(id = ID_OFFSET + 136, value = "Processing Transaction's afterCompletion() phase for %s. Executing indexing plan.") + void afterCompletionExecuting(Transaction transactionIdentifier); + + @LogMessage(level = Logger.Level.TRACE) + @Message(id = ID_OFFSET + 137, + value = "Processing Transaction's afterCompletion() phase for %s. 
Cancelling indexing plan due to transaction status %d") + void afterCompletionCanceling(Transaction transactionIdentifier, int status); } diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/LoadingLog.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/LoadingLog.java index 1f05669522e..034e8a9f8b4 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/LoadingLog.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/LoadingLog.java @@ -15,6 +15,8 @@ import org.hibernate.search.util.common.logging.impl.MessageConstants; import org.jboss.logging.BasicLogger; +import org.jboss.logging.Logger; +import org.jboss.logging.annotations.LogMessage; import org.jboss.logging.annotations.Message; import org.jboss.logging.annotations.MessageLogger; @@ -38,4 +40,8 @@ public interface LoadingLog extends BasicLogger { + " '%2$s' is the document ID and must be assigned unique values.") SearchException foundMultipleEntitiesForDocumentId(String entityName, String documentIdSourcePropertyName, Object id); + + @LogMessage(level = Logger.Level.DEBUG) + @Message(id = ID_OFFSET + 134, value = "Going to fetch %d primary keys") + void numberOfKeysToFetch(long totalCount); } diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/OrmLog.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/OrmLog.java index 3e008db22c3..abf0d29a1cf 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/OrmLog.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/OrmLog.java @@ -50,6 +50,6 @@ public interface OrmLog * here to the next value. 
*/ @LogMessage(level = TRACE) - @Message(id = ID_OFFSET + 127, value = "") + @Message(id = ID_OFFSET + 143, value = "") void nextLoggerIdForConvenience(); } diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/OrmSpecificLog.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/OrmSpecificLog.java index f71a0fcf9eb..05e287a24f8 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/OrmSpecificLog.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/logging/impl/OrmSpecificLog.java @@ -11,6 +11,8 @@ import java.lang.invoke.MethodHandles; +import jakarta.transaction.Synchronization; + import org.hibernate.ScrollMode; import org.hibernate.SessionFactory; import org.hibernate.search.mapper.orm.search.loading.EntityLoadingCacheLookupStrategy; @@ -19,7 +21,6 @@ import org.hibernate.search.util.common.logging.impl.LoggerFactory; import org.hibernate.search.util.common.logging.impl.MessageConstants; -import org.jboss.logging.BasicLogger; import org.jboss.logging.Logger; import org.jboss.logging.annotations.Cause; import org.jboss.logging.annotations.LogMessage; @@ -34,7 +35,7 @@ """ ) @MessageLogger(projectCode = MessageConstants.PROJECT_CODE) -public interface OrmSpecificLog extends BasicLogger { +public interface OrmSpecificLog { String CATEGORY_NAME = "org.hibernate.search.mapper.orm"; OrmSpecificLog ORM_SPECIFIC_LOGGER = LoggerFactory.make( OrmSpecificLog.class, CATEGORY_NAME, MethodHandles.lookup() ); @@ -123,4 +124,34 @@ void skippingPreliminaryCacheLookupsForNonEntityIdEntityLoader(String entityName + " for example with .loading(o -> o.fetchSize(50))." 
+ " See the reference documentation for more information.") SearchException cannotSetFetchSize(); + + @LogMessage(level = Logger.Level.TRACE) + @Message(id = ID_OFFSET + 127, + value = "TransactionFactory does not require a TransactionManager: don't wrap in a JTA transaction") + void transactionManagerNotRequired(); + + @LogMessage(level = Logger.Level.TRACE) + @Message(id = ID_OFFSET + 128, value = "No TransactionManager found, do not start a surrounding JTA transaction") + void transactionManagerNotFound(); + + @LogMessage(level = Logger.Level.TRACE) + @Message(id = ID_OFFSET + 129, value = "No Transaction in progress, needs to start a JTA transaction") + void noInProgressTransaction(); + + @LogMessage(level = Logger.Level.TRACE) + @Message(id = ID_OFFSET + 130, value = "Transaction in progress, no need to start a JTA transaction") + void transactionAlreadyInProgress(); + + @LogMessage(level = Logger.Level.TRACE) + @Message(id = ID_OFFSET + 131, value = "Transaction's afterCompletion is expected to be executed" + + " through the AfterTransactionCompletionProcess interface, ignoring: %s") + void syncAdapterIgnoringAfterCompletion(Synchronization delegate); + + @LogMessage(level = Logger.Level.TRACE) + @Message(id = ID_OFFSET + 132, value = "Transaction's beforeCompletion() phase already been processed, ignoring: %s") + void syncAdapterIgnoringBeforeCompletionAlreadyExecuted(Synchronization delegate); + + @LogMessage(level = Logger.Level.TRACE) + @Message(id = ID_OFFSET + 133, value = "Transaction's afterCompletion() phase already been processed, ignoring: %s") + void syncAdapterIgnoringAfterCompletionAlreadyExecuted(Synchronization delegate); } diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/AfterCommitIndexingPlanSynchronization.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/AfterCommitIndexingPlanSynchronization.java index 6a63889a8b5..c2f79dbd45c 100644 --- 
a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/AfterCommitIndexingPlanSynchronization.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/AfterCommitIndexingPlanSynchronization.java @@ -34,7 +34,7 @@ class AfterCommitIndexingPlanSynchronization implements Synchronization { @Override public void beforeCompletion() { - INDEXING_LOGGER.tracef( "Processing Transaction's beforeCompletion() phase for %s.", transactionIdentifier ); + INDEXING_LOGGER.beforeCompletion( transactionIdentifier ); indexingPlan.process(); } @@ -42,16 +42,11 @@ public void beforeCompletion() { public void afterCompletion(int i) { try { if ( Status.STATUS_COMMITTED == i ) { - INDEXING_LOGGER.tracef( "Processing Transaction's afterCompletion() phase for %s. Executing indexing plan.", - transactionIdentifier ); + INDEXING_LOGGER.afterCompletionExecuting( transactionIdentifier ); synchronizationStrategy.executeAndSynchronize( indexingPlan ); } else { - INDEXING_LOGGER.tracef( - "Processing Transaction's afterCompletion() phase for %s. 
Cancelling indexing plan due to transaction status %d", - transactionIdentifier, - i - ); + INDEXING_LOGGER.afterCompletionCanceling( transactionIdentifier, i ); indexingPlan.discard(); } } diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/BeforeCommitIndexingPlanSynchronization.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/BeforeCommitIndexingPlanSynchronization.java index 9fb16c45df4..1d8963bcde9 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/BeforeCommitIndexingPlanSynchronization.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/BeforeCommitIndexingPlanSynchronization.java @@ -34,8 +34,7 @@ class BeforeCommitIndexingPlanSynchronization implements Synchronization { @Override public void beforeCompletion() { - INDEXING_LOGGER.tracef( "Processing Transaction's afterCompletion() phase for %s. Executing indexing plan.", - transactionIdentifier ); + INDEXING_LOGGER.afterCompletionExecuting( transactionIdentifier ); synchronizationStrategy.executeAndSynchronize( indexingPlan ); } @@ -43,11 +42,7 @@ public void beforeCompletion() { public void afterCompletion(int i) { try { if ( Status.STATUS_COMMITTED != i ) { - INDEXING_LOGGER.tracef( - "Processing Transaction's afterCompletion() phase for %s. 
Cancelling work due to transaction status %d", - transactionIdentifier, - i - ); + INDEXING_LOGGER.afterCompletionCanceling( transactionIdentifier, i ); indexingPlan.discard(); } } diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/ConfiguredAutomaticIndexingStrategy.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/ConfiguredAutomaticIndexingStrategy.java index 2e912697b8f..981be6614d5 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/ConfiguredAutomaticIndexingStrategy.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/ConfiguredAutomaticIndexingStrategy.java @@ -130,7 +130,7 @@ public void start(HibernateOrmSearchSessionMappingContext mappingContext, return enabled; } ) .orElse( true ) ) { - CONFIGURATION_LOGGER.debug( "Hibernate Search event listeners activated" ); + CONFIGURATION_LOGGER.hibernateSearchListenerEnabled(); @SuppressWarnings("deprecation") HibernateSearchEventListener hibernateSearchEventListener = new HibernateSearchEventListener( contextProvider, AUTOMATIC_INDEXING_ENABLE_DIRTY_CHECK.getAndTransform( @@ -147,7 +147,7 @@ public void start(HibernateOrmSearchSessionMappingContext mappingContext, hibernateSearchEventListener.registerTo( mappingContext.sessionFactory() ); } else { - CONFIGURATION_LOGGER.debug( "Hibernate Search event listeners deactivated" ); + CONFIGURATION_LOGGER.hibernateSearchListenerDisabled(); } } diff --git a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/SynchronizationAdapter.java b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/SynchronizationAdapter.java index 90f32b69e59..195e34be9e7 100644 --- a/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/SynchronizationAdapter.java +++ b/mapper/orm/src/main/java/org/hibernate/search/mapper/orm/session/impl/SynchronizationAdapter.java @@ -42,11 +42,7 @@ public void beforeCompletion() { @Override 
public void afterCompletion(int status) { - ORM_SPECIFIC_LOGGER.tracef( - "Transaction's afterCompletion is expected to be executed" - + " through the AfterTransactionCompletionProcess interface, ignoring: %s", - delegate - ); + ORM_SPECIFIC_LOGGER.syncAdapterIgnoringAfterCompletion( delegate ); } @Override @@ -71,9 +67,7 @@ public void doAfterTransactionCompletion(boolean success, SharedSessionContractI private void doBeforeCompletion() { if ( beforeExecuted ) { - ORM_SPECIFIC_LOGGER.tracef( - "Transaction's beforeCompletion() phase already been processed, ignoring: %s", delegate - ); + ORM_SPECIFIC_LOGGER.syncAdapterIgnoringBeforeCompletionAlreadyExecuted( delegate ); } else { delegate.beforeCompletion(); @@ -83,9 +77,7 @@ private void doBeforeCompletion() { private void doAfterCompletion(int status) { if ( afterExecuted ) { - ORM_SPECIFIC_LOGGER.tracef( - "Transaction's afterCompletion() phase already been processed, ignoring: %s", delegate - ); + ORM_SPECIFIC_LOGGER.syncAdapterIgnoringAfterCompletionAlreadyExecuted( delegate ); } else { delegate.afterCompletion( status ); diff --git a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/MappingLog.java b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/MappingLog.java index 828bab87650..62115262bd9 100644 --- a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/MappingLog.java +++ b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/MappingLog.java @@ -45,7 +45,6 @@ import org.hibernate.search.util.common.logging.impl.TypeFormatter; import org.hibernate.search.util.common.reporting.EventContext; -import org.jboss.logging.BasicLogger; import org.jboss.logging.Logger; import org.jboss.logging.annotations.Cause; import org.jboss.logging.annotations.FormatWith; @@ -61,7 +60,7 @@ """ ) @MessageLogger(projectCode = MessageConstants.PROJECT_CODE) -public interface MappingLog extends BasicLogger { +public interface 
MappingLog { String CATEGORY_NAME = "org.hibernate.search.mapping"; MappingLog MAPPING_LOGGER = LoggerFactory.make( MappingLog.class, CATEGORY_NAME, MethodHandles.lookup() ); diff --git a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/MassIndexingLog.java b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/MassIndexingLog.java index 54b4f308a29..ace4a2929bf 100644 --- a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/MassIndexingLog.java +++ b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/MassIndexingLog.java @@ -6,6 +6,7 @@ import static org.hibernate.search.mapper.pojo.logging.impl.PojoMapperLog.ID_OFFSET; import static org.hibernate.search.mapper.pojo.logging.impl.PojoMapperLog.ID_OFFSET_LEGACY_ENGINE; +import static org.jboss.logging.Logger.Level.DEBUG; import static org.jboss.logging.Logger.Level.ERROR; import static org.jboss.logging.Logger.Level.INFO; @@ -20,7 +21,6 @@ import org.hibernate.search.util.common.logging.impl.LoggerFactory; import org.hibernate.search.util.common.logging.impl.MessageConstants; -import org.jboss.logging.BasicLogger; import org.jboss.logging.Logger; import org.jboss.logging.annotations.Cause; import org.jboss.logging.annotations.LogMessage; @@ -36,7 +36,7 @@ Logs information on various mass indexing operations (e.g. when they start/end o """ ) @MessageLogger(projectCode = MessageConstants.PROJECT_CODE) -public interface MassIndexingLog extends BasicLogger { +public interface MassIndexingLog { String CATEGORY_NAME = "org.hibernate.search.mapper.massindexing"; MassIndexingLog MASS_INDEXING_LOGGER = LoggerFactory.make( MassIndexingLog.class, CATEGORY_NAME, MethodHandles.lookup() ); @@ -134,4 +134,31 @@ void indexingProgressWithRemainingTime(float estimatePercentileComplete, long do value = "Mass indexing is going to index approx. %1$d entities (%2$s). 
Actual number may change once the indexing starts.") void indexingEntitiesApprox(long count, String types); + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 172, value = "Indexing for %s is done.") + void indexingForTypeGroupDone(String typeGroup); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 173, value = "Identifier loading for %s started.") + void identifierLoadingStarted(String typeGroup); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 174, value = "Identifier loading for %s finished.") + void identifierLoadingFinished(String typeGroup); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 175, value = "Identifier loading produced a list of ids: %s.") + void identifierLoadingLoadedIds(List ids); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 176, value = "Entity loading for %s started.") + void entityLoadingStarted(String typeGroup); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 177, value = "Entity loading for %s finished.") + void entityLoadingFinished(String typeGroup); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 178, value = "Entity loading will attempt to load entities for ids: %s.") + void entityLoadingAttemptToLoadIds(List ids); } diff --git a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/PojoMapperLog.java b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/PojoMapperLog.java index d9b72cda2d0..9ff69389fd7 100644 --- a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/PojoMapperLog.java +++ b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/logging/impl/PojoMapperLog.java @@ -56,7 +56,7 @@ public interface PojoMapperLog * here to the next value. 
*/ @LogMessage(level = TRACE) - @Message(id = ID_OFFSET + 172, value = "") + @Message(id = ID_OFFSET + 179, value = "") void nextLoggerIdForConvenience(); } diff --git a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingBatchIndexingWorkspace.java b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingBatchIndexingWorkspace.java index 92f2cc1cfaf..39718b8d4cd 100644 --- a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingBatchIndexingWorkspace.java +++ b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingBatchIndexingWorkspace.java @@ -77,7 +77,7 @@ public void runWithFailureHandler() throws InterruptedException { allFutures.addAll( indexingFutures ); Futures.unwrappedExceptionGet( Futures.firstFailureOrAllOf( allFutures ) ); typeGroupMonitor.indexingCompleted( massIndexingTypeGroupContext ); - MASS_INDEXING_LOGGER.debugf( "Indexing for %s is done", typeGroup.notifiedGroupName() ); + MASS_INDEXING_LOGGER.indexingForTypeGroupDone( typeGroup.notifiedGroupName() ); } @Override diff --git a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingEntityIdentifierLoadingRunnable.java b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingEntityIdentifierLoadingRunnable.java index eea0e6366a4..add6361e21d 100644 --- a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingEntityIdentifierLoadingRunnable.java +++ b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingEntityIdentifierLoadingRunnable.java @@ -46,7 +46,7 @@ public PojoMassIndexingEntityIdentifierLoadingRunnable(PojoMassIndexingNotifier @Override protected void runWithFailureHandler() throws InterruptedException { - MASS_INDEXING_LOGGER.trace( "started" 
); + MASS_INDEXING_LOGGER.identifierLoadingStarted( typeGroup.notifiedGroupName() ); LoadingContext context = new LoadingContext(); try ( PojoMassIdentifierLoader loader = loadingStrategy.createIdentifierLoader( typeGroup.includedTypes(), context ) ) { typeGroupMonitor.indexingStarted( massIndexingTypeGroupContext.withIdentifierLoader( loader ) ); @@ -60,7 +60,7 @@ protected void runWithFailureHandler() throws InterruptedException { // or if the thread is interrupted by the workspace (due to consumer failure). identifierQueue.producerStopping(); } - MASS_INDEXING_LOGGER.trace( "finished" ); + MASS_INDEXING_LOGGER.identifierLoadingFinished( typeGroup.notifiedGroupName() ); } @Override @@ -101,7 +101,7 @@ public PojoMassIdentifierSink createSink() { return new PojoMassIdentifierSink() { @Override public void accept(List batch) throws InterruptedException { - MASS_INDEXING_LOGGER.tracef( "produced a list of ids %s", batch ); + MASS_INDEXING_LOGGER.identifierLoadingLoadedIds( batch ); List copy = new ArrayList<>( batch ); identifierQueue.put( copy ); } diff --git a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingEntityLoadingRunnable.java b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingEntityLoadingRunnable.java index 594f2e1968c..f51246c8ef0 100644 --- a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingEntityLoadingRunnable.java +++ b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingEntityLoadingRunnable.java @@ -58,7 +58,7 @@ protected PojoMassIndexingEntityLoadingRunnable(PojoMassIndexingNotifier notifie @Override protected void runWithFailureHandler() throws InterruptedException { - MASS_INDEXING_LOGGER.trace( "started loading entities" ); + MASS_INDEXING_LOGGER.entityLoadingStarted( typeGroup.notifiedGroupName() ); LoadingContext context = new LoadingContext(); try ( 
PojoMassEntityLoader entityLoader = loadingStrategy.createEntityLoader( typeGroup.includedTypes(), context ) ) { @@ -66,7 +66,7 @@ protected void runWithFailureHandler() throws InterruptedException { do { idList = identifierQueue.take(); if ( idList != null ) { - MASS_INDEXING_LOGGER.tracef( "received list of ids %s", idList ); + MASS_INDEXING_LOGGER.entityLoadingAttemptToLoadIds( idList ); // This will pass the loaded entities to the sink, which will trigger indexing for those entities. try { entityLoader.load( idList ); @@ -79,7 +79,7 @@ protected void runWithFailureHandler() throws InterruptedException { while ( idList != null ); context.waitForLastBatches(); } - MASS_INDEXING_LOGGER.trace( "finished loading entities" ); + MASS_INDEXING_LOGGER.entityLoadingFinished( typeGroup.notifiedGroupName() ); } @Override diff --git a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingIndexedTypeGroup.java b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingIndexedTypeGroup.java index ba18ce541df..d61b826d816 100644 --- a/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingIndexedTypeGroup.java +++ b/mapper/pojo-base/src/main/java/org/hibernate/search/mapper/pojo/massindexing/impl/PojoMassIndexingIndexedTypeGroup.java @@ -77,6 +77,7 @@ private static PojoMassIndexingIndexedTypeGroup single(PojoMassIn private final PojoMassIndexingTypeContextProvider typeContextProvider; private final Set> includedTypes; private final PojoMassLoadingContext loadingContext; + private final String notifiedGroupName; private Boolean groupingAllowed; private PojoMassIndexingIndexedTypeGroup(PojoMassLoadingStrategy loadingStrategy, @@ -91,6 +92,8 @@ private PojoMassIndexingIndexedTypeGroup(PojoMassLoadingStrategy loadingSt this.loadingStrategy = loadingStrategy; this.includedTypes = includedTypes; this.loadingContext = loadingContext; + 
this.notifiedGroupName = includedTypes.stream().map( PojoMassIndexingIndexedTypeContext::entityName ) + .collect( Collectors.joining( "," ) ); if ( includedTypes.size() > 1 ) { // We're already grouping, so obviously it's allowed. groupingAllowed = true; @@ -108,8 +111,7 @@ public String toString() { } public String notifiedGroupName() { - return includedTypes.stream().map( PojoMassIndexingIndexedTypeContext::entityName ) - .collect( Collectors.joining( "," ) ); + return notifiedGroupName; } public Set> includedTypes() { diff --git a/util/common/src/main/java/org/hibernate/search/util/common/impl/CancellableExecutionCompletableFuture.java b/util/common/src/main/java/org/hibernate/search/util/common/impl/CancellableExecutionCompletableFuture.java index b97fb2b8c62..acd49403880 100644 --- a/util/common/src/main/java/org/hibernate/search/util/common/impl/CancellableExecutionCompletableFuture.java +++ b/util/common/src/main/java/org/hibernate/search/util/common/impl/CancellableExecutionCompletableFuture.java @@ -31,14 +31,12 @@ public boolean cancel(boolean mayInterruptIfRunning) { * Thus we mark 'this' as cancelled *first*, so that any exception in the operation * from now on will be ignored. 
*/ - COMMON_FAILURES_LOGGER.tracef( "Cancelling CompletableFuture %s (mayInterruptIfRunning = %s)", this, - mayInterruptIfRunning ); + COMMON_FAILURES_LOGGER.cancellingCompletableFuture( this, mayInterruptIfRunning ); super.cancel( mayInterruptIfRunning ); - COMMON_FAILURES_LOGGER.tracef( "Cancelling Future %s (mayInterruptIfRunning = %s)", future, mayInterruptIfRunning ); + COMMON_FAILURES_LOGGER.cancellingFuture( future, mayInterruptIfRunning ); boolean cancelled = future.cancel( mayInterruptIfRunning ); if ( !cancelled ) { - COMMON_FAILURES_LOGGER.debugf( "Could not cancel Future %s (mayInterruptIfRunning = %s)", future, - mayInterruptIfRunning ); + COMMON_FAILURES_LOGGER.cancellingFutureFailed( future, mayInterruptIfRunning ); } return cancelled; } diff --git a/util/common/src/main/java/org/hibernate/search/util/common/jar/impl/JandexUtils.java b/util/common/src/main/java/org/hibernate/search/util/common/jar/impl/JandexUtils.java index 1ee3b934471..2b788344b2e 100644 --- a/util/common/src/main/java/org/hibernate/search/util/common/jar/impl/JandexUtils.java +++ b/util/common/src/main/java/org/hibernate/search/util/common/jar/impl/JandexUtils.java @@ -159,7 +159,7 @@ private static Index doBuildJandexIndex(Path classesPath) throws IOException { private static boolean isUnsupportedVersionPath(Path metaInfVersions, Path path) { Path relative = metaInfVersions.relativize( path ); if ( relative.getNameCount() < 2 ) { - COMMON_FAILURES_LOGGER.debug( "Unexpected structure for META-INF/versions entry: " + path ); + COMMON_FAILURES_LOGGER.metaInfVersionBadStructure( path ); return true; } try { @@ -169,7 +169,7 @@ private static boolean isUnsupportedVersionPath(Path metaInfVersions, Path path) } } catch (NumberFormatException ex) { - COMMON_FAILURES_LOGGER.debug( "Failed to parse META-INF/versions entry: " + path, ex ); + COMMON_FAILURES_LOGGER.metaInfVersionParsingFailed( path, ex ); return true; } return false; diff --git 
a/util/common/src/main/java/org/hibernate/search/util/common/logging/impl/CommonFailuresLog.java b/util/common/src/main/java/org/hibernate/search/util/common/logging/impl/CommonFailuresLog.java index 33e6cf5b3bb..0732be0e1f2 100644 --- a/util/common/src/main/java/org/hibernate/search/util/common/logging/impl/CommonFailuresLog.java +++ b/util/common/src/main/java/org/hibernate/search/util/common/logging/impl/CommonFailuresLog.java @@ -5,17 +5,22 @@ package org.hibernate.search.util.common.logging.impl; +import static org.jboss.logging.Logger.Level.DEBUG; +import static org.jboss.logging.Logger.Level.TRACE; + import java.io.IOException; import java.lang.invoke.MethodHandles; import java.lang.reflect.Member; import java.lang.reflect.Type; import java.net.URI; import java.net.URL; +import java.nio.file.Path; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.Future; import org.hibernate.search.util.common.SearchException; import org.hibernate.search.util.common.logging.CategorizedLogger; -import org.jboss.logging.BasicLogger; import org.jboss.logging.Logger; import org.jboss.logging.annotations.Cause; import org.jboss.logging.annotations.FormatWith; @@ -36,7 +41,7 @@ @ValidIdRanges({ @ValidIdRange(min = MessageConstants.UTIL_ID_RANGE_MIN, max = MessageConstants.UTIL_ID_RANGE_MAX), }) -public interface CommonFailuresLog extends BasicLogger { +public interface CommonFailuresLog { String CATEGORY_NAME = "org.hibernate.search.common.failures"; @@ -129,4 +134,23 @@ SearchException errorInvokingStaticMember(Member member, String argumentsAsStrin + "See the reference documentation for information about mapping configurers.") SearchException cannotOpenNestedJar(URI uri, @Cause Throwable e); + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 19, value = "Unexpected structure for META-INF/versions entry: %s") + void metaInfVersionBadStructure(Path path); + + @LogMessage(level = DEBUG) + @Message(id = ID_OFFSET + 20, value = "Failed to parse 
META-INF/versions entry: %s") + void metaInfVersionParsingFailed(Path path, @Cause Throwable e); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 21, value = "Cancelling CompletableFuture %s (mayInterruptIfRunning = %s)") + void cancellingCompletableFuture(CompletableFuture future, boolean mayInterruptIfRunning); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 22, value = "Cancelling Future %s (mayInterruptIfRunning = %s)") + void cancellingFuture(Future future, boolean mayInterruptIfRunning); + + @LogMessage(level = TRACE) + @Message(id = ID_OFFSET + 23, value = "Could not cancel Future %s (mayInterruptIfRunning = %s)") + void cancellingFutureFailed(Future future, boolean mayInterruptIfRunning); }