From 4f6365dff7d4620823574f656ed5bb70039b2a47 Mon Sep 17 00:00:00 2001 From: Scala Steward Date: Thu, 19 Aug 2021 08:14:18 +0200 Subject: [PATCH 1/2] Update scalafmt-core from 2.7.5 to 3.0.0 --- .scalafmt.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.scalafmt.conf b/.scalafmt.conf index 47e558f8b..32253c22d 100644 --- a/.scalafmt.conf +++ b/.scalafmt.conf @@ -1,4 +1,4 @@ -version = "2.7.5" +version = "3.0.0" maxColumn = 140 assumeStandardLibraryStripMargin = true From 49a494b779d9d5dba3d2f129054d8f49e8539b5b Mon Sep 17 00:00:00 2001 From: Scala Steward Date: Thu, 19 Aug 2021 08:14:28 +0200 Subject: [PATCH 2/2] Reformat with scalafmt 3.0.0 --- .../avast/sst/datastax/config/advanced.scala | 634 ++++++++++-------- .../com/avast/sst/datastax/config/basic.scala | 170 +++-- .../com/avast/sst/catseffect/TimeUtils.scala | 5 +- .../sst/catseffect/syntax/TimeSyntax.scala | 5 +- .../avast/sst/doobie/DoobieHikariModule.scala | 3 +- .../sst/grpc/server/GrpcServerModule.scala | 9 +- .../client/Http4sBlazeClientModule.scala | 3 +- .../Http4sClientCircuitBreakerModule.scala | 6 +- .../server/Http4sBlazeServerModule.scala | 3 +- .../MicrometerHttp4sServerMetricsModule.scala | 4 +- .../server/micrometer/RouteMetrics.scala | 3 +- .../middleware/CorrelationIdMiddleware.scala | 4 +- .../jvm/micrometer/MicrometerJvmModule.scala | 3 +- .../execution/ConfigurableThreadFactory.scala | 7 +- .../sst/jvm/execution/ExecutorModule.scala | 15 +- .../com/avast/sst/lettuce/LettuceModule.scala | 7 +- .../com/avast/sst/sentry/SentryModule.scala | 8 +- .../com/avast/sst/ssl/SslContextModule.scala | 10 +- 18 files changed, 480 insertions(+), 419 deletions(-) diff --git a/cassandra-datastax-driver/src/main/scala/com/avast/sst/datastax/config/advanced.scala b/cassandra-datastax-driver/src/main/scala/com/avast/sst/datastax/config/advanced.scala index 1bb787668..0fabed633 100644 --- a/cassandra-datastax-driver/src/main/scala/com/avast/sst/datastax/config/advanced.scala +++ b/cassandra-datastax-driver/src/main/scala/com/avast/sst/datastax/config/advanced.scala @@ -4,55 +4,68 @@ import scala.concurrent.duration._ /** Advanced datastax driver configuration * - * @param connection Configures query connection. - * @param reconnectOnInit Whether to schedule reconnection attempts if all contact points are unreachable on the first - * initialization attempt. - * - * If this is true, the driver will retry according to the reconnection policy. The - * `SessionBuilder.build()` call -- or the future returned by `SessionBuilder.buildAsync()` -- - * won't complete until a contact point has been reached. - * - * If this is false and no contact points are available, the driver will fail. - * @param reconnectionPolicy The policy that controls how often the driver tries to re-establish connections to down nodes. - * @param retryPolicy The policy that controls if the driver retries requests that have failed on one node. - * @param speculativeExecutionPolicy The policy that controls if the driver pre-emptively tries other nodes if a node takes too long - * to respond. - * @param authProvider if `None` no authentication will occur. - * @param timestampGenerator The generator that assigns a microsecond timestamp to each request. - * @param requestTracker A session-wide component that tracks the outcome of requests. - * By default `com.datastax.oss.driver.internal.core.trackerNoopRequestTracker` is used. - * @param throttler A session-wide component that controls the rate at which requests are executed. 
- * By default `PassThroughRequestThrottler` is used. - * @param nodeStateListener A session-wide component that listens for node state changes. - * By default `com.datastax.oss.driver.internal.core.metadata.NoopNodeStateListener` is used. - * @param schemaChangeListener A session-wide component that listens for node state changes. - * By default `com.datastax.oss.driver.internal.core.metadata.schema.NoopSchemaChangeListener` is used. - * @param addressTranslator The address translator to use to convert the addresses sent by Cassandra nodes into ones that - * the driver uses to connect. - * By default `com.datastax.oss.driver.internal.core.addresstranslation.PassThroughAddressTranslator` is used. - * @param resolveContactPoints Whether to resolve the addresses passed to `Basic.contactPoints`. - * - * If this is true, addresses are created with `InetSocketAddress(String, int)`: the host name will - * be resolved the first time, and the driver will use the resolved IP address for all subsequent - * connection attempts. - * - * If this is false, addresses are created with `InetSocketAddress.createUnresolved()`: the host - * name will be resolved again every time the driver opens a new connection. This is useful for - * containerized environments where DNS records are more likely to change over time (note that the - * JVM and OS have their own DNS caching mechanisms, so you might need additional configuration - * beyond the driver). - * @param protocol The native protocol to use which defines the format of the binary messages between driver and - * Cassandra. - * @param request Request configuration. - * @param metrics Metrics configuration. - * Disabled by default. - * @param heartbeat Heartbeat configuration to check if node is alive. - * @param socket Socket configuration. - * @param metadata Metadata about the Cassandra cluster. - * @param controlConnection Configures dedicated administrative connection. - * @param preparedStatements Prepared statements configuration. - * @param netty Netty configuration which is used internally by driver. - * @param coalescer The component that coalesces writes on the connections. + * @param connection + * Configures query connection. + * @param reconnectOnInit + * Whether to schedule reconnection attempts if all contact points are unreachable on the first initialization attempt. + * + * If this is true, the driver will retry according to the reconnection policy. The `SessionBuilder.build()` call -- or the future returned + * by `SessionBuilder.buildAsync()` -- won't complete until a contact point has been reached. + * + * If this is false and no contact points are available, the driver will fail. + * @param reconnectionPolicy + * The policy that controls how often the driver tries to re-establish connections to down nodes. + * @param retryPolicy + * The policy that controls if the driver retries requests that have failed on one node. + * @param speculativeExecutionPolicy + * The policy that controls if the driver pre-emptively tries other nodes if a node takes too long to respond. + * @param authProvider + * if `None` no authentication will occur. + * @param timestampGenerator + * The generator that assigns a microsecond timestamp to each request. + * @param requestTracker + * A session-wide component that tracks the outcome of requests. By default + * `com.datastax.oss.driver.internal.core.trackerNoopRequestTracker` is used. + * @param throttler + * A session-wide component that controls the rate at which requests are executed. 
By default `PassThroughRequestThrottler` is used. + * @param nodeStateListener + * A session-wide component that listens for node state changes. By default + * `com.datastax.oss.driver.internal.core.metadata.NoopNodeStateListener` is used. + * @param schemaChangeListener + * A session-wide component that listens for node state changes. By default + * `com.datastax.oss.driver.internal.core.metadata.schema.NoopSchemaChangeListener` is used. + * @param addressTranslator + * The address translator to use to convert the addresses sent by Cassandra nodes into ones that the driver uses to connect. By default + * `com.datastax.oss.driver.internal.core.addresstranslation.PassThroughAddressTranslator` is used. + * @param resolveContactPoints + * Whether to resolve the addresses passed to `Basic.contactPoints`. + * + * If this is true, addresses are created with `InetSocketAddress(String, int)`: the host name will be resolved the first time, and the + * driver will use the resolved IP address for all subsequent connection attempts. + * + * If this is false, addresses are created with `InetSocketAddress.createUnresolved()`: the host name will be resolved again every time the + * driver opens a new connection. This is useful for containerized environments where DNS records are more likely to change over time (note + * that the JVM and OS have their own DNS caching mechanisms, so you might need additional configuration beyond the driver). + * @param protocol + * The native protocol to use which defines the format of the binary messages between driver and Cassandra. + * @param request + * Request configuration. + * @param metrics + * Metrics configuration. Disabled by default. + * @param heartbeat + * Heartbeat configuration to check if node is alive. + * @param socket + * Socket configuration. + * @param metadata + * Metadata about the Cassandra cluster. + * @param controlConnection + * Configures dedicated administrative connection. + * @param preparedStatements + * Prepared statements configuration. + * @param netty + * Netty configuration which is used internally by driver. + * @param coalescer + * The component that coalesces writes on the connections. */ final case class AdvancedConfig( connection: ConnectionConfig = AdvancedConfig.Default.connection, @@ -108,10 +121,10 @@ object AdvancedConfig { ) } -/** @param warnIfSetKeyspace Whether a warning is logged when a request (such as a CQL `USE ...`) changes the active - * keyspace. - * @param logWarnings Whether logging of server warnings generated during query execution should be disabled by the - * driver. +/** @param warnIfSetKeyspace + * Whether a warning is logged when a request (such as a CQL `USE ...`) changes the active keyspace. + * @param logWarnings + * Whether logging of server warnings generated during query execution should be disabled by the driver. */ final case class AdvancedRequestConfig(warnIfSetKeyspace: Boolean, trace: TraceConfig, logWarnings: Boolean) @@ -121,17 +134,22 @@ object AdvancedRequestConfig { /** Configure query connection properties. * - * @param initQueryTimeout The timeout to use for internal queries that run as part of the initialization process. If - * this timeout fires, the initialization of the connection will fail. If this is the first - * connection ever, the driver will fail to initialize as well, otherwise it will retry the - * connection later. - * @param setKeyspaceTimeout The timeout to use when the driver changes the keyspace on a connection at runtime. 
- * @param localPool The driver maintains a connection pool to each node - * @param remotePool The driver maintains a connection pool to each node - * @param maxRequestsPerConnection The maximum number of requests that can be executed concurrently on a connection. - * This must be between 1 and 32768. - * @param maxOrphanRequests The maximum number of "orphaned" requests before a connection gets closed automatically. - * @param warnOnInitError Whether to log non-fatal errors when the driver tries to open a new connection. + * @param initQueryTimeout + * The timeout to use for internal queries that run as part of the initialization process. If this timeout fires, the initialization of + * the connection will fail. If this is the first connection ever, the driver will fail to initialize as well, otherwise it will retry + * the connection later. + * @param setKeyspaceTimeout + * The timeout to use when the driver changes the keyspace on a connection at runtime. + * @param localPool + * The driver maintains a connection pool to each node + * @param remotePool + * The driver maintains a connection pool to each node + * @param maxRequestsPerConnection + * The maximum number of requests that can be executed concurrently on a connection. This must be between 1 and 32768. + * @param maxOrphanRequests + * The maximum number of "orphaned" requests before a connection gets closed automatically. + * @param warnOnInitError + * Whether to log non-fatal errors when the driver tries to open a new connection. */ final case class ConnectionConfig( connectTimeout: Duration = ConnectionConfig.Default.connectTimeout, @@ -149,10 +167,10 @@ object ConnectionConfig { ConnectionConfig(ConnectTimeout, InitQueryTimeout, InitQueryTimeout, PoolConfig.Default, PoolConfig.Default, 1024, 256, true) } -/** The driver maintains a connection pool to each node, according to the distance assigned to it - * by the load balancing policy +/** The driver maintains a connection pool to each node, according to the distance assigned to it by the load balancing policy * - * @param size The number of connections in the pool + * @param size + * The number of connections in the pool */ final case class PoolConfig(size: Int) @@ -162,10 +180,13 @@ object PoolConfig { /** The policy that controls how often the driver tries to re-establish connections to down nodes. * - * @param class The class of the policy. If it is not qualified, the driver assumes that it resides in the - * package `com.datastax.oss.driver.internal.core.connection`. - * @param baseDelay Reconnection policy starts with the base delay. - * @param maxDelay Reconnection policy increases delay up to the max delay. + * @param class + * The class of the policy. If it is not qualified, the driver assumes that it resides in the package + * `com.datastax.oss.driver.internal.core.connection`. + * @param baseDelay + * Reconnection policy starts with the base delay. + * @param maxDelay + * Reconnection policy increases delay up to the max delay. */ final case class ReconnectionPolicyConfig( `class`: String = ReconnectionPolicyConfig.Default.`class`, @@ -178,7 +199,9 @@ object ReconnectionPolicyConfig { val Constant: ReconnectionPolicyConfig = ReconnectionPolicyConfig("com.datastax.oss.driver.internal.core.connection.ConstantReconnectionPolicy", 1.second, None) - /** A reconnection policy that waits exponentially longer between each reconnection attempt (but keeps a constant delay once a maximum delay is reached). 
*/ + /** A reconnection policy that waits exponentially longer between each reconnection attempt (but keeps a constant delay once a maximum + * delay is reached). + */ val Exponential: ReconnectionPolicyConfig = ReconnectionPolicyConfig("com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy", 1.second, Some(60.seconds)) @@ -187,8 +210,9 @@ object ReconnectionPolicyConfig { /** The policy that controls if the driver retries requests that have failed on one node * - * @param class The class of the policy. If it is not qualified, the driver assumes that it resides in the - * package `com.datastax.oss.driver.internal.core.retry`. + * @param class + * The class of the policy. If it is not qualified, the driver assumes that it resides in the package + * `com.datastax.oss.driver.internal.core.retry`. */ final case class RetryPolicyConfig(`class`: String) @@ -198,12 +222,13 @@ object RetryPolicyConfig { /** The policy that controls if the driver preemptively tries other nodes if a node takes too long to respond. * - * @param class The class of the policy. If it is not qualified, the driver assumes that it resides in the - * package `com.datastax.oss.driver.internal.core.specex`. - * @param maxExecutions The maximum number of executions (including the initial, non-speculative execution). - * This must be at least one. - * @param delay The delay between each execution. 0 is allowed, and will result in all executions being sent - * simultaneously when the request starts. + * @param class + * The class of the policy. If it is not qualified, the driver assumes that it resides in the package + * `com.datastax.oss.driver.internal.core.specex`. + * @param maxExecutions + * The maximum number of executions (including the initial, non-speculative execution). This must be at least one. + * @param delay + * The delay between each execution. 0 is allowed, and will result in all executions being sent simultaneously when the request starts. */ final case class SpeculativeExecutionPolicyConfig(`class`: String, maxExecutions: Option[Int], delay: Option[Duration]) @@ -225,18 +250,21 @@ object SpeculativeExecutionPolicyConfig { /** The component that handles authentication on each new connection. * - * @param `class` custom class that implements AuthProvider and has a public constructor with a DriverContext argument + * @param `class` + * custom class that implements AuthProvider and has a public constructor with a DriverContext argument */ final case class AuthProviderConfig(`class`: String, username: String, password: String) /** The generator that assigns a microsecond timestamp to each request. * - * @param class The class of the generator. If it is not qualified, the driver assumes that it resides in the - * package `com.datastax.oss.driver.internal.core.time`. - * @param driftWarning configure timestamp drift logging - * @param forceJavaClock Whether to force the driver to use Java's millisecond-precision system clock. - * If this is false, the driver will try to access the microsecond-precision OS clock via native - * calls (and fallback to the Java one if the native calls fail). + * @param class + * The class of the generator. If it is not qualified, the driver assumes that it resides in the package + * `com.datastax.oss.driver.internal.core.time`. + * @param driftWarning + * configure timestamp drift logging + * @param forceJavaClock + * Whether to force the driver to use Java's millisecond-precision system clock. 
If this is false, the driver will try to access the + * microsecond-precision OS clock via native calls (and fallback to the Java one if the native calls fail). */ final case class TimestampGeneratorConfig( `class`: String = TimestampGeneratorConfig.Default.`class`, @@ -263,9 +291,11 @@ object TimestampGeneratorConfig { /** Configure warn logging when timestamp drifts. * - * @param threshold How far in the future timestamps are allowed to drift before the warning is logged. - * If it is undefined or set to 0, warnings are disabled. - * @param interval How often the warning will be logged if timestamps keep drifting above the threshold. + * @param threshold + * How far in the future timestamps are allowed to drift before the warning is logged. If it is undefined or set to 0, warnings are + * disabled. + * @param interval + * How often the warning will be logged if timestamps keep drifting above the threshold. */ final case class DriftWarningConfig(threshold: Duration, interval: Duration) @@ -275,9 +305,11 @@ object DriftWarningConfig { /** A session-wide component that tracks the outcome of requests. * - * @param class The class of the tracker. If it is not qualified, the driver assumes that it resides in the - * package `com.datastax.oss.driver.internal.core.tracker`. - * @param logs Parameters for RequestLogger + * @param class + * The class of the tracker. If it is not qualified, the driver assumes that it resides in the package + * `com.datastax.oss.driver.internal.core.tracker`. + * @param logs + * Parameters for RequestLogger */ final case class RequestTrackerConfig( classes: List[String] = List("com.datastax.oss.driver.internal.core.tracker.NoopRequestTracker"), @@ -290,18 +322,24 @@ object RequestTrackerConfig { /** Parameters for RequestLogger. * - * @param successEnabled Whether to log successful requests. - * @param errorEnabled Whether to log failed requests. - * @param slow Slow requests logging. - * @param maxQueryLength The maximum length of the query string in the log message. If it is longer than that, it - * will be truncated. - * @param showValues Whether to log bound values in addition to the query string. - * @param maxValueLength The maximum length for bound values in the log message. If the formatted representation of a - * value is longer than that, it will be truncated. - * @param maxValues The maximum number of bound values to log. If a request has more values, the list of values - * will be truncated. - * @param showStackTraces Whether to log stack traces for failed queries. If this is disabled, the log will just - * include the exception's string representation (generally the class name and message). + * @param successEnabled + * Whether to log successful requests. + * @param errorEnabled + * Whether to log failed requests. + * @param slow + * Slow requests logging. + * @param maxQueryLength + * The maximum length of the query string in the log message. If it is longer than that, it will be truncated. + * @param showValues + * Whether to log bound values in addition to the query string. + * @param maxValueLength + * The maximum length for bound values in the log message. If the formatted representation of a value is longer than that, it will be + * truncated. + * @param maxValues + * The maximum number of bound values to log. If a request has more values, the list of values will be truncated. + * @param showStackTraces + * Whether to log stack traces for failed queries. 
If this is disabled, the log will just include the exception's string representation + * (generally the class name and message). */ final case class LogsConfig( successEnabled: Option[Boolean], @@ -316,24 +354,27 @@ final case class LogsConfig( /** Strategy to classify request as "slow". * - * @param threshold The threshold to classify a successful request as "slow". If this is unset, all successful - * requests will be considered as normal. - * @param enabled Whether to log slow requests. + * @param threshold + * The threshold to classify a successful request as "slow". If this is unset, all successful requests will be considered as normal. + * @param enabled + * Whether to log slow requests. */ final case class SlowConfig(threshold: Option[Duration], enabled: Option[Boolean]) /** A session-wide component that controls the rate at which requests are executed. * - * @param class The class of the throttler. If it is not qualified, the driver assumes that it resides in - * the package com.datastax.oss.driver.internal.core.session.throttling. - * @param maxQueueSize The maximum number of requests that can be enqueued when the throttling threshold is exceeded. - * Beyond that size, requests will fail with a RequestThrottlingException. - * @param maxConcurrentRequests The maximum number of requests that are allowed to execute in parallel. - * Only used by ConcurrencyLimitingRequestThrottler. - * @param maxRequestsPerSecond The maximum allowed request rate. - * Only used by RateLimitingRequestThrottler. - * @param drainInterval How often the throttler attempts to dequeue requests. - * Only used by RateLimitingRequestThrottler. + * @param class + * The class of the throttler. If it is not qualified, the driver assumes that it resides in the package + * com.datastax.oss.driver.internal.core.session.throttling. + * @param maxQueueSize + * The maximum number of requests that can be enqueued when the throttling threshold is exceeded. Beyond that size, requests will fail + * with a RequestThrottlingException. + * @param maxConcurrentRequests + * The maximum number of requests that are allowed to execute in parallel. Only used by ConcurrencyLimitingRequestThrottler. + * @param maxRequestsPerSecond + * The maximum allowed request rate. Only used by RateLimitingRequestThrottler. + * @param drainInterval + * How often the throttler attempts to dequeue requests. Only used by RateLimitingRequestThrottler. */ final case class ThrottlerConfig( `class`: String, @@ -370,12 +411,11 @@ object ThrottlerConfig { val Default: ThrottlerConfig = PassThrough } -/** A session-wide component that listens for node state changes. If it is not qualified, the driver - * assumes that it resides in the package `com.datastax.oss.driver.internal.core.metadata`. +/** A session-wide component that listens for node state changes. If it is not qualified, the driver assumes that it resides in the package + * `com.datastax.oss.driver.internal.core.metadata`. * - * The driver provides a single no-op implementation out of the box: `NoopNodeStateListener`. - * You can also specify a custom class that implements NodeStateListener and has a public - * constructor with a DriverContext argument. + * The driver provides a single no-op implementation out of the box: `NoopNodeStateListener`. You can also specify a custom class that + * implements NodeStateListener and has a public constructor with a DriverContext argument. 
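  *
  * A purely illustrative sketch (the listener class below is hypothetical, not something shipped with the driver):
  * {{{
  * // the class must implement NodeStateListener and expose a public constructor taking a DriverContext
  * val nodeStateListeners = NodeStateListenerConfig(List("com.example.MyNodeStateListener"))
  * }}}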
*/ final case class NodeStateListenerConfig(classes: List[String]) @@ -387,13 +427,12 @@ object NodeStateListenerConfig { val Default: NodeStateListenerConfig = Noop } -/** A session-wide component that listens for node state changes. If it is not qualified, the driver - * assumes that it resides in the package `com.datastax.oss.driver.internal.core.metadata.schema`. +/** A session-wide component that listens for node state changes. If it is not qualified, the driver assumes that it resides in the package + * `com.datastax.oss.driver.internal.core.metadata.schema`. * * The driver provides a single no-op implementation out of the box: `NoopSchemaChangeListener`. * - * You can also specify a custom class that implements `SchemaChangeListener` and has a public - * constructor with a DriverContext argument. + * You can also specify a custom class that implements `SchemaChangeListener` and has a public constructor with a DriverContext argument. */ final case class SchemaChangeListenerConfig(classes: List[String]) @@ -407,12 +446,10 @@ object SchemaChangeListenerConfig { val Default: SchemaChangeListenerConfig = Noop } -/** The address translator to use to convert the addresses sent by Cassandra nodes into ones that - * the driver uses to connect. +/** The address translator to use to convert the addresses sent by Cassandra nodes into ones that the driver uses to connect. * - * This is only needed if the nodes are not directly reachable from the driver (for example, the - * driver is in a different network region and needs to use a public IP, or it connects through a - * proxy). + * This is only needed if the nodes are not directly reachable from the driver (for example, the driver is in a different network region + * and needs to use a public IP, or it connects through a proxy). */ final case class AddressTranslatorConfig(`class`: String) @@ -422,12 +459,13 @@ object AddressTranslatorConfig { /** Protocol for query connection. * - * @param version The native protocol version to use. - * If this option is absent, the driver looks up the versions of the nodes at startup (by default - * in "system.peers.release_version"), and chooses the highest common protocol version. - * @param compression The name of the algorithm used to compress protocol frames. - * @param maxFrameLength The maximum length of the frames supported by the driver in megabytes. Beyond that limit, requests will - * fail with an exception. + * @param version + * The native protocol version to use. If this option is absent, the driver looks up the versions of the nodes at startup (by default in + * "system.peers.release_version"), and chooses the highest common protocol version. + * @param compression + * The name of the algorithm used to compress protocol frames. + * @param maxFrameLength + * The maximum length of the frames supported by the driver in megabytes. Beyond that limit, requests will fail with an exception. */ final case class ProtocolConfig(version: Option[String], compression: Option[String], maxFrameLength: Int) @@ -437,9 +475,12 @@ object ProtocolConfig { /** Trace configuration * - * @param attempts How many times the driver will attempt to fetch the query if it is not ready yet. - * @param interval The interval between each attempt. - * @param consistency The consistency level to use for trace queries. + * @param attempts + * How many times the driver will attempt to fetch the query if it is not ready yet. + * @param interval + * The interval between each attempt. 
+ * @param consistency + * The consistency level to use for trace queries. */ final case class TraceConfig(attempts: Int, interval: Duration, consistency: ConsistencyLevel) @@ -449,8 +490,10 @@ object TraceConfig { /** Metrics configuration * - * @param session The session-level metrics (all disabled by default). - * @param node The node-level metrics (all disabled by default). + * @param session + * The session-level metrics (all disabled by default). + * @param node + * The node-level metrics (all disabled by default). */ final case class MetricsConfig(session: Option[SessionConfig], node: Option[NodeConfig]) @@ -460,9 +503,12 @@ object MetricsConfig { /** The session-level metrics (all disabled by default). * - * @param enabled The session-level metrics (all disabled by default). - * @param cqlRequests Extra configuration (for the metrics that need it). Required if the 'cql-requests' metric is enabled - * @param throttling Configures request throttling metrics.. + * @param enabled + * The session-level metrics (all disabled by default). + * @param cqlRequests + * Extra configuration (for the metrics that need it). Required if the 'cql-requests' metric is enabled + * @param throttling + * Configures request throttling metrics.. */ final case class SessionConfig( enabled: List[Int] = List.empty, @@ -472,21 +518,22 @@ final case class SessionConfig( /** Extra metrics configuration * - * @param highestLatency The largest latency that we expect to record. - * @param significantDigits The number of significant decimal digits to which internal structures will maintain - * value resolution and separation (for example, 3 means that recordings up to 1 second - * will be recorded with a resolution of 1 millisecond or better). - * This must be between 0 and 5. If the value is out of range, it defaults to 3 and a - * warning is logged. - * @param refreshInterval The interval at which percentile data is refreshed. + * @param highestLatency + * The largest latency that we expect to record. + * @param significantDigits + * The number of significant decimal digits to which internal structures will maintain value resolution and separation (for example, 3 + * means that recordings up to 1 second will be recorded with a resolution of 1 millisecond or better). This must be between 0 and 5. If + * the value is out of range, it defaults to 3 and a warning is logged. + * @param refreshInterval + * The interval at which percentile data is refreshed. */ final case class CqlRequestsConfig(highestLatency: Duration = 3.seconds, significantDigits: Int = 3, refreshInterval: Duration = 5.minutes) /** How long requests are being throttled * - * @param delay This is the time between the start of the session.execute() call, and the moment when the - * throttler allows the request to proceed. - * Required: if the 'throttling.delay' metric is enabled + * @param delay + * This is the time between the start of the session.execute() call, and the moment when the throttler allows the request to proceed. + * Required: if the 'throttling.delay' metric is enabled */ final case class ThrottlingConfig(delay: Option[DelayConfig]) @@ -495,8 +542,10 @@ final case class DelayConfig(highestLatency: Duration = 3.seconds, significantDi /** Node-level metric. 
* - * @param enabled node-level metrics - * @param cqlRequests Required: if the 'cql-messages' metric is enabled + * @param enabled + * node-level metrics + * @param cqlRequests + * Required: if the 'cql-messages' metric is enabled */ final case class NodeConfig(enabled: List[Int], cqlRequests: Option[CqlMessagesConfig]) @@ -504,17 +553,20 @@ final case class CqlMessagesConfig(highestLatency: Duration = 3.seconds, signifi /** Socket configuration. * - * @param tcpNoDelay Whether or not to disable the Nagle algorithm. - * By default, this option is set to true (Nagle disabled), because the driver has its own - * internal message coalescing algorithm. - * @param keepAlive All other socket options are unset by default. The actual value depends on the underlying - * Netty transport. - * @param reuseAddress Whether or not to allow address reuse. - * @param lingerInterval Sets the linger interval. - * If the value is zero or greater, then it represents a timeout value, in seconds; - * if the value is negative, it means that this option is disabled. - * @param receiveBufferSize Sets a hint to the size of the underlying buffers for incoming network I/O. - * @param sendBufferSize Sets a hint to the size of the underlying buffers for outgoing network I/O. + * @param tcpNoDelay + * Whether or not to disable the Nagle algorithm. By default, this option is set to true (Nagle disabled), because the driver has its own + * internal message coalescing algorithm. + * @param keepAlive + * All other socket options are unset by default. The actual value depends on the underlying Netty transport. + * @param reuseAddress + * Whether or not to allow address reuse. + * @param lingerInterval + * Sets the linger interval. If the value is zero or greater, then it represents a timeout value, in seconds; if the value is negative, + * it means that this option is disabled. + * @param receiveBufferSize + * Sets a hint to the size of the underlying buffers for incoming network I/O. + * @param sendBufferSize + * Sets a hint to the size of the underlying buffers for outgoing network I/O. */ final case class SocketConfig( tcpNoDelay: Boolean, @@ -529,12 +581,13 @@ object SocketConfig { val Default: SocketConfig = SocketConfig(true, None, None, None, None, None) } -/** If a connection stays idle for that duration (no reads), the driver sends a dummy message on it to make sure - * it's still alive. If not, the connection is trashed and replaced. +/** If a connection stays idle for that duration (no reads), the driver sends a dummy message on it to make sure it's still alive. If not, + * the connection is trashed and replaced. * - * @param interval The heartbeat interval - * @param timeout How long the driver waits for the response to a heartbeat. If this timeout fires, the heartbeat is - * considered failed. + * @param interval + * The heartbeat interval + * @param timeout + * How long the driver waits for the response to a heartbeat. If this timeout fires, the heartbeat is considered failed. */ final case class HeartbeatConfig(interval: Duration, timeout: Duration) @@ -544,9 +597,12 @@ object HeartbeatConfig { /** Metadata * - * @param debouncer Debouncing to smoothen out oscillations if conflicting events are sent out in short bursts. - * @param schema Options relating to schema metadata. - * @param tokenMap Whether token metadata (Cluster.getMetadata.getTokenMap) is enabled. + * @param debouncer + * Debouncing to smoothen out oscillations if conflicting events are sent out in short bursts. 
+ * @param schema + * Options relating to schema metadata. + * @param tokenMap + * Whether token metadata (Cluster.getMetadata.getTokenMap) is enabled. */ final case class MetadataConfig( debouncer: TopologyEventDebouncerConfig = MetadataConfig.Default.debouncer, @@ -560,12 +616,12 @@ object MetadataConfig { /** The debouncer helps smoothen out oscillations if conflicting events are sent out in short bursts. * - * @param window How long the driver waits to propagate an event. If another event is received within that time, the - * window is reset and a batch of accumulated events will be delivered. - * Debouncing may be disabled by setting the window to 0 or max-events to 1 (not recommended). - * @param maxEvents The maximum number of events that can accumulate. If this count is reached, the events are - * delivered immediately and the time window is reset. This avoids holding events indefinitely - * if the window keeps getting reset. + * @param window + * How long the driver waits to propagate an event. If another event is received within that time, the window is reset and a batch of + * accumulated events will be delivered. Debouncing may be disabled by setting the window to 0 or max-events to 1 (not recommended). + * @param maxEvents + * The maximum number of events that can accumulate. If this count is reached, the events are delivered immediately and the time window + * is reset. This avoids holding events indefinitely if the window keeps getting reset. */ final case class TopologyEventDebouncerConfig( window: Duration = TopologyEventDebouncerConfig.Default.window, @@ -576,16 +632,20 @@ object TopologyEventDebouncerConfig { val Default: TopologyEventDebouncerConfig = TopologyEventDebouncerConfig(1.second, 20) } -/** Options relating to schema metadata (Cluster.getMetadata.getKeyspaces). - * This metadata is exposed by the driver for informational purposes, and is also necessary for token-aware routing. +/** Options relating to schema metadata (Cluster.getMetadata.getKeyspaces). This metadata is exposed by the driver for informational + * purposes, and is also necessary for token-aware routing. * - * @param enabled Whether schema metadata is enabled. - * If this is false, the schema will remain empty, or to the last known value. - * @param refreshedKeyspaces The list of keyspaces for which schema and token metadata should be maintained. If this - * property is absent or empty, all existing keyspaces are processed. - * @param requestTimeout The timeout for the requests to the schema tables. - * @param requestPageSize The page size for the requests to the schema tables. - * @param debouncer Protects against bursts of schema updates. + * @param enabled + * Whether schema metadata is enabled. If this is false, the schema will remain empty, or to the last known value. + * @param refreshedKeyspaces + * The list of keyspaces for which schema and token metadata should be maintained. If this property is absent or empty, all existing + * keyspaces are processed. + * @param requestTimeout + * The timeout for the requests to the schema tables. + * @param requestPageSize + * The page size for the requests to the schema tables. + * @param debouncer + * Protects against bursts of schema updates. 
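  *
  * A hedged sketch of narrowing schema metadata to a single keyspace (the keyspace name is made up):
  * {{{
  * // start from the defaults and track only one keyspace; an empty list means "all keyspaces"
  * val schema = SchemaConfig.Default.copy(refreshedKeyspaces = List("my_keyspace"))
  * }}}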
*/ final case class SchemaConfig( enabled: Boolean = SchemaConfig.Default.enabled, @@ -599,15 +659,15 @@ object SchemaConfig { val Default: SchemaConfig = SchemaConfig(true, List.empty, RequestTimeout, RequestPageSize, DebouncerConfig.Default) } -/** Protects against bursts of schema updates (for example when a client issues a sequence of DDL queries), by - * coalescing them into a single update. +/** Protects against bursts of schema updates (for example when a client issues a sequence of DDL queries), by coalescing them into a single + * update. * - * @param window How long the driver waits to apply a refresh. If another refresh is requested within that - * time, the window is reset and a single refresh will be triggered when it ends. - * Debouncing may be disabled by setting the window to 0 or max-events to 1 (this is highly - * discouraged for schema refreshes). - * @param maxEvents The maximum number of refreshes that can accumulate. If this count is reached, a refresh - * is done immediately and the window is reset. + * @param window + * How long the driver waits to apply a refresh. If another refresh is requested within that time, the window is reset and a single + * refresh will be triggered when it ends. Debouncing may be disabled by setting the window to 0 or max-events to 1 (this is highly + * discouraged for schema refreshes). + * @param maxEvents + * The maximum number of refreshes that can accumulate. If this count is reached, a refresh is done immediately and the window is reset. */ final case class DebouncerConfig(window: Duration = DebouncerConfig.Default.window, maxEvents: Int = DebouncerConfig.Default.maxEvents) @@ -615,11 +675,10 @@ object DebouncerConfig { val Default: DebouncerConfig = DebouncerConfig(1.second, 20) } -/** Whether token metadata (Cluster.getMetadata.getTokenMap) is `enabled`. - * This metadata is exposed by the driver for informational purposes, and is also necessary for token-aware routing. - * If this is false, it will remain empty, or to the last known value. Note that its computation - * requires information about the schema; therefore if schema metadata is disabled or filtered to - * a subset of keyspaces, the token map will be incomplete, regardless of the value of this property. +/** Whether token metadata (Cluster.getMetadata.getTokenMap) is `enabled`. This metadata is exposed by the driver for informational + * purposes, and is also necessary for token-aware routing. If this is false, it will remain empty, or to the last known value. Note that + * its computation requires information about the schema; therefore if schema metadata is disabled or filtered to a subset of keyspaces, + * the token map will be incomplete, regardless of the value of this property. */ final case class TokenMapConfig(enabled: Boolean) @@ -629,27 +688,24 @@ object ControlConnectionConfig { val Default: ControlConnectionConfig = ControlConnectionConfig(InitQueryTimeout, SchemaAgreementConfig.Default) } -/** Due to the distributed nature of Cassandra, schema changes made on one node might not be - * immediately visible to others. Under certain circumstances, the driver waits until all nodes - * agree on a common schema version (namely: before a schema refresh, before repreparing all - * queries on a newly up node, and before completing a successful schema-altering query). To do - * so, it queries system tables to find out the schema version of all nodes that are currently - * UP. 
If all the versions match, the check succeeds, otherwise it is retried periodically, until - * a given timeout. +/** Due to the distributed nature of Cassandra, schema changes made on one node might not be immediately visible to others. Under certain + * circumstances, the driver waits until all nodes agree on a common schema version (namely: before a schema refresh, before repreparing + * all queries on a newly up node, and before completing a successful schema-altering query). To do so, it queries system tables to find + * out the schema version of all nodes that are currently UP. If all the versions match, the check succeeds, otherwise it is retried + * periodically, until a given timeout. * - * A schema agreement failure is not fatal, but it might produce unexpected results (for example, - * getting an "unconfigured table" error for a table that you created right before, just because - * the two queries went to different coordinators). + * A schema agreement failure is not fatal, but it might produce unexpected results (for example, getting an "unconfigured table" error for + * a table that you created right before, just because the two queries went to different coordinators). * - * Note that schema agreement never succeeds in a mixed-version cluster (it would be challenging - * because the way the schema version is computed varies across server versions); the assumption - * is that schema updates are unlikely to happen during a rolling upgrade anyway. + * Note that schema agreement never succeeds in a mixed-version cluster (it would be challenging because the way the schema version is + * computed varies across server versions); the assumption is that schema updates are unlikely to happen during a rolling upgrade anyway. * - * @param interval The interval between each attempt. - * @param timeout The timeout after which schema agreement fails. - * If this is set to 0, schema agreement is skipped and will always fail. - * @param warnOnFailure Whether to log a warning if schema agreement fails. - * You might want to change this if you've set the timeout to 0. + * @param interval + * The interval between each attempt. + * @param timeout + * The timeout after which schema agreement fails. If this is set to 0, schema agreement is skipped and will always fail. + * @param warnOnFailure + * Whether to log a warning if schema agreement fails. You might want to change this if you've set the timeout to 0. */ final case class SchemaAgreementConfig( interval: Duration = SchemaAgreementConfig.Default.interval, @@ -661,7 +717,8 @@ object SchemaAgreementConfig { val Default: SchemaAgreementConfig = SchemaAgreementConfig(200.milliseconds, 10.seconds, true) } -/** @param prepareOnAllNodes Overridable in a profile. +/** @param prepareOnAllNodes + * Overridable in a profile. */ final case class PreparedStatementsConfig( prepareOnAllNodes: Boolean = PreparedStatementsConfig.Default.prepareOnAllNodes, @@ -674,33 +731,34 @@ object PreparedStatementsConfig { /** How the driver replicates prepared statements on a node that just came back up or joined the cluster. * - * @param enabled Whether the driver tries to prepare on new nodes at all. + * @param enabled + * Whether the driver tries to prepare on new nodes at all. * - * The reason why you might want to disable it is to optimize reconnection time when you - * believe nodes often get marked down because of temporary network issues, rather than the - * node really crashing. 
In that case, the node still has prepared statements in its cache when - * the driver reconnects, so re-preparing is redundant. + * The reason why you might want to disable it is to optimize reconnection time when you believe nodes often get marked down because of + * temporary network issues, rather than the node really crashing. In that case, the node still has prepared statements in its cache when + * the driver reconnects, so re-preparing is redundant. * - * On the other hand, if that assumption turns out to be wrong and the node had really - * restarted, its prepared statement cache is empty (before CASSANDRA-8831), and statements - * need to be re-prepared on the fly the first time they get executed; this causes a - * performance penalty (one extra roundtrip to resend the query to prepare, and another to - * retry the execution). - * @param checkSystemTable Whether to check `system.prepared_statements` on the target node before repreparing. + * On the other hand, if that assumption turns out to be wrong and the node had really restarted, its prepared statement cache is empty + * (before CASSANDRA-8831), and statements need to be re-prepared on the fly the first time they get executed; this causes a performance + * penalty (one extra roundtrip to resend the query to prepare, and another to retry the execution). + * @param checkSystemTable + * Whether to check `system.prepared_statements` on the target node before repreparing. * - * This table exists since CASSANDRA-8831 (merged in 3.10). It stores the statements already - * prepared on the node, and preserves them across restarts. + * This table exists since CASSANDRA-8831 (merged in 3.10). It stores the statements already prepared on the node, and preserves them + * across restarts. * - * Checking the table first avoids repreparing unnecessarily, but the cost of the query is not - * always worth the improvement, especially if the number of statements is low. + * Checking the table first avoids repreparing unnecessarily, but the cost of the query is not always worth the improvement, especially if + * the number of statements is low. * - * If the table does not exist, or the query fails for any other reason, the error is ignored - * and the driver proceeds to reprepare statements according to the other parameters. - * @param maxStatements The maximum number of statements that should be reprepared. 0 or a negative value means no - * limit. - * @param maxParallelism The maximum number of concurrent requests when repreparing. - * @param timeout The request timeout. This applies both to querying the system.prepared_statements table (if - * relevant), and the prepare requests themselves. + * If the table does not exist, or the query fails for any other reason, the error is ignored and the driver proceeds to reprepare + * statements according to the other parameters. + * @param maxStatements + * The maximum number of statements that should be reprepared. 0 or a negative value means no limit. + * @param maxParallelism + * The maximum number of concurrent requests when repreparing. + * @param timeout + * The request timeout. This applies both to querying the system.prepared_statements table (if relevant), and the prepare requests + * themselves. */ final case class ReprepareOnUpConfig( enabled: Boolean = ReprepareOnUpConfig.Default.enabled, @@ -716,18 +774,19 @@ object ReprepareOnUpConfig { /** Options related to the Netty event loop groups used internally by the driver. 
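  *
  * A minimal sketch, assuming the defaults as a starting point (the group size below is an illustrative value, not a recommendation):
  * {{{
  * // daemon event-loop threads with a slightly larger I/O group
  * val netty = NettyConfig.Default.copy(daemon = true, ioGroup = GroupConfig(4, ShutdownConfig.Default))
  * }}}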
* - * @param daemon Whether the threads created by the driver should be daemon threads. - * This will apply to the threads in io-group, admin-group, and the timer thread. - * @param ioGroup The event loop group used for I/O operations (reading and writing to Cassandra nodes). - * By default, threads in this group are named after the session name, "-io-" and an incrementing - * counter, for example "s0-io-0". - * @param adminGroup The event loop group used for admin tasks not related to request I/O (handle cluster events, - * refresh metadata, schedule reconnections, etc.) - * By default, threads in this group are named after the session name, "-admin-" and an - * incrementing counter, for example "s0-admin-0". - * @param timer The timer used for scheduling request timeouts and speculative executions - * By default, this thread is named after the session name and "-timer-0", for example - * "s0-timer-0". + * @param daemon + * Whether the threads created by the driver should be daemon threads. This will apply to the threads in io-group, admin-group, and the + * timer thread. + * @param ioGroup + * The event loop group used for I/O operations (reading and writing to Cassandra nodes). By default, threads in this group are named + * after the session name, "-io-" and an incrementing counter, for example "s0-io-0". + * @param adminGroup + * The event loop group used for admin tasks not related to request I/O (handle cluster events, refresh metadata, schedule reconnections, + * etc.) By default, threads in this group are named after the session name, "-admin-" and an incrementing counter, for example + * "s0-admin-0". + * @param timer + * The timer used for scheduling request timeouts and speculative executions By default, this thread is named after the session name and + * "-timer-0", for example "s0-timer-0". */ final case class NettyConfig( daemon: Boolean = NettyConfig.Default.daemon, @@ -742,11 +801,11 @@ object NettyConfig { /** The event loop group used for I/O operations (reading and writing to Cassandra nodes). * - * @param size The number of threads. - * If this is set to 0, the driver will use `Runtime.getRuntime().availableProcessors() * 2`. - * @param shutdown The options to shut down the event loop group gracefully when the driver closes. If a task - * gets submitted during the quiet period, it is accepted and the quiet period starts over. - * The timeout limits the overall shutdown time. + * @param size + * The number of threads. If this is set to 0, the driver will use `Runtime.getRuntime().availableProcessors() * 2`. + * @param shutdown + * The options to shut down the event loop group gracefully when the driver closes. If a task gets submitted during the quiet period, it + * is accepted and the quiet period starts over. The timeout limits the overall shutdown time. */ final case class GroupConfig(size: Int, shutdown: ShutdownConfig) @@ -754,8 +813,8 @@ object GroupConfig { val Default: GroupConfig = GroupConfig(2, ShutdownConfig.Default) } -/** The options to shut down the event loop group gracefully when the driver closes. If a task - * gets submitted during the quiet period, it is accepted and the quiet period starts over. +/** The options to shut down the event loop group gracefully when the driver closes. If a task gets submitted during the quiet period, it is + * accepted and the quiet period starts over. * * The timeout limits the overall shutdown time. 
*/ @@ -767,27 +826,23 @@ object ShutdownConfig { /** The timer used for scheduling request timeouts and speculative executions. * - * @param tickDuration The timer tick duration. - * This is how frequent the timer should wake up to check for timed-out tasks or speculative - * executions. Lower resolution (i.e. longer durations) will leave more CPU cycles for running - * I/O operations at the cost of precision of exactly when a request timeout will expire or a - * speculative execution will run. Higher resolution (i.e. shorter durations) will result in - * more precise request timeouts and speculative execution scheduling, but at the cost of CPU - * cycles taken from I/O operations, which could lead to lower overall I/O throughput. - * - * The default value is 100 milliseconds, which is a comfortable value for most use cases. - * However if you are using more agressive timeouts or speculative execution delays, then you - * should lower the timer tick duration as well, so that its value is always equal to or lesser - * than the timeout duration and/or speculative execution delay you intend to use. - * - * Note for Windows users: avoid setting this to aggressive values, that is, anything under 100 - * milliseconds; doing so is known to cause extreme CPU usage. Also, the tick duration must be - * a multiple of 10 under Windows; if that is not the case, it will be automatically rounded - * down to the nearest multiple of 10 (e.g. 99 milliseconds will be rounded down to 90 - * milliseconds). - * @param ticksPerWheel Number of ticks in a Timer wheel. The underlying implementation uses Netty's - * HashedWheelTimer, which uses hashes to arrange the timeouts. This effectively controls the - * size of the timer wheel. + * @param tickDuration + * The timer tick duration. This is how frequent the timer should wake up to check for timed-out tasks or speculative executions. Lower + * resolution (i.e. longer durations) will leave more CPU cycles for running I/O operations at the cost of precision of exactly when a + * request timeout will expire or a speculative execution will run. Higher resolution (i.e. shorter durations) will result in more + * precise request timeouts and speculative execution scheduling, but at the cost of CPU cycles taken from I/O operations, which could + * lead to lower overall I/O throughput. + * + * The default value is 100 milliseconds, which is a comfortable value for most use cases. However if you are using more agressive timeouts + * or speculative execution delays, then you should lower the timer tick duration as well, so that its value is always equal to or lesser + * than the timeout duration and/or speculative execution delay you intend to use. + * + * Note for Windows users: avoid setting this to aggressive values, that is, anything under 100 milliseconds; doing so is known to cause + * extreme CPU usage. Also, the tick duration must be a multiple of 10 under Windows; if that is not the case, it will be automatically + * rounded down to the nearest multiple of 10 (e.g. 99 milliseconds will be rounded down to 90 milliseconds). + * @param ticksPerWheel + * Number of ticks in a Timer wheel. The underlying implementation uses Netty's HashedWheelTimer, which uses hashes to arrange the + * timeouts. This effectively controls the size of the timer wheel. 
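  *
  * Illustrative only (the value is an assumption, not a recommended default): a finer tick paired with aggressive request timeouts.
  * {{{
  * // keep the tick at or below the smallest timeout or speculative-execution delay in use; mind the Windows caveat above
  * val timer = TimerConfig.Default.copy(tickDuration = 10.milliseconds)
  * }}}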
*/ final case class TimerConfig( tickDuration: Duration = TimerConfig.Default.tickDuration, @@ -798,10 +853,11 @@ object TimerConfig { val Default: TimerConfig = TimerConfig(100.milliseconds, 2048) } -/** The component that coalesces writes on the connections. - * This is exposed mainly to facilitate tuning during development. You shouldn't have to adjust this. +/** The component that coalesces writes on the connections. This is exposed mainly to facilitate tuning during development. You shouldn't + * have to adjust this. * - * @param rescheduleInterval The reschedule interval. + * @param rescheduleInterval + * The reschedule interval. */ final case class CoalescerConfig(rescheduleInterval: Duration = CoalescerConfig.Default.rescheduleInterval) diff --git a/cassandra-datastax-driver/src/main/scala/com/avast/sst/datastax/config/basic.scala b/cassandra-datastax-driver/src/main/scala/com/avast/sst/datastax/config/basic.scala index 096e0e04e..94ff32dc4 100644 --- a/cassandra-datastax-driver/src/main/scala/com/avast/sst/datastax/config/basic.scala +++ b/cassandra-datastax-driver/src/main/scala/com/avast/sst/datastax/config/basic.scala @@ -4,46 +4,43 @@ import scala.concurrent.duration._ /** Basic datastax driver configuration. * - * @param contactPoints The contact points to use for the initial connection to the cluster. - * These are addresses of Cassandra nodes that the driver uses to discover the cluster topology. - * - * Only one contact point is required (the driver will retrieve the address of the other nodes - * automatically), but it is usually a good idea to provide more than one contact point, because if - * that single contact point is unavailable, the driver cannot initialize itself correctly. - * This must be a list of strings with each contact point specified as "host:port". If the host is - * a DNS name that resolves to multiple A-records, all the corresponding addresses will be used. Do - * not use "localhost" as the host name (since it resolves to both IPv4 and IPv6 addresses on some - * platforms). - * - * Note that Cassandra 3 and below requires all nodes in a cluster to share the same port (see - * CASSANDRA-7544). - * @param sessionName A name that uniquely identifies the driver instance created from this configuration. This is - * used as a prefix for log messages and metrics. - * - * If this option is absent, the driver will generate an identifier composed of the letter 's' - * followed by an incrementing counter. If you provide a different value, try to keep it short to - * keep the logs readable. Also, make sure it is unique: reusing the same value will not break the - * driver, but it will mix up the logs and metrics. - * @param sessionKeyspace The name of the keyspace that the session should initially be connected to. - * - * This expects the same format as in a CQL query: case-sensitive names must be quoted. - * - * If this option is absent, the session won't be connected to any keyspace, and you'll have to - * either qualify table names in your queries, or use the per-query keyspace feature available in - * Cassandra 4 and above (see `com.datastax.oss.driver.api.core.session.Request.getKeyspace()`). - * @param request This configures basic request properties such as timeout, page size etc. - * @param loadBalancingPolicy The policy that decides the "query plan" for each query; that is, which nodes to try as - * coordinators, and in which order. - * - * Overridable in a profile. 
Note that the driver creates as few instances as possible: if a - * named profile inherits from the default profile, or if two sibling profiles have the exact - * same configuration, they will share a single policy instance at runtime. - * If there are multiple load balancing policies in a single driver instance, they work together - * in the following way: - * - each request gets a query plan from its profile's policy (or the default policy if the - * request has no profile, or the profile does not override the policy). - * - when the policies assign distances to nodes, the driver uses the closest assigned distance - * for any given node. + * @param contactPoints + * The contact points to use for the initial connection to the cluster. These are addresses of Cassandra nodes that the driver uses to + * discover the cluster topology. + * + * Only one contact point is required (the driver will retrieve the address of the other nodes automatically), but it is usually a good + * idea to provide more than one contact point, because if that single contact point is unavailable, the driver cannot initialize itself + * correctly. This must be a list of strings with each contact point specified as "host:port". If the host is a DNS name that resolves to + * multiple A-records, all the corresponding addresses will be used. Do not use "localhost" as the host name (since it resolves to both + * IPv4 and IPv6 addresses on some platforms). + * + * Note that Cassandra 3 and below requires all nodes in a cluster to share the same port (see CASSANDRA-7544). + * @param sessionName + * A name that uniquely identifies the driver instance created from this configuration. This is used as a prefix for log messages and + * metrics. + * + * If this option is absent, the driver will generate an identifier composed of the letter 's' followed by an incrementing counter. If you + * provide a different value, try to keep it short to keep the logs readable. Also, make sure it is unique: reusing the same value will not + * break the driver, but it will mix up the logs and metrics. + * @param sessionKeyspace + * The name of the keyspace that the session should initially be connected to. + * + * This expects the same format as in a CQL query: case-sensitive names must be quoted. + * + * If this option is absent, the session won't be connected to any keyspace, and you'll have to either qualify table names in your queries, + * or use the per-query keyspace feature available in Cassandra 4 and above (see + * `com.datastax.oss.driver.api.core.session.Request.getKeyspace()`). + * @param request + * This configures basic request properties such as timeout, page size etc. + * @param loadBalancingPolicy + * The policy that decides the "query plan" for each query; that is, which nodes to try as coordinators, and in which order. + * + * Overridable in a profile. Note that the driver creates as few instances as possible: if a named profile inherits from the default + * profile, or if two sibling profiles have the exact same configuration, they will share a single policy instance at runtime. If there are + * multiple load balancing policies in a single driver instance, they work together in the following way: + * - each request gets a query plan from its profile's policy (or the default policy if the request has no profile, or the profile does + * not override the policy). + * - when the policies assign distances to nodes, the driver uses the closest assigned distance for any given node. 
*/ final case class BasicConfig( contactPoints: List[String] = BasicConfig.Default.contactPoints, @@ -60,38 +57,34 @@ object BasicConfig { /** Request configuration. * - * @param timeout How long the driver waits for a request to complete. This is a global limit on the duration of - * a `session.execute()` call, including any internal retries the driver might do. - * - * By default, this value is set pretty high to ensure that DDL queries don't time out, in order - * to provide the best experience for new users trying the driver with the out-of-the-box - * configuration. - * For any serious deployment, we recommend that you use separate configuration profiles for DDL - * and DML; you can then set the DML timeout much lower (down to a few milliseconds if needed). - * - * Note that, because timeouts are scheduled on the driver's timer thread, the duration specified - * here must be greater than the timer tick duration defined by the - * advanced.netty.timer.tick-duration setting (see below). If that is not the case, timeouts will - * not be triggered as timely as desired. - * - * Overridable in a profile. - * @param consistency The consistency level. - * Overridable in a profile. - * @param pageSize The page size. This controls how many rows will be retrieved simultaneously in a single - * network roundtrip (the goal being to avoid loading too many results in memory at the same - * time). If there are more results, additional requests will be used to retrieve them (either - * automatically if you iterate with the sync API, or explicitly with the async API's - * fetchNextPage method). - * - * If the value is 0 or negative, it will be ignored and the request will not be paged. - * - * Overridable in a profile. - * @param serialConsistency The serial consistency level. - * The allowed values are SERIAL and LOCAL_SERIAL. - * Overridable in a profile. - * @param defaultIdempotence The default idempotence of a request, that will be used for all `Request` instances where - * `isIdempotent()` returns null. - * Overridable in a profile. + * @param timeout + * How long the driver waits for a request to complete. This is a global limit on the duration of a `session.execute()` call, including + * any internal retries the driver might do. + * + * By default, this value is set pretty high to ensure that DDL queries don't time out, in order to provide the best experience for new + * users trying the driver with the out-of-the-box configuration. For any serious deployment, we recommend that you use separate + * configuration profiles for DDL and DML; you can then set the DML timeout much lower (down to a few milliseconds if needed). + * + * Note that, because timeouts are scheduled on the driver's timer thread, the duration specified here must be greater than the timer tick + * duration defined by the advanced.netty.timer.tick-duration setting (see below). If that is not the case, timeouts will not be triggered + * as timely as desired. + * + * Overridable in a profile. + * @param consistency + * The consistency level. Overridable in a profile. + * @param pageSize + * The page size. This controls how many rows will be retrieved simultaneously in a single network roundtrip (the goal being to avoid + * loading too many results in memory at the same time). If there are more results, additional requests will be used to retrieve them + * (either automatically if you iterate with the sync API, or explicitly with the async API's fetchNextPage method). 
+ * + * If the value is 0 or negative, it will be ignored and the request will not be paged. + * + * Overridable in a profile. + * @param serialConsistency + * The serial consistency level. The allowed values are SERIAL and LOCAL_SERIAL. Overridable in a profile. + * @param defaultIdempotence + * The default idempotence of a request, that will be used for all `Request` instances where `isIdempotent()` returns null. Overridable + * in a profile. */ final case class BasicRequestConfig( timeout: Duration = BasicRequestConfig.Default.timeout, @@ -108,20 +101,21 @@ object BasicRequestConfig { /** The policy that decides the "query plan" for each query; that is, which nodes to try as coordinators, and in which order. * - * @param `class` The class of the policy. If it is not qualified, the driver assumes that it resides in the - * package `com.datastax.oss.driver.internal.core.loadbalancing`. - * @param localDatacenter The datacenter that is considered "local": the default policy will only include nodes from - * this datacenter in its query plans. + * @param `class` + * The class of the policy. If it is not qualified, the driver assumes that it resides in the package + * `com.datastax.oss.driver.internal.core.loadbalancing`. + * @param localDatacenter + * The datacenter that is considered "local": the default policy will only include nodes from this datacenter in its query plans. * - * This option can only be absent if you specified no contact points: in that case, the driver - * defaults to 127.0.0.1:9042, and that node's datacenter is used as the local datacenter. + * This option can only be absent if you specified no contact points: in that case, the driver defaults to 127.0.0.1:9042, and that node's + * datacenter is used as the local datacenter. * - * As soon as you provide contact points (either through the configuration or through the cluster - * builder), you must define the local datacenter explicitly, and initialization will fail if - * this property is absent. In addition, all contact points should be from this datacenter; - * warnings will be logged for nodes that are from a different one. + * As soon as you provide contact points (either through the configuration or through the cluster builder), you must define the local + * datacenter explicitly, and initialization will fail if this property is absent. In addition, all contact points should be from this + * datacenter; warnings will be logged for nodes that are from a different one. * - * @param filter A custom filter to include/exclude nodes. + * @param filter + * A custom filter to include/exclude nodes. */ final case class LoadBalancingPolicyConfig( `class`: String = LoadBalancingPolicyConfig.Default.`class`, @@ -135,12 +129,12 @@ object LoadBalancingPolicyConfig { /** A custom filter to include/exclude nodes. * - * The predicate's `test(Node)` method will be invoked each time the policy processes a - * topology or state change: if it returns false, the node will be set at distance IGNORED - * (meaning the driver won't ever connect to it), and never included in any query plan. + * The predicate's `test(Node)` method will be invoked each time the policy processes a topology or state change: if it returns false, the + * node will be set at distance IGNORED (meaning the driver won't ever connect to it), and never included in any query plan. * - * @param `class` it must be the fully-qualified name of a class that implements `java.util.function.Predicate`, - * and has a public constructor taking a single `DriverContext` argument. 
+ * @param `class` + * it must be the fully-qualified name of a class that implements `java.util.function.Predicate`, and has a public constructor + * taking a single `DriverContext` argument. */ final case class FilterConfig(`class`: String) diff --git a/cats-effect/src/main/scala/com/avast/sst/catseffect/TimeUtils.scala b/cats-effect/src/main/scala/com/avast/sst/catseffect/TimeUtils.scala index 5b8d362d7..105c76e2d 100644 --- a/cats-effect/src/main/scala/com/avast/sst/catseffect/TimeUtils.scala +++ b/cats-effect/src/main/scala/com/avast/sst/catseffect/TimeUtils.scala @@ -22,9 +22,8 @@ object TimeUtils { } yield result } - /** Measures the time it takes the effect to finish and records it using the provided function. It distinguishes between successful - * and failure state. - * Please note, that in case of the effect cancellation the `record` is not invoked at all. + /** Measures the time it takes the effect to finish and records it using the provided function. It distinguishes between successful and + * failure state. Please note, that in case of the effect cancellation the `record` is not invoked at all. */ def timeCase[F[_], A](f: F[A])(record: Either[Duration, Duration] => F[Unit])(implicit F: Bracket[F, Throwable], C: Clock[F]): F[A] = { def calculateAndRecordAs(start: Long)(wrap: Duration => Either[Duration, Duration]): F[Unit] = { diff --git a/cats-effect/src/main/scala/com/avast/sst/catseffect/syntax/TimeSyntax.scala b/cats-effect/src/main/scala/com/avast/sst/catseffect/syntax/TimeSyntax.scala index baa404624..fdfc26010 100644 --- a/cats-effect/src/main/scala/com/avast/sst/catseffect/syntax/TimeSyntax.scala +++ b/cats-effect/src/main/scala/com/avast/sst/catseffect/syntax/TimeSyntax.scala @@ -20,9 +20,8 @@ object TimeSyntax { /** Measures the time it takes the effect to finish and records it using the provided function. */ def time(record: Duration => F[Unit])(implicit F: Bracket[F, Throwable], C: Clock[F]): F[A] = TimeUtils.time(f)(record) - /** Measures the time it takes the effect to finish and records it using the provided function. It distinguishes between successful - * and failure state. - * Please note, that in case of the effect cancellation the `record` is not invoked at all. + /** Measures the time it takes the effect to finish and records it using the provided function. It distinguishes between successful and + * failure state. Please note, that in case of the effect cancellation the `record` is not invoked at all. */ def timeCase(record: Either[Duration, Duration] => F[Unit])(implicit F: Bracket[F, Throwable], C: Clock[F]): F[A] = { TimeUtils.timeCase(f)(record) diff --git a/doobie-hikari/src/main/scala/com/avast/sst/doobie/DoobieHikariModule.scala b/doobie-hikari/src/main/scala/com/avast/sst/doobie/DoobieHikariModule.scala index 72f49e14d..6f052a8e0 100644 --- a/doobie-hikari/src/main/scala/com/avast/sst/doobie/DoobieHikariModule.scala +++ b/doobie-hikari/src/main/scala/com/avast/sst/doobie/DoobieHikariModule.scala @@ -16,7 +16,8 @@ object DoobieHikariModule { /** Makes [[doobie.hikari.HikariTransactor]] initialized with the given config. * - * @param boundedConnectExecutionContext [[scala.concurrent.ExecutionContext]] used for creating connections (should be bounded!) + * @param boundedConnectExecutionContext + * [[scala.concurrent.ExecutionContext]] used for creating connections (should be bounded!) 
*/ def make[F[_]: Async]( config: DoobieHikariConfig, diff --git a/grpc-server/src/main/scala/com/avast/sst/grpc/server/GrpcServerModule.scala b/grpc-server/src/main/scala/com/avast/sst/grpc/server/GrpcServerModule.scala index 6f9835e08..4781fccbe 100644 --- a/grpc-server/src/main/scala/com/avast/sst/grpc/server/GrpcServerModule.scala +++ b/grpc-server/src/main/scala/com/avast/sst/grpc/server/GrpcServerModule.scala @@ -11,9 +11,12 @@ object GrpcServerModule { /** Makes [[io.grpc.Server]] (Netty) initialized with the given config, services and interceptors. * - * @param services service implementations to be added to the handler registry - * @param executionContext executor to be used for the server - * @param interceptors that are run for all the services + * @param services + * service implementations to be added to the handler registry + * @param executionContext + * executor to be used for the server + * @param interceptors + * that are run for all the services */ def make[F[_]: Sync]( config: GrpcServerConfig, diff --git a/http4s-client-blaze/src/main/scala/com/avast/sst/http4s/client/Http4sBlazeClientModule.scala b/http4s-client-blaze/src/main/scala/com/avast/sst/http4s/client/Http4sBlazeClientModule.scala index 9f4fb61c2..ee97de78c 100644 --- a/http4s-client-blaze/src/main/scala/com/avast/sst/http4s/client/Http4sBlazeClientModule.scala +++ b/http4s-client-blaze/src/main/scala/com/avast/sst/http4s/client/Http4sBlazeClientModule.scala @@ -10,7 +10,8 @@ object Http4sBlazeClientModule { /** Makes [[org.http4s.client.Client]] (Blaze) initialized with the given config. * - * @param executionContext callback handling [[scala.concurrent.ExecutionContext]] + * @param executionContext + * callback handling [[scala.concurrent.ExecutionContext]] */ def make[F[_]: ConcurrentEffect]( config: Http4sBlazeClientConfig, diff --git a/http4s-client-monix-catnap/src/main/scala/com/avast/sst/http4s/client/monix/catnap/Http4sClientCircuitBreakerModule.scala b/http4s-client-monix-catnap/src/main/scala/com/avast/sst/http4s/client/monix/catnap/Http4sClientCircuitBreakerModule.scala index 378ee5f50..0535dd27d 100644 --- a/http4s-client-monix-catnap/src/main/scala/com/avast/sst/http4s/client/monix/catnap/Http4sClientCircuitBreakerModule.scala +++ b/http4s-client-monix-catnap/src/main/scala/com/avast/sst/http4s/client/monix/catnap/Http4sClientCircuitBreakerModule.scala @@ -11,8 +11,8 @@ object Http4sClientCircuitBreakerModule { /** Wraps [[org.http4s.client.Client]] with the given [[monix.catnap.CircuitBreaker]]. * - * The circuit breaker is special in that it also catches any HTTP responses considered as server failures - * according to the [[com.avast.sst.http4s.client.monix.catnap.HttpStatusClassifier]]. + * The circuit breaker is special in that it also catches any HTTP responses considered as server failures according to the + * [[com.avast.sst.http4s.client.monix.catnap.HttpStatusClassifier]]. 
*/ def make[F[_]: Sync]( client: Client[F], @@ -26,7 +26,7 @@ object Http4sClientCircuitBreakerModule { Client[F] { request => val raisedInternal = client.run(request).allocated.flatMap { case tuple @ (response, _) if !httpStatusClassifier.isServerFailure(response.status) => F.pure(tuple) - case (response, close) => F.raiseError[(Response[F], F[Unit])](new ServerFailure(response, close)) + case (response, close) => F.raiseError[(Response[F], F[Unit])](new ServerFailure(response, close)) } val lifted = circuitBreaker.protect(raisedInternal).recover { case serverFailure: ServerFailure => (serverFailure.response, serverFailure.close) diff --git a/http4s-server-blaze/src/main/scala/com/avast/sst/http4s/server/Http4sBlazeServerModule.scala b/http4s-server-blaze/src/main/scala/com/avast/sst/http4s/server/Http4sBlazeServerModule.scala index a09c85e2d..41b3b5d0e 100644 --- a/http4s-server-blaze/src/main/scala/com/avast/sst/http4s/server/Http4sBlazeServerModule.scala +++ b/http4s-server-blaze/src/main/scala/com/avast/sst/http4s/server/Http4sBlazeServerModule.scala @@ -13,7 +13,8 @@ object Http4sBlazeServerModule { /** Makes [[org.http4s.server.Server]] (Blaze) initialized with the given config and [[org.http4s.HttpApp]]. * - * @param executionContext callback handling [[scala.concurrent.ExecutionContext]] + * @param executionContext + * callback handling [[scala.concurrent.ExecutionContext]] */ def make[F[_]: ConcurrentEffect: Timer]( config: Http4sBlazeServerConfig, diff --git a/http4s-server-micrometer/src/main/scala/com/avast/sst/http4s/server/micrometer/MicrometerHttp4sServerMetricsModule.scala b/http4s-server-micrometer/src/main/scala/com/avast/sst/http4s/server/micrometer/MicrometerHttp4sServerMetricsModule.scala index c995a3153..b2c6b51c8 100644 --- a/http4s-server-micrometer/src/main/scala/com/avast/sst/http4s/server/micrometer/MicrometerHttp4sServerMetricsModule.scala +++ b/http4s-server-micrometer/src/main/scala/com/avast/sst/http4s/server/micrometer/MicrometerHttp4sServerMetricsModule.scala @@ -11,8 +11,8 @@ class MicrometerHttp4sServerMetricsModule[F[_]](val serverMetrics: HttpRoutes[F] object MicrometerHttp4sServerMetricsModule { - /** Makes [[com.avast.sst.http4s.server.micrometer.MicrometerHttp4sServerMetricsModule]] that can be used to setup monitoring - * of the whole HTTP server and individual routes. + /** Makes [[com.avast.sst.http4s.server.micrometer.MicrometerHttp4sServerMetricsModule]] that can be used to setup monitoring of the whole + * HTTP server and individual routes. */ def make[F[_]: Effect](meterRegistry: MeterRegistry, blocker: Blocker, clock: Clock[F]): F[MicrometerHttp4sServerMetricsModule[F]] = { implicit val c: Clock[F] = clock diff --git a/http4s-server-micrometer/src/main/scala/com/avast/sst/http4s/server/micrometer/RouteMetrics.scala b/http4s-server-micrometer/src/main/scala/com/avast/sst/http4s/server/micrometer/RouteMetrics.scala index 5661e91d4..3bb0b8958 100644 --- a/http4s-server-micrometer/src/main/scala/com/avast/sst/http4s/server/micrometer/RouteMetrics.scala +++ b/http4s-server-micrometer/src/main/scala/com/avast/sst/http4s/server/micrometer/RouteMetrics.scala @@ -14,7 +14,8 @@ class RouteMetrics[F[_]: Sync](meterRegistry: MeterRegistry) { /** Wraps a single route with the usual metrics (count, times, HTTP status codes). 
* - * @param name will be used in metric name + * @param name + * will be used in metric name */ def wrap(name: String)(route: => F[Response[F]]): F[Response[F]] = { for { diff --git a/http4s-server/src/main/scala/com/avast/sst/http4s/server/middleware/CorrelationIdMiddleware.scala b/http4s-server/src/main/scala/com/avast/sst/http4s/server/middleware/CorrelationIdMiddleware.scala index 63372b51c..7ed9790ec 100644 --- a/http4s-server/src/main/scala/com/avast/sst/http4s/server/middleware/CorrelationIdMiddleware.scala +++ b/http4s-server/src/main/scala/com/avast/sst/http4s/server/middleware/CorrelationIdMiddleware.scala @@ -11,8 +11,8 @@ import org.typelevel.vault.Key import java.util.UUID -/** Provides correlation ID functionality. Either generates new correlation ID for a request or takes the one sent in HTTP header - * and puts it to [[org.http4s.Request]] attributes. It is also filled into HTTP response header. +/** Provides correlation ID functionality. Either generates new correlation ID for a request or takes the one sent in HTTP header and puts + * it to [[org.http4s.Request]] attributes. It is also filled into HTTP response header. * * Use method `retrieveCorrelationId` to get the value from request attributes. */ diff --git a/jvm-micrometer/src/main/scala/com/avast/sst/jvm/micrometer/MicrometerJvmModule.scala b/jvm-micrometer/src/main/scala/com/avast/sst/jvm/micrometer/MicrometerJvmModule.scala index 2a948c04e..f40e8ded3 100644 --- a/jvm-micrometer/src/main/scala/com/avast/sst/jvm/micrometer/MicrometerJvmModule.scala +++ b/jvm-micrometer/src/main/scala/com/avast/sst/jvm/micrometer/MicrometerJvmModule.scala @@ -7,7 +7,8 @@ import io.micrometer.core.instrument.binder.system.ProcessorMetrics object MicrometerJvmModule { - /** Sets up publishing of JVM metrics (class loading, GC, memory, CPU, ...) into the given [[io.micrometer.core.instrument.MeterRegistry]] */ + /** Sets up publishing of JVM metrics (class loading, GC, memory, CPU, ...) into the given [[io.micrometer.core.instrument.MeterRegistry]] + */ def make[F[_]: Sync](registry: MeterRegistry): F[Unit] = { Sync[F].delay { new ClassLoaderMetrics().bindTo(registry) diff --git a/jvm/src/main/scala/com/avast/sst/jvm/execution/ConfigurableThreadFactory.scala b/jvm/src/main/scala/com/avast/sst/jvm/execution/ConfigurableThreadFactory.scala index b8cac20c9..502c0e66e 100644 --- a/jvm/src/main/scala/com/avast/sst/jvm/execution/ConfigurableThreadFactory.scala +++ b/jvm/src/main/scala/com/avast/sst/jvm/execution/ConfigurableThreadFactory.scala @@ -7,8 +7,8 @@ import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory import java.util.concurrent.atomic.AtomicLong import java.util.concurrent.{ForkJoinPool, ForkJoinWorkerThread, ThreadFactory} -/** Thread factory (both [[java.util.concurrent.ThreadFactory]] and [[java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory]]) - * that creates new threads according to the provided [[com.avast.sst.jvm.execution.ConfigurableThreadFactory.Config]]. +/** Thread factory (both [[java.util.concurrent.ThreadFactory]] and [[java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory]]) that + * creates new threads according to the provided [[com.avast.sst.jvm.execution.ConfigurableThreadFactory.Config]]. 
*/ class ConfigurableThreadFactory(config: Config) extends ThreadFactory with ForkJoinWorkerThreadFactory { @@ -32,7 +32,8 @@ class ConfigurableThreadFactory(config: Config) extends ThreadFactory with ForkJ object ConfigurableThreadFactory { - /** @param nameFormat Formatted with long number, e.g. my-thread-%02d + /** @param nameFormat + * Formatted with long number, e.g. my-thread-%02d */ final case class Config( nameFormat: Option[String] = None, diff --git a/jvm/src/main/scala/com/avast/sst/jvm/execution/ExecutorModule.scala b/jvm/src/main/scala/com/avast/sst/jvm/execution/ExecutorModule.scala index 5a3985dd8..024f0a5fd 100644 --- a/jvm/src/main/scala/com/avast/sst/jvm/execution/ExecutorModule.scala +++ b/jvm/src/main/scala/com/avast/sst/jvm/execution/ExecutorModule.scala @@ -55,8 +55,8 @@ object ExecutorModule { private final val DefaultBlockingExecutorConfig = ThreadPoolExecutorConfig(0, Int.MaxValue, allowCoreThreadTimeout = true) - /** Makes [[com.avast.sst.jvm.execution.ExecutorModule]] with default callback executor and extra [[cats.effect.Blocker]] executor - * for blocking operations. + /** Makes [[com.avast.sst.jvm.execution.ExecutorModule]] with default callback executor and extra [[cats.effect.Blocker]] executor for + * blocking operations. */ def makeDefault[F[_]: Sync]: Resource[F, ExecutorModule[F]] = { for { @@ -72,8 +72,8 @@ object ExecutorModule { } yield new ExecutorModule[F](numOfCpus, executor, blockingExecutor) } - /** Makes [[com.avast.sst.jvm.execution.ExecutorModule]] with the provided callback executor and extra [[cats.effect.Blocker]] - * executor for blocking operations. + /** Makes [[com.avast.sst.jvm.execution.ExecutorModule]] with the provided callback executor and extra [[cats.effect.Blocker]] executor + * for blocking operations. */ def makeFromExecutionContext[F[_]: Sync]( executor: ExecutionContext, @@ -85,8 +85,7 @@ object ExecutorModule { } yield new ExecutorModule[F](numOfCpus, executor, blockingExecutor) } - /** Makes [[com.avast.sst.jvm.execution.ExecutorModule]] with executor and extra [[cats.effect.Blocker]] executor - * for blocking operations. + /** Makes [[com.avast.sst.jvm.execution.ExecutorModule]] with executor and extra [[cats.effect.Blocker]] executor for blocking operations. */ def makeFromConfig[F[_]: Sync]( executorConfig: ThreadPoolExecutorConfig, @@ -100,8 +99,8 @@ object ExecutorModule { } yield new ExecutorModule[F](numOfCpus, executor, blockingExecutor) } - /** Makes [[com.avast.sst.jvm.execution.ExecutorModule]] with fork-join executor and extra [[cats.effect.Blocker]] executor - * for blocking operations. + /** Makes [[com.avast.sst.jvm.execution.ExecutorModule]] with fork-join executor and extra [[cats.effect.Blocker]] executor for blocking + * operations. */ def makeForkJoinFromConfig[F[_]: Sync]( executorConfig: ForkJoinPoolConfig, diff --git a/lettuce/src/main/scala/com/avast/sst/lettuce/LettuceModule.scala b/lettuce/src/main/scala/com/avast/sst/lettuce/LettuceModule.scala index 0c11a110e..2c11d25ea 100644 --- a/lettuce/src/main/scala/com/avast/sst/lettuce/LettuceModule.scala +++ b/lettuce/src/main/scala/com/avast/sst/lettuce/LettuceModule.scala @@ -12,7 +12,8 @@ import java.time.Duration object LettuceModule { - /** Makes [[io.lettuce.core.RedisClient]] initialized with the given config and optionally [[io.lettuce.core.resource.ClientResources]]. */ + /** Makes [[io.lettuce.core.RedisClient]] initialized with the given config and optionally [[io.lettuce.core.resource.ClientResources]]. 
+ */ def makeClient[F[_]: Sync](config: LettuceConfig, clientResources: Option[ClientResources] = None): Resource[F, RedisClient] = { lazy val create = clientResources match { case Some(resources) => RedisClient.create(resources) @@ -28,7 +29,9 @@ object LettuceModule { }(c => sync.delay(c.shutdown())) } - /** Makes [[io.lettuce.core.api.StatefulRedisConnection]] initialized with the given config and optionally [[io.lettuce.core.resource.ClientResources]]. */ + /** Makes [[io.lettuce.core.api.StatefulRedisConnection]] initialized with the given config and optionally + * [[io.lettuce.core.resource.ClientResources]]. + */ @SuppressWarnings(Array("scalafix:DisableSyntax.==", "scalafix:DisableSyntax.null")) def makeConnection[F[_]: Async, K, V]( config: LettuceConfig, diff --git a/sentry/src/main/scala/com/avast/sst/sentry/SentryModule.scala b/sentry/src/main/scala/com/avast/sst/sentry/SentryModule.scala index 51477734b..d0c8528b9 100644 --- a/sentry/src/main/scala/com/avast/sst/sentry/SentryModule.scala +++ b/sentry/src/main/scala/com/avast/sst/sentry/SentryModule.scala @@ -23,11 +23,11 @@ object SentryModule { }(_ => Sync[F].delay(Sentry.close())) } - /** Makes [[io.sentry.SentryClient]] initialized with the given config and overrides the `release` property - * with `Implementation-Title`@`Implementation-Version` from the `MANIFEST.MF` file inside the same JAR (package) as the `Main` class. + /** Makes [[io.sentry.SentryClient]] initialized with the given config and overrides the `release` property with + * `Implementation-Title`@`Implementation-Version` from the `MANIFEST.MF` file inside the same JAR (package) as the `Main` class. * - * This format is recommended by Sentry ([[https://docs.sentry.io/workflow/releases]]) - * because releases are global and must be differentiated. + * This format is recommended by Sentry ([[https://docs.sentry.io/workflow/releases]]) because releases are global and must be + * differentiated. */ def makeWithReleaseFromPackage[F[_]: Sync, Main: ClassTag](config: SentryConfig): Resource[F, Unit] = { for { diff --git a/ssl-config/src/main/scala/com/avast/sst/ssl/SslContextModule.scala b/ssl-config/src/main/scala/com/avast/sst/ssl/SslContextModule.scala index 37cd1b765..2724167f3 100644 --- a/ssl-config/src/main/scala/com/avast/sst/ssl/SslContextModule.scala +++ b/ssl-config/src/main/scala/com/avast/sst/ssl/SslContextModule.scala @@ -18,7 +18,8 @@ object SslContextModule { /** Initializes [[javax.net.ssl.SSLContext]] from the provided config. * - * @param withReference Whether we should use reference config of "ssl-config" library as well. + * @param withReference + * Whether we should use reference config of "ssl-config" library as well. */ def make[F[_]: Sync](config: Config, withReference: Boolean = true): F[SSLContext] = Sync[F].delay { @@ -34,10 +35,11 @@ object SslContextModule { /** Initializes [[javax.net.ssl.SSLContext]] from the provided config if it is enabled. * - * Expects a boolean value `enabled` at the root of the provided [[com.typesafe.config.Config]] - * which determines whether to initialize the context or not. + * Expects a boolean value `enabled` at the root of the provided [[com.typesafe.config.Config]] which determines whether to initialize + * the context or not. * - * @param withReference Whether we should use reference config of "ssl-config" library as well. + * @param withReference + * Whether we should use reference config of "ssl-config" library as well. 
*/ def makeIfEnabled[F[_]: Sync](config: Config, withReference: Boolean = true): F[Option[SSLContext]] = { if (config.hasPath(SslContextEnabledKey) && config.getBoolean(SslContextEnabledKey)) {