Commit 1dd505f

chore(deps): Update to Rust 1.71.0 (vectordotdev#18075)
* chore(deps): Update to Rust 1.71.0

Signed-off-by: Jesse Szwedko <[email protected]>

* clippy

Signed-off-by: Jesse Szwedko <[email protected]>

* fmt

Signed-off-by: Jesse Szwedko <[email protected]>

* clippy

Signed-off-by: Jesse Szwedko <[email protected]>

---------

Signed-off-by: Jesse Szwedko <[email protected]>
jszwedko authored Jul 25, 2023
1 parent 3968325 commit 1dd505f
Showing 22 changed files with 34 additions and 37 deletions.
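Most of the Rust changes below are mechanical fixes for one new warning: clippy in Rust 1.71.0 adds the `default_constructed_unit_structs` lint, which flags calling `::default()` on a unit struct that can simply be named. A minimal sketch of the before/after shape (a standalone example reusing a type name from this diff, not the real definition):

```rust
// A unit struct has no fields, so its `Default` impl has nothing to fill in.
#[derive(Default)]
struct EventPartitioner;

fn main() {
    // Before: flagged by clippy::default_constructed_unit_structs on 1.71.0.
    let _old = EventPartitioner::default();
    // After: name the unit value directly.
    let _new = EventPartitioner;
}
```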
Tiltfile (2 changes: 1 addition & 1 deletion)
@@ -7,7 +7,7 @@ load('ext://helm_resource', 'helm_resource', 'helm_repo')
docker_build(
ref='timberio/vector',
context='.',
-build_args={'RUST_VERSION': '1.70.0'},
+build_args={'RUST_VERSION': '1.71.0'},
dockerfile='tilt/Dockerfile'
)

lib/codecs/src/decoding/format/native.rs (2 changes: 1 addition & 1 deletion)
@@ -19,7 +19,7 @@ pub struct NativeDeserializerConfig;
impl NativeDeserializerConfig {
/// Build the `NativeDeserializer` from this configuration.
pub fn build(&self) -> NativeDeserializer {
-NativeDeserializer::default()
+NativeDeserializer
}

/// Return the type of event built by this deserializer.
lib/vector-common/src/finalizer.rs (4 changes: 2 additions & 2 deletions)
@@ -62,7 +62,7 @@ where
Self {
sender: Some(todo_tx),
flush: flush1,
-_phantom: PhantomData::default(),
+_phantom: PhantomData,
},
finalizer_stream(shutdown, todo_rx, S::default(), flush2).boxed(),
)
@@ -199,7 +199,7 @@ pub struct EmptyStream<T>(PhantomData<T>);

impl<T> Default for EmptyStream<T> {
fn default() -> Self {
-Self(PhantomData::default())
+Self(PhantomData)
}
}

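`PhantomData<T>` is itself a unit struct for any `T`, so the same lint applies to it; going through `Default` is redundant. A self-contained sketch of the `EmptyStream<T>` change above:

```rust
use std::marker::PhantomData;

// Zero-sized marker type carrying an otherwise unused type parameter.
struct EmptyStream<T>(PhantomData<T>);

impl<T> Default for EmptyStream<T> {
    fn default() -> Self {
        // `PhantomData` is a unit struct, so it is named directly;
        // `PhantomData::default()` would trip default_constructed_unit_structs.
        Self(PhantomData)
    }
}

fn main() {
    let _stream: EmptyStream<u32> = EmptyStream::default();
}
```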
lib/vector-config/src/schema/visitors/human_name.rs (14 changes: 7 additions & 7 deletions)
@@ -127,7 +127,7 @@ mod tests {
}
}));

-let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+let mut visitor = GenerateHumanFriendlyNameVisitor;
visitor.visit_root_schema(&mut actual_schema);

assert_schemas_eq(expected_schema, actual_schema);
@@ -150,7 +150,7 @@
}
}));

-let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+let mut visitor = GenerateHumanFriendlyNameVisitor;
visitor.visit_root_schema(&mut actual_schema);

assert_schemas_eq(expected_schema, actual_schema);
@@ -177,7 +177,7 @@
}
}));

-let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+let mut visitor = GenerateHumanFriendlyNameVisitor;
visitor.visit_root_schema(&mut actual_schema);

assert_schemas_eq(expected_schema, actual_schema);
@@ -204,7 +204,7 @@
}
}));

-let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+let mut visitor = GenerateHumanFriendlyNameVisitor;
visitor.visit_root_schema(&mut actual_schema);

assert_schemas_eq(expected_schema, actual_schema);
@@ -222,7 +222,7 @@

let expected_schema = actual_schema.clone();

-let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+let mut visitor = GenerateHumanFriendlyNameVisitor;
visitor.visit_root_schema(&mut actual_schema);

assert_schemas_eq(expected_schema, actual_schema);
@@ -244,7 +244,7 @@

let expected_schema = actual_schema.clone();

-let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+let mut visitor = GenerateHumanFriendlyNameVisitor;
visitor.visit_root_schema(&mut actual_schema);

assert_schemas_eq(expected_schema, actual_schema);
@@ -278,7 +278,7 @@ mod tests {
}
}));

-let mut visitor = GenerateHumanFriendlyNameVisitor::default();
+let mut visitor = GenerateHumanFriendlyNameVisitor;
visitor.visit_root_schema(&mut actual_schema);

assert_schemas_eq(expected_schema, actual_schema);
lib/vector-core/src/tls/incoming.rs (4 changes: 2 additions & 2 deletions)
@@ -263,7 +263,7 @@ impl MaybeTlsIncomingStream<TcpStream> {
where
F: FnOnce(Pin<&mut MaybeTlsStream<TcpStream>>, &mut Context) -> Poll<io::Result<T>>,
{
-let mut this = self.get_mut();
+let this = self.get_mut();
loop {
return match &mut this.state {
StreamState::Accepted(stream) => poll_fn(Pin::new(stream), cx),
@@ -307,7 +307,7 @@ impl AsyncWrite for MaybeTlsIncomingStream<TcpStream> {
}

fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
-let mut this = self.get_mut();
+let this = self.get_mut();
match &mut this.state {
StreamState::Accepted(stream) => match Pin::new(stream).poll_shutdown(cx) {
Poll::Ready(Ok(())) => {
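The `mut` removals in this file come from rustc's `unused_mut` warning rather than a clippy lint: `get_mut()` already yields `&mut Self`, and `&mut this.state` only reborrows through that reference, so the binding itself never needs to be mutable. A simplified, hypothetical sketch (not the real TLS state machine):

```rust
use std::pin::Pin;

enum StreamState {
    Accepted(String),
    Closed,
}

struct Incoming {
    state: StreamState,
}

impl Incoming {
    fn poll_shutdown(self: Pin<&mut Self>) {
        // `this` is `&mut Incoming`; mutating through it is a reborrow,
        // so `let mut this = ...` would draw an unused_mut warning.
        let this = self.get_mut();
        if let StreamState::Accepted(conn) = &mut this.state {
            conn.push_str(" (shutting down)");
        }
        this.state = StreamState::Closed;
    }
}

fn main() {
    let mut incoming = Incoming {
        state: StreamState::Accepted("conn".into()),
    };
    Pin::new(&mut incoming).poll_shutdown();
}
```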
lib/vector-core/src/transform/mod.rs (2 changes: 1 addition & 1 deletion)
@@ -20,7 +20,7 @@ use crate::{
schema, ByteSizeOf,
};

#[cfg(any(feature = "lua"))]
#[cfg(feature = "lua")]
pub mod runtime_transform;

/// Transforms come in two variants. Functions, or tasks.
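The `#[cfg(any(...))]` cleanups here, and in `src/sinks/mod.rs` and `src/sources/mod.rs` below, drop `any()`/`all()` wrappers holding a single predicate. This matches clippy's `non_minimal_cfg` lint (naming the lint is an inference; the commit message only says "clippy"). The shape of the rewrite:

```rust
// any()/all() with exactly one condition says nothing the bare predicate doesn't.
#[cfg(any(feature = "lua"))] // before: flagged as a non-minimal cfg
mod runtime_transform_old {}

#[cfg(feature = "lua")] // after: the wrapper is dropped
mod runtime_transform_new {}

fn main() {}
```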
rust-toolchain.toml (2 changes: 1 addition & 1 deletion)
@@ -1,3 +1,3 @@
[toolchain]
channel = "1.70.0"
channel = "1.71.0"
profile = "default"
src/conditions/datadog_search.rs (4 changes: 2 additions & 2 deletions)
@@ -40,7 +40,7 @@ impl Conditional for DatadogSearchRunner {
impl ConditionalConfig for DatadogSearchConfig {
fn build(&self, _enrichment_tables: &enrichment::TableRegistry) -> crate::Result<Condition> {
let node = parse(&self.source)?;
-let matcher = as_log(build_matcher(&node, &EventFilter::default()));
+let matcher = as_log(build_matcher(&node, &EventFilter));

Ok(Condition::DatadogSearch(DatadogSearchRunner { matcher }))
}
@@ -1039,7 +1039,7 @@ mod test {
#[test]
/// Parse each Datadog Search Syntax query and check that it passes/fails.
fn event_filter() {
-test_filter(EventFilter::default(), |ev| ev.into_log())
+test_filter(EventFilter, |ev| ev.into_log())
}

#[test]
src/config/watcher.rs (2 changes: 1 addition & 1 deletion)
@@ -51,7 +51,7 @@ pub fn spawn_thread<'a>(
debug!(message = "Configuration file change detected.", event = ?event);

// Consume events until delay amount of time has passed since the latest event.
-while let Ok(..) = receiver.recv_timeout(delay) {}
+while receiver.recv_timeout(delay).is_ok() {}

debug!(message = "Consumed file change events for delay.", delay = ?delay);

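The watcher change swaps an empty-bodied `while let Ok(..)` drain loop for the `.is_ok()` form that clippy's `redundant_pattern_matching` suggests when the `Ok` payload is unused (again, the exact lint is an inference). A runnable sketch of the same debounce pattern over a plain `std::sync::mpsc` channel, with hypothetical event values:

```rust
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = mpsc::channel();
    let delay = Duration::from_millis(100);

    // Simulate a burst of file-change notifications.
    thread::spawn(move || {
        for i in 0..5 {
            tx.send(i).unwrap();
            thread::sleep(Duration::from_millis(10));
        }
    });

    // Block for the first event...
    let first = rx.recv().unwrap();
    // ...then consume events until `delay` passes with no new event.
    while rx.recv_timeout(delay).is_ok() {}

    println!("debounced a burst starting at event {first}");
}
```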
src/sinks/clickhouse/sink.rs (2 changes: 1 addition & 1 deletion)
@@ -30,7 +30,7 @@ impl ClickhouseSink {
encoding: (
transformer,
Encoder::<Framer>::new(
-NewlineDelimitedEncoderConfig::default().build().into(),
+NewlineDelimitedEncoderConfig.build().into(),
JsonSerializerConfig::default().build().into(),
),
),
src/sinks/datadog/logs/sink.rs (2 changes: 1 addition & 1 deletion)
@@ -274,7 +274,7 @@ where
async fn run_inner(self: Box<Self>, input: BoxStream<'_, Event>) -> Result<(), ()> {
let default_api_key = Arc::clone(&self.default_api_key);

-let partitioner = EventPartitioner::default();
+let partitioner = EventPartitioner;

let builder_limit = NonZeroUsize::new(64);
let input = input.batched_partitioned(partitioner, self.batch_settings);
src/sinks/datadog/metrics/normalizer.rs (2 changes: 1 addition & 1 deletion)
@@ -185,7 +185,7 @@ mod tests {

fn run_comparisons(inputs: Vec<Metric>, expected_outputs: Vec<Option<Metric>>) {
let mut metric_set = MetricSet::default();
-let mut normalizer = DatadogMetricsNormalizer::default();
+let mut normalizer = DatadogMetricsNormalizer;

for (input, expected) in inputs.into_iter().zip(expected_outputs) {
let result = normalizer.normalize(&mut metric_set, input);
src/sinks/greptimedb/service.rs (2 changes: 1 addition & 1 deletion)
@@ -39,7 +39,7 @@ impl GreptimeDBRequest {
let mut finalizers = EventFinalizers::default();
let mut request_metadata_builder = RequestMetadataBuilder::default();

-let sizer = GreptimeDBBatchSizer::default();
+let sizer = GreptimeDBBatchSizer;
let mut estimated_request_size = 0;
for mut metric in metrics.into_iter() {
finalizers.merge(metric.take_finalizers());
src/sinks/greptimedb/sink.rs (2 changes: 1 addition & 1 deletion)
@@ -37,7 +37,7 @@ impl GreptimeDBSink {
.normalized_with_default::<GreptimeDBMetricNormalize>()
.batched(
self.batch_settings
-.into_item_size_config(GreptimeDBBatchSizer::default()),
+.into_item_size_config(GreptimeDBBatchSizer),
)
.map(GreptimeDBRequest::from_metrics)
.into_driver(self.service)
src/sinks/loki/sink.rs (2 changes: 1 addition & 1 deletion)
@@ -453,7 +453,7 @@ impl LokiSink {
.map(|event| encoder.encode_event(event))
.filter_map(|event| async { event })
.map(|record| filter.filter_record(record))
-.batched_partitioned(RecordPartitioner::default(), self.batch_settings)
+.batched_partitioned(RecordPartitioner, self.batch_settings)
.filter_map(|(partition, batch)| async {
if let Some(partition) = partition {
let mut count: usize = 0;
src/sinks/mod.rs (2 changes: 1 addition & 1 deletion)
@@ -51,7 +51,7 @@ pub mod elasticsearch;
pub mod file;
#[cfg(feature = "sinks-gcp")]
pub mod gcp;
#[cfg(any(feature = "sinks-gcp"))]
#[cfg(feature = "sinks-gcp")]
pub mod gcs_common;
#[cfg(feature = "sinks-greptimedb")]
pub mod greptimedb;
src/sinks/splunk_hec/metrics/sink.rs (2 changes: 1 addition & 1 deletion)
@@ -65,7 +65,7 @@ where
default_namespace,
))
})
-.batched_partitioned(EventPartitioner::default(), self.batch_settings)
+.batched_partitioned(EventPartitioner, self.batch_settings)
.request_builder(builder_limit, self.request_builder)
.filter_map(|request| async move {
match request {
src/sinks/statsd/normalizer.rs (2 changes: 1 addition & 1 deletion)
@@ -145,7 +145,7 @@ mod tests {

fn run_comparisons(inputs: Vec<Metric>, expected_outputs: Vec<Option<Metric>>) {
let mut metric_set = MetricSet::default();
-let mut normalizer = StatsdNormalizer::default();
+let mut normalizer = StatsdNormalizer;

for (input, expected) in inputs.into_iter().zip(expected_outputs) {
let result = normalizer.normalize(&mut metric_set, input);
src/sinks/statsd/sink.rs (5 changes: 1 addition & 4 deletions)
@@ -58,10 +58,7 @@ where
// other metric types in type-specific ways i.e. incremental gauge updates use a
// different syntax, etc.
.normalized_with_default::<StatsdNormalizer>()
-.batched(
-    self.batch_settings
-        .into_item_size_config(StatsdBatchSizer::default()),
-)
+.batched(self.batch_settings.into_item_size_config(StatsdBatchSizer))
// We build our requests "incrementally", which means that for a single batch of
// metrics, we might generate N requests to represent all of the metrics in the batch.
//
src/sources/mod.rs (6 changes: 3 additions & 3 deletions)
@@ -13,7 +13,7 @@ pub mod aws_kinesis_firehose;
pub mod aws_s3;
#[cfg(feature = "sources-aws_sqs")]
pub mod aws_sqs;
#[cfg(any(feature = "sources-datadog_agent"))]
#[cfg(feature = "sources-datadog_agent")]
pub mod datadog_agent;
#[cfg(feature = "sources-demo_logs")]
pub mod demo_logs;
@@ -54,11 +54,11 @@ pub mod journald;
pub mod kafka;
#[cfg(feature = "sources-kubernetes_logs")]
pub mod kubernetes_logs;
#[cfg(all(feature = "sources-logstash"))]
#[cfg(feature = "sources-logstash")]
pub mod logstash;
#[cfg(feature = "sources-mongodb_metrics")]
pub mod mongodb_metrics;
#[cfg(all(feature = "sources-nats"))]
#[cfg(feature = "sources-nats")]
pub mod nats;
#[cfg(feature = "sources-nginx_metrics")]
pub mod nginx_metrics;
src/sources/util/grpc/mod.rs (2 changes: 1 addition & 1 deletion)
@@ -48,7 +48,7 @@ where
// use independent `tower` layers when the request body itself (the body type, not the actual bytes) must be
// modified or wrapped... so instead of a cleaner design, we're opting here to bake it all together until the
// crates are sufficiently flexible for us to craft a better design.
-.layer(DecompressionAndMetricsLayer::default())
+.layer(DecompressionAndMetricsLayer)
.add_service(service)
.serve_with_incoming_shutdown(stream, shutdown.map(|token| tx.send(token).unwrap()))
.in_current_span()
src/sources/util/mod.rs (4 changes: 2 additions & 2 deletions)
@@ -1,5 +1,5 @@
#![allow(missing_docs)]
#[cfg(any(feature = "sources-http_server"))]
#[cfg(feature = "sources-http_server")]
mod body_decoding;
mod encoding_config;
#[cfg(all(unix, feature = "sources-dnstap"))]
@@ -46,7 +46,7 @@ pub use unix_datagram::build_unix_datagram_source;
pub use unix_stream::build_unix_stream_source;
pub use wrappers::{AfterRead, AfterReadExt};

#[cfg(any(feature = "sources-http_server"))]
#[cfg(feature = "sources-http_server")]
pub use self::body_decoding::Encoding;
#[cfg(feature = "sources-utils-http-query")]
pub use self::http::add_query_parameters;
