Skip to content

Commit

Permalink
Revert "[8.12] Downsampling supports date_histogram with tz" (#104421)
Browse files Browse the repository at this point in the history
  • Loading branch information
kkrik-es authored Jan 16, 2024
1 parent 94fdfff commit 7064497
Show file tree
Hide file tree
Showing 18 changed files with 56 additions and 679 deletions.
6 changes: 0 additions & 6 deletions docs/changelog/103511.yaml

This file was deleted.

24 changes: 1 addition & 23 deletions docs/reference/data-streams/downsampling.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -135,29 +135,7 @@ downsampled.
* For
<<search-aggregations-bucket-datehistogram-aggregation,date histogram aggregations>>,
only `fixed_intervals` (and not calendar-aware intervals) are supported.
* Timezone support comes with caveats:

** Date histograms at intervals that are multiples of an hour are based on
values generated at UTC. This works well for timezones that are on the hour, e.g.
+5:00 or -3:00, but requires offsetting the reported time buckets, e.g.
`2020-03-07T10:30:00.000` instead of `2020-03-07T10:00:00.000` for
timezone +5:30 (India), if downsampling aggregates values per hour. In this case,
the results include the field `downsampled_results_offset: true`, to indicate that
the time buckets are shifted. This can be avoided if a downsampling interval of 15
minutes is used, as it allows properly calculating hourly values for the shifted
buckets.

** Date histograms at intervals that are multiples of a day are similarly
affected, in case downsampling aggregates values per day. In this case, the
beginning of each day is always calculated at UTC when generating the downsampled
values, so the time buckets need to be shifted, e.g. reported as
`2020-03-07T19:00:00.000` instead of `2020-03-07T00:00:00.000` for timezone `America/New_York`.
The field `downsampled_results_offset: true` is added in this case too.

** Daylight savings and similar peculiarities around timezones affect
reported results, as <<datehistogram-aggregation-time-zone,documented>>
for date histogram aggregations. In addition, downsampling at a daily interval
makes it impossible to track information related to daylight savings changes.
* Only Coordinated Universal Time (UTC) date-times are supported.

[discrete]
[[downsampling-restrictions]]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,6 @@ static TransportVersion def(int id) {
public static final TransportVersion MISSED_INDICES_UPDATE_EXCEPTION_ADDED = def(8_558_00_0);
public static final TransportVersion INFERENCE_SERVICE_EMBEDDING_SIZE_ADDED = def(8_559_00_0);
public static final TransportVersion ENRICH_ELASTICSEARCH_VERSION_REMOVED = def(8_560_00_0);
public static final TransportVersion DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ = def(8_560_00_1);

/*
* STOP! READ THIS FIRST! No, really,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
import org.elasticsearch.cluster.routing.allocation.IndexMetadataUpdater;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.StreamInput;
Expand Down Expand Up @@ -137,9 +138,14 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen
EnumSet.of(ClusterBlockLevel.WRITE)
);

@Nullable
public String getDownsamplingInterval() {
return settings.get(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL_KEY);
// TODO: refactor this method after adding more downsampling metadata
/**
 * Returns {@code true} if this index is the product of a successfully completed
 * downsampling run: a downsample source index name is recorded in the index settings
 * and the recorded downsample status equals {@code DownsampleTaskStatus.SUCCESS}
 * (compared case-insensitively).
 */
public boolean isDownsampledIndex() {
    final String sourceIndex = settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME_KEY);
    final String indexDownsamplingStatus = settings.get(IndexMetadata.INDEX_DOWNSAMPLE_STATUS_KEY);
    // equalsIgnoreCase is null-safe on its argument, so a missing status simply fails the
    // check. The previous form compared a String against the DownsampleTaskStatus.UNKNOWN
    // enum constant in the null case — an always-false, mixed-type equals that happened to
    // produce the intended result.
    final boolean downsamplingSuccess = DownsampleTaskStatus.SUCCESS.name().equalsIgnoreCase(indexDownsamplingStatus);
    return Strings.isNullOrEmpty(sourceIndex) == false && downsamplingSuccess;
}

public enum State implements Writeable {
Expand Down Expand Up @@ -1229,7 +1235,6 @@ public Index getResizeSourceIndex() {
public static final String INDEX_DOWNSAMPLE_ORIGIN_UUID_KEY = "index.downsample.origin.uuid";

public static final String INDEX_DOWNSAMPLE_STATUS_KEY = "index.downsample.status";
public static final String INDEX_DOWNSAMPLE_INTERVAL_KEY = "index.downsample.interval";
public static final Setting<String> INDEX_DOWNSAMPLE_SOURCE_UUID = Setting.simpleString(
INDEX_DOWNSAMPLE_SOURCE_UUID_KEY,
Property.IndexScope,
Expand Down Expand Up @@ -1272,14 +1277,6 @@ public String toString() {
Property.InternalIndex
);

// NOTE(review): shown here on the deletion side of the diff. The setting key name
// suggests it recorded the fixed time interval the source index was downsampled at;
// defaults to the empty string and is index-scoped, internal, and private — TODO confirm
// against the full IndexMetadata class.
public static final Setting<String> INDEX_DOWNSAMPLE_INTERVAL = Setting.simpleString(
INDEX_DOWNSAMPLE_INTERVAL_KEY,
"",
Property.IndexScope,
Property.InternalIndex,
Property.PrivateIndex
);

// LIFECYCLE_NAME is here an as optimization, see LifecycleSettings.LIFECYCLE_NAME and
// LifecycleSettings.LIFECYCLE_NAME_SETTING for the 'real' version
public static final String LIFECYCLE_NAME = "index.lifecycle.name";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME,
IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_UUID,
IndexMetadata.INDEX_DOWNSAMPLE_STATUS,
IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -285,13 +285,6 @@ public IndexSettings getIndexSettings() {
return indexSettings;
}

/**
* Returns the MappingLookup for the queried index.
*/
public MappingLookup getMappingLookup() {
return mappingLookup;
}

/**
* Given an index pattern, checks whether it matches against the current shard. The pattern
* may represent a fully qualified index name if the search targets remote shards.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
Expand All @@ -37,7 +36,6 @@
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.SimpleTimeZone;
import java.util.function.Consumer;

import static java.util.Map.entry;
Expand Down Expand Up @@ -408,46 +406,23 @@ protected ValuesSourceAggregatorFactory innerBuild(
) throws IOException {
final DateIntervalWrapper.IntervalTypeEnum dateHistogramIntervalType = dateHistogramInterval.getIntervalType();

boolean downsampledResultsOffset = false;
final ZoneId tz = timeZone();

String downsamplingInterval = context.getIndexSettings().getIndexMetadata().getDownsamplingInterval();
if (downsamplingInterval != null) {
if (DateIntervalWrapper.IntervalTypeEnum.CALENDAR.equals(dateHistogramIntervalType)) {
throw new IllegalArgumentException(
config.getDescription()
+ " is not supported for aggregation ["
+ getName()
+ "] with interval type ["
+ dateHistogramIntervalType.getPreferredName()
+ "]"
);
}
if (context.getIndexSettings().getIndexMetadata().isDownsampledIndex()
&& DateIntervalWrapper.IntervalTypeEnum.CALENDAR.equals(dateHistogramIntervalType)) {
throw new IllegalArgumentException(
config.getDescription()
+ " is not supported for aggregation ["
+ getName()
+ "] with interval type ["
+ dateHistogramIntervalType.getPreferredName()
+ "]"
);
}

// Downsampled data in time-series indexes contain aggregated values that get calculated over UTC-based intervals.
// When they get aggregated using a different timezone, the resulting buckets may need to be offset to account for
// the difference between UTC (where stored data refers to) and the requested timezone. For instance:
// a. A TZ shifted by -01:15 over hourly downsampled data will lead to buckets with times XX:45, instead of XX:00
// b. A TZ shifted by +07:00 over daily downsampled data will lead to buckets with times 07:00, instead of 00:00
// c. Intervals over DST are approximate, not including gaps in time buckets. This applies to date histogram aggregation in
// general.
if (tz != null && ZoneId.of("UTC").equals(tz) == false && field().equals(DataStreamTimestampFieldMapper.DEFAULT_PATH)) {

// Get the downsampling interval.
DateHistogramInterval interval = new DateHistogramInterval(downsamplingInterval);
long downsamplingResolution = interval.estimateMillis();
long aggregationResolution = dateHistogramInterval.getAsFixedInterval().estimateMillis();

// If the aggregation resolution is not a multiple of the downsampling resolution, the reported time for each
// bucket needs to be shifted by the mod - in addition to rounding that's applied as usual.
// Note that the aggregation resolution gets shifted to match the specified timezone. Timezone.getOffset() normally expects
// a date but it can also process an offset (interval) in milliseconds as it uses the Unix epoch for reference.
long aggregationOffset = SimpleTimeZone.getTimeZone(tz).getOffset(aggregationResolution) % downsamplingResolution;
if (aggregationOffset != 0) {
downsampledResultsOffset = true;
offset += aggregationOffset;
}
}
final ZoneId tz = timeZone();
if (context.getIndexSettings().getIndexMetadata().isDownsampledIndex() && tz != null && ZoneId.of("UTC").equals(tz) == false) {
throw new IllegalArgumentException(
config.getDescription() + " is not supported for aggregation [" + getName() + "] with timezone [" + tz + "]"
);
}

DateHistogramAggregationSupplier aggregatorSupplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config);
Expand Down Expand Up @@ -498,7 +473,6 @@ protected ValuesSourceAggregatorFactory innerBuild(
order,
keyed,
minDocCount,
downsampledResultsOffset,
rounding,
roundedBounds,
roundedHardBounds,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@ Aggregator build(
BucketOrder order,
boolean keyed,
long minDocCount,
boolean downsampledResultsOffset,
@Nullable LongBounds extendedBounds,
@Nullable LongBounds hardBounds,
ValuesSourceConfig valuesSourceConfig,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,6 @@ public static Aggregator build(
BucketOrder order,
boolean keyed,
long minDocCount,
boolean downsampledResultsOffset,
@Nullable LongBounds extendedBounds,
@Nullable LongBounds hardBounds,
ValuesSourceConfig valuesSourceConfig,
Expand All @@ -97,7 +96,6 @@ public static Aggregator build(
order,
keyed,
minDocCount,
downsampledResultsOffset,
extendedBounds,
hardBounds,
valuesSourceConfig,
Expand All @@ -117,7 +115,6 @@ public static Aggregator build(
order,
keyed,
minDocCount,
downsampledResultsOffset,
extendedBounds,
hardBounds,
valuesSourceConfig,
Expand All @@ -136,7 +133,6 @@ private static FromDateRange adaptIntoRangeOrNull(
BucketOrder order,
boolean keyed,
long minDocCount,
boolean downsampledResultsOffset,
@Nullable LongBounds extendedBounds,
@Nullable LongBounds hardBounds,
ValuesSourceConfig valuesSourceConfig,
Expand Down Expand Up @@ -195,7 +191,6 @@ private static FromDateRange adaptIntoRangeOrNull(
minDocCount,
extendedBounds,
keyed,
downsampledResultsOffset,
fixedRoundingPoints
);
}
Expand Down Expand Up @@ -232,7 +227,6 @@ private static RangeAggregator.Range[] ranges(LongBounds hardBounds, long[] fixe
private final boolean keyed;

private final long minDocCount;
private final boolean downsampledResultsOffset;
private final LongBounds extendedBounds;
private final LongBounds hardBounds;

Expand All @@ -246,7 +240,6 @@ private static RangeAggregator.Range[] ranges(LongBounds hardBounds, long[] fixe
BucketOrder order,
boolean keyed,
long minDocCount,
boolean downsampledResultsOffset,
@Nullable LongBounds extendedBounds,
@Nullable LongBounds hardBounds,
ValuesSourceConfig valuesSourceConfig,
Expand All @@ -262,7 +255,6 @@ private static RangeAggregator.Range[] ranges(LongBounds hardBounds, long[] fixe
order.validate(this);
this.keyed = keyed;
this.minDocCount = minDocCount;
this.downsampledResultsOffset = downsampledResultsOffset;
this.extendedBounds = extendedBounds;
this.hardBounds = hardBounds;
// TODO: Stop using null here
Expand Down Expand Up @@ -336,7 +328,6 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I
emptyBucketInfo,
formatter,
keyed,
downsampledResultsOffset,
metadata()
);
});
Expand All @@ -356,7 +347,6 @@ public InternalAggregation buildEmptyAggregation() {
emptyBucketInfo,
formatter,
keyed,
downsampledResultsOffset,
metadata()
);
}
Expand Down Expand Up @@ -402,7 +392,6 @@ static class FromDateRange extends AdaptingAggregator implements SizedBucketAggr
private final long minDocCount;
private final LongBounds extendedBounds;
private final boolean keyed;
private final boolean downsampledResultsOffset;
private final long[] fixedRoundingPoints;

FromDateRange(
Expand All @@ -416,7 +405,6 @@ static class FromDateRange extends AdaptingAggregator implements SizedBucketAggr
long minDocCount,
LongBounds extendedBounds,
boolean keyed,
boolean downsampledResultsOffset,
long[] fixedRoundingPoints
) throws IOException {
super(parent, subAggregators, delegate);
Expand All @@ -428,7 +416,6 @@ static class FromDateRange extends AdaptingAggregator implements SizedBucketAggr
this.minDocCount = minDocCount;
this.extendedBounds = extendedBounds;
this.keyed = keyed;
this.downsampledResultsOffset = downsampledResultsOffset;
this.fixedRoundingPoints = fixedRoundingPoints;
}

Expand Down Expand Up @@ -467,7 +454,6 @@ protected InternalAggregation adapt(InternalAggregation delegateResult) {
emptyBucketInfo,
format,
keyed,
downsampledResultsOffset,
range.getMetadata()
);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
order,
keyed,
minDocCount,
downsampledResultsOffset,
extendedBounds,
hardBounds,
valuesSourceConfig,
Expand All @@ -72,7 +71,6 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
order,
keyed,
minDocCount,
downsampledResultsOffset,
extendedBounds,
hardBounds,
valuesSourceConfig,
Expand All @@ -90,7 +88,6 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) {
private final BucketOrder order;
private final boolean keyed;
private final long minDocCount;
private final boolean downsampledResultsOffset;
private final LongBounds extendedBounds;
private final LongBounds hardBounds;
private final Rounding rounding;
Expand All @@ -101,7 +98,6 @@ public DateHistogramAggregatorFactory(
BucketOrder order,
boolean keyed,
long minDocCount,
boolean downsampledResultsOffset,
Rounding rounding,
LongBounds extendedBounds,
LongBounds hardBounds,
Expand All @@ -115,7 +111,6 @@ public DateHistogramAggregatorFactory(
this.aggregatorSupplier = aggregationSupplier;
this.order = order;
this.keyed = keyed;
this.downsampledResultsOffset = downsampledResultsOffset;
this.minDocCount = minDocCount;
this.extendedBounds = extendedBounds;
this.hardBounds = hardBounds;
Expand Down Expand Up @@ -144,7 +139,6 @@ protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound c
order,
keyed,
minDocCount,
downsampledResultsOffset,
extendedBounds,
hardBounds,
config,
Expand All @@ -165,7 +159,6 @@ protected Aggregator createUnmapped(Aggregator parent, Map<String, Object> metad
order,
keyed,
minDocCount,
downsampledResultsOffset,
extendedBounds,
hardBounds,
config,
Expand Down
Loading

0 comments on commit 7064497

Please sign in to comment.