Use current time as training data end time #547
@@ -21,6 +21,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Optional;
@@ -46,7 +47,6 @@
 import org.opensearch.ad.feature.FeatureManager;
 import org.opensearch.ad.feature.SearchFeatureDao;
 import org.opensearch.ad.model.AnomalyDetector;
-import org.opensearch.ad.model.AnomalyDetectorJob;
 import org.opensearch.ad.model.Entity;
 import org.opensearch.ad.model.IntervalTimeConfiguration;
 import org.opensearch.ad.ratelimit.CheckpointWriteWorker;
@@ -220,6 +220,22 @@ private void coldStart(
 ) {
     logger.debug("Trigger cold start for {}", modelId);

+    if (modelState == null || entity == null) {
+        listener
+            .onFailure(
+                new IllegalArgumentException(
+                    String
+                        .format(
+                            Locale.ROOT,
+                            "Cannot have empty model state or entity: model state [%b], entity [%b]",
+                            modelState == null,
+                            entity == null
+                        )
+                )
+            );
+        return;
+    }
+
     if (lastThrottledColdStartTime.plus(Duration.ofMinutes(coolDownMinutes)).isAfter(clock.instant())) {
         listener.onResponse(null);
         return;
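A note on the pattern: coldStart reports bad arguments through listener.onFailure instead of throwing, because it runs on an asynchronous callback path where a thrown exception would never reach the caller. A minimal, self-contained sketch of this fail-fast guard; the Listener interface below is a simplified stand-in for OpenSearch's ActionListener, not the real type:

    import java.util.Locale;

    public class GuardSketch {
        // Simplified stand-in for org.opensearch.action.ActionListener.
        interface Listener<T> {
            void onResponse(T result);
            void onFailure(Exception e);
        }

        // Fail fast through the listener: in an async flow a thrown exception
        // would be lost, so invalid arguments become a failure response.
        static void coldStart(Object modelState, Object entity, Listener<Void> listener) {
            if (modelState == null || entity == null) {
                listener.onFailure(new IllegalArgumentException(String.format(Locale.ROOT,
                    "Cannot have empty model state or entity: model state [%b], entity [%b]",
                    modelState == null, entity == null)));
                return; // do not continue with a half-valid request
            }
            listener.onResponse(null); // the real cold start work would run here
        }

        public static void main(String[] args) {
            coldStart(null, new Object(), new Listener<Void>() {
                public void onResponse(Void v) { System.out.println("started"); }
                public void onFailure(Exception e) { System.out.println(e.getMessage()); }
            });
        }
    }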
@@ -252,7 +268,7 @@ private void coldStart(
 try {
     if (trainingData.isPresent()) {
         List<double[][]> dataPoints = trainingData.get();
-        combineTrainSamples(dataPoints, modelId, modelState);
+        extractTrainSamples(dataPoints, modelId, modelState);
         Queue<double[]> samples = modelState.getModel().getSamples();
         // only train models if we have enough samples
         if (samples.size() >= numMinSamples) {
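As this hunk shows, extractTrainSamples only stages the fetched data points; training is still gated on having at least numMinSamples queued. A tiny sketch of that gate, with the threshold value assumed for illustration only:

    import java.util.ArrayDeque;
    import java.util.Queue;

    public class TrainGateSketch {
        static final int NUM_MIN_SAMPLES = 32; // assumed value; configurable in AD settings

        // Train only when enough samples exist; otherwise keep accumulating.
        static boolean maybeTrain(Queue<double[]> samples) {
            if (samples.size() < NUM_MIN_SAMPLES) {
                return false; // not enough data yet; a later retry may add more
            }
            // trainModelFromExistingSamples(...) would be invoked here
            return true;
        }

        public static void main(String[] args) {
            Queue<double[]> samples = new ArrayDeque<>();
            samples.add(new double[] { 1.0 });
            System.out.println(maybeTrain(samples)); // false: 1 of 32 samples
        }
    }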
@@ -272,7 +288,6 @@ private void coldStart(
     } catch (Exception e) {
         listener.onFailure(e);
     }
-
 }, exception -> {
     try {
         logger.error(new ParameterizedMessage("Error while cold start {}", modelId), exception);
@@ -404,42 +419,23 @@ private void getEntityColdStartData(String detectorId, Entity entity, ActionList
 ActionListener<Optional<Long>> minTimeListener = ActionListener.wrap(earliest -> {
     if (earliest.isPresent()) {
         long startTimeMs = earliest.get().longValue();
-        nodeStateManager.getAnomalyDetectorJob(detectorId, ActionListener.wrap(jobOp -> {
-            if (!jobOp.isPresent()) {
-                listener.onFailure(new EndRunException(detectorId, "AnomalyDetector job is not available.", false));
-                return;
-            }
-
-            AnomalyDetectorJob job = jobOp.get();
-            // End time uses milliseconds as start time is assumed to be in milliseconds.
-            // Opensearch uses a set of preconfigured formats to recognize and parse these strings into a long value
-            // representing milliseconds-since-the-epoch in UTC.
-            // More on https://tinyurl.com/wub4fk92
-
-            // Existing samples either predates or coincide with cold start data. In either case,
-            // combining them without reordering based on time stamps is not ok. We might introduce
-            // anomalies in the process.
-            // An ideal solution would be to record time stamps of data points and combine existing
-            // samples and cold start samples and do interpolation afterwards. Recording time stamps
-            // requires changes across the board like bwc in checkpoints. A pragmatic solution is to use
-            // job enabled time as the end time of cold start period as it is easier to combine
-            // existing samples with cold start data. We just need to appends existing samples after
-            // cold start data as existing samples all happen after job enabled time. There might
-            // be some gaps in between the last cold start sample and the first accumulated sample.
-            // We will need to accept that precision loss in current solution.
-            long endTimeMs = job.getEnabledTime().toEpochMilli();
-            Pair<Integer, Integer> params = selectRangeParam(detector);
-            int stride = params.getLeft();
-            int numberOfSamples = params.getRight();
-
-            // we start with round 0
-            getFeatures(listener, 0, coldStartData, detector, entity, stride, numberOfSamples, startTimeMs, endTimeMs);
-        }, listener::onFailure));
+        // End time uses milliseconds as start time is assumed to be in milliseconds.
+        // Opensearch uses a set of preconfigured formats to recognize and parse these
+        // strings into a long value
+        // representing milliseconds-since-the-epoch in UTC.
+        // More on https://tinyurl.com/wub4fk92
+        long endTimeMs = clock.millis();
+        Pair<Integer, Integer> params = selectRangeParam(detector);
+        int stride = params.getLeft();
+        int numberOfSamples = params.getRight();
+
+        // we start with round 0
+        getFeatures(listener, 0, coldStartData, detector, entity, stride, numberOfSamples, startTimeMs, endTimeMs);
     } else {
         listener.onResponse(Optional.empty());
     }
 }, listener::onFailure);

 searchFeatureDao

Review comment on the removed AnomalyDetector job lookup:
Reviewer: Before, it seems we used both cold start data and existing samples as the training data. Can you explain further why we are moving away from this, or was no sample data ever actually used because we didn't add samples until success anyway (as you mention in the description)?
Author: No sample data was ever actually used, because we didn't add samples until success.

Review comment on the removed line `long endTimeMs = job.getEnabledTime().toEpochMilli();`:
Reviewer: If an HC detector's realtime job is not restarted, the enabled time won't change. If there is not enough data before the job enabled time and the user doesn't backfill historical data, there is no chance to pass cold start, right? That seems like a critical bug if it's true. We'd better backport this to 1.x too.
Author: Right. Yes, will backport.
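This hunk is the heart of the PR: endTimeMs is now clock.millis(), the current time, instead of the job enabled time, so cold start can draw on data ingested after the job started. The sketch below shows one plausible way stride and numberOfSamples could carve sample ranges out of [startTimeMs, endTimeMs]; the arithmetic is illustrative only, not the actual selectRangeParam/getFeatures implementation:

    import java.util.ArrayList;
    import java.util.List;

    public class RangeSketch {
        // Walk backwards from endTimeMs, taking up to numberOfSamples ranges
        // spaced `stride` intervals apart; each range is one interval wide.
        static List<long[]> sampleRanges(long startTimeMs, long endTimeMs,
                long intervalMs, int stride, int numberOfSamples) {
            List<long[]> ranges = new ArrayList<>();
            long rangeEnd = endTimeMs;
            for (int i = 0; i < numberOfSamples && rangeEnd - intervalMs >= startTimeMs; i++) {
                ranges.add(new long[] { rangeEnd - intervalMs, rangeEnd });
                rangeEnd -= stride * intervalMs; // skip `stride` intervals between samples
            }
            return ranges;
        }

        public static void main(String[] args) {
            // e.g. 1-minute interval, stride 4, up to 24 samples ending "now"
            long now = System.currentTimeMillis();
            for (long[] r : sampleRanges(now - 86_400_000L, now, 60_000L, 4, 24)) {
                System.out.println(r[0] + " .. " + r[1]);
            }
        }
    }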
@@ -694,40 +690,30 @@ public void trainModelFromExistingSamples(ModelState<EntityModel> modelState, in
 }

 /**
- * Precondition: we don't have enough training data.
- * Combine training data with existing sample data.
- * Existing samples either predates or coincide with cold start data. In either case,
- * combining them without reordering based on time stamps is not ok. We might introduce
- * anomalies in the process.
- * An ideal solution would be to record time stamps of data points and combine existing
- * samples and cold start samples and do interpolation afterwards. Recording time stamps
- * requires changes across the board like bwc in checkpoints. A pragmatic solution is to use
- * job enabled time as the end time of cold start period as it is easier to combine
- * existing samples with cold start data. We just need to appends existing samples after
- * cold start data as existing samples all happen after job enabled time. There might
- * be some gaps in between the last cold start sample and the first accumulated sample.
- * We will need to accept that precision loss in current solution.
+ * Extract training data and put them into ModelState
  *
  * @param coldstartDatapoints training data generated from cold start
  * @param modelId model Id
- * @param entityState entity State
+ * @param modelState entity State
  */
-private void combineTrainSamples(List<double[][]> coldstartDatapoints, String modelId, ModelState<EntityModel> entityState) {
-    if (coldstartDatapoints == null || coldstartDatapoints.size() == 0) {
+private void extractTrainSamples(List<double[][]> coldstartDatapoints, String modelId, ModelState<EntityModel> modelState) {
+    if (coldstartDatapoints == null || coldstartDatapoints.size() == 0 || modelState == null) {
         return;
     }

-    EntityModel model = entityState.getModel();
+    EntityModel model = modelState.getModel();
     if (model == null) {
         model = new EntityModel(null, new ArrayDeque<>(), null);
+        modelState.setModel(model);
     }

     Queue<double[]> newSamples = new ArrayDeque<>();
     for (double[][] consecutivePoints : coldstartDatapoints) {
         for (int i = 0; i < consecutivePoints.length; i++) {
             newSamples.add(consecutivePoints[i]);
         }
     }
-    newSamples.addAll(model.getSamples());

     model.setSamples(newSamples);
 }
Review comment on the null checks in extractTrainSamples:
Reviewer: If data isn't present currently but will get ingested in the future, and we are expecting a long initialization, would either of these conditions be met until then?
Author: These checks are method invariants; we don't expect either of them to be true, regardless of whether data is present or not.
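For reference, a self-contained sketch of the flattening that extractTrainSamples performs: each double[][] batch of consecutive points from cold start is appended, in order, into a single sample queue that replaces the model's previous samples. All names below are local to the sketch, not the real OpenSearch types:

    import java.util.ArrayDeque;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Queue;

    public class ExtractSamplesSketch {
        // Flatten cold start batches (each double[][] is a run of consecutive
        // points) into one queue, discarding whatever samples were there before.
        static Queue<double[]> extract(List<double[][]> coldstartDatapoints) {
            Queue<double[]> newSamples = new ArrayDeque<>();
            if (coldstartDatapoints == null) {
                return newSamples; // mirror the method's early return on null input
            }
            for (double[][] consecutivePoints : coldstartDatapoints) {
                newSamples.addAll(Arrays.asList(consecutivePoints));
            }
            return newSamples;
        }

        public static void main(String[] args) {
            List<double[][]> batches = List.of(
                new double[][] { { 1.0 }, { 2.0 } },
                new double[][] { { 5.0 } }
            );
            System.out.println(extract(batches).size()); // 3
        }
    }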