Use current time as training data end time (opensearch-project#547)
* Use current time as training data end time

The bug happens because we use the job's enabled time as the training data end time. If the historical data before that time has been deleted, or never existed at all, cold start might never finish. This PR uses the current time as the training data end time instead, so that cold start has a chance to succeed on a later retry. It also removes the code in EntityColdStartWorker that combined cold start data and existing samples, because we don't add samples until cold start succeeds; combining the two is therefore unnecessary.
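
For intuition, a toy, self-contained illustration of the failure mode (illustrative code only, not from this repo; the real change is in EntityColdStarter's getEntityColdStartData, shown in the diff below):

```java
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;

// Toy demo: with "job enabled time" as the end bound, a detector whose
// historical data was deleted (or never existed) before the enable time has
// a permanently empty training range; with "now" as the end bound, the
// range grows on every retry, so cold start can eventually gather samples.
public final class ColdStartWindowDemo {
    public static void main(String[] args) {
        Clock clock = Clock.systemUTC();
        Instant enabledTime = clock.instant().minus(Duration.ofDays(30));
        Instant earliestData = clock.instant().minus(Duration.ofDays(7)); // older data deleted

        boolean oldWindowEmpty = !earliestData.isBefore(enabledTime);     // true: [earliest, enabled] is empty
        boolean newWindowEmpty = !earliestData.isBefore(clock.instant()); // false: [earliest, now] has data

        System.out.println("end = enabled time -> window empty: " + oldWindowEmpty);
        System.out.println("end = current time -> window empty: " + newWindowEmpty);
    }
}
```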

Testing done:
1. Manually verified the bug is fixed.
2. Fixed all related unit tests.

Signed-off-by: Kaituo Li <[email protected]>
kaituo committed May 23, 2022
1 parent 1ada5d7 commit 317efd5
Showing 5 changed files with 134 additions and 110 deletions.
92 changes: 39 additions & 53 deletions src/main/java/org/opensearch/ad/ml/EntityColdStarter.java
@@ -21,6 +21,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Optional;
@@ -47,7 +48,6 @@
 import org.opensearch.ad.feature.FeatureManager;
 import org.opensearch.ad.feature.SearchFeatureDao;
 import org.opensearch.ad.model.AnomalyDetector;
-import org.opensearch.ad.model.AnomalyDetectorJob;
 import org.opensearch.ad.model.Entity;
 import org.opensearch.ad.model.IntervalTimeConfiguration;
 import org.opensearch.ad.ratelimit.CheckpointWriteWorker;
@@ -221,6 +221,22 @@ private void coldStart(
     ) {
         logger.debug("Trigger cold start for {}", modelId);
 
+        if (modelState == null || entity == null) {
+            listener
+                .onFailure(
+                    new IllegalArgumentException(
+                        String
+                            .format(
+                                Locale.ROOT,
+                                "Cannot have empty model state or entity: model state [%b], entity [%b]",
+                                modelState == null,
+                                entity == null
+                            )
+                    )
+                );
+            return;
+        }
+
         if (lastThrottledColdStartTime.plus(Duration.ofMinutes(coolDownMinutes)).isAfter(clock.instant())) {
             listener.onResponse(null);
             return;
Expand Down Expand Up @@ -253,7 +269,7 @@ private void coldStart(
try {
if (trainingData.isPresent()) {
List<double[][]> dataPoints = trainingData.get();
combineTrainSamples(dataPoints, modelId, modelState);
extractTrainSamples(dataPoints, modelId, modelState);
Queue<double[]> samples = modelState.getModel().getSamples();
// only train models if we have enough samples
if (samples.size() >= numMinSamples) {
@@ -273,7 +289,6 @@
             } catch (Exception e) {
                 listener.onFailure(e);
             }
-
         }, exception -> {
             try {
                 logger.error(new ParameterizedMessage("Error while cold start {}", modelId), exception);
@@ -405,42 +420,23 @@ private void getEntityColdStartData(String detectorId, Entity entity, ActionList
         ActionListener<Optional<Long>> minTimeListener = ActionListener.wrap(earliest -> {
             if (earliest.isPresent()) {
                 long startTimeMs = earliest.get().longValue();
-                nodeStateManager.getAnomalyDetectorJob(detectorId, ActionListener.wrap(jobOp -> {
-                    if (!jobOp.isPresent()) {
-                        listener.onFailure(new EndRunException(detectorId, "AnomalyDetector job is not available.", false));
-                        return;
-                    }
-
-                    AnomalyDetectorJob job = jobOp.get();
-                    // End time uses milliseconds as start time is assumed to be in milliseconds.
-                    // Opensearch uses a set of preconfigured formats to recognize and parse these strings into a long value
-                    // representing milliseconds-since-the-epoch in UTC.
-                    // More on https://tinyurl.com/wub4fk92
-
-                    // Existing samples either predates or coincide with cold start data. In either case,
-                    // combining them without reordering based on time stamps is not ok. We might introduce
-                    // anomalies in the process.
-                    // An ideal solution would be to record time stamps of data points and combine existing
-                    // samples and cold start samples and do interpolation afterwards. Recording time stamps
-                    // requires changes across the board like bwc in checkpoints. A pragmatic solution is to use
-                    // job enabled time as the end time of cold start period as it is easier to combine
-                    // existing samples with cold start data. We just need to appends existing samples after
-                    // cold start data as existing samples all happen after job enabled time. There might
-                    // be some gaps in between the last cold start sample and the first accumulated sample.
-                    // We will need to accept that precision loss in current solution.
-                    long endTimeMs = job.getEnabledTime().toEpochMilli();
-                    Pair<Integer, Integer> params = selectRangeParam(detector);
-                    int stride = params.getLeft();
-                    int numberOfSamples = params.getRight();
-
-                    // we start with round 0
-                    getFeatures(listener, 0, coldStartData, detector, entity, stride, numberOfSamples, startTimeMs, endTimeMs);
-
-                }, listener::onFailure));
+                // End time uses milliseconds as start time is assumed to be in milliseconds.
+                // Opensearch uses a set of preconfigured formats to recognize and parse these
+                // strings into a long value
+                // representing milliseconds-since-the-epoch in UTC.
+                // More on https://tinyurl.com/wub4fk92
+
+                long endTimeMs = clock.millis();
+                Pair<Integer, Integer> params = selectRangeParam(detector);
+                int stride = params.getLeft();
+                int numberOfSamples = params.getRight();
+
+                // we start with round 0
+                getFeatures(listener, 0, coldStartData, detector, entity, stride, numberOfSamples, startTimeMs, endTimeMs);
             } else {
                 listener.onResponse(Optional.empty());
             }
 
         }, listener::onFailure);
 
         searchFeatureDao
@@ -695,40 +691,30 @@ public void trainModelFromExistingSamples(ModelState<EntityModel> modelState, in
     }
 
     /**
-     * Precondition: we don't have enough training data.
-     * Combine training data with existing sample data.
-     * Existing samples either predates or coincide with cold start data. In either case,
-     * combining them without reordering based on time stamps is not ok. We might introduce
-     * anomalies in the process.
-     * An ideal solution would be to record time stamps of data points and combine existing
-     * samples and cold start samples and do interpolation afterwards. Recording time stamps
-     * requires changes across the board like bwc in checkpoints. A pragmatic solution is to use
-     * job enabled time as the end time of cold start period as it is easier to combine
-     * existing samples with cold start data. We just need to appends existing samples after
-     * cold start data as existing samples all happen after job enabled time. There might
-     * be some gaps in between the last cold start sample and the first accumulated sample.
-     * We will need to accept that precision loss in current solution.
+     * Extract training data and put them into ModelState
      *
     * @param coldstartDatapoints training data generated from cold start
     * @param modelId model Id
-    * @param entityState entity State
+    * @param modelState entity State
     */
-    private void combineTrainSamples(List<double[][]> coldstartDatapoints, String modelId, ModelState<EntityModel> entityState) {
-        if (coldstartDatapoints == null || coldstartDatapoints.size() == 0) {
+    private void extractTrainSamples(List<double[][]> coldstartDatapoints, String modelId, ModelState<EntityModel> modelState) {
+        if (coldstartDatapoints == null || coldstartDatapoints.size() == 0 || modelState == null) {
             return;
         }
 
-        EntityModel model = entityState.getModel();
+        EntityModel model = modelState.getModel();
         if (model == null) {
            model = new EntityModel(null, new ArrayDeque<>(), null);
+           modelState.setModel(model);
        }
 
        Queue<double[]> newSamples = new ArrayDeque<>();
        for (double[][] consecutivePoints : coldstartDatapoints) {
            for (int i = 0; i < consecutivePoints.length; i++) {
                newSamples.add(consecutivePoints[i]);
            }
        }
-       newSamples.addAll(model.getSamples());
 
        model.setSamples(newSamples);
    }
 
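
For readers skimming the diff, the net effect of the renamed helper can be condensed into a standalone sketch (a simplification that reduces EntityModel to its sample queue — an assumption for illustration, not the project's actual API):

```java
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.List;
import java.util.Queue;

// Condensed sketch: cold start output simply becomes the model's sample
// queue. The old merge step (appending pre-existing samples after the cold
// start data) is gone, because samples are no longer buffered before cold
// start succeeds.
final class ExtractSamplesSketch {
    static Queue<double[]> extract(List<double[][]> coldstartDatapoints) {
        Queue<double[]> newSamples = new ArrayDeque<>();
        for (double[][] consecutivePoints : coldstartDatapoints) {
            newSamples.addAll(Arrays.asList(consecutivePoints));
        }
        return newSamples; // the caller installs it via model.setSamples(newSamples)
    }
}
```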
@@ -354,14 +354,8 @@ private ActionListener<Optional<AnomalyDetector>> onGetDetector(
         ModelState<EntityModel> modelState = modelManager
             .processEntityCheckpoint(checkpoint, entity, modelId, detectorId, detector.getShingleSize());
 
-        EntityModel entityModel = modelState.getModel();
-
-        ThresholdingResult result = null;
-        if (entityModel.getTrcf().isPresent()) {
-            result = modelManager.score(origRequest.getCurrentFeature(), modelId, modelState);
-        } else {
-            entityModel.addSample(origRequest.getCurrentFeature());
-        }
+        ThresholdingResult result = modelManager
+            .getAnomalyResultForEntity(origRequest.getCurrentFeature(), modelState, modelId, entity, detector.getShingleSize());
 
         if (result != null && result.getRcfScore() > 0) {
             AnomalyResult resultToSave = result
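
The hunk above (per the commit message, this is the EntityColdStartWorker side) replaces the worker's own score-or-buffer branch with a single modelManager.getAnomalyResultForEntity call. A toy contrast of the two shapes, using hypothetical stand-in types rather than the real ModelManager API:

```java
import java.util.Optional;
import java.util.Queue;

// Toy contrast of the worker logic before and after this commit.
final class WorkerScoringSketch {
    interface Scorer { double score(double[] feature); }
    interface ModelGateway { Optional<Double> getAnomalyResult(double[] feature); }

    // Before: the worker chose between scoring and buffering the feature
    // as a future training sample.
    static Optional<Double> before(Optional<Scorer> trainedModel, Queue<double[]> samples, double[] feature) {
        if (trainedModel.isPresent()) {
            return Optional.of(trainedModel.get().score(feature));
        }
        samples.add(feature); // worker-side buffering, removed by this PR
        return Optional.empty();
    }

    // After: one call owns the decision; the worker no longer touches the
    // sample queue, which is what makes the old combine step unnecessary.
    static Optional<Double> after(ModelGateway gateway, double[] feature) {
        return gateway.getAnomalyResult(feature);
    }
}
```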
63 changes: 63 additions & 0 deletions src/test/java/org/opensearch/ad/NodeStateManagerTests.java
@@ -44,6 +44,7 @@
 import org.opensearch.action.get.GetResponse;
 import org.opensearch.action.search.SearchRequest;
 import org.opensearch.ad.model.AnomalyDetector;
+import org.opensearch.ad.model.AnomalyDetectorJob;
 import org.opensearch.ad.settings.AnomalyDetectorSettings;
 import org.opensearch.ad.transport.AnomalyResultTests;
 import org.opensearch.ad.util.ClientUtil;
@@ -80,6 +81,7 @@ public class NodeStateManagerTests extends AbstractADTest {
     private GetResponse checkpointResponse;
     private ClusterService clusterService;
     private ClusterSettings clusterSettings;
+    private AnomalyDetectorJob jobToCheck;
 
     @Override
     protected NamedXContentRegistry xContentRegistry() {
@@ -130,6 +132,7 @@ public void setUp() throws Exception {
         stateManager = new NodeStateManager(client, xContentRegistry(), settings, clientUtil, clock, duration, clusterService);
 
         checkpointResponse = mock(GetResponse.class);
+        jobToCheck = TestHelpers.randomAnomalyDetectorJob(true, Instant.ofEpochMilli(1602401500000L), null);
     }
 
     @Override
@@ -388,4 +391,64 @@ public void testSettingUpdateBackOffMin() {
         when(clock.millis()).thenReturn(62000L);
         assertTrue(!stateManager.isMuted(nodeId, adId));
     }
+
+    @SuppressWarnings("unchecked")
+    private String setupJob() throws IOException {
+        String detectorId = jobToCheck.getName();
+
+        doAnswer(invocation -> {
+            GetRequest request = invocation.getArgument(0);
+            ActionListener<GetResponse> listener = invocation.getArgument(1);
+            if (request.index().equals(AnomalyDetectorJob.ANOMALY_DETECTOR_JOB_INDEX)) {
+                listener.onResponse(TestHelpers.createGetResponse(jobToCheck, detectorId, AnomalyDetectorJob.ANOMALY_DETECTOR_JOB_INDEX));
+            }
+            return null;
+        }).when(client).get(any(), any(ActionListener.class));
+
+        return detectorId;
+    }
+
+    public void testGetAnomalyJob() throws IOException, InterruptedException {
+        String detectorId = setupJob();
+        final CountDownLatch inProgressLatch = new CountDownLatch(1);
+        stateManager.getAnomalyDetectorJob(detectorId, ActionListener.wrap(asDetector -> {
+            assertEquals(jobToCheck, asDetector.get());
+            inProgressLatch.countDown();
+        }, exception -> {
+            assertTrue(false);
+            inProgressLatch.countDown();
+        }));
+        assertTrue(inProgressLatch.await(100, TimeUnit.SECONDS));
+    }
+
+    /**
+     * Test that we cache the anomaly detector job definition after the first call
+     * @throws IOException if client throws exception
+     * @throws InterruptedException if the current thread is interrupted while waiting
+     */
+    @SuppressWarnings("unchecked")
+    public void testRepeatedGetAnomalyJob() throws IOException, InterruptedException {
+        String detectorId = setupJob();
+        final CountDownLatch inProgressLatch = new CountDownLatch(2);
+
+        stateManager.getAnomalyDetectorJob(detectorId, ActionListener.wrap(asDetector -> {
+            assertEquals(jobToCheck, asDetector.get());
+            inProgressLatch.countDown();
+        }, exception -> {
+            assertTrue(false);
+            inProgressLatch.countDown();
+        }));
+
+        stateManager.getAnomalyDetectorJob(detectorId, ActionListener.wrap(asDetector -> {
+            assertEquals(jobToCheck, asDetector.get());
+            inProgressLatch.countDown();
+        }, exception -> {
+            assertTrue(false);
+            inProgressLatch.countDown();
+        }));
+
+        assertTrue(inProgressLatch.await(100, TimeUnit.SECONDS));
+
+        verify(client, times(1)).get(any(), any(ActionListener.class));
+    }
 }
