Don't refresh on _flush _force_merge and _upgrade (#27000)
Today all of these API calls have the side effect of making documents visible
to search requests. While this is sometimes desired, it is an unnecessary side effect,
and now that we have an internal (engine-private) index reader (#26972) the refresh
call is only added artificially for backwards compatibility (bwc). This change removes the side effect in 7.0.
s1monw authored Oct 16, 2017
1 parent 277637f commit 8dda827
Showing 11 changed files with 32 additions and 30 deletions.
@@ -1005,7 +1005,6 @@ public Engine.CommitId flush(FlushRequest request) {
}
final long time = System.nanoTime();
final Engine.CommitId commitId = engine.flush(force, waitIfOngoing);
engine.refresh("flush"); // TODO this is technically wrong we should remove this in 7.0
flushMetric.inc(System.nanoTime() - time);
return commitId;
}
@@ -1036,9 +1035,6 @@ public void forceMerge(ForceMergeRequest forceMerge) throws IOException {
Engine engine = getEngine();
engine.forceMerge(forceMerge.flush(), forceMerge.maxNumSegments(),
forceMerge.onlyExpungeDeletes(), false, false);
if (forceMerge.flush()) {
engine.refresh("force_merge"); // TODO this is technically wrong we should remove this in 7.0
}
}

/**
@@ -1055,8 +1051,6 @@ public org.apache.lucene.util.Version upgrade(UpgradeRequest upgrade) throws IOE
engine.forceMerge(true, // we need to flush at the end to make sure the upgrade is durable
Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment
false, true, upgrade.upgradeOnlyAncientSegments());
engine.refresh("upgrade"); // TODO this is technically wrong we should remove this in 7.0

org.apache.lucene.util.Version version = minimumCompatibleVersion();
if (logger.isTraceEnabled()) {
logger.trace("upgraded segments for {} from version {} to version {}", shardId, previousVersion, version);
@@ -55,6 +55,7 @@ public void setupIndex() {
client().prepareIndex("test", "type1", id).setSource("text", "sometext").get();
}
client().admin().indices().prepareFlush("test").get();
client().admin().indices().prepareRefresh().get();
}

public void testBasic() {
7 changes: 4 additions & 3 deletions core/src/test/java/org/elasticsearch/get/GetActionIT.java
@@ -131,16 +131,17 @@ public void testSimpleGet() {
assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
assertThat(response.getField("field2"), nullValue());

logger.info("--> flush the index, so we load it from it");
flush();

logger.info("--> realtime get 1 (loaded from index)");
logger.info("--> realtime get 1");
response = client().prepareGet(indexOrAlias(), "type1", "1").get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));

logger.info("--> refresh the index, so we load it from it");
refresh();

logger.info("--> non realtime get 1 (loaded from index)");
response = client().prepareGet(indexOrAlias(), "type1", "1").setRealtime(false).get();
assertThat(response.isExists(), equalTo(true));
@@ -573,6 +573,7 @@ public void testSegmentsStats() {

client().admin().indices().prepareFlush().get();
client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();
client().admin().indices().prepareRefresh().get();
stats = client().admin().indices().prepareStats().setSegments(true).get();

assertThat(stats.getTotal().getSegments(), notNullValue());
@@ -83,12 +83,10 @@ public void testSimpleNested() throws Exception {
.endObject()).execute().actionGet();

waitForRelocation(ClusterHealthStatus.GREEN);
// flush, so we fetch it from the index (as see that we filter nested docs)
flush();
GetResponse getResponse = client().prepareGet("test", "type1", "1").get();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getSourceAsBytes(), notNullValue());

refresh();
// check the numDocs
assertDocumentCount("test", 3);

@@ -126,8 +124,7 @@ public void testSimpleNested() throws Exception {
.endArray()
.endObject()).execute().actionGet();
waitForRelocation(ClusterHealthStatus.GREEN);
// flush, so we fetch it from the index (as see that we filter nested docs)
flush();
refresh();
assertDocumentCount("test", 6);

searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
@@ -151,8 +148,7 @@ public void testSimpleNested() throws Exception {
DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "2").execute().actionGet();
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());

// flush, so we fetch it from the index (as see that we filter nested docs)
flush();
refresh();
assertDocumentCount("test", 3);

searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)).execute().actionGet();
@@ -179,11 +175,10 @@ public void testMultiNested() throws Exception {
.endArray()
.endObject()).execute().actionGet();

// flush, so we fetch it from the index (as see that we filter nested docs)
flush();
GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
assertThat(getResponse.isExists(), equalTo(true));
waitForRelocation(ClusterHealthStatus.GREEN);
refresh();
// check the numDocs
assertDocumentCount("test", 7);

@@ -43,6 +43,7 @@

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -268,12 +269,10 @@ public void testSimpleVersioningWithFlush() throws Exception {
assertThat(indexResponse.getVersion(), equalTo(1L));

client().admin().indices().prepareFlush().execute().actionGet();

indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(1).execute().actionGet();
assertThat(indexResponse.getVersion(), equalTo(2L));

client().admin().indices().prepareFlush().execute().actionGet();

assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
VersionConflictEngineException.class);

@@ -286,13 +285,16 @@ public void testSimpleVersioningWithFlush() throws Exception {
assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);

client().admin().indices().prepareRefresh().execute().actionGet();
for (int i = 0; i < 10; i++) {
assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(2L));
}

client().admin().indices().prepareRefresh().execute().actionGet();

for (int i = 0; i < 10; i++) {
SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setVersion(true).execute().actionGet();
SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setVersion(true).
execute().actionGet();
assertHitCount(searchResponse, 1);
assertThat(searchResponse.getHits().getAt(0).getVersion(), equalTo(2L));
}
}
9 changes: 9 additions & 0 deletions docs/reference/migration/migrate_7_0/indices.asciidoc
@@ -10,3 +10,12 @@ index names may no longer contain `:`.

Negative values were interpreted as zero in earlier versions but are no
longer accepted.


==== `_flush` and `_force_merge` will no longer refresh

In previous versions issuing a `_flush` or `_force_merge` (with `flush=true`)
had the undocumented side-effect of refreshing the index which made new documents
visible to searches and non-realtime GET operations. From now on these operations
don't have this side-effect anymore. To make documents visible an explicit `_refresh`
call is needed unless the index is refreshed by the internal scheduler.
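
For illustration only (this snippet is not part of the committed docs; the class and method names are hypothetical): a minimal sketch, using the same `Client` calls that appear in the tests changed above, of the explicit refresh a caller now needs if it relied on `_flush` or `_force_merge` to make documents searchable.

[source,java]
----
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

public class FlushThenRefreshExample {
    /** Hypothetical helper: flush, then refresh explicitly so the indexed documents are searchable. */
    public static SearchResponse flushRefreshAndSearch(Client client, String index) {
        // Flush still persists operations to a Lucene commit, but as of 7.0 it no longer
        // makes the indexed documents visible to search.
        client.admin().indices().prepareFlush(index).get();
        // An explicit refresh is now required, unless the scheduled refresh
        // (index.refresh_interval) happens to run first.
        client.admin().indices().prepareRefresh(index).get();
        // Documents indexed before the flush are now visible to this search.
        return client.prepareSearch(index).setQuery(matchAllQuery()).get();
    }
}
----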
@@ -593,10 +593,9 @@ public void testHasChildAndHasParentFailWhenSomeSegmentsDontContainAnyParentOrCh

createIndexRequest("test", "parent", "1", null, "p_field", 1).get();
createIndexRequest("test", "child", "2", "1", "c_field", 1).get();
client().admin().indices().prepareFlush("test").get();

client().prepareIndex("test", legacy() ? "type1" : "doc", "3").setSource("p_field", 1).get();
client().admin().indices().prepareFlush("test").get();
refresh();

SearchResponse searchResponse = client().prepareSearch("test")
.setQuery(boolQuery().must(matchAllQuery()).filter(hasChildQuery("child", matchAllQuery(), ScoreMode.None))).get();
@@ -881,8 +880,8 @@ public void testHasChildAndHasParentFilter_withFilter() throws Exception {
} else {
client().prepareIndex("test", "doc", "3").setSource("p_field", 2).get();
}
client().admin().indices().prepareFlush("test").get();

refresh();
SearchResponse searchResponse = client().prepareSearch("test")
.setQuery(boolQuery().must(matchAllQuery()).filter(hasChildQuery("child", termQuery("c_field", 1), ScoreMode.None)))
.get();
@@ -911,7 +910,7 @@ public void testHasChildInnerHitsHighlighting() throws Exception {

createIndexRequest("test", "parent", "1", null, "p_field", 1).get();
createIndexRequest("test", "child", "2", "1", "c_field", "foo bar").get();
client().admin().indices().prepareFlush("test").get();
refresh();

SearchResponse searchResponse = client().prepareSearch("test").setQuery(
hasChildQuery("child", matchQuery("c_field", "foo"), ScoreMode.None)
@@ -40,7 +40,7 @@
body: {"f1": "v6_mixed", "f2": 10}

- do:
indices.flush:
indices.refresh:
index: test_index

- do:
@@ -56,7 +56,7 @@
id: d10

- do:
indices.flush:
indices.refresh:
index: test_index

- do:
@@ -46,7 +46,7 @@
- '{"f1": "d_old"}'

- do:
indices.flush:
indices.refresh:
index: test_index,index_with_replicas

- do:
@@ -36,7 +36,7 @@
- '{"f1": "v5_upgraded", "f2": 14}'

- do:
indices.flush:
indices.refresh:
index: test_index

- do: