diff --git a/qa/restart-upgrade/build.gradle b/qa/restart-upgrade/build.gradle index 16c2454c9..29617e1b7 100644 --- a/qa/restart-upgrade/build.gradle +++ b/qa/restart-upgrade/build.gradle @@ -39,6 +39,7 @@ task testAgainstOldCluster(type: StandaloneRestIntegTestTask) { filter { excludeTestsMatching "org.opensearch.neuralsearch.bwc.MultiModalSearchIT.*" excludeTestsMatching "org.opensearch.neuralsearch.bwc.HybridSearchIT.*" + excludeTestsMatching "org.opensearch.neuralsearch.bwc.NeuralSparseSearchIT.*" } } @@ -64,6 +65,7 @@ task testAgainstNewCluster(type: StandaloneRestIntegTestTask) { filter { excludeTestsMatching "org.opensearch.neuralsearch.bwc.MultiModalSearchIT.*" excludeTestsMatching "org.opensearch.neuralsearch.bwc.HybridSearchIT.*" + excludeTestsMatching "org.opensearch.neuralsearch.bwc.NeuralSparseSearchIT.*" } } diff --git a/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/AbstractRestartUpgradeRestTestCase.java b/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/AbstractRestartUpgradeRestTestCase.java index cf985d759..832821b94 100644 --- a/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/AbstractRestartUpgradeRestTestCase.java +++ b/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/AbstractRestartUpgradeRestTestCase.java @@ -8,14 +8,14 @@ import java.util.Optional; import org.junit.Before; import org.opensearch.common.settings.Settings; -import org.opensearch.neuralsearch.BaseNeuralSearchIT; +import org.opensearch.neuralsearch.BaseSparseEncodingIT; import static org.opensearch.neuralsearch.TestUtils.CLIENT_TIMEOUT_VALUE; import static org.opensearch.neuralsearch.TestUtils.RESTART_UPGRADE_OLD_CLUSTER; import static org.opensearch.neuralsearch.TestUtils.BWC_VERSION; import static org.opensearch.neuralsearch.TestUtils.NEURAL_SEARCH_BWC_PREFIX; import org.opensearch.test.rest.OpenSearchRestTestCase; -public abstract class AbstractRestartUpgradeRestTestCase extends BaseNeuralSearchIT { +public 
abstract class AbstractRestartUpgradeRestTestCase extends BaseSparseEncodingIT { @Before protected String getIndexNameForTest() { diff --git a/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/HybridSearchIT.java b/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/HybridSearchIT.java index 061619382..a69c14d83 100644 --- a/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/HybridSearchIT.java +++ b/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/HybridSearchIT.java @@ -40,20 +40,19 @@ public class HybridSearchIT extends AbstractRestartUpgradeRestTestCase { public void testNormalizationProcessor_whenIndexWithMultipleShards_E2EFlow() throws Exception { waitForClusterHealthGreen(NODES_BWC_CLUSTER); if (isRunningAgainstOldCluster()) { - String index = getIndexNameForTest(); String modelId = uploadTextEmbeddingModel(); loadModel(modelId); createPipelineProcessor(modelId, PIPELINE_NAME); createIndexWithConfiguration( - index, + getIndexNameForTest(), Files.readString(Path.of(classLoader.getResource("processor/IndexMappings.json").toURI())), PIPELINE_NAME ); - addDocument(index, "0", TEST_FIELD, TEXT_1, null, null); - addDocument(index, "1", TEST_FIELD, TEXT_2, null, null); - addDocument(index, "2", TEST_FIELD, TEXT_3, null, null); - addDocument(index, "3", TEST_FIELD, TEXT_4, null, null); - addDocument(index, "4", TEST_FIELD, TEXT_5, null, null); + addDocument(getIndexNameForTest(), "0", TEST_FIELD, TEXT_1, null, null); + addDocument(getIndexNameForTest(), "1", TEST_FIELD, TEXT_2, null, null); + addDocument(getIndexNameForTest(), "2", TEST_FIELD, TEXT_3, null, null); + addDocument(getIndexNameForTest(), "3", TEST_FIELD, TEXT_4, null, null); + addDocument(getIndexNameForTest(), "4", TEST_FIELD, TEXT_5, null, null); createSearchPipeline( SEARCH_PIPELINE_NAME, DEFAULT_NORMALIZATION_METHOD, @@ -61,17 +60,16 @@ public void testNormalizationProcessor_whenIndexWithMultipleShards_E2EFlow() thr 
Map.of(PARAM_NAME_WEIGHTS, Arrays.toString(new float[] { 0.3f, 0.7f })) ); } else { - String index = getIndexNameForTest(); Map pipeline = getIngestionPipeline(PIPELINE_NAME); assertNotNull(pipeline); String modelId = getModelId(pipeline, TEXT_EMBEDDING_PROCESSOR); loadModel(modelId); - addDocument(index, "5", TEST_FIELD, TEXT_6, null, null); - validateTestIndex(modelId, index, SEARCH_PIPELINE_NAME); + addDocument(getIndexNameForTest(), "5", TEST_FIELD, TEXT_6, null, null); + validateTestIndex(modelId, getIndexNameForTest(), SEARCH_PIPELINE_NAME); deleteSearchPipeline(SEARCH_PIPELINE_NAME); deletePipeline(PIPELINE_NAME); deleteModel(modelId); - deleteIndex(index); + deleteIndex(getIndexNameForTest()); } } @@ -81,20 +79,19 @@ public void testNormalizationProcessor_whenIndexWithMultipleShards_E2EFlow() thr public void testNormalizationProcessor_whenIndexWithSingleShard_E2EFlow() throws Exception { waitForClusterHealthGreen(NODES_BWC_CLUSTER); if (isRunningAgainstOldCluster()) { - String index = getIndexNameForTest() + "1"; String modelId = uploadTextEmbeddingModel(); loadModel(modelId); createPipelineProcessor(modelId, PIPELINE1_NAME); createIndexWithConfiguration( - index, + getIndexNameForTest(), Files.readString(Path.of(classLoader.getResource("processor/Index1Mappings.json").toURI())), PIPELINE1_NAME ); - addDocument(index, "0", TEST_FIELD, TEXT_1, null, null); - addDocument(index, "1", TEST_FIELD, TEXT_2, null, null); - addDocument(index, "2", TEST_FIELD, TEXT_3, null, null); - addDocument(index, "3", TEST_FIELD, TEXT_4, null, null); - addDocument(index, "4", TEST_FIELD, TEXT_5, null, null); + addDocument(getIndexNameForTest(), "0", TEST_FIELD, TEXT_1, null, null); + addDocument(getIndexNameForTest(), "1", TEST_FIELD, TEXT_2, null, null); + addDocument(getIndexNameForTest(), "2", TEST_FIELD, TEXT_3, null, null); + addDocument(getIndexNameForTest(), "3", TEST_FIELD, TEXT_4, null, null); + addDocument(getIndexNameForTest(), "4", TEST_FIELD, TEXT_5, null, null); 
createSearchPipeline( SEARCH_PIPELINE1_NAME, DEFAULT_NORMALIZATION_METHOD, @@ -102,17 +99,16 @@ public void testNormalizationProcessor_whenIndexWithSingleShard_E2EFlow() throws Map.of(PARAM_NAME_WEIGHTS, Arrays.toString(new float[] { 0.3f, 0.7f })) ); } else { - String index = getIndexNameForTest() + "1"; Map pipeline = getIngestionPipeline(PIPELINE1_NAME); assertNotNull(pipeline); String modelId = getModelId(pipeline, TEXT_EMBEDDING_PROCESSOR); loadModel(modelId); - addDocument(index, "5", TEST_FIELD, TEXT_6, null, null); - validateTestIndex(modelId, index, SEARCH_PIPELINE1_NAME); + addDocument(getIndexNameForTest(), "5", TEST_FIELD, TEXT_6, null, null); + validateTestIndex(modelId, getIndexNameForTest(), SEARCH_PIPELINE1_NAME); deleteSearchPipeline(SEARCH_PIPELINE1_NAME); deletePipeline(PIPELINE1_NAME); deleteModel(modelId); - deleteIndex(index); + deleteIndex(getIndexNameForTest()); } } diff --git a/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/NeuralSparseSearchIT.java b/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/NeuralSparseSearchIT.java index da6261977..5135306e1 100644 --- a/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/NeuralSparseSearchIT.java +++ b/qa/restart-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/NeuralSparseSearchIT.java @@ -7,18 +7,26 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import java.nio.file.Files; import java.nio.file.Path; +import java.util.List; import java.util.Map; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.neuralsearch.TestUtils; import static org.opensearch.neuralsearch.TestUtils.NODES_BWC_CLUSTER; import static org.opensearch.neuralsearch.TestUtils.SPARSE_ENCODING_PROCESSOR; +import static org.opensearch.neuralsearch.TestUtils.objectToFloat; import org.opensearch.neuralsearch.query.NeuralSparseQueryBuilder; public class NeuralSparseSearchIT extends 
AbstractRestartUpgradeRestTestCase { private static final String PIPELINE_NAME = "nlp-ingest-pipeline-sparse"; - private static final String TEST_FIELD = "passage_text"; - private static final String TEXT_1 = "Hello world"; - private static final String TEXT_2 = "Hi planet"; - private static final String QUERY = "Hi world"; + private static final String TEST_SPARSE_ENCODING_FIELD = "passage_embedding"; + private static final String TEST_TEXT_FIELD = "passage_text"; + private static final String TEXT_1 = "Hello world a b"; + private static final String TEXT_2 = "Hello planet"; + private static final List TEST_TOKENS_1 = List.of("hello", "world", "a", "b", "c"); + private static final List TEST_TOKENS_2 = List.of("hello", "planet", "a", "b", "c"); + private final Map testRankFeaturesDoc1 = TestUtils.createRandomTokenWeightMap(TEST_TOKENS_1); + private final Map testRankFeaturesDoc2 = TestUtils.createRandomTokenWeightMap(TEST_TOKENS_2); // Test restart-upgrade test sparse embedding processor // Create Sparse Encoding Processor, Ingestion Pipeline and add document @@ -34,13 +42,28 @@ public void testSparseEncodingProcessor_E2EFlow() throws Exception { Files.readString(Path.of(classLoader.getResource("processor/SparseIndexMappings.json").toURI())), PIPELINE_NAME ); - addDocument(getIndexNameForTest(), "0", TEST_FIELD, TEXT_1, null, null); + + addSparseEncodingDoc( + getIndexNameForTest(), + "0", + List.of(TEST_SPARSE_ENCODING_FIELD), + List.of(testRankFeaturesDoc1), + List.of(TEST_TEXT_FIELD), + List.of(TEXT_1) + ); } else { Map pipeline = getIngestionPipeline(PIPELINE_NAME); assertNotNull(pipeline); String modelId = TestUtils.getModelId(pipeline, SPARSE_ENCODING_PROCESSOR); loadModel(modelId); - addDocument(getIndexNameForTest(), "1", TEST_FIELD, TEXT_2, null, null); + addSparseEncodingDoc( + getIndexNameForTest(), + "1", + List.of(TEST_SPARSE_ENCODING_FIELD), + List.of(testRankFeaturesDoc2), + List.of(TEST_TEXT_FIELD), + List.of(TEXT_2) + ); 
validateTestIndex(modelId); deletePipeline(PIPELINE_NAME); deleteModel(modelId); @@ -52,14 +75,18 @@ public void testSparseEncodingProcessor_E2EFlow() throws Exception { private void validateTestIndex(String modelId) throws Exception { int docCount = getDocCount(getIndexNameForTest()); assertEquals(2, docCount); - NeuralSparseQueryBuilder neuralSparseQueryBuilder = new NeuralSparseQueryBuilder(); - neuralSparseQueryBuilder.fieldName("passage_embedding"); - neuralSparseQueryBuilder.queryText(QUERY); - neuralSparseQueryBuilder.modelId(modelId); - Map response = search(getIndexNameForTest(), neuralSparseQueryBuilder, 1); - assertNotNull(response); - int hits = getHitCount(response); - assertEquals(2, hits); + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + NeuralSparseQueryBuilder sparseEncodingQueryBuilder = new NeuralSparseQueryBuilder().fieldName(TEST_SPARSE_ENCODING_FIELD) + .queryText(TEXT_1) + .modelId(modelId); + MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder(TEST_TEXT_FIELD, TEXT_1); + boolQueryBuilder.should(sparseEncodingQueryBuilder).should(matchQueryBuilder); + Map response = search(getIndexNameForTest(), boolQueryBuilder, 1); + Map firstInnerHit = getFirstInnerHit(response); + + assertEquals("0", firstInnerHit.get("_id")); + float minExpectedScore = computeExpectedScore(modelId, testRankFeaturesDoc1, TEXT_1); + assertTrue(minExpectedScore < objectToFloat(firstInnerHit.get("_score"))); } private String uploadTextEmbeddingModel() throws Exception { diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 10270e8d5..7356f4401 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -39,6 +39,7 @@ task testAgainstOldCluster(type: StandaloneRestIntegTestTask) { filter { excludeTestsMatching "org.opensearch.neuralsearch.bwc.MultiModalSearchIT.*" excludeTestsMatching "org.opensearch.neuralsearch.bwc.HybridSearchIT.*" + excludeTestsMatching
"org.opensearch.neuralsearch.bwc.NeuralSparseSearchIT.*" } } @@ -65,6 +66,7 @@ task testAgainstOneThirdUpgradedCluster(type: StandaloneRestIntegTestTask) { filter { excludeTestsMatching "org.opensearch.neuralsearch.bwc.MultiModalSearchIT.*" excludeTestsMatching "org.opensearch.neuralsearch.bwc.HybridSearchIT.*" + excludeTestsMatching "org.opensearch.neuralsearch.bwc.NeuralSparseSearchIT.*" } } @@ -90,6 +92,7 @@ task testAgainstTwoThirdsUpgradedCluster(type: StandaloneRestIntegTestTask) { filter { excludeTestsMatching "org.opensearch.neuralsearch.bwc.MultiModalSearchIT.*" excludeTestsMatching "org.opensearch.neuralsearch.bwc.HybridSearchIT.*" + excludeTestsMatching "org.opensearch.neuralsearch.bwc.NeuralSparseSearchIT.*" } } @@ -115,6 +118,7 @@ task testRollingUpgrade(type: StandaloneRestIntegTestTask) { filter { excludeTestsMatching "org.opensearch.neuralsearch.bwc.MultiModalSearchIT.*" excludeTestsMatching "org.opensearch.neuralsearch.bwc.HybridSearchIT.*" + excludeTestsMatching "org.opensearch.neuralsearch.bwc.NeuralSparseSearchIT.*" } } diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/AbstractRollingUpgradeTestCase.java b/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/AbstractRollingUpgradeTestCase.java index 98ce95b72..bcb4bc584 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/AbstractRollingUpgradeTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/AbstractRollingUpgradeTestCase.java @@ -8,7 +8,7 @@ import java.util.Optional; import org.junit.Before; import org.opensearch.common.settings.Settings; -import org.opensearch.neuralsearch.BaseNeuralSearchIT; +import org.opensearch.neuralsearch.BaseSparseEncodingIT; import org.opensearch.test.rest.OpenSearchRestTestCase; import static org.opensearch.neuralsearch.TestUtils.OLD_CLUSTER; import static org.opensearch.neuralsearch.TestUtils.MIXED_CLUSTER; @@ -18,7 +18,7 @@ import static 
org.opensearch.neuralsearch.TestUtils.BWCSUITE_CLUSTER; import static org.opensearch.neuralsearch.TestUtils.NEURAL_SEARCH_BWC_PREFIX; -public abstract class AbstractRollingUpgradeTestCase extends BaseNeuralSearchIT { +public abstract class AbstractRollingUpgradeTestCase extends BaseSparseEncodingIT { @Before protected String getIndexNameForTest() { diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/HybridSearchIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/HybridSearchIT.java index 79b3192ad..9e55a940a 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/HybridSearchIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/HybridSearchIT.java @@ -8,6 +8,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; +import java.util.List; import java.util.Map; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.neuralsearch.TestUtils; @@ -30,10 +31,10 @@ public class HybridSearchIT extends AbstractRollingUpgradeTestCase { private static final String QUERY = "Hi world"; private static final int NUM_DOCS_PER_ROUND = 1; - // Test rolling-upgrade Hybrid Search - // Create Text Embedding Processor, Ingestion Pipeline, add document and search pipeline with normalization processor + // Test rolling-upgrade normalization processor when index with multiple shards + // Create Text Embedding Processor, Ingestion Pipeline, add document and search pipeline with normalization processor // Validate process , pipeline and document count in rolling-upgrade scenario - public void testNormalizationProcessor_E2EFlow() throws Exception { + public void testNormalizationProcessor_whenIndexWithMultipleShards_E2EFlow() throws Exception { waitForClusterHealthGreen(NODES_BWC_CLUSTER); switch (getClusterType()) { case OLD: @@ -92,6 +93,12 @@ private void validateTestIndexOnUpgrade(int numberOfDocs, String modelId) throws Map.of("search_pipeline",
SEARCH_PIPELINE_NAME) ); assertNotNull(searchResponseAsMap); + int hits = getHitCount(searchResponseAsMap); + assertEquals(1, hits); + List scoresList = getNormalizationScoreList(searchResponseAsMap); + for (Double score : scoresList) { + assertTrue(0 < score && score < 1); + } } private String uploadTextEmbeddingModel() throws Exception { diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/NeuralSparseSearchIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/NeuralSparseSearchIT.java index 0d3112fb9..ef30fe7a7 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/NeuralSparseSearchIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/neuralsearch/bwc/NeuralSparseSearchIT.java @@ -7,20 +7,31 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import java.nio.file.Files; import java.nio.file.Path; +import java.util.List; import java.util.Map; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.neuralsearch.TestUtils; import static org.opensearch.neuralsearch.TestUtils.NODES_BWC_CLUSTER; import static org.opensearch.neuralsearch.TestUtils.SPARSE_ENCODING_PROCESSOR; +import static org.opensearch.neuralsearch.TestUtils.objectToFloat; import org.opensearch.neuralsearch.query.NeuralSparseQueryBuilder; public class NeuralSparseSearchIT extends AbstractRollingUpgradeTestCase { private static final String PIPELINE_NAME = "nlp-ingest-pipeline-sparse"; - private static final String TEST_FIELD = "passage_text"; - private static final String TEXT = "Hello world"; - private static final String TEXT_MIXED = "Hi planet"; - private static final String TEXT_UPGRADED = "Hi earth"; - private static final int NUM_DOCS_PER_ROUND = 1; + private static final String TEST_SPARSE_ENCODING_FIELD = "passage_embedding"; + private static final String TEST_TEXT_FIELD = "passage_text"; + private static final String TEXT = "Hello 
world a b"; + private static final String TEXT_MIXED = "Hello planet"; + private static final String TEXT_UPGRADED = "Hello earth"; private static final String QUERY = "Hi world"; + private static final List TEST_TOKENS_1 = List.of("hello", "world", "a", "b", "c"); + private static final List TEST_TOKENS_2 = List.of("hello", "planet", "a", "b", "c"); + private static final List TEST_TOKENS_3 = List.of("hello", "earth", "a", "b", "c"); + private final Map testRankFeaturesDoc1 = TestUtils.createRandomTokenWeightMap(TEST_TOKENS_1); + private final Map testRankFeaturesDoc2 = TestUtils.createRandomTokenWeightMap(TEST_TOKENS_2); + private final Map testRankFeaturesDoc3 = TestUtils.createRandomTokenWeightMap(TEST_TOKENS_3); + private static final int NUM_DOCS_PER_ROUND = 1; // Test rolling-upgrade test sparse embedding processor // Create Sparse Encoding Processor, Ingestion Pipeline and add document @@ -37,7 +48,14 @@ public void testSparseEncodingProcessor_E2EFlow() throws Exception { Files.readString(Path.of(classLoader.getResource("processor/SparseIndexMappings.json").toURI())), PIPELINE_NAME ); - addDocument(getIndexNameForTest(), "0", TEST_FIELD, TEXT, null, null); + addSparseEncodingDoc( + getIndexNameForTest(), + "0", + List.of(TEST_SPARSE_ENCODING_FIELD), + List.of(testRankFeaturesDoc1), + List.of(TEST_TEXT_FIELD), + List.of(TEXT) + ); break; case MIXED: modelId = getModelId(PIPELINE_NAME); @@ -45,7 +63,14 @@ public void testSparseEncodingProcessor_E2EFlow() throws Exception { if (isFirstMixedRound()) { totalDocsCountMixed = NUM_DOCS_PER_ROUND; validateTestIndexOnUpgrade(totalDocsCountMixed, modelId); - addDocument(getIndexNameForTest(), "1", TEST_FIELD, TEXT_MIXED, null, null); + addSparseEncodingDoc( + getIndexNameForTest(), + "1", + List.of(TEST_SPARSE_ENCODING_FIELD), + List.of(testRankFeaturesDoc2), + List.of(TEST_TEXT_FIELD), + List.of(TEXT_MIXED) + ); } else { totalDocsCountMixed = 2 * NUM_DOCS_PER_ROUND; validateTestIndexOnUpgrade(totalDocsCountMixed, 
modelId); @@ -55,7 +80,14 @@ public void testSparseEncodingProcessor_E2EFlow() throws Exception { modelId = getModelId(PIPELINE_NAME); int totalDocsCountUpgraded = 3 * NUM_DOCS_PER_ROUND; loadModel(modelId); - addDocument(getIndexNameForTest(), "2", TEST_FIELD, TEXT_UPGRADED, null, null); + addSparseEncodingDoc( + getIndexNameForTest(), + "2", + List.of(TEST_SPARSE_ENCODING_FIELD), + List.of(testRankFeaturesDoc3), + List.of(TEST_TEXT_FIELD), + List.of(TEXT_UPGRADED) + ); validateTestIndexOnUpgrade(totalDocsCountUpgraded, modelId); deletePipeline(PIPELINE_NAME); deleteModel(modelId); @@ -68,12 +100,18 @@ private void validateTestIndexOnUpgrade(int numberOfDocs, String modelId) throws int docCount = getDocCount(getIndexNameForTest()); assertEquals(numberOfDocs, docCount); loadModel(modelId); - NeuralSparseQueryBuilder neuralSparseQueryBuilder = new NeuralSparseQueryBuilder(); - neuralSparseQueryBuilder.fieldName("passage_embedding"); - neuralSparseQueryBuilder.queryText(QUERY); - neuralSparseQueryBuilder.modelId(modelId); - Map response = search(getIndexNameForTest(), neuralSparseQueryBuilder, 1); - assertNotNull(response); + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + NeuralSparseQueryBuilder sparseEncodingQueryBuilder = new NeuralSparseQueryBuilder().fieldName(TEST_SPARSE_ENCODING_FIELD) + .queryText(TEXT) + .modelId(modelId); + MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder(TEST_TEXT_FIELD, TEXT); + boolQueryBuilder.should(sparseEncodingQueryBuilder).should(matchQueryBuilder); + Map response = search(getIndexNameForTest(), boolQueryBuilder, 1); + Map firstInnerHit = getFirstInnerHit(response); + + assertEquals("0", firstInnerHit.get("_id")); + float minExpectedScore = computeExpectedScore(modelId, testRankFeaturesDoc1, TEXT); + assertTrue(minExpectedScore < objectToFloat(firstInnerHit.get("_score"))); } private String uploadTextEmbeddingModel() throws Exception {